From e3dcfa1b8944ab48326566538795a15344934f65 Mon Sep 17 00:00:00 2001 From: guopeian Date: Thu, 31 Aug 2023 10:52:39 +0800 Subject: [PATCH] Fix opensdk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CMakeLists.txt | 6 +- inc/external/acl/acl.h | 76 - inc/external/acl/acl_base.h | 642 ------ inc/external/acl/acl_mdl.h | 1256 ------------ inc/external/acl/acl_op.h | 570 ------ inc/external/acl/acl_op_compiler.h | 126 -- inc/external/acl/acl_rt.h | 1084 ---------- inc/external/acl/acl_tdt.h | 307 --- inc/external/acl/acl_tdt_queue.h | 445 ---- inc/external/acl/error_codes/ge_error_codes.h | 75 - inc/external/acl/error_codes/rt_error_codes.h | 105 - inc/external/hccl/hccl_types.h | 96 - inc/graphengine/inc/external/ge/ge_api.h | 199 -- .../inc/external/ge/ge_api_error_codes.h | 133 -- .../inc/external/ge/ge_api_types.h | 512 ----- .../inc/external/ge/ge_error_codes.h | 75 - inc/graphengine/inc/external/ge/ge_ir_build.h | 159 -- .../inc/framework/common/aicpu_op.h | 22 - .../inc/framework/common/debug/ge_log.h | 101 - .../inc/framework/common/debug/log.h | 294 --- .../inc/framework/common/fmk_error_codes.h | 99 - .../inc/framework/common/fmk_types.h | 23 - .../framework/common/ge_compiler_options.h | 32 - .../inc/framework/common/ge_format_util.h | 40 - .../framework/common/ge_inner_error_codes.h | 319 --- .../inc/framework/common/ge_types.h | 303 --- .../framework/common/helper/model_helper.h | 102 - .../framework/common/helper/om_file_helper.h | 109 - .../inc/framework/common/l2_cache_optimize.h | 123 -- .../inc/framework/common/op/attr_value_util.h | 174 -- .../inc/framework/common/op/ge_op_utils.h | 149 -- .../inc/framework/common/op/op_parser_util.h | 419 ---- .../inc/framework/common/op_types.h | 62 - .../framework/common/profiling/ge_profiling.h | 46 - .../common/profiling/ge_runner_profiling.h | 24 - .../inc/framework/common/scope_guard.h | 59 - .../inc/framework/common/string_util.h | 174 -- .../inc/framework/common/taskdown_common.h | 71 - inc/graphengine/inc/framework/common/types.h | 1115 ---------- inc/graphengine/inc/framework/common/util.h | 424 ---- .../inc/framework/engine/dnnengine.h | 57 - .../inc/framework/executor/ge_executor.h | 298 --- .../inc/framework/ge_runtime/davinci_model.h | 113 - .../inc/framework/ge_runtime/model_runner.h | 68 - .../inc/framework/ge_runtime/op_info.h | 72 - .../inc/framework/ge_runtime/task_info.h | 405 ---- .../inc/framework/generator/ge_generator.h | 119 -- .../inc/framework/generator/generator_api.h | 187 -- .../inc/framework/memory/memory_api.h | 72 - .../inc/framework/memory/memory_assigner.h | 42 - inc/graphengine/inc/framework/omg/ge_init.h | 36 - .../inc/framework/omg/model_tool.h | 35 - inc/graphengine/inc/framework/omg/omg.h | 123 -- .../inc/framework/omg/omg_inner_types.h | 149 -- inc/graphengine/inc/framework/omg/omg_types.h | 22 - .../inc/framework/omg/parser/model_parser.h | 183 -- .../inc/framework/omg/parser/op_parser.h | 92 - .../inc/framework/omg/parser/parser_api.h | 31 - .../inc/framework/omg/parser/parser_factory.h | 138 -- .../framework/omg/parser/parser_inner_ctx.h | 74 - .../inc/framework/omg/parser/parser_types.h | 510 ----- .../inc/framework/omg/parser/weights_parser.h | 74 - inc/graphengine/inc/framework/omg/version.h | 45 - inc/hccl/base.h | 99 - inc/hccl/hcom.h | 287 --- inc/metadef/inc/common/blocking_queue.h | 155 -- inc/metadef/inc/common/dynamic_aipp.h | 104 - .../common/fe_executor/ffts_plus_qos_update.h | 30 - 
inc/metadef/inc/common/npu_error_define.h | 94 - .../inc/common/opskernel/ge_task_info.h | 76 - .../inc/common/opskernel/ops_kernel_builder.h | 67 - .../common/opskernel/ops_kernel_info_store.h | 92 - .../common/opskernel/ops_kernel_info_types.h | 67 - .../inc/common/optimizer/graph_optimizer.h | 80 - .../common/optimizer/graph_optimizer_types.h | 35 - inc/metadef/inc/common/proto/dump_task.proto | 113 - .../inc/common/proto/fusion_model.proto | 21 - .../inc/common/proto/fwk_adapter.proto | 37 - inc/metadef/inc/common/proto/ge_ir.proto | 191 -- inc/metadef/inc/common/proto/insert_op.proto | 139 -- inc/metadef/inc/common/proto/om.proto | 396 ---- .../inc/common/proto/op_mapping_info.proto | 75 - .../common/proto/proto_inner/ge_onnx.proto | 563 ----- inc/metadef/inc/common/proto/task.proto | 179 -- .../aicore_manager/aicore_util_manager.h | 48 - .../ai_core/common/aicore_util_attr_define.h | 49 - .../ai_core/common/aicore_util_constants.h | 55 - .../util/ai_core/common/aicore_util_types.h | 147 -- .../common/util/ai_core/common/graph_comm.h | 128 -- .../common/util/ai_core/common/json_util.h | 54 - .../util/ai_core/common/l2_stream_info.h | 44 - .../util/ai_core/common/scope_allocator.h | 42 - .../param_calculate/tensorsize_calculator.h | 47 - .../inc/common/util/compress/compress.h | 44 - .../common/util/compress/compress_weight.h | 35 - .../common/util/error_manager/error_manager.h | 252 --- inc/metadef/inc/common/util/platform_info.h | 144 -- .../inc/common/util/platform_info_def.h | 142 -- .../inc/common/util/platform_infos_def.h | 63 - .../inc/external/graph/ascend_string.h | 62 - inc/metadef/inc/external/graph/attr_value.h | 78 - .../inc/external/graph/ge_error_codes.h | 46 - inc/metadef/inc/external/graph/gnode.h | 129 -- inc/metadef/inc/external/graph/graph.h | 130 -- .../inc/external/graph/inference_context.h | 136 -- inc/metadef/inc/external/graph/operator.h | 459 ----- .../inc/external/graph/operator_factory.h | 100 - inc/metadef/inc/external/graph/operator_reg.h | 561 ----- .../inc/external/graph/resource_context.h | 27 - inc/metadef/inc/external/graph/tensor.h | 150 -- inc/metadef/inc/external/graph/types.h | 319 --- .../inc/external/register/op_tiling_info.h | 160 -- .../external/register/op_tiling_registry.h | 151 -- inc/metadef/inc/external/register/register.h | 228 --- .../external/register/register_error_codes.h | 39 - .../external/register/register_fmk_types.h | 37 - .../inc/external/register/register_pass.h | 65 - .../inc/external/register/register_types.h | 62 - .../scope/scope_fusion_pass_register.h | 401 ---- inc/metadef/inc/graph/aligned_ptr.h | 49 - inc/metadef/inc/graph/anchor.h | 284 --- inc/metadef/inc/graph/any_value.h | 341 --- inc/metadef/inc/graph/ascend_limits.h | 25 - inc/metadef/inc/graph/attr_store.h | 202 -- .../inc/graph/attr_value_serializable.h | 25 - inc/metadef/inc/graph/buffer.h | 79 - inc/metadef/inc/graph/common_error_codes.h | 28 - inc/metadef/inc/graph/compiler_options.h | 47 - inc/metadef/inc/graph/compute_graph.h | 277 --- inc/metadef/inc/graph/debug/ge_attr_define.h | 1303 ------------ inc/metadef/inc/graph/def_types.h | 57 - inc/metadef/inc/graph/detail/any_map.h | 130 -- .../inc/graph/detail/attributes_holder.h | 172 -- .../inc/graph/detail/model_serialize_imp.h | 105 - inc/metadef/inc/graph/ge_attr_value.h | 75 - inc/metadef/inc/graph/ge_context.h | 51 - inc/metadef/inc/graph/ge_global_options.h | 25 - inc/metadef/inc/graph/ge_local_context.h | 48 - inc/metadef/inc/graph/ge_tensor.h | 316 --- inc/metadef/inc/graph/graph_util.h | 134 -- 
inc/metadef/inc/graph/model.h | 94 - inc/metadef/inc/graph/model_serialize.h | 51 - inc/metadef/inc/graph/node.h | 195 -- inc/metadef/inc/graph/op_desc.h | 320 --- inc/metadef/inc/graph/op_kernel_bin.h | 49 - inc/metadef/inc/graph/operator_factory_impl.h | 93 - inc/metadef/inc/graph/opsproto_manager.h | 43 - inc/metadef/inc/graph/range_vistor.h | 55 - inc/metadef/inc/graph/ref_relation.h | 79 - inc/metadef/inc/graph/repeated_iterator.h | 65 - inc/metadef/inc/graph/resource_context_mgr.h | 70 - .../inc/graph/runtime_inference_context.h | 49 - inc/metadef/inc/graph/shape_refiner.h | 53 - inc/metadef/inc/graph/small_vector.h | 515 ----- inc/metadef/inc/graph/tuning_utils.h | 133 -- inc/metadef/inc/graph/type_utils.h | 118 -- inc/metadef/inc/graph/usr_types.h | 134 -- inc/metadef/inc/graph/utils/anchor_utils.h | 45 - inc/metadef/inc/graph/utils/attr_utils.h | 155 -- inc/metadef/inc/graph/utils/constant_utils.h | 47 - .../inc/graph/utils/ffts_graph_utils.h | 91 - inc/metadef/inc/graph/utils/file_utils.h | 41 - inc/metadef/inc/graph/utils/graph_utils.h | 832 -------- inc/metadef/inc/graph/utils/graph_utils_ex.h | 47 - inc/metadef/inc/graph/utils/node_adapter.h | 32 - inc/metadef/inc/graph/utils/node_utils.h | 222 -- inc/metadef/inc/graph/utils/op_desc_utils.h | 186 -- inc/metadef/inc/graph/utils/tensor_adapter.h | 45 - inc/metadef/inc/graph/utils/tensor_utils.h | 80 - inc/metadef/inc/graph/utils/type_utils.h | 53 - inc/metadef/inc/register/custom_pass_helper.h | 49 - .../inc/register/ffts_plus_task_update.h | 64 - .../buffer_fusion/buffer_fusion_constant.h | 92 - .../buffer_fusion/buffer_fusion_pass_base.h | 72 - .../buffer_fusion_pass_registry.h | 62 - .../buffer_fusion/buffer_fusion_pattern.h | 103 - .../fusion_common/aicore_util_types.h | 176 -- .../fusion_common/fusion_statistic_recorder.h | 137 -- .../fusion_common/graph_pass_util.h | 267 --- .../fusion_common/op_slice_info.h | 189 -- .../fusion_common/pattern_fusion_base_pass.h | 121 -- .../fusion_pass_registry.h | 65 - .../graph_fusion/fusion_pattern.h | 171 -- .../graph_fusion/graph_fusion_pass_base.h | 113 - .../graph_optimizer/graph_fusion/graph_pass.h | 43 - .../graph_optimizer/graph_fusion/pass.h | 55 - .../graph_optimize_register_error_codes.h | 54 - inc/metadef/inc/register/host_cpu_context.h | 39 - .../inc/register/infer_data_slice_registry.h | 48 - inc/metadef/inc/register/op_kernel_registry.h | 46 - inc/metadef/inc/register/op_registry.h | 97 - inc/metadef/inc/register/op_tiling.h | 29 - .../register/ops_kernel_builder_registry.h | 69 - .../inc/register/proto/caffe/caffe.proto | 1821 ----------------- .../inc/register/proto/dump_task.proto | 113 - .../inc/register/proto/fusion_model.proto | 21 - .../inc/register/proto/fwk_adapter.proto | 37 - inc/metadef/inc/register/proto/ge_ir.proto | 191 -- .../inc/register/proto/insert_op.proto | 139 -- inc/metadef/inc/register/proto/om.proto | 396 ---- .../inc/register/proto/onnx/ge_onnx.proto | 563 ----- .../inc/register/proto/op_mapping_info.proto | 75 - .../register/proto/proto_inner/ge_onnx.proto | 563 ----- inc/metadef/inc/register/proto/task.proto | 179 -- .../proto/tensorflow/attr_value.proto | 62 - .../register/proto/tensorflow/function.proto | 100 - .../inc/register/proto/tensorflow/graph.proto | 56 - .../proto/tensorflow/graph_library.proto | 14 - .../register/proto/tensorflow/node_def.proto | 63 - .../register/proto/tensorflow/op_def.proto | 164 -- .../proto/tensorflow/resource_handle.proto | 29 - .../register/proto/tensorflow/tensor.proto | 94 - 
.../proto/tensorflow/tensor_shape.proto | 45 - .../inc/register/proto/tensorflow/types.proto | 74 - .../register/proto/tensorflow/versions.proto | 31 - .../inc/register/prototype_pass_registry.h | 69 - inc/metadef/inc/register/register.h | 54 - .../inc/register/register_format_transfer.h | 79 - .../inc/register/scope/scope_graph_impl.h | 198 -- .../inc/register/scope/scope_pass_impl.h | 61 - .../register/scope/scope_pass_registry_impl.h | 40 - .../inc/register/scope/scope_pattern_impl.h | 106 - inc/metadef/inc/register/tensor_assign.h | 103 - inc/mmpa/mmpa_api.h | 141 -- inc/mmpa/sub_inc/mmpa_linux.h | 561 ----- inc/mmpa/sub_inc/mmpa_typedef_linux.h | 98 - inc/mmpa/sub_inc/mmpa_typedef_win.h | 83 - inc/mmpa/sub_inc/mmpa_win.h | 566 ----- inc/parser/inc/external/parser/onnx_parser.h | 47 - inc/runtime/base.h | 358 ---- inc/runtime/config.h | 255 --- inc/runtime/context.h | 165 -- inc/runtime/dev.h | 364 ---- inc/runtime/dvfsprofile.h | 63 - inc/runtime/event.h | 246 --- inc/runtime/kernel.h | 566 ----- inc/runtime/mem.h | 548 ----- inc/runtime/rt.h | 31 - inc/runtime/rt_mem_queue.h | 260 --- inc/runtime/rt_model.h | 456 ----- inc/runtime/stream.h | 196 -- inc/tdt/data_common.h | 99 - inc/tdt/status.h | 755 ------- inc/tdt/tdt_host_interface.h | 211 -- inc/tdt/tsd_client.h | 161 -- inc/toolchain/adx_datadump_server.h | 36 - inc/toolchain/plog.h | 59 - inc/toolchain/prof_callback.h | 133 -- inc/toolchain/prof_engine.h | 207 -- inc/toolchain/prof_mgr_core.h | 84 - inc/toolchain/prof_reporter.h | 85 - inc/toolchain/slog.h | 510 ----- inc/toolchain/tuning_tool/aoe_tuning_api.h | 84 - inc/toolchain/tuning_tool/aoe_types.h | 44 - tf_adapter/kernels/geop_npu.h | 8 +- tf_adapter/tests/depends/aoe/CMakeLists.txt | 3 +- tf_adapter/tests/depends/aoe/src/aoe_stub.cc | 30 +- .../tests/depends/ge_runner/CMakeLists.txt | 8 - .../tests/st/util/testcase/util_test.cc | 2 +- .../tests/ut/util/testcase/util_test.cc | 2 +- tf_adapter/util/mbuf_allocator.cc | 2 +- tf_adapter/util/util.cc | 2 +- tf_adapter/util/util.h | 2 +- tf_adapter_2.x/cmake/aoe/module.cmake | 8 +- .../cmake/graph_engine/module.cmake | 8 +- tf_adapter_2.x/compat_v1/CMakeLists.txt | 23 +- tf_adapter_2.x/npu_device/core/npu_aoe.cpp | 3 +- tf_adapter_2.x/npu_device/core/npu_aoe.h | 8 +- tf_adapter_2.x/tests/CMakeLists.txt | 6 +- tf_adapter_2.x/tests/cmake/acl/module.cmake | 2 +- tf_adapter_2.x/tests/cmake/aoe/module.cmake | 3 - tf_adapter_2.x/tests/configure.py | 27 + tf_adapter_2.x/tests/stub/aoe_stub.cpp | 28 +- 273 files changed, 108 insertions(+), 43634 deletions(-) delete mode 100644 inc/external/acl/acl.h delete mode 100644 inc/external/acl/acl_base.h delete mode 100644 inc/external/acl/acl_mdl.h delete mode 100644 inc/external/acl/acl_op.h delete mode 100644 inc/external/acl/acl_op_compiler.h delete mode 100644 inc/external/acl/acl_rt.h delete mode 100644 inc/external/acl/acl_tdt.h delete mode 100644 inc/external/acl/acl_tdt_queue.h delete mode 100644 inc/external/acl/error_codes/ge_error_codes.h delete mode 100644 inc/external/acl/error_codes/rt_error_codes.h delete mode 100644 inc/external/hccl/hccl_types.h delete mode 100644 inc/graphengine/inc/external/ge/ge_api.h delete mode 100644 inc/graphengine/inc/external/ge/ge_api_error_codes.h delete mode 100644 inc/graphengine/inc/external/ge/ge_api_types.h delete mode 100644 inc/graphengine/inc/external/ge/ge_error_codes.h delete mode 100644 inc/graphengine/inc/external/ge/ge_ir_build.h delete mode 100644 inc/graphengine/inc/framework/common/aicpu_op.h delete mode 100644 
inc/graphengine/inc/framework/common/debug/ge_log.h delete mode 100644 inc/graphengine/inc/framework/common/debug/log.h delete mode 100644 inc/graphengine/inc/framework/common/fmk_error_codes.h delete mode 100644 inc/graphengine/inc/framework/common/fmk_types.h delete mode 100644 inc/graphengine/inc/framework/common/ge_compiler_options.h delete mode 100644 inc/graphengine/inc/framework/common/ge_format_util.h delete mode 100644 inc/graphengine/inc/framework/common/ge_inner_error_codes.h delete mode 100644 inc/graphengine/inc/framework/common/ge_types.h delete mode 100644 inc/graphengine/inc/framework/common/helper/model_helper.h delete mode 100644 inc/graphengine/inc/framework/common/helper/om_file_helper.h delete mode 100644 inc/graphengine/inc/framework/common/l2_cache_optimize.h delete mode 100644 inc/graphengine/inc/framework/common/op/attr_value_util.h delete mode 100644 inc/graphengine/inc/framework/common/op/ge_op_utils.h delete mode 100644 inc/graphengine/inc/framework/common/op/op_parser_util.h delete mode 100644 inc/graphengine/inc/framework/common/op_types.h delete mode 100644 inc/graphengine/inc/framework/common/profiling/ge_profiling.h delete mode 100644 inc/graphengine/inc/framework/common/profiling/ge_runner_profiling.h delete mode 100644 inc/graphengine/inc/framework/common/scope_guard.h delete mode 100644 inc/graphengine/inc/framework/common/string_util.h delete mode 100644 inc/graphengine/inc/framework/common/taskdown_common.h delete mode 100644 inc/graphengine/inc/framework/common/types.h delete mode 100644 inc/graphengine/inc/framework/common/util.h delete mode 100644 inc/graphengine/inc/framework/engine/dnnengine.h delete mode 100644 inc/graphengine/inc/framework/executor/ge_executor.h delete mode 100644 inc/graphengine/inc/framework/ge_runtime/davinci_model.h delete mode 100644 inc/graphengine/inc/framework/ge_runtime/model_runner.h delete mode 100644 inc/graphengine/inc/framework/ge_runtime/op_info.h delete mode 100644 inc/graphengine/inc/framework/ge_runtime/task_info.h delete mode 100644 inc/graphengine/inc/framework/generator/ge_generator.h delete mode 100644 inc/graphengine/inc/framework/generator/generator_api.h delete mode 100644 inc/graphengine/inc/framework/memory/memory_api.h delete mode 100644 inc/graphengine/inc/framework/memory/memory_assigner.h delete mode 100644 inc/graphengine/inc/framework/omg/ge_init.h delete mode 100644 inc/graphengine/inc/framework/omg/model_tool.h delete mode 100644 inc/graphengine/inc/framework/omg/omg.h delete mode 100644 inc/graphengine/inc/framework/omg/omg_inner_types.h delete mode 100644 inc/graphengine/inc/framework/omg/omg_types.h delete mode 100644 inc/graphengine/inc/framework/omg/parser/model_parser.h delete mode 100644 inc/graphengine/inc/framework/omg/parser/op_parser.h delete mode 100644 inc/graphengine/inc/framework/omg/parser/parser_api.h delete mode 100644 inc/graphengine/inc/framework/omg/parser/parser_factory.h delete mode 100644 inc/graphengine/inc/framework/omg/parser/parser_inner_ctx.h delete mode 100644 inc/graphengine/inc/framework/omg/parser/parser_types.h delete mode 100644 inc/graphengine/inc/framework/omg/parser/weights_parser.h delete mode 100644 inc/graphengine/inc/framework/omg/version.h delete mode 100644 inc/hccl/base.h delete mode 100644 inc/hccl/hcom.h delete mode 100644 inc/metadef/inc/common/blocking_queue.h delete mode 100644 inc/metadef/inc/common/dynamic_aipp.h delete mode 100644 inc/metadef/inc/common/fe_executor/ffts_plus_qos_update.h delete mode 100644 
inc/metadef/inc/common/npu_error_define.h delete mode 100644 inc/metadef/inc/common/opskernel/ge_task_info.h delete mode 100644 inc/metadef/inc/common/opskernel/ops_kernel_builder.h delete mode 100644 inc/metadef/inc/common/opskernel/ops_kernel_info_store.h delete mode 100644 inc/metadef/inc/common/opskernel/ops_kernel_info_types.h delete mode 100644 inc/metadef/inc/common/optimizer/graph_optimizer.h delete mode 100644 inc/metadef/inc/common/optimizer/graph_optimizer_types.h delete mode 100644 inc/metadef/inc/common/proto/dump_task.proto delete mode 100644 inc/metadef/inc/common/proto/fusion_model.proto delete mode 100644 inc/metadef/inc/common/proto/fwk_adapter.proto delete mode 100644 inc/metadef/inc/common/proto/ge_ir.proto delete mode 100644 inc/metadef/inc/common/proto/insert_op.proto delete mode 100644 inc/metadef/inc/common/proto/om.proto delete mode 100644 inc/metadef/inc/common/proto/op_mapping_info.proto delete mode 100644 inc/metadef/inc/common/proto/proto_inner/ge_onnx.proto delete mode 100644 inc/metadef/inc/common/proto/task.proto delete mode 100644 inc/metadef/inc/common/util/ai_core/aicore_manager/aicore_util_manager.h delete mode 100644 inc/metadef/inc/common/util/ai_core/common/aicore_util_attr_define.h delete mode 100644 inc/metadef/inc/common/util/ai_core/common/aicore_util_constants.h delete mode 100644 inc/metadef/inc/common/util/ai_core/common/aicore_util_types.h delete mode 100644 inc/metadef/inc/common/util/ai_core/common/graph_comm.h delete mode 100644 inc/metadef/inc/common/util/ai_core/common/json_util.h delete mode 100644 inc/metadef/inc/common/util/ai_core/common/l2_stream_info.h delete mode 100644 inc/metadef/inc/common/util/ai_core/common/scope_allocator.h delete mode 100644 inc/metadef/inc/common/util/ai_core/param_calculate/tensorsize_calculator.h delete mode 100644 inc/metadef/inc/common/util/compress/compress.h delete mode 100644 inc/metadef/inc/common/util/compress/compress_weight.h delete mode 100644 inc/metadef/inc/common/util/error_manager/error_manager.h delete mode 100644 inc/metadef/inc/common/util/platform_info.h delete mode 100644 inc/metadef/inc/common/util/platform_info_def.h delete mode 100644 inc/metadef/inc/common/util/platform_infos_def.h delete mode 100644 inc/metadef/inc/external/graph/ascend_string.h delete mode 100644 inc/metadef/inc/external/graph/attr_value.h delete mode 100644 inc/metadef/inc/external/graph/ge_error_codes.h delete mode 100644 inc/metadef/inc/external/graph/gnode.h delete mode 100644 inc/metadef/inc/external/graph/graph.h delete mode 100644 inc/metadef/inc/external/graph/inference_context.h delete mode 100644 inc/metadef/inc/external/graph/operator.h delete mode 100644 inc/metadef/inc/external/graph/operator_factory.h delete mode 100644 inc/metadef/inc/external/graph/operator_reg.h delete mode 100644 inc/metadef/inc/external/graph/resource_context.h delete mode 100644 inc/metadef/inc/external/graph/tensor.h delete mode 100644 inc/metadef/inc/external/graph/types.h delete mode 100644 inc/metadef/inc/external/register/op_tiling_info.h delete mode 100644 inc/metadef/inc/external/register/op_tiling_registry.h delete mode 100644 inc/metadef/inc/external/register/register.h delete mode 100644 inc/metadef/inc/external/register/register_error_codes.h delete mode 100644 inc/metadef/inc/external/register/register_fmk_types.h delete mode 100644 inc/metadef/inc/external/register/register_pass.h delete mode 100644 inc/metadef/inc/external/register/register_types.h delete mode 100644 
inc/metadef/inc/external/register/scope/scope_fusion_pass_register.h delete mode 100644 inc/metadef/inc/graph/aligned_ptr.h delete mode 100644 inc/metadef/inc/graph/anchor.h delete mode 100644 inc/metadef/inc/graph/any_value.h delete mode 100644 inc/metadef/inc/graph/ascend_limits.h delete mode 100644 inc/metadef/inc/graph/attr_store.h delete mode 100644 inc/metadef/inc/graph/attr_value_serializable.h delete mode 100644 inc/metadef/inc/graph/buffer.h delete mode 100644 inc/metadef/inc/graph/common_error_codes.h delete mode 100644 inc/metadef/inc/graph/compiler_options.h delete mode 100644 inc/metadef/inc/graph/compute_graph.h delete mode 100644 inc/metadef/inc/graph/debug/ge_attr_define.h delete mode 100644 inc/metadef/inc/graph/def_types.h delete mode 100644 inc/metadef/inc/graph/detail/any_map.h delete mode 100644 inc/metadef/inc/graph/detail/attributes_holder.h delete mode 100644 inc/metadef/inc/graph/detail/model_serialize_imp.h delete mode 100644 inc/metadef/inc/graph/ge_attr_value.h delete mode 100644 inc/metadef/inc/graph/ge_context.h delete mode 100644 inc/metadef/inc/graph/ge_global_options.h delete mode 100644 inc/metadef/inc/graph/ge_local_context.h delete mode 100644 inc/metadef/inc/graph/ge_tensor.h delete mode 100644 inc/metadef/inc/graph/graph_util.h delete mode 100644 inc/metadef/inc/graph/model.h delete mode 100644 inc/metadef/inc/graph/model_serialize.h delete mode 100644 inc/metadef/inc/graph/node.h delete mode 100644 inc/metadef/inc/graph/op_desc.h delete mode 100644 inc/metadef/inc/graph/op_kernel_bin.h delete mode 100644 inc/metadef/inc/graph/operator_factory_impl.h delete mode 100644 inc/metadef/inc/graph/opsproto_manager.h delete mode 100644 inc/metadef/inc/graph/range_vistor.h delete mode 100644 inc/metadef/inc/graph/ref_relation.h delete mode 100644 inc/metadef/inc/graph/repeated_iterator.h delete mode 100644 inc/metadef/inc/graph/resource_context_mgr.h delete mode 100644 inc/metadef/inc/graph/runtime_inference_context.h delete mode 100644 inc/metadef/inc/graph/shape_refiner.h delete mode 100644 inc/metadef/inc/graph/small_vector.h delete mode 100644 inc/metadef/inc/graph/tuning_utils.h delete mode 100644 inc/metadef/inc/graph/type_utils.h delete mode 100644 inc/metadef/inc/graph/usr_types.h delete mode 100644 inc/metadef/inc/graph/utils/anchor_utils.h delete mode 100644 inc/metadef/inc/graph/utils/attr_utils.h delete mode 100644 inc/metadef/inc/graph/utils/constant_utils.h delete mode 100644 inc/metadef/inc/graph/utils/ffts_graph_utils.h delete mode 100644 inc/metadef/inc/graph/utils/file_utils.h delete mode 100644 inc/metadef/inc/graph/utils/graph_utils.h delete mode 100644 inc/metadef/inc/graph/utils/graph_utils_ex.h delete mode 100644 inc/metadef/inc/graph/utils/node_adapter.h delete mode 100644 inc/metadef/inc/graph/utils/node_utils.h delete mode 100644 inc/metadef/inc/graph/utils/op_desc_utils.h delete mode 100644 inc/metadef/inc/graph/utils/tensor_adapter.h delete mode 100644 inc/metadef/inc/graph/utils/tensor_utils.h delete mode 100644 inc/metadef/inc/graph/utils/type_utils.h delete mode 100644 inc/metadef/inc/register/custom_pass_helper.h delete mode 100644 inc/metadef/inc/register/ffts_plus_task_update.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_registry.h delete mode 100644 
inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/fusion_common/aicore_util_types.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/fusion_common/fusion_statistic_recorder.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/fusion_common/graph_pass_util.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/fusion_common/op_slice_info.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/fusion_common/pattern_fusion_base_pass.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/graph_fusion/fusion_pass_manager/fusion_pass_registry.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/graph_fusion/fusion_pattern.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/graph_fusion/graph_pass.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/graph_fusion/pass.h delete mode 100644 inc/metadef/inc/register/graph_optimizer/graph_optimize_register_error_codes.h delete mode 100644 inc/metadef/inc/register/host_cpu_context.h delete mode 100644 inc/metadef/inc/register/infer_data_slice_registry.h delete mode 100644 inc/metadef/inc/register/op_kernel_registry.h delete mode 100644 inc/metadef/inc/register/op_registry.h delete mode 100644 inc/metadef/inc/register/op_tiling.h delete mode 100644 inc/metadef/inc/register/ops_kernel_builder_registry.h delete mode 100644 inc/metadef/inc/register/proto/caffe/caffe.proto delete mode 100644 inc/metadef/inc/register/proto/dump_task.proto delete mode 100644 inc/metadef/inc/register/proto/fusion_model.proto delete mode 100644 inc/metadef/inc/register/proto/fwk_adapter.proto delete mode 100644 inc/metadef/inc/register/proto/ge_ir.proto delete mode 100644 inc/metadef/inc/register/proto/insert_op.proto delete mode 100644 inc/metadef/inc/register/proto/om.proto delete mode 100644 inc/metadef/inc/register/proto/onnx/ge_onnx.proto delete mode 100644 inc/metadef/inc/register/proto/op_mapping_info.proto delete mode 100644 inc/metadef/inc/register/proto/proto_inner/ge_onnx.proto delete mode 100644 inc/metadef/inc/register/proto/task.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/attr_value.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/function.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/graph.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/graph_library.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/node_def.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/op_def.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/resource_handle.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/tensor.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/tensor_shape.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/types.proto delete mode 100644 inc/metadef/inc/register/proto/tensorflow/versions.proto delete mode 100644 inc/metadef/inc/register/prototype_pass_registry.h delete mode 100644 inc/metadef/inc/register/register.h delete mode 100644 inc/metadef/inc/register/register_format_transfer.h delete mode 100644 inc/metadef/inc/register/scope/scope_graph_impl.h delete mode 100644 inc/metadef/inc/register/scope/scope_pass_impl.h delete mode 100644 inc/metadef/inc/register/scope/scope_pass_registry_impl.h delete mode 100644 
inc/metadef/inc/register/scope/scope_pattern_impl.h delete mode 100644 inc/metadef/inc/register/tensor_assign.h delete mode 100644 inc/mmpa/mmpa_api.h delete mode 100644 inc/mmpa/sub_inc/mmpa_linux.h delete mode 100644 inc/mmpa/sub_inc/mmpa_typedef_linux.h delete mode 100644 inc/mmpa/sub_inc/mmpa_typedef_win.h delete mode 100644 inc/mmpa/sub_inc/mmpa_win.h delete mode 100644 inc/parser/inc/external/parser/onnx_parser.h delete mode 100644 inc/runtime/base.h delete mode 100644 inc/runtime/config.h delete mode 100644 inc/runtime/context.h delete mode 100644 inc/runtime/dev.h delete mode 100644 inc/runtime/dvfsprofile.h delete mode 100644 inc/runtime/event.h delete mode 100644 inc/runtime/kernel.h delete mode 100644 inc/runtime/mem.h delete mode 100644 inc/runtime/rt.h delete mode 100644 inc/runtime/rt_mem_queue.h delete mode 100644 inc/runtime/rt_model.h delete mode 100644 inc/runtime/stream.h delete mode 100644 inc/tdt/data_common.h delete mode 100644 inc/tdt/status.h delete mode 100644 inc/tdt/tdt_host_interface.h delete mode 100644 inc/tdt/tsd_client.h delete mode 100644 inc/toolchain/adx_datadump_server.h delete mode 100644 inc/toolchain/plog.h delete mode 100644 inc/toolchain/prof_callback.h delete mode 100644 inc/toolchain/prof_engine.h delete mode 100644 inc/toolchain/prof_mgr_core.h delete mode 100644 inc/toolchain/prof_reporter.h delete mode 100644 inc/toolchain/slog.h delete mode 100644 inc/toolchain/tuning_tool/aoe_tuning_api.h delete mode 100644 inc/toolchain/tuning_tool/aoe_types.h diff --git a/CMakeLists.txt b/CMakeLists.txt index a934716bf..43cb4071d 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,6 +32,7 @@ if (ENABLE_OPEN_SRC) include(${CMAKE_CURRENT_LIST_DIR}/cmake/tensorflow.cmake) include_directories(${CMAKE_CURRENT_LIST_DIR}) include_directories(${ASCEND_OPENSDK_DIR}/include) + include_directories(${ASCEND_OPENSDK_DIR}/include/aoe) include_directories(${ASCEND_OPENSDK_DIR}/include/slog) include_directories(${ASCEND_OPENSDK_DIR}/include/runtime) include_directories(${ASCEND_OPENSDK_DIR}/include/ascendcl/external) @@ -43,7 +44,6 @@ if (ENABLE_OPEN_SRC) include_directories(${ASCEND_OPENSDK_DIR}/include/metadef) include_directories(${ASCEND_OPENSDK_DIR}/include/metadef/external) include_directories(${CMAKE_CURRENT_LIST_DIR}/inc) - include_directories(${CMAKE_CURRENT_LIST_DIR}/inc/toolchain/tuning_tool) if (NOT EXISTS ${CMAKE_CURRENT_LIST_DIR}/tools/COMPILE_FLAGS OR NOT EXISTS ${CMAKE_CURRENT_LIST_DIR}/tools/LINK_FLAGS OR NOT EXISTS @@ -156,8 +156,6 @@ else() target_include_directories(tf_adapter PRIVATE ${BASE_DIR}/ - ${BASE_DIR}/inc/parser/inc/ - ${BASE_DIR}/inc/parser/inc/external/ ${TOP_DIR}/inc/ ${TOP_DIR}/inc/external/ ${TOP_DIR}/inc/common/ @@ -165,6 +163,8 @@ else() ${TOP_DIR}/graphengine/inc/external/ ${TOP_DIR}/metadef/inc/ ${TOP_DIR}/metadef/inc/external/ + ${TOP_DIR}/parser/inc/ + ${TOP_DIR}/parser/inc/external/ ${TOP_DIR}/libc_sec/include/ ${TOP_DIR}/abl/libc_sec/include/ ${CMAKE_CURRENT_LIST_DIR}/inc/third_party/tensorflow/include/tensorflow_core/include/ diff --git a/inc/external/acl/acl.h b/inc/external/acl/acl.h deleted file mode 100644 index cc32cc1f7..000000000 --- a/inc/external/acl/acl.h +++ /dev/null @@ -1,76 +0,0 @@ -/** -* @file acl.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
-*/ - -#ifndef INC_EXTERNAL_ACL_ACL_H_ -#define INC_EXTERNAL_ACL_ACL_H_ - -#include "acl_rt.h" -#include "acl_op.h" -#include "acl_mdl.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// Current version is 1.1.0 -#define ACL_MAJOR_VERSION 1 -#define ACL_MINOR_VERSION 1 -#define ACL_PATCH_VERSION 0 - -/** - * @ingroup AscendCL - * @brief acl initialize - * - * @par Restriction - * The aclInit interface can be called only once in a process - * @param configPath [IN] the config path,it can be NULL - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclInit(const char *configPath); - -/** - * @ingroup AscendCL - * @brief acl finalize - * - * @par Restriction - * Need to call aclFinalize before the process exits. - * After calling aclFinalize,the services cannot continue to be used normally. - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclFinalize(); - -/** - * @ingroup AscendCL - * @brief query ACL interface version - * - * @param majorVersion[OUT] ACL interface major version - * @param minorVersion[OUT] ACL interface minor version - * @param patchVersion[OUT] ACL interface patch version - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtGetVersion(int32_t *majorVersion, int32_t *minorVersion, int32_t *patchVersion); - -/** - * @ingroup AscendCL - * @brief get recent error message - * - * @retval null for failed - * @retval OtherValues success -*/ -ACL_FUNC_VISIBILITY const char *aclGetRecentErrMsg(); - -#ifdef __cplusplus -} -#endif - -#endif // INC_EXTERNAL_ACL_ACL_H_ diff --git a/inc/external/acl/acl_base.h b/inc/external/acl/acl_base.h deleted file mode 100644 index 7a5be38c5..000000000 --- a/inc/external/acl/acl_base.h +++ /dev/null @@ -1,642 +0,0 @@ -/** -* @file acl_base.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
-*/ - -#ifndef INC_EXTERNAL_ACL_ACL_BASE_H_ -#define INC_EXTERNAL_ACL_ACL_BASE_H_ - -#include -#include -#include "error_codes/rt_error_codes.h" -#include "error_codes/ge_error_codes.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define ACL_FUNC_VISIBILITY _declspec(dllexport) -#else -#define ACL_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define ACL_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define ACL_FUNC_VISIBILITY -#endif -#endif - -#ifdef __GNUC__ -#define ACL_DEPRECATED __attribute__((deprecated)) -#define ACL_DEPRECATED_MESSAGE(message) __attribute__((deprecated(message))) -#elif defined(_MSC_VER) -#define ACL_DEPRECATED __declspec(deprecated) -#define ACL_DEPRECATED_MESSAGE(message) __declspec(deprecated(message)) -#else -#define ACL_DEPRECATED -#define ACL_DEPRECATED_MESSAGE(message) -#endif - -typedef void *aclrtStream; -typedef void *aclrtEvent; -typedef void *aclrtContext; -typedef int aclError; -typedef uint16_t aclFloat16; -typedef struct aclDataBuffer aclDataBuffer; -typedef struct aclTensorDesc aclTensorDesc; - -static const int ACL_ERROR_NONE = 0; -static const int ACL_SUCCESS = 0; - -static const int ACL_ERROR_INVALID_PARAM = 100000; -static const int ACL_ERROR_UNINITIALIZE = 100001; -static const int ACL_ERROR_REPEAT_INITIALIZE = 100002; -static const int ACL_ERROR_INVALID_FILE = 100003; -static const int ACL_ERROR_WRITE_FILE = 100004; -static const int ACL_ERROR_INVALID_FILE_SIZE = 100005; -static const int ACL_ERROR_PARSE_FILE = 100006; -static const int ACL_ERROR_FILE_MISSING_ATTR = 100007; -static const int ACL_ERROR_FILE_ATTR_INVALID = 100008; -static const int ACL_ERROR_INVALID_DUMP_CONFIG = 100009; -static const int ACL_ERROR_INVALID_PROFILING_CONFIG = 100010; -static const int ACL_ERROR_INVALID_MODEL_ID = 100011; -static const int ACL_ERROR_DESERIALIZE_MODEL = 100012; -static const int ACL_ERROR_PARSE_MODEL = 100013; -static const int ACL_ERROR_READ_MODEL_FAILURE = 100014; -static const int ACL_ERROR_MODEL_SIZE_INVALID = 100015; -static const int ACL_ERROR_MODEL_MISSING_ATTR = 100016; -static const int ACL_ERROR_MODEL_INPUT_NOT_MATCH = 100017; -static const int ACL_ERROR_MODEL_OUTPUT_NOT_MATCH = 100018; -static const int ACL_ERROR_MODEL_NOT_DYNAMIC = 100019; -static const int ACL_ERROR_OP_TYPE_NOT_MATCH = 100020; -static const int ACL_ERROR_OP_INPUT_NOT_MATCH = 100021; -static const int ACL_ERROR_OP_OUTPUT_NOT_MATCH = 100022; -static const int ACL_ERROR_OP_ATTR_NOT_MATCH = 100023; -static const int ACL_ERROR_OP_NOT_FOUND = 100024; -static const int ACL_ERROR_OP_LOAD_FAILED = 100025; -static const int ACL_ERROR_UNSUPPORTED_DATA_TYPE = 100026; -static const int ACL_ERROR_FORMAT_NOT_MATCH = 100027; -static const int ACL_ERROR_BIN_SELECTOR_NOT_REGISTERED = 100028; -static const int ACL_ERROR_KERNEL_NOT_FOUND = 100029; -static const int ACL_ERROR_BIN_SELECTOR_ALREADY_REGISTERED = 100030; -static const int ACL_ERROR_KERNEL_ALREADY_REGISTERED = 100031; -static const int ACL_ERROR_INVALID_QUEUE_ID = 100032; -static const int ACL_ERROR_REPEAT_SUBSCRIBE = 100033; -static const int ACL_ERROR_STREAM_NOT_SUBSCRIBE = 100034; -static const int ACL_ERROR_THREAD_NOT_SUBSCRIBE = 100035; -static const int ACL_ERROR_WAIT_CALLBACK_TIMEOUT = 100036; -static const int ACL_ERROR_REPEAT_FINALIZE = 100037; -static const int ACL_ERROR_NOT_STATIC_AIPP = 100038; -static const int ACL_ERROR_COMPILING_STUB_MODE = 100039; -static const int ACL_ERROR_GROUP_NOT_SET = 100040; -static const int 
ACL_ERROR_GROUP_NOT_CREATE = 100041; -static const int ACL_ERROR_PROF_ALREADY_RUN = 100042; -static const int ACL_ERROR_PROF_NOT_RUN = 100043; -static const int ACL_ERROR_DUMP_ALREADY_RUN = 100044; -static const int ACL_ERROR_DUMP_NOT_RUN = 100045; -static const int ACL_ERROR_PROF_REPEAT_SUBSCRIBE = 148046; -static const int ACL_ERROR_PROF_API_CONFLICT = 148047; -static const int ACL_ERROR_INVALID_MAX_OPQUEUE_NUM_CONFIG = 148048; -static const int ACL_ERROR_INVALID_OPP_PATH = 148049; -static const int ACL_ERROR_OP_UNSUPPORTED_DYNAMIC = 148050; - -static const int ACL_ERROR_BAD_ALLOC = 200000; -static const int ACL_ERROR_API_NOT_SUPPORT = 200001; -static const int ACL_ERROR_INVALID_DEVICE = 200002; -static const int ACL_ERROR_MEMORY_ADDRESS_UNALIGNED = 200003; -static const int ACL_ERROR_RESOURCE_NOT_MATCH = 200004; -static const int ACL_ERROR_INVALID_RESOURCE_HANDLE = 200005; -static const int ACL_ERROR_FEATURE_UNSUPPORTED = 200006; -static const int ACL_ERROR_PROF_MODULES_UNSUPPORTED = 200007; - -static const int ACL_ERROR_STORAGE_OVER_LIMIT = 300000; - -static const int ACL_ERROR_INTERNAL_ERROR = 500000; -static const int ACL_ERROR_FAILURE = 500001; -static const int ACL_ERROR_GE_FAILURE = 500002; -static const int ACL_ERROR_RT_FAILURE = 500003; -static const int ACL_ERROR_DRV_FAILURE = 500004; -static const int ACL_ERROR_PROFILING_FAILURE = 500005; - - -#define ACL_TENSOR_SHAPE_RANGE_NUM 2 -#define ACL_UNKNOWN_RANK 0xFFFFFFFFFFFFFFFE - -typedef enum { - ACL_DT_UNDEFINED = -1, - ACL_FLOAT = 0, - ACL_FLOAT16 = 1, - ACL_INT8 = 2, - ACL_INT32 = 3, - ACL_UINT8 = 4, - ACL_INT16 = 6, - ACL_UINT16 = 7, - ACL_UINT32 = 8, - ACL_INT64 = 9, - ACL_UINT64 = 10, - ACL_DOUBLE = 11, - ACL_BOOL = 12, - ACL_STRING = 13, - ACL_COMPLEX64 = 16, - ACL_COMPLEX128 = 17 -} aclDataType; - -typedef enum { - ACL_FORMAT_UNDEFINED = -1, - ACL_FORMAT_NCHW = 0, - ACL_FORMAT_NHWC = 1, - ACL_FORMAT_ND = 2, - ACL_FORMAT_NC1HWC0 = 3, - ACL_FORMAT_FRACTAL_Z = 4, - ACL_FORMAT_NC1HWC0_C04 = 12, - ACL_FORMAT_NDHWC = 27, - ACL_FORMAT_FRACTAL_NZ = 29, - ACL_FORMAT_NCDHW = 30, - ACL_FORMAT_NDC1HWC0 = 32, - ACL_FRACTAL_Z_3D = 33 -} aclFormat; - -typedef enum { - ACL_DEBUG = 0, - ACL_INFO = 1, - ACL_WARNING = 2, - ACL_ERROR = 3, -} aclLogLevel; - -typedef enum { - ACL_MEMTYPE_DEVICE = 0, - ACL_MEMTYPE_HOST = 1, -} aclMemType; - - -/** - * @ingroup AscendCL - * @brief Converts data of type aclFloat16 to data of type float - * - * @param value [IN] Data to be converted - * - * @retval Transformed data - */ -ACL_FUNC_VISIBILITY float aclFloat16ToFloat(aclFloat16 value); - -/** - * @ingroup AscendCL - * @brief Converts data of type float to data of type aclFloat16 - * - * @param value [IN] Data to be converted - * - * @retval Transformed data - */ -ACL_FUNC_VISIBILITY aclFloat16 aclFloatToFloat16(float value); - -/** - * @ingroup AscendCL - * @brief create data of aclDataBuffer - * - * @param data [IN] pointer to data - * @li Need to be managed by the user, - * call aclrtMalloc interface to apply for memory, - * call aclrtFree interface to release memory - * - * @param size [IN] size of data in bytes - * - * @retval pointer to created instance. nullptr if run out of memory - * - * @see aclrtMalloc | aclrtFree - */ -ACL_FUNC_VISIBILITY aclDataBuffer *aclCreateDataBuffer(void *data, size_t size); - -/** - * @ingroup AscendCL - * @brief destroy data of aclDataBuffer - * - * @par Function - * Only the aclDataBuffer type data is destroyed here. 
- * The memory of the data passed in when the aclDataDataBuffer interface - * is called to create aclDataBuffer type data must be released by the user - * - * @param dataBuffer [IN] pointer to the aclDataBuffer - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclCreateDataBuffer - */ -ACL_FUNC_VISIBILITY aclError aclDestroyDataBuffer(const aclDataBuffer *dataBuffer); - -/** - * @ingroup AscendCL - * @brief update new data of aclDataBuffer - * - * @param dataBuffer [OUT] pointer to aclDataBuffer - * @li The old data need to be released by the user, otherwise it may occur memory leak leakage - * call aclGetDataBufferAddr interface to get old data address - * call aclrtFree interface to release memory - * - * @param data [IN] pointer to new data - * @li Need to be managed by the user, - * call aclrtMalloc interface to apply for memory, - * call aclrtFree interface to release memory - * - * @param size [IN] size of data in bytes - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtMalloc | aclrtFree | aclGetDataBufferAddr - */ -ACL_FUNC_VISIBILITY aclError aclUpdateDataBuffer(aclDataBuffer *dataBuffer, void *data, size_t size); - -/** - * @ingroup AscendCL - * @brief get data address from aclDataBuffer - * - * @param dataBuffer [IN] pointer to the data of aclDataBuffer - * - * @retval data address - */ -ACL_FUNC_VISIBILITY void *aclGetDataBufferAddr(const aclDataBuffer *dataBuffer); - -/** - * @ingroup AscendCL - * @brief get data size of aclDataBuffer - * - * @param dataBuffer [IN] pointer to the data of aclDataBuffer - * - * @retval data size - */ -ACL_DEPRECATED_MESSAGE("aclGetDataBufferSize is deprecated, use aclGetDataBufferSizeV2 instead") -ACL_FUNC_VISIBILITY uint32_t aclGetDataBufferSize(const aclDataBuffer *dataBuffer); - -/** - * @ingroup AscendCL - * @brief get data size of aclDataBuffer to replace aclGetDataBufferSize - * - * @param dataBuffer [IN] pointer to the data of aclDataBuffer - * - * @retval data size - */ -ACL_FUNC_VISIBILITY size_t aclGetDataBufferSizeV2(const aclDataBuffer *dataBuffer); - -/** - * @ingroup AscendCL - * @brief get size of aclDataType - * - * @param dataType [IN] aclDataType data the size to get - * - * @retval size of the aclDataType - */ -ACL_FUNC_VISIBILITY size_t aclDataTypeSize(aclDataType dataType); - -// interfaces of tensor desc -/** - * @ingroup AscendCL - * @brief create data aclTensorDesc - * - * @param dataType [IN] Data types described by tensor - * @param numDims [IN] the number of dimensions of the shape - * @param dims [IN] the size of the specified dimension - * @param format [IN] tensor format - * - * @retval aclTensorDesc pointer. - * @retval nullptr if param is invalid or run out of memory - */ -ACL_FUNC_VISIBILITY aclTensorDesc *aclCreateTensorDesc(aclDataType dataType, - int numDims, - const int64_t *dims, - aclFormat format); - -/** - * @ingroup AscendCL - * @brief destroy data aclTensorDesc - * - * @param desc [IN] pointer to the data of aclTensorDesc to destroy - */ -ACL_FUNC_VISIBILITY void aclDestroyTensorDesc(const aclTensorDesc *desc); - -/** - * @ingroup AscendCL - * @brief set tensor shape range for aclTensorDesc - * - * @param desc [OUT] pointer to the data of aclTensorDesc - * @param dimsCount [IN] the number of dimensions of the shape - * @param dimsRange [IN] the range of dimensions of the shape - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetTensorShapeRange(aclTensorDesc* desc, - size_t dimsCount, - int64_t dimsRange[][ACL_TENSOR_SHAPE_RANGE_NUM]); - -/** - * @ingroup AscendCL - * @brief get data type specified by the tensor description - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * - * @retval data type specified by the tensor description. - * @retval ACL_DT_UNDEFINED if description is null - */ -ACL_FUNC_VISIBILITY aclDataType aclGetTensorDescType(const aclTensorDesc *desc); - -/** - * @ingroup AscendCL - * @brief get data format specified by the tensor description - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * - * @retval data format specified by the tensor description. - * @retval ACL_FORMAT_UNDEFINED if description is null - */ -ACL_FUNC_VISIBILITY aclFormat aclGetTensorDescFormat(const aclTensorDesc *desc); - -/** - * @ingroup AscendCL - * @brief get tensor size specified by the tensor description - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * - * @retval data size specified by the tensor description. - * @retval 0 if description is null - */ -ACL_FUNC_VISIBILITY size_t aclGetTensorDescSize(const aclTensorDesc *desc); - -/** - * @ingroup AscendCL - * @brief get element count specified by the tensor description - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * - * @retval element count specified by the tensor description. - * @retval 0 if description is null - */ -ACL_FUNC_VISIBILITY size_t aclGetTensorDescElementCount(const aclTensorDesc *desc); - -/** - * @ingroup AscendCL - * @brief get number of dims specified by the tensor description - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * - * @retval number of dims specified by the tensor description. - * @retval 0 if description is null - * @retval ACL_UNKNOWN_RANK if the tensor dim is -2 - */ -ACL_FUNC_VISIBILITY size_t aclGetTensorDescNumDims(const aclTensorDesc *desc); - -/** - * @ingroup AscendCL - * @brief Get the size of the specified dim in the tensor description - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * @param index [IN] index of dims, start from 0. - * - * @retval dim specified by the tensor description and index. - * @retval -1 if description or index is invalid - */ -ACL_DEPRECATED_MESSAGE("aclGetTensorDescDim is deprecated, use aclGetTensorDescDimV2 instead") -ACL_FUNC_VISIBILITY int64_t aclGetTensorDescDim(const aclTensorDesc *desc, size_t index); - -/** - * @ingroup AscendCL - * @brief Get the size of the specified dim in the tensor description - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * @param index [IN] index of dims, start from 0. - * @param dimSize [OUT] size of the specified dim. - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclGetTensorDescDimV2(const aclTensorDesc *desc, size_t index, int64_t *dimSize); - -/** - * @ingroup AscendCL - * @brief Get the range of the specified dim in the tensor description - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * @param index [IN] index of dims, start from 0. - * @param dimRangeNum [IN] number of dimRange. - * @param dimRange [OUT] range of the specified dim. - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclGetTensorDescDimRange(const aclTensorDesc *desc, - size_t index, - size_t dimRangeNum, - int64_t *dimRange); - -/** - * @ingroup AscendCL - * @brief set tensor description name - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param name [IN] tensor description name - */ -ACL_FUNC_VISIBILITY void aclSetTensorDescName(aclTensorDesc *desc, const char *name); - -/** - * @ingroup AscendCL - * @brief get tensor description name - * - * @param desc [IN] pointer to the instance of aclTensorDesc - * - * @retval tensor description name. - * @retval empty string if description is null - */ -ACL_FUNC_VISIBILITY const char *aclGetTensorDescName(aclTensorDesc *desc); - -/** - * @ingroup AscendCL - * @brief Convert the format in the source aclTensorDesc according to - * the specified dstFormat to generate a new target aclTensorDesc. - * The format in the source aclTensorDesc remains unchanged. - * - * @param srcDesc [IN] pointer to the source tensor desc - * @param dstFormat [IN] destination format - * @param dstDesc [OUT] pointer to the pointer to the destination tensor desc - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclTransTensorDescFormat(const aclTensorDesc *srcDesc, aclFormat dstFormat, - aclTensorDesc **dstDesc); - -/** - * @ingroup AscendCL - * @brief Set the storage format specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param format [IN] the storage format - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_DEPRECATED_MESSAGE("aclSetTensorStorageFormat is deprecated, use aclSetTensorFormat instead") -ACL_FUNC_VISIBILITY aclError aclSetTensorStorageFormat(aclTensorDesc *desc, aclFormat format); - -/** - * @ingroup AscendCL - * @brief Set the storage shape specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param numDims [IN] the number of dimensions of the shape - * @param dims [IN] the size of the specified dimension - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_DEPRECATED_MESSAGE("aclSetTensorStorageShape is deprecated, use aclSetTensorShape instead") -ACL_FUNC_VISIBILITY aclError aclSetTensorStorageShape(aclTensorDesc *desc, int numDims, const int64_t *dims); - -/** - * @ingroup AscendCL - * @brief Set the format specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param format [IN] the storage format - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetTensorFormat(aclTensorDesc *desc, aclFormat format); - -/** - * @ingroup AscendCL - * @brief Set the shape specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param numDims [IN] the number of dimensions of the shape - * @param dims [IN] the size of the specified dimension - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetTensorShape(aclTensorDesc *desc, int numDims, const int64_t *dims); - -/** - * @ingroup AscendCL - * @brief Set the original format specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param format [IN] the storage format - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetTensorOriginFormat(aclTensorDesc *desc, aclFormat format); - -/** - * @ingroup AscendCL - * @brief Set the original shape specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param numDims [IN] the number of dimensions of the shape - * @param dims [IN] the size of the specified dimension - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetTensorOriginShape(aclTensorDesc *desc, int numDims, const int64_t *dims); - -/** - * @ingroup AscendCL - * @brief get op description info - * - * @param desc [IN] pointer to tensor description - * @param index [IN] index of tensor - * - * @retval null for failed. - * @retval OtherValues success. -*/ -ACL_FUNC_VISIBILITY aclTensorDesc *aclGetTensorDescByIndex(aclTensorDesc *desc, size_t index); - -/** - * @ingroup AscendCL - * @brief get address of tensor - * - * @param desc [IN] pointer to tensor description - * - * @retval null for failed - * @retval OtherValues success -*/ -ACL_FUNC_VISIBILITY void *aclGetTensorDescAddress(const aclTensorDesc *desc); - -/** - * @ingroup AscendCL - * @brief Set the dynamic input name specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param dynamicInputName [IN] pointer to the dynamic input name - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetTensorDynamicInput(aclTensorDesc *desc, const char *dynamicInputName); - -/** - * @ingroup AscendCL - * @brief Set const data specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param dataBuffer [IN] pointer to the const databuffer - * @param length [IN] the length of const databuffer - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetTensorConst(aclTensorDesc *desc, void *dataBuffer, size_t length); - -/** - * @ingroup AscendCL - * @brief Set tensor memory type specified by the tensor description - * - * @param desc [OUT] pointer to the instance of aclTensorDesc - * @param memType [IN] ACL_MEMTYPE_DEVICE means device, ACL_MEMTYPE_HOST means host - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetTensorPlaceMent(aclTensorDesc *desc, aclMemType memType); - -/** - * @ingroup AscendCL - * @brief an interface for users to output APP logs - * - * @param logLevel [IN] the level of current log - * @param func [IN] the function where the log is located - * @param file [IN] the file where the log is located - * @param line [IN] Number of source lines where the log is located - * @param fmt [IN] the format of current log - * @param ... 
[IN] the value of current log - */ -ACL_FUNC_VISIBILITY void aclAppLog(aclLogLevel logLevel, const char *func, const char *file, uint32_t line, - const char *fmt, ...); - -/** - * @ingroup AscendCL - * @brief get soc name - * - * @retval null for failed - * @retval OtherValues success -*/ -ACL_FUNC_VISIBILITY const char *aclrtGetSocName(); - -#define ACL_APP_LOG(level, fmt, ...) \ - aclAppLog(level, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__) - -#ifdef __cplusplus -} -#endif - -#endif // INC_EXTERNAL_ACL_ACL_BASE_H_ diff --git a/inc/external/acl/acl_mdl.h b/inc/external/acl/acl_mdl.h deleted file mode 100644 index fb1112d5f..000000000 --- a/inc/external/acl/acl_mdl.h +++ /dev/null @@ -1,1256 +0,0 @@ -/** -* @file acl_mdl.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ - -#ifndef INC_EXTERNAL_ACL_ACL_MODEL_H_ -#define INC_EXTERNAL_ACL_ACL_MODEL_H_ - -#include -#include - -#include "acl_base.h" -#include "acl_rt.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define ACL_MAX_DIM_CNT 128 -#define ACL_MAX_TENSOR_NAME_LEN 128 -#define ACL_MAX_BATCH_NUM 128 -#define ACL_MAX_HW_NUM 128 -#define ACL_MAX_SHAPE_COUNT 128 -#define ACL_INVALID_NODE_INDEX 0xFFFFFFFF - -#define ACL_MDL_LOAD_FROM_FILE 1 -#define ACL_MDL_LOAD_FROM_FILE_WITH_MEM 2 -#define ACL_MDL_LOAD_FROM_MEM 3 -#define ACL_MDL_LOAD_FROM_MEM_WITH_MEM 4 -#define ACL_MDL_LOAD_FROM_FILE_WITH_Q 5 -#define ACL_MDL_LOAD_FROM_MEM_WITH_Q 6 - -#define ACL_DYNAMIC_TENSOR_NAME "ascend_mbatch_shape_data" -#define ACL_DYNAMIC_AIPP_NAME "ascend_dynamic_aipp_data" -#define ACL_ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES "_datadump_original_op_names" - -typedef struct aclmdlDataset aclmdlDataset; -typedef struct aclmdlDesc aclmdlDesc; -typedef struct aclmdlAIPP aclmdlAIPP; -typedef struct aclAippExtendInfo aclAippExtendInfo; -typedef struct aclmdlConfigHandle aclmdlConfigHandle; - -typedef enum { - ACL_YUV420SP_U8 = 1, - ACL_XRGB8888_U8, - ACL_RGB888_U8, - ACL_YUV400_U8, - ACL_NC1HWC0DI_FP16, - ACL_NC1HWC0DI_S8, - ACL_ARGB8888_U8, - ACL_YUYV_U8, - ACL_YUV422SP_U8, - ACL_AYUV444_U8, - ACL_RAW10, - ACL_RAW12, - ACL_RAW16, - ACL_RAW24, - ACL_AIPP_RESERVED = 0xffff, -} aclAippInputFormat; - -typedef enum { - ACL_MDL_PRIORITY_INT32 = 0, - ACL_MDL_LOAD_TYPE_SIZET, - ACL_MDL_PATH_PTR, /**< pointer to model load path with deep copy */ - ACL_MDL_MEM_ADDR_PTR, /**< pointer to model memory with shallow copy */ - ACL_MDL_MEM_SIZET, - ACL_MDL_WEIGHT_ADDR_PTR, /**< pointer to weight memory of model with shallow copy */ - ACL_MDL_WEIGHT_SIZET, - ACL_MDL_WORKSPACE_ADDR_PTR, /**< pointer to worksapce memory of model with shallow copy */ - ACL_MDL_WORKSPACE_SIZET, - ACL_MDL_INPUTQ_NUM_SIZET, - ACL_MDL_INPUTQ_ADDR_PTR, /**< pointer to inputQ with shallow copy */ - ACL_MDL_OUTPUTQ_NUM_SIZET, - ACL_MDL_OUTPUTQ_ADDR_PTR /**< pointer to outputQ with shallow copy */ -} aclmdlConfigAttr; - -typedef enum { - ACL_DATA_WITHOUT_AIPP = 0, - ACL_DATA_WITH_STATIC_AIPP, - ACL_DATA_WITH_DYNAMIC_AIPP, - ACL_DYNAMIC_AIPP_NODE -} aclmdlInputAippType; - -typedef struct aclmdlIODims { - char name[ACL_MAX_TENSOR_NAME_LEN]; /**< tensor name */ - size_t dimCount; /**< dim array count */ - int64_t dims[ACL_MAX_DIM_CNT]; /**< dim data array */ -} aclmdlIODims; - -typedef struct aclAippDims { - aclmdlIODims srcDims; /**< input dims before model transform */ - size_t 
srcSize; /**< input size before model transform */ - aclmdlIODims aippOutdims; /**< aipp output dims */ - size_t aippOutSize; /**< aipp output size */ -} aclAippDims; - -typedef struct aclmdlBatch { - size_t batchCount; /**< batch array count */ - uint64_t batch[ACL_MAX_BATCH_NUM]; /**< batch data array */ -} aclmdlBatch; - -typedef struct aclmdlHW { - size_t hwCount; /**< height&width array count */ - uint64_t hw[ACL_MAX_HW_NUM][2]; /**< height&width data array */ -} aclmdlHW; - -typedef struct aclAippInfo { - aclAippInputFormat inputFormat; - int32_t srcImageSizeW; - int32_t srcImageSizeH; - int8_t cropSwitch; - int32_t loadStartPosW; - int32_t loadStartPosH; - int32_t cropSizeW; - int32_t cropSizeH; - int8_t resizeSwitch; - int32_t resizeOutputW; - int32_t resizeOutputH; - int8_t paddingSwitch; - int32_t leftPaddingSize; - int32_t rightPaddingSize; - int32_t topPaddingSize; - int32_t bottomPaddingSize; - int8_t cscSwitch; - int8_t rbuvSwapSwitch; - int8_t axSwapSwitch; - int8_t singleLineMode; - int32_t matrixR0C0; - int32_t matrixR0C1; - int32_t matrixR0C2; - int32_t matrixR1C0; - int32_t matrixR1C1; - int32_t matrixR1C2; - int32_t matrixR2C0; - int32_t matrixR2C1; - int32_t matrixR2C2; - int32_t outputBias0; - int32_t outputBias1; - int32_t outputBias2; - int32_t inputBias0; - int32_t inputBias1; - int32_t inputBias2; - int32_t meanChn0; - int32_t meanChn1; - int32_t meanChn2; - int32_t meanChn3; - float minChn0; - float minChn1; - float minChn2; - float minChn3; - float varReciChn0; - float varReciChn1; - float varReciChn2; - float varReciChn3; - aclFormat srcFormat; - aclDataType srcDatatype; - size_t srcDimNum; - size_t shapeCount; - aclAippDims outDims[ACL_MAX_SHAPE_COUNT]; - aclAippExtendInfo *aippExtend; /**< reserved parameters, current version needs to be null */ -} aclAippInfo; - -/** - * @ingroup AscendCL - * @brief Create data of type aclmdlDesc - * - * @retval the aclmdlDesc pointer - */ -ACL_FUNC_VISIBILITY aclmdlDesc *aclmdlCreateDesc(); - -/** - * @ingroup AscendCL - * @brief destroy data of type aclmdlDesc - * - * @param modelDesc [IN] Pointer to almdldlDesc to be destroyed - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlDestroyDesc(aclmdlDesc *modelDesc); - -/** - * @ingroup AscendCL - * @brief Get aclmdlDesc data of the model according to the model ID - * - * @param modelDesc [OUT] aclmdlDesc pointer - * @param modelId [IN] model id - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetDesc(aclmdlDesc *modelDesc, uint32_t modelId); - -/** - * @ingroup AscendCL - * @brief Get the number of the inputs of - * the model according to data of aclmdlDesc - * - * @param modelDesc [IN] aclmdlDesc pointer - * - * @retval input size with aclmdlDesc - */ -ACL_FUNC_VISIBILITY size_t aclmdlGetNumInputs(aclmdlDesc *modelDesc); - -/** - * @ingroup AscendCL - * @brief Get the number of the output of - * the model according to data of aclmdlDesc - * - * @param modelDesc [IN] aclmdlDesc pointer - * - * @retval output size with aclmdlDesc - */ -ACL_FUNC_VISIBILITY size_t aclmdlGetNumOutputs(aclmdlDesc *modelDesc); - -/** - * @ingroup AscendCL - * @brief Get the size of the specified input according to - * the data of type aclmdlDesc - * - * @param modelDesc [IN] aclmdlDesc pointer - * @param index [IN] the size of the number of inputs to be obtained, - * the index value starts from 0 - * - * @retval Specify the size of the input - */ -ACL_FUNC_VISIBILITY size_t aclmdlGetInputSizeByIndex(aclmdlDesc *modelDesc, size_t index); - -/** - * @ingroup AscendCL - * @brief Get the size of the specified output according to - * the data of type aclmdlDesc - * - * @param modelDesc [IN] aclmdlDesc pointer - * @param index [IN] the size of the number of outputs to be obtained, - * the index value starts from 0 - * - * @retval Specify the size of the output - */ -ACL_FUNC_VISIBILITY size_t aclmdlGetOutputSizeByIndex(aclmdlDesc *modelDesc, size_t index); - -/** - * @ingroup AscendCL - * @brief Create data of type aclmdlDataset - * - * @retval the aclmdlDataset pointer - */ -ACL_FUNC_VISIBILITY aclmdlDataset *aclmdlCreateDataset(); - -/** - * @ingroup AscendCL - * @brief destroy data of type aclmdlDataset - * - * @param dataset [IN] Pointer to aclmdlDataset to be destroyed - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlDestroyDataset(const aclmdlDataset *dataset); - -/** - * @ingroup AscendCL - * @brief Add aclDataBuffer to aclmdlDataset - * - * @param dataset [OUT] aclmdlDataset address of aclDataBuffer to be added - * @param dataBuffer [IN] aclDataBuffer address to be added - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlAddDatasetBuffer(aclmdlDataset *dataset, aclDataBuffer *dataBuffer); - -/** - * @ingroup AscendCL - * @brief Set aclTensorDesc to aclmdlDataset - * - * @param dataset [OUT] aclmdlDataset address of aclDataBuffer to be added - * @param tensorDesc [IN] aclTensorDesc address to be added - * @param index [IN] index of tensorDesc which to be added - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlSetDatasetTensorDesc(aclmdlDataset *dataset, - aclTensorDesc *tensorDesc, - size_t index); - -/** - * @ingroup AscendCL - * @brief Get aclTensorDesc from aclmdlDataset - * - * @param dataset [IN] aclmdlDataset pointer; - * @param index [IN] index of tensorDesc - * - * @retval Get address of aclTensorDesc when executed successfully. 
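Taken together, the descriptor functions above are normally called right after a model is loaded, to learn how many I/O tensors the model has and how large each buffer must be. A minimal sketch, assuming the model has already been loaded (the load functions are declared further down in this header) and the usual "acl/acl_mdl.h" install path; a production caller would also check the descriptor pointer for NULL:

#include <stdio.h>
#include "acl/acl_mdl.h"

/* Query the I/O layout of an already-loaded model. */
static aclError PrintModelIo(uint32_t modelId)
{
    aclmdlDesc *desc = aclmdlCreateDesc();
    aclError ret = aclmdlGetDesc(desc, modelId);
    if (ret != ACL_SUCCESS) {
        (void)aclmdlDestroyDesc(desc);
        return ret;
    }
    size_t numInputs = aclmdlGetNumInputs(desc);
    size_t numOutputs = aclmdlGetNumOutputs(desc);
    for (size_t i = 0; i < numInputs; ++i) {
        /* size in bytes the caller must provide for input i */
        printf("input %zu: %zu bytes\n", i, aclmdlGetInputSizeByIndex(desc, i));
    }
    for (size_t i = 0; i < numOutputs; ++i) {
        printf("output %zu: %zu bytes\n", i, aclmdlGetOutputSizeByIndex(desc, i));
    }
    return aclmdlDestroyDesc(desc);
}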
- * @retval Failure return NULL - */ -ACL_FUNC_VISIBILITY aclTensorDesc *aclmdlGetDatasetTensorDesc(const aclmdlDataset *dataset, size_t index); - -/** - * @ingroup AscendCL - * @brief Get the number of aclDataBuffer in aclmdlDataset - * - * @param dataset [IN] aclmdlDataset pointer - * - * @retval the number of aclDataBuffer - */ -ACL_FUNC_VISIBILITY size_t aclmdlGetDatasetNumBuffers(const aclmdlDataset *dataset); - -/** - * @ingroup AscendCL - * @brief Get the aclDataBuffer in aclmdlDataset by index - * - * @param dataset [IN] aclmdlDataset pointer - * @param index [IN] the index of aclDataBuffer - * - * @retval Get successfully, return the address of aclDataBuffer - * @retval Failure return NULL - */ -ACL_FUNC_VISIBILITY aclDataBuffer *aclmdlGetDatasetBuffer(const aclmdlDataset *dataset, size_t index); - -/** - * @ingroup AscendCL - * @brief Load offline model data from files - * and manage memory internally by the system - * - * @par Function - * After the system finishes loading the model, - * the model ID returned is used as a mark to identify the model - * during subsequent operations - * - * @param modelPath [IN] Storage path for offline model files - * @param modelId [OUT] Model ID generated after - * the system finishes loading the model - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFile(const char *modelPath, uint32_t *modelId); - -/** - * @ingroup AscendCL - * @brief Load offline model data from memory and manage the memory of - * model running internally by the system - * - * @par Function - * After the system finishes loading the model, - * the model ID returned is used as a mark to identify the model - * during subsequent operations - * - * @param model [IN] Model data stored in memory - * @param modelSize [IN] model data size - * @param modelId [OUT] Model ID generated after - * the system finishes loading the model - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMem(const void *model, size_t modelSize, - uint32_t *modelId); - -/** - * @ingroup AscendCL - * @brief Load offline model data from a file, - * and the user manages the memory of the model run by itself - * - * @par Function - * After the system finishes loading the model, - * the model ID returned is used as a mark to identify the model - * during subsequent operations. - * @param modelPath [IN] Storage path for offline model files - * @param modelId [OUT] Model ID generated after finishes loading the model - * @param workPtr [IN] A pointer to the working memory - * required by the model on the Device,can be null - * @param workSize [IN] The amount of working memory required by the model - * @param weightPtr [IN] Pointer to model weight memory on Device - * @param weightSize [IN] The amount of weight memory required by the model - * - * @retval ACL_SUCCESS The function is successfully executed. 
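The simplest of the load variants here lets the runtime manage both working and weight memory. A short sketch of the file-based path; the model path is a placeholder and aclmdlUnload (declared further down) releases the model once inference is done:

#include "acl/acl_mdl.h"

static aclError LoadAndRelease(void)
{
    uint32_t modelId = 0U;
    /* "./model.om" is a placeholder path for this sketch */
    aclError ret = aclmdlLoadFromFile("./model.om", &modelId);
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    /* ... build datasets and call aclmdlExecute here ... */
    return aclmdlUnload(modelId);
}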
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFileWithMem(const char *modelPath, - uint32_t *modelId, void *workPtr, size_t workSize, - void *weightPtr, size_t weightSize); - -/** - * @ingroup AscendCL - * @brief Load offline model data from memory, - * and the user can manage the memory of model running - * - * @par Function - * After the system finishes loading the model, - * the model ID returned is used as a mark to identify the model - * during subsequent operations - * @param model [IN] Model data stored in memory - * @param modelSize [IN] model data size - * @param modelId [OUT] Model ID generated after finishes loading the model - * @param workPtr [IN] A pointer to the working memory - * required by the model on the Device,can be null - * @param workSize [IN] work memory size - * @param weightPtr [IN] Pointer to model weight memory on Device,can be null - * @param weightSize [IN] The amount of weight memory required by the model - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMemWithMem(const void *model, size_t modelSize, - uint32_t *modelId, void *workPtr, size_t workSize, - void *weightPtr, size_t weightSize); - -/** - * @ingroup AscendCL - * @brief load model from file with async queue - * - * @param modelPath [IN] model path - * @param modelId [OUT] return model id if load success - * @param inputQ [IN] input queue pointer - * @param inputQNum [IN] input queue num - * @param outputQ [IN] output queue pointer - * @param outputQNum [IN] output queue num - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFileWithQ(const char *modelPath, uint32_t *modelId, const uint32_t *inputQ, - size_t inputQNum, const uint32_t *outputQ, size_t outputQNum); - -/** - * @ingroup AscendCL - * @brief load model from memory with async queue - * - * @param model [IN] model memory which user manages - * @param modelSize [IN] model size - * @param modelId [OUT] return model id if load success - * @param inputQ [IN] input queue pointer - * @param inputQNum [IN] input queue num - * @param outputQ [IN] output queue pointer - * @param outputQNum [IN] output queue num - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMemWithQ(const void *model, size_t modelSize, uint32_t *modelId, - const uint32_t *inputQ, size_t inputQNum, - const uint32_t *outputQ, size_t outputQNum); - -/** - * @ingroup AscendCL - * @brief Execute model synchronous inference until the inference result is returned - * - * @param modelId [IN] ID of the model to perform inference - * @param input [IN] Input data for model inference - * @param output [OUT] Output data for model inference - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlExecute(uint32_t modelId, const aclmdlDataset *input, aclmdlDataset *output); - -/** - * @ingroup AscendCL - * @brief Execute model asynchronous inference until the inference result is returned - * - * @param modelId [IN] ID of the model to perform inference - * @param input [IN] Input data for model inference - * @param output [OUT] Output data for model inference - * @param stream [IN] stream - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | - * aclmdlLoadFromMemWithMem - */ -ACL_FUNC_VISIBILITY aclError aclmdlExecuteAsync(uint32_t modelId, const aclmdlDataset *input, - aclmdlDataset *output, aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief unload model with model id - * - * @param modelId [IN] model id to be unloaded - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlUnload(uint32_t modelId); - -/** - * @ingroup AscendCL - * @brief Get the weight memory size and working memory size - * required for model execution according to the model file - * - * @param fileName [IN] Model path to get memory information - * @param workSize [OUT] The amount of working memory for model executed - * @param weightSize [OUT] The amount of weight memory for model executed - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlQuerySize(const char *fileName, size_t *workSize, size_t *weightSize); - -/** - * @ingroup AscendCL - * @brief Obtain the weights required for - * model execution according to the model data in memory - * - * @par Restriction - * The execution and weight memory is Device memory, - * and requires user application and release. - * @param model [IN] model memory which user manages - * @param modelSize [IN] model data size - * @param workSize [OUT] The amount of working memory for model executed - * @param weightSize [OUT] The amount of weight memory for model executed - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlQuerySizeFromMem(const void *model, size_t modelSize, size_t *workSize, - size_t *weightSize); - -/** - * @ingroup AscendCL - * @brief In dynamic batch scenarios, - * it is used to set the number of images processed - * at one time during model inference - * - * @param modelId [IN] model id - * @param dataset [IN|OUT] data for model inference - * @param index [IN] index of dynamic tensor - * @param batchSize [IN] Number of images processed at a time during model - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | - * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName - */ -ACL_FUNC_VISIBILITY aclError aclmdlSetDynamicBatchSize(uint32_t modelId, aclmdlDataset *dataset, size_t index, - uint64_t batchSize); - -/** - * @ingroup AscendCL - * @brief Sets the H and W of the specified input of the model - * - * @param modelId [IN] model id - * @param dataset [IN|OUT] data for model inference - * @param index [IN] index of dynamic tensor - * @param height [IN] model height - * @param width [IN] model width - * - * @retval ACL_SUCCESS The function is successfully executed. 
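When the caller wants to own the working and weight memory itself, aclmdlQuerySize above reports how much to allocate before aclmdlLoadFromFileWithMem is called. A sketch assuming aclrtMalloc and aclrtFree from acl_rt.h with their usual signatures and the ACL_MEM_MALLOC_HUGE_FIRST policy declared later in this patch:

#include "acl/acl_mdl.h"
#include "acl/acl_rt.h"

static aclError LoadWithOwnMemory(const char *omPath, uint32_t *modelId)
{
    size_t workSize = 0U;
    size_t weightSize = 0U;
    aclError ret = aclmdlQuerySize(omPath, &workSize, &weightSize);
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    void *workPtr = NULL;
    void *weightPtr = NULL;
    ret = aclrtMalloc(&workPtr, workSize, ACL_MEM_MALLOC_HUGE_FIRST);
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    ret = aclrtMalloc(&weightPtr, weightSize, ACL_MEM_MALLOC_HUGE_FIRST);
    if (ret != ACL_SUCCESS) {
        (void)aclrtFree(workPtr);
        return ret;
    }
    /* the caller must keep workPtr/weightPtr alive until aclmdlUnload */
    return aclmdlLoadFromFileWithMem(omPath, modelId, workPtr, workSize,
                                     weightPtr, weightSize);
}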
- * @retval OtherValues Failure - * - * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | - * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName - */ -ACL_FUNC_VISIBILITY aclError aclmdlSetDynamicHWSize(uint32_t modelId, aclmdlDataset *dataset, size_t index, - uint64_t height, uint64_t width); - -/** - * @ingroup AscendCL - * @brief Sets the dynamic dims of the specified input of the model - * - * @param modelId [IN] model id - * @param dataset [IN|OUT] data for model inference - * @param index [IN] index of dynamic dims - * @param dims [IN] value of dynamic dims - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | - * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName - */ -ACL_FUNC_VISIBILITY aclError aclmdlSetInputDynamicDims(uint32_t modelId, aclmdlDataset *dataset, size_t index, - const aclmdlIODims *dims); - -/** - * @ingroup AscendCL - * @brief get input dims info - * - * @param modelDesc [IN] model description - * @param index [IN] input tensor index - * @param dims [OUT] dims info - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlGetInputDimsV2 - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetInputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims); - -/** - * @ingroup AscendCL - * @brief get input dims info(version 2), especially for static aipp - * it is the same with aclmdlGetInputDims while model without static aipp - * - * @param modelDesc [IN] model description - * @param index [IN] input tensor index - * @param dims [OUT] dims info - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlGetInputDims - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetInputDimsV2(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims); - -/** - * @ingroup AscendCL - * @brief get output dims info - * - * @param modelDesc [IN] model description - * @param index [IN] output tensor index - * @param dims [OUT] dims info - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetOutputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims); - -/** - * @ingroup AscendCL - * @brief get current output dims info - * - * @par Function - * The following use cases are supported: - * @li Get current output shape when model is dynamic and - * dynamic shape info is set - * @li Get max output shape when model is dynamic and - * dynamic shape info is not set - * @li Get actual output shape when model is static - * - * @param modelDesc [IN] model description - * @param index [IN] output tensor index - * @param dims [OUT] dims info - * - * @retval ACL_SUCCESS The function is successfully executed. 
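For the dynamic-batch path above, the index argument is the position of the special shape input; it is usually resolved from ACL_DYNAMIC_TENSOR_NAME with aclmdlGetInputIndexByName, declared further down in this header. A sketch, assuming the datasets already contain buffers for every input including the shape tensor:

#include "acl/acl_mdl.h"

static aclError RunWithBatch(uint32_t modelId, aclmdlDesc *desc,
                             aclmdlDataset *input, aclmdlDataset *output,
                             uint64_t batchSize)
{
    size_t dynIndex = 0U;
    aclError ret = aclmdlGetInputIndexByName(desc, ACL_DYNAMIC_TENSOR_NAME, &dynIndex);
    if (ret != ACL_SUCCESS) {
        return ret; /* the model was not converted with dynamic batch enabled */
    }
    ret = aclmdlSetDynamicBatchSize(modelId, input, dynIndex, batchSize);
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    return aclmdlExecute(modelId, input, output);
}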
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetCurOutputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims); - -/** - * @ingroup AscendCL - * @brief get attr value by op name - * - * @param modelDesc [IN] model description - * @param opName [IN] op name - * @param attr [IN] attr name - * - * @retval the attr value - */ -ACL_FUNC_VISIBILITY const char *aclmdlGetOpAttr(aclmdlDesc *modelDesc, const char *opName, const char *attr); - -/** - * @ingroup AscendCL - * @brief get input name by index - * - * @param modelDesc [IN] model description - * @param index [IN] intput tensor index - * - * @retval input tensor name,the same life cycle with modelDesc - */ -ACL_FUNC_VISIBILITY const char *aclmdlGetInputNameByIndex(const aclmdlDesc *modelDesc, size_t index); - -/** - * @ingroup AscendCL - * @brief get output name by index - * - * @param modelDesc [IN] model description - * @param index [IN] output tensor index - * - * @retval output tensor name,the same life cycle with modelDesc - */ -ACL_FUNC_VISIBILITY const char *aclmdlGetOutputNameByIndex(const aclmdlDesc *modelDesc, size_t index); - -/** - * @ingroup AscendCL - * @brief get input format by index - * - * @param modelDesc [IN] model description - * @param index [IN] intput tensor index - * - * @retval input tensor format - */ -ACL_FUNC_VISIBILITY aclFormat aclmdlGetInputFormat(const aclmdlDesc *modelDesc, size_t index); - -/** - * @ingroup AscendCL - * @brief get output format by index - * - * @param modelDesc [IN] model description - * @param index [IN] output tensor index - * - * @retval output tensor format - */ -ACL_FUNC_VISIBILITY aclFormat aclmdlGetOutputFormat(const aclmdlDesc *modelDesc, size_t index); - -/** - * @ingroup AscendCL - * @brief get input data type by index - * - * @param modelDesc [IN] model description - * @param index [IN] intput tensor index - * - * @retval input tensor data type - */ -ACL_FUNC_VISIBILITY aclDataType aclmdlGetInputDataType(const aclmdlDesc *modelDesc, size_t index); - -/** - * @ingroup AscendCL - * @brief get output data type by index - * - * @param modelDesc [IN] model description - * @param index [IN] output tensor index - * - * @retval output tensor data type - */ -ACL_FUNC_VISIBILITY aclDataType aclmdlGetOutputDataType(const aclmdlDesc *modelDesc, size_t index); - -/** - * @ingroup AscendCL - * @brief get input tensor index by name - * - * @param modelDesc [IN] model description - * @param name [IN] intput tensor name - * @param index [OUT] intput tensor index - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetInputIndexByName(const aclmdlDesc *modelDesc, const char *name, size_t *index); - -/** - * @ingroup AscendCL - * @brief get output tensor index by name - * - * @param modelDesc [IN] model description - * @param name [IN] output tensor name - * @param index [OUT] output tensor index - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetOutputIndexByName(const aclmdlDesc *modelDesc, const char *name, size_t *index); - -/** - * @ingroup AscendCL - * @brief get dynamic batch info - * - * @param modelDesc [IN] model description - * @param batch [OUT] dynamic batch info - * - * @retval ACL_SUCCESS The function is successfully executed. 
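The per-index query functions here are convenient for dumping a model's interface. A small sketch that walks every input; the returned name pointers share the descriptor's lifetime, so nothing needs to be freed:

#include <stdio.h>
#include "acl/acl_mdl.h"

static void DumpInputs(aclmdlDesc *desc)
{
    size_t count = aclmdlGetNumInputs(desc);
    for (size_t i = 0; i < count; ++i) {
        aclmdlIODims dims;
        if (aclmdlGetInputDims(desc, i, &dims) != ACL_SUCCESS) {
            continue;
        }
        printf("input %zu: name=%s format=%d dtype=%d dimCount=%zu\n",
               i,
               aclmdlGetInputNameByIndex(desc, i),
               (int)aclmdlGetInputFormat(desc, i),
               (int)aclmdlGetInputDataType(desc, i),
               dims.dimCount);
    }
}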
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetDynamicBatch(const aclmdlDesc *modelDesc, aclmdlBatch *batch); - -/** - * @ingroup AscendCL - * @brief get dynamic height&width info - * - * @param modelDesc [IN] model description - * @param index [IN] input tensor index - * @param hw [OUT] dynamic height&width info - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetDynamicHW(const aclmdlDesc *modelDesc, size_t index, aclmdlHW *hw); - -/** - * @ingroup AscendCL - * @brief get dynamic gear count - * - * @param modelDesc [IN] model description - * @param index [IN] unused, must be -1 - * @param gearCount [OUT] dynamic gear count - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetInputDynamicGearCount(const aclmdlDesc *modelDesc, size_t index, - size_t *gearCount); - -/** - * @ingroup AscendCL - * @brief get dynamic dims info - * - * @param modelDesc [IN] model description - * @param index [IN] unused, must be -1 - * @param dims [OUT] value of dynamic dims - * @param gearCount [IN] dynamic gear count - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlGetInputDynamicDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims, - size_t gearCount); - -/** - * @ingroup AscendCL - * @brief Create data of type aclmdlAIPP - * - * @param batchSize [IN] batchsizes of model - * - * @retval the aclmdlAIPP pointer - */ -ACL_FUNC_VISIBILITY aclmdlAIPP *aclmdlCreateAIPP(uint64_t batchSize); - -/** - * @ingroup AscendCL - * @brief destroy data of type aclmdlAIPP - * - * @param aippParmsSet [IN] Pointer for aclmdlAIPP to be destroyed - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlDestroyAIPP(const aclmdlAIPP *aippParmsSet); - -/** - * @ingroup AscendCL - * @brief set InputFormat of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param inputFormat [IN] The inputFormat of aipp - * - * @retval ACL_SUCCESS The function is successfully executed. 
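The gear queries above work in two steps: first the number of preset dim profiles, then the profiles themselves, and per the comments the index argument is unused and must be -1. A sketch; the fixed-size local array is a simplification, and a real caller would size the buffer from gearCount:

#include <stdio.h>
#include "acl/acl_mdl.h"

#define MAX_GEARS 16 /* arbitrary cap for this sketch */

static aclError DumpDynamicDims(const aclmdlDesc *desc)
{
    size_t gearCount = 0U;
    aclError ret = aclmdlGetInputDynamicGearCount(desc, (size_t)-1, &gearCount);
    if (ret != ACL_SUCCESS || gearCount == 0U || gearCount > MAX_GEARS) {
        return ret;
    }
    aclmdlIODims dims[MAX_GEARS];
    ret = aclmdlGetInputDynamicDims(desc, (size_t)-1, dims, gearCount);
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    for (size_t g = 0; g < gearCount; ++g) {
        printf("gear %zu has %zu dims\n", g, dims[g].dimCount);
    }
    return ACL_SUCCESS;
}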
- * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP - */ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPInputFormat(aclmdlAIPP *aippParmsSet, aclAippInputFormat inputFormat); - -/** - * @ingroup AscendCL - * @brief set cscParms of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param csc_switch [IN] Csc switch - * @param cscMatrixR0C0 [IN] Csc_matrix_r0_c0 - * @param cscMatrixR0C1 [IN] Csc_matrix_r0_c1 - * @param cscMatrixR0C2 [IN] Csc_matrix_r0_c2 - * @param cscMatrixR1C0 [IN] Csc_matrix_r1_c0 - * @param cscMatrixR1C1 [IN] Csc_matrix_r1_c1 - * @param cscMatrixR1C2 [IN] Csc_matrix_r1_c2 - * @param cscMatrixR2C0 [IN] Csc_matrix_r2_c0 - * @param cscMatrixR2C1 [IN] Csc_matrix_r2_c1 - * @param cscMatrixR2C2 [IN] Csc_matrix_r2_c2 - * @param cscOutputBiasR0 [IN] Output Bias for RGB to YUV, element of row 0, unsigned number - * @param cscOutputBiasR1 [IN] Output Bias for RGB to YUV, element of row 1, unsigned number - * @param cscOutputBiasR2 [IN] Output Bias for RGB to YUV, element of row 2, unsigned number - * @param cscInputBiasR0 [IN] Input Bias for YUV to RGB, element of row 0, unsigned number - * @param cscInputBiasR1 [IN] Input Bias for YUV to RGB, element of row 1, unsigned number - * @param cscInputBiasR2 [IN] Input Bias for YUV to RGB, element of row 2, unsigned number - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPCscParams(aclmdlAIPP *aippParmsSet, int8_t cscSwitch, - int16_t cscMatrixR0C0, int16_t cscMatrixR0C1, int16_t cscMatrixR0C2, - int16_t cscMatrixR1C0, int16_t cscMatrixR1C1, int16_t cscMatrixR1C2, - int16_t cscMatrixR2C0, int16_t cscMatrixR2C1, int16_t cscMatrixR2C2, - uint8_t cscOutputBiasR0, uint8_t cscOutputBiasR1, - uint8_t cscOutputBiasR2, uint8_t cscInputBiasR0, - uint8_t cscInputBiasR1, uint8_t cscInputBiasR2); - -/** - * @ingroup AscendCL - * @brief set rb/ub swap switch of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param rbuvSwapSwitch [IN] rb/ub swap switch - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPRbuvSwapSwitch(aclmdlAIPP *aippParmsSet, int8_t rbuvSwapSwitch); - -/** - * @ingroup AscendCL - * @brief set RGBA->ARGB, YUVA->AYUV swap switch of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param axSwapSwitch [IN] RGBA->ARGB, YUVA->AYUV swap switch - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPAxSwapSwitch(aclmdlAIPP *aippParmsSet, int8_t axSwapSwitch); - -/** - * @ingroup AscendCL - * @brief set source image of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param srcImageSizeW [IN] Source image width - * @param srcImageSizeH [IN] Source image height - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPSrcImageSize(aclmdlAIPP *aippParmsSet, int32_t srcImageSizeW, - int32_t srcImageSizeH); - -/** - * @ingroup AscendCL - * @brief set resize switch of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param scfSwitch [IN] Resize switch - * @param scfInputSizeW [IN] Input width of scf - * @param scfInputSizeH [IN] Input height of scf - * @param scfOutputSizeW [IN] Output width of scf - * @param scfOutputSizeH [IN] Output height of scf - * @param batchIndex [IN] Batch parameter index - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPScfParams(aclmdlAIPP *aippParmsSet, - int8_t scfSwitch, - int32_t scfInputSizeW, - int32_t scfInputSizeH, - int32_t scfOutputSizeW, - int32_t scfOutputSizeH, - uint64_t batchIndex); - -/** - * @ingroup AscendCL - * @brief set cropParams of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param cropSwitch [IN] Crop switch - * @param cropStartPosW [IN] The start horizontal position of cropping - * @param cropStartPosH [IN] The start vertical position of cropping - * @param cropSizeW [IN] Crop width - * @param cropSizeH [IN] Crop height - * @param batchIndex [IN] Batch parameter index - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPCropParams(aclmdlAIPP *aippParmsSet, - int8_t cropSwitch, - int32_t cropStartPosW, - int32_t cropStartPosH, - int32_t cropSizeW, - int32_t cropSizeH, - uint64_t batchIndex); - -/** - * @ingroup AscendCL - * @brief set paddingParams of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param paddingSwitch [IN] Padding switch - * @param paddingSizeTop [IN] Top padding size - * @param paddingSizeBottom [IN] Bottom padding size - * @param paddingSizeLeft [IN] Left padding size - * @param paddingSizeRight [IN] Right padding size - * @param batchIndex [IN] Batch parameter index - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPPaddingParams(aclmdlAIPP *aippParmsSet, int8_t paddingSwitch, - int32_t paddingSizeTop, int32_t paddingSizeBottom, - int32_t paddingSizeLeft, int32_t paddingSizeRight, - uint64_t batchIndex); - -/** - * @ingroup AscendCL - * @brief set DtcPixelMean of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param dtcPixelMeanChn0 [IN] Mean value of channel 0 - * @param dtcPixelMeanChn1 [IN] Mean value of channel 1 - * @param dtcPixelMeanChn2 [IN] Mean value of channel 2 - * @param dtcPixelMeanChn3 [IN] Mean value of channel 3 - * @param batchIndex [IN] Batch parameter index - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPDtcPixelMean(aclmdlAIPP *aippParmsSet, - int16_t dtcPixelMeanChn0, - int16_t dtcPixelMeanChn1, - int16_t dtcPixelMeanChn2, - int16_t dtcPixelMeanChn3, - uint64_t batchIndex); - -/** - * @ingroup AscendCL - * @brief set DtcPixelMin of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param dtcPixelMinChn0 [IN] Min value of channel 0 - * @param dtcPixelMinChn1 [IN] Min value of channel 1 - * @param dtcPixelMinChn2 [IN] Min value of channel 2 - * @param dtcPixelMinChn3 [IN] Min value of channel 3 - * @param batchIndex [IN] Batch parameter index - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPDtcPixelMin(aclmdlAIPP *aippParmsSet, - float dtcPixelMinChn0, - float dtcPixelMinChn1, - float dtcPixelMinChn2, - float dtcPixelMinChn3, - uint64_t batchIndex); - -/** - * @ingroup AscendCL - * @brief set PixelVarReci of type aclmdlAIPP - * - * @param aippParmsSet [OUT] Pointer for aclmdlAIPP - * @param dtcPixelVarReciChn0 [IN] sfr_dtc_pixel_variance_reci_ch0 - * @param dtcPixelVarReciChn1 [IN] sfr_dtc_pixel_variance_reci_ch1 - * @param dtcPixelVarReciChn2 [IN] sfr_dtc_pixel_variance_reci_ch2 - * @param dtcPixelVarReciChn3 [IN] sfr_dtc_pixel_variance_reci_ch3 - * @param batchIndex [IN] Batch parameter index - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPPixelVarReci(aclmdlAIPP *aippParmsSet, - float dtcPixelVarReciChn0, - float dtcPixelVarReciChn1, - float dtcPixelVarReciChn2, - float dtcPixelVarReciChn3, - uint64_t batchIndex); - -/** - * @ingroup AscendCL - * @brief set aipp parameters to model - * - * @param modelId [IN] model id - * @param dataset [IN] Pointer of dataset - * @param index [IN] index of input for aipp data(ACL_DYNAMIC_AIPP_NODE) - * @param aippParmsSet [IN] Pointer for aclmdlAIPP - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | - * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetInputAIPP(uint32_t modelId, - aclmdlDataset *dataset, - size_t index, - const aclmdlAIPP *aippParmsSet); - -/** - * @ingroup AscendCL - * @brief set aipp parameters to model - * - * @param modelId [IN] model id - * @param dataset [IN] Pointer of dataset - * @param index [IN] index of input for data which linked dynamic aipp(ACL_DATA_WITH_DYNAMIC_AIPP) - * @param aippParmsSet [IN] Pointer for aclmdlAIPP - * - * @retval ACL_SUCCESS The function is successfully executed. 
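The aclmdlAIPP setters are meant to be chained on one handle and then attached to the input that carries the dynamic AIPP configuration. A sketch assuming a model converted with dynamic AIPP enabled, so the ACL_DYNAMIC_AIPP_NAME input exists; the image geometry and mean values are placeholders, and ACL_ERROR_BAD_ALLOC is assumed to come from acl_base.h:

#include "acl/acl_mdl.h"

static aclError ApplyDynamicAipp(uint32_t modelId, aclmdlDesc *desc,
                                 aclmdlDataset *input, uint64_t batchSize)
{
    size_t aippIndex = 0U;
    aclError ret = aclmdlGetInputIndexByName(desc, ACL_DYNAMIC_AIPP_NAME, &aippIndex);
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    aclmdlAIPP *aipp = aclmdlCreateAIPP(batchSize);
    if (aipp == NULL) {
        return ACL_ERROR_BAD_ALLOC; /* assumed error code from acl_base.h */
    }
    ret = aclmdlSetAIPPInputFormat(aipp, ACL_YUV420SP_U8);
    if (ret == ACL_SUCCESS) {
        ret = aclmdlSetAIPPSrcImageSize(aipp, 416, 416); /* placeholder geometry */
    }
    if (ret == ACL_SUCCESS) {
        /* placeholder per-channel mean subtraction for batch index 0 */
        ret = aclmdlSetAIPPDtcPixelMean(aipp, 104, 117, 123, 0, 0);
    }
    if (ret == ACL_SUCCESS) {
        ret = aclmdlSetInputAIPP(modelId, input, aippIndex, aipp);
    }
    (void)aclmdlDestroyAIPP(aipp);
    return ret;
}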
- * @retval OtherValues Failure - * - * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | - * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPByInputIndex(uint32_t modelId, - aclmdlDataset *dataset, - size_t index, - const aclmdlAIPP *aippParmsSet); - -/** - * @ingroup AscendCL - * @brief get input aipp type - * - * @param modelId [IN] model id - * @param index [IN] index of input - * @param type [OUT] aipp type for input.refrer to aclmdlInputAippType(enum) - * @param dynamicAttachedDataIndex [OUT] index for dynamic attached data(ACL_DYNAMIC_AIPP_NODE) - * valid when type is ACL_DATA_WITH_DYNAMIC_AIPP, invalid value is ACL_INVALID_NODE_INDEX - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | - * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP -*/ -ACL_FUNC_VISIBILITY aclError aclmdlGetAippType(uint32_t modelId, - size_t index, - aclmdlInputAippType *type, - size_t *dynamicAttachedDataIndex); - -/** - * @ingroup AscendCL - * @brief get static aipp parameters from model - * - * @param modelId [IN] model id - * @param index [IN] index of tensor - * @param aippInfo [OUT] Pointer for static aipp info - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval ACL_ERROR_MODEL_AIPP_NOT_EXIST The tensor of index is not configured with aipp - * @retval OtherValues Failure - * - * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | - * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName -*/ -ACL_FUNC_VISIBILITY aclError aclmdlGetFirstAippInfo(uint32_t modelId, size_t index, aclAippInfo *aippInfo); - -/** - * @ingroup AscendCL - * @brief get op description info - * - * @param deviceId [IN] device id - * @param streamId [IN] stream id - * @param taskId [IN] task id - * @param opName [OUT] pointer to op name - * @param opNameLen [IN] the length of op name - * @param inputDesc [OUT] pointer to input description - * @param numInputs [OUT] the number of input tensor - * @param outputDesc [OUT] pointer to output description - * @param numOutputs [OUT] the number of output tensor - * - * @retval ACL_SUCCESS The function is successfully executed - * @retval OtherValues Failure -*/ -ACL_FUNC_VISIBILITY aclError aclmdlCreateAndGetOpDesc(uint32_t deviceId, uint32_t streamId, - uint32_t taskId, char *opName, size_t opNameLen, aclTensorDesc **inputDesc, size_t *numInputs, - aclTensorDesc **outputDesc, size_t *numOutputs); - -/** - * @ingroup AscendCL - * @brief init dump - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure -*/ -ACL_FUNC_VISIBILITY aclError aclmdlInitDump(); - -/** - * @ingroup AscendCL - * @brief set param of dump - * - * @param dumpCfgPath [IN] the path of dump config - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure -*/ -ACL_FUNC_VISIBILITY aclError aclmdlSetDump(const char *dumpCfgPath); - -/** - * @ingroup AscendCL - * @brief finalize dump. - * - * @retval ACL_SUCCESS The function is successfully executed. 
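These dump entry points form a simple bracket around inference: initialise once, point the runtime at a dump configuration file, run, then finalise. The configuration path below is a placeholder:

#include "acl/acl_mdl.h"

static aclError RunWithDump(uint32_t modelId, const aclmdlDataset *input,
                            aclmdlDataset *output)
{
    aclError ret = aclmdlInitDump();
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    /* placeholder path to a dump configuration file */
    ret = aclmdlSetDump("./acl_dump.json");
    if (ret == ACL_SUCCESS) {
        ret = aclmdlExecute(modelId, input, output);
    }
    aclError finRet = aclmdlFinalizeDump();
    return (ret != ACL_SUCCESS) ? ret : finRet;
}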
- * @retval OtherValues Failure -*/ -ACL_FUNC_VISIBILITY aclError aclmdlFinalizeDump(); - -/** - * @ingroup AscendCL - * @brief load model with config - * - * @param handle [IN] pointer to model config handle - * @param modelId [OUT] pointer to model id - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure -*/ -ACL_FUNC_VISIBILITY aclError aclmdlLoadWithConfig(const aclmdlConfigHandle *handle, uint32_t *modelId); - -/** - * @ingroup AscendCL - * @brief create model config handle of type aclmdlConfigHandle - * - * @retval the aclmdlConfigHandle pointer - * - * @see aclmdlDestroyConfigHandle -*/ -ACL_FUNC_VISIBILITY aclmdlConfigHandle *aclmdlCreateConfigHandle(); - -/** - * @ingroup AscendCL - * @brief destroy data of type aclmdlConfigHandle - * - * @param handle [IN] pointer to model config handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclmdlCreateConfigHandle - */ -ACL_FUNC_VISIBILITY aclError aclmdlDestroyConfigHandle(aclmdlConfigHandle *handle); - -/** - * @ingroup AscendCL - * @brief set config for model load - * - * @param handle [OUT] pointer to model config handle - * @param attr [IN] config attr in model config handle to be set - * @param attrValue [IN] pointer to model config value - * @param valueSize [IN] memory size of attrValue - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclmdlSetConfigOpt(aclmdlConfigHandle *handle, aclmdlConfigAttr attr, - const void *attrValue, size_t valueSize); - -/** - * @ingroup AscendCL - * @brief get real tensor name from modelDesc - * - * @param modelDesc [IN] pointer to modelDesc - * @param name [IN] tensor name - * - * @retval the pointer of real tensor name - * @retval Failure return NULL - */ -ACL_FUNC_VISIBILITY const char *aclmdlGetTensorRealName(const aclmdlDesc *modelDesc, const char *name); - -#ifdef __cplusplus -} -#endif - -#endif // INC_EXTERNAL_ACL_ACL_MODEL_H_ diff --git a/inc/external/acl/acl_op.h b/inc/external/acl/acl_op.h deleted file mode 100644 index e5da43ee4..000000000 --- a/inc/external/acl/acl_op.h +++ /dev/null @@ -1,570 +0,0 @@ -/** -* @file acl_op.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ -#ifndef INC_EXTERNAL_ACL_ACL_OP_H_ -#define INC_EXTERNAL_ACL_ACL_OP_H_ - -#include "acl_base.h" -#include "acl_rt.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct aclopHandle aclopHandle; -typedef struct aclopAttr aclopAttr; -typedef struct aclopKernelDesc aclopKernelDesc; - -typedef void (*aclDataDeallocator)(void *data, size_t length); - -static const int ACL_COMPILE_FLAG_BIN_SELECTOR = 1; - -typedef enum aclEngineType { - ACL_ENGINE_SYS, - ACL_ENGINE_AICORE, - ACL_ENGINE_VECTOR, -} aclopEngineType; - -/** - * @ingroup AscendCL - * @brief Set base directory that contains single op models - * - * @par Restriction - * The aclopSetModelDir interface can be called only once in a process. - * @param modelDir [IN] path of the directory - * - * @retval ACL_SUCCESS The function is successfully executed. 
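aclmdlLoadWithConfig is the attribute-driven alternative to the dedicated load functions: options are pushed into a config handle and the load type selects which of them are read. The sketch below sets the load type and the model path; how pointer-valued attributes such as ACL_MDL_PATH_PTR expect their value to be packed is not spelled out in this header, so that step is an assumption and should be checked against the aclmdlSetConfigOpt documentation:

#include <string.h>
#include "acl/acl_mdl.h"

static aclError LoadViaConfig(const char *omPath, uint32_t *modelId)
{
    aclmdlConfigHandle *handle = aclmdlCreateConfigHandle();
    size_t loadType = ACL_MDL_LOAD_FROM_FILE;
    aclError ret = aclmdlSetConfigOpt(handle, ACL_MDL_LOAD_TYPE_SIZET,
                                      &loadType, sizeof(loadType));
    if (ret == ACL_SUCCESS) {
        /* assumption: the path pointer itself is the attribute value and is
         * deep-copied by the runtime, as the enum comment suggests */
        ret = aclmdlSetConfigOpt(handle, ACL_MDL_PATH_PTR, omPath, strlen(omPath) + 1U);
    }
    if (ret == ACL_SUCCESS) {
        ret = aclmdlLoadWithConfig(handle, modelId);
    }
    (void)aclmdlDestroyConfigHandle(handle);
    return ret;
}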
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetModelDir(const char *modelDir); - -/** - * @ingroup AscendCL - * @brief load single op models from memory - * - * @par Restriction - * The aclopLoad interface can be called more than one times in a process. - * @param model [IN] address of single op models - * @param modelSize [IN] size of single op models - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopLoad(const void *model, size_t modelSize); - -/** - * @ingroup AscendCL - * @brief create data of type aclopAttr - * - * @retval pointer to created instance. - * @retval nullptr if run out of memory - */ -ACL_FUNC_VISIBILITY aclopAttr *aclopCreateAttr(); - -/** - * @ingroup AscendCL - * @brief destroy data of typ aclopAttr - * - * @param attr [IN] pointer to the instance of aclopAttr - */ -ACL_FUNC_VISIBILITY void aclopDestroyAttr(const aclopAttr *attr); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is bool - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param attrValue [IN] attribute value - * false if attrValue is 0, true otherwise. - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrBool(aclopAttr *attr, const char *attrName, uint8_t attrValue); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is int64_t - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param attrValue [IN] attribute value - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrInt(aclopAttr *attr, const char *attrName, int64_t attrValue); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is float - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param attrValue [IN] attribute value - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrFloat(aclopAttr *attr, const char *attrName, float attrValue); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is string - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param attrValue [IN] attribute value - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrString(aclopAttr *attr, const char *attrName, const char *attrValue); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is aclDataType - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param attrValue [IN] attribute value - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrDataType(aclopAttr *attr, const char *attrName, aclDataType attrValue); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is list of aclDataType - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param numValues [IN] number of values. 
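The attribute setters declared in this header all target the same opaque aclopAttr, which is then handed to the execute and compile calls further down. A short sketch of building an attribute set for a hypothetical convolution-like op; the attribute names and values are placeholders:

#include "acl/acl_op.h"

static aclopAttr *BuildAttr(void)
{
    aclopAttr *attr = aclopCreateAttr();
    if (attr == NULL) {
        return NULL;
    }
    const int64_t strides[] = {1, 1, 2, 2};
    if (aclopSetAttrBool(attr, "transpose_x1", 0) != ACL_SUCCESS ||
        aclopSetAttrInt(attr, "groups", 1) != ACL_SUCCESS ||
        aclopSetAttrFloat(attr, "epsilon", 1e-5f) != ACL_SUCCESS ||
        aclopSetAttrString(attr, "padding", "SAME") != ACL_SUCCESS ||
        aclopSetAttrListInt(attr, "strides", 4, strides) != ACL_SUCCESS) {
        aclopDestroyAttr(attr);
        return NULL;
    }
    return attr; /* the caller releases it with aclopDestroyAttr */
}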
false if attrValue is 0, true otherwise. - * @param values [IN] pointer to values - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrListDataType(aclopAttr *attr, const char *attrName, int numValues, - const aclDataType values[]); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is list of bools - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param numValues [IN] number of values. false if attrValue is 0, true otherwise. - * @param values [IN] pointer to values - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrListBool(aclopAttr *attr, const char *attrName, int numValues, - const uint8_t *values); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is list of ints - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param numValues [IN] number of values - * @param values [IN] pointer to values - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrListInt(aclopAttr *attr, const char *attrName, int numValues, - const int64_t *values); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is list of floats - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param numValues [IN] number of values - * @param values [IN] pointer to values - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrListFloat(aclopAttr *attr, const char *attrName, int numValues, - const float *values); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is list of strings - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param numValues [IN] number of values - * @param values [IN] pointer to values - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrListString(aclopAttr *attr, const char *attrName, int numValues, - const char **values); - -/** - * @ingroup AscendCL - * @brief set an attribute. the type of the attribute is list of list of ints - * - * @param attr [OUT] pointer to the instance of aclopAttr - * @param attrName [IN] attribute name - * @param numLists [IN] number of lists - * @param numValues [IN] pointer to number of values of each list - * @param values [IN] pointer to values - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetAttrListListInt(aclopAttr *attr, - const char *attrName, - int numLists, - const int *numValues, - const int64_t *const values[]); - -/** - * @ingroup AscendCL - * @brief Load and execute the specified operator asynchronously - * - * @par Restriction - * @li The input and output organization of each operator is different, - * and the application needs to organize the operator strictly - * according to the operator input and output parameters when calling. 
- * @li When the user calls aclopExecute, - * the ACL finds the corresponding task according to the optype, - * the description of the input tesnsor, - * the description of the output tesnsor, and attr, and issues the execution. - * - * @param opType [IN] type of op - * @param numInputs [IN] number of inputs - * @param inputDesc [IN] pointer to array of input tensor descriptions - * @param inputs [IN] pointer to array of input buffers - * @param numOutputs [IN] number of outputs - * @param outputDesc [IN] pointer to array of output tensor descriptions - * @param outputs [OUT] pointer to array of output buffers - * @param attr [IN] pointer to instance of aclopAttr. - * may pass nullptr if the op has no attribute - * @param stream [IN] stream - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_DEPRECATED_MESSAGE("aclopExecute is deprecated, use aclopExecuteV2 instead") -ACL_FUNC_VISIBILITY aclError aclopExecute(const char *opType, - int numInputs, - const aclTensorDesc *const inputDesc[], - const aclDataBuffer *const inputs[], - int numOutputs, - const aclTensorDesc *const outputDesc[], - aclDataBuffer *const outputs[], - const aclopAttr *attr, - aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief Load and execute the specified operator - * The difference with aclopExecute is that aclopExecuteV2 will refresh outputDesc - * - * @par Restriction - * @li The input and output organization of each operator is different, - * and the application needs to organize the operator strictly - * according to the operator input and output parameters when calling. - * @li When the user calls aclopExecuteV2, - * the ACL finds the corresponding task according to the optype, - * the description of the input tesnsor, - * the description of the output tesnsor, and attr, and issues the execution. - * - * @param opType [IN] type of op - * @param numInputs [IN] number of inputs - * @param inputDesc [IN] pointer to array of input tensor descriptions - * @param inputs [IN] pointer to array of input buffers - * @param numOutputs [IN] number of outputs - * @param outputDesc [IN|OUT] pointer to array of output tensor descriptions - * @param outputs [OUT] pointer to array of output buffers - * @param attr [IN] pointer to instance of aclopAttr. - * may pass nullptr if the op has no attribute - * @param stream [IN] stream - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopExecuteV2(const char *opType, - int numInputs, - aclTensorDesc *inputDesc[], - aclDataBuffer *inputs[], - int numOutputs, - aclTensorDesc *outputDesc[], - aclDataBuffer *outputs[], - aclopAttr *attr, - aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief create a instance of aclopHandle. - * - * @param opType [IN] type of op - * @param numInputs [IN] number of inputs - * @param inputDesc [IN] pointer to array of input tensor descriptions - * @param numOutputs [IN] number of outputs - * @param outputDesc [IN] pointer to array of output tensor descriptions - * @param opAttr [IN] pointer to instance of aclopAttr. - * may pass nullptr if the op has no attribute - * @param handle [OUT] pointer to the pointer to the handle - * - * @retval ACL_SUCCESS The function is successfully executed. 
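A single-op launch with aclopExecuteV2 needs tensor descriptions and data buffers on top of the attributes. The sketch below runs a hypothetical two-input "Add" on already-allocated device buffers; aclCreateTensorDesc, aclCreateDataBuffer and aclrtSynchronizeStream are assumed to come from acl_base.h and acl_rt.h with their usual signatures:

#include "acl/acl_op.h"

static aclError RunAdd(void *devA, void *devB, void *devOut,
                       size_t bytes, aclrtStream stream)
{
    const int64_t shape[] = {1, 1024};
    aclTensorDesc *inDesc[2] = {
        aclCreateTensorDesc(ACL_FLOAT, 2, shape, ACL_FORMAT_ND),
        aclCreateTensorDesc(ACL_FLOAT, 2, shape, ACL_FORMAT_ND)
    };
    aclTensorDesc *outDesc[1] = {
        aclCreateTensorDesc(ACL_FLOAT, 2, shape, ACL_FORMAT_ND)
    };
    aclDataBuffer *inBuf[2] = {aclCreateDataBuffer(devA, bytes),
                               aclCreateDataBuffer(devB, bytes)};
    aclDataBuffer *outBuf[1] = {aclCreateDataBuffer(devOut, bytes)};

    aclError ret = aclopExecuteV2("Add", 2, inDesc, inBuf, 1, outDesc, outBuf,
                                  NULL /* no attributes */, stream);
    if (ret == ACL_SUCCESS) {
        ret = aclrtSynchronizeStream(stream);
    }
    /* cleanup with aclDestroyTensorDesc / aclDestroyDataBuffer omitted in this sketch */
    return ret;
}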
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopCreateHandle(const char *opType, - int numInputs, - const aclTensorDesc *const inputDesc[], - int numOutputs, - const aclTensorDesc *const outputDesc[], - const aclopAttr *opAttr, - aclopHandle **handle); - -/** - * @ingroup AscendCL - * @brief destroy aclopHandle instance - * - * @param handle [IN] pointer to the instance of aclopHandle - */ -ACL_FUNC_VISIBILITY void aclopDestroyHandle(aclopHandle *handle); - -/** - * @ingroup AscendCL - * @brief execute an op with the handle. - * can save op model matching cost compared with aclopExecute - * - * @param handle [IN] pointer to the instance of aclopHandle. - * The aclopCreateHandle interface has been called - * in advance to create aclopHandle type data. - * @param numInputs [IN] number of inputs - * @param inputs [IN] pointer to array of input buffers. - * The aclCreateDataBuffer interface has been called - * in advance to create aclDataBuffer type data. - * @param numOutputs [IN] number of outputs - * @param outputs [OUT] pointer to array of output buffers - * @param stream [IN] stream - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclopCreateHandle | aclCreateDataBuffer - */ -ACL_FUNC_VISIBILITY aclError aclopExecWithHandle(aclopHandle *handle, - int numInputs, - const aclDataBuffer *const inputs[], - int numOutputs, - aclDataBuffer *const outputs[], - aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief cast data type - * - * @param srcDesc [IN] source tensor desc - * @param srcBuffer [IN] source tensor buffer - * @param dstDesc [IN] destination tensor desc - * @param dstBuffer [OUT] destination tensor buffer - * @param truncate [IN] do not truncate if value is 0, truncate otherwise - * @param stream [IN] stream - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopCast(const aclTensorDesc *srcDesc, - const aclDataBuffer *srcBuffer, - const aclTensorDesc *dstDesc, - aclDataBuffer *dstBuffer, - uint8_t truncate, - aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief create a handle for casting datatype - * - * @param srcDesc [IN] source tensor desc - * @param dstDesc [IN] destination tensor desc - * @param truncate [IN] do not truncate if value is 0, truncate otherwise - * @param handle [OUT] pointer to the pointer to the handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopCreateHandleForCast(aclTensorDesc *srcDesc, - aclTensorDesc *dstDesc, - uint8_t truncate, - aclopHandle **handle); - - -/** - * @ingroup AscendCL - * @brief create kernel - * - * @param opType [IN] op type - * @param kernelId [IN] kernel id - * @param kernelName [IN] kernel name - * @param binData [IN] kernel bin data - * @param binSize [IN] kernel bin size - * @param enginetype [IN] enigne type - * @param deallocator [IN] callback function for deallocating bin data, - * null if bin data to be deallocated by caller - * - * @retval ACL_SUCCESS The function is successfully executed. 
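Compared with aclopExecuteV2, the handle path pays the op-model matching cost once. A sketch of creating the handle up front and reusing it per iteration; the descriptors, buffers and op type mirror the earlier execute sketch and are assumed to have been prepared by the caller:

#include "acl/acl_op.h"

static aclError RunManyTimes(const aclTensorDesc *const inDesc[], int numInputs,
                             const aclTensorDesc *const outDesc[], int numOutputs,
                             const aclDataBuffer *const inputs[],
                             aclDataBuffer *const outputs[],
                             aclrtStream stream, int iterations)
{
    aclopHandle *handle = NULL;
    aclError ret = aclopCreateHandle("Add", numInputs, inDesc, numOutputs, outDesc,
                                     NULL /* no attributes */, &handle);
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    for (int i = 0; i < iterations && ret == ACL_SUCCESS; ++i) {
        ret = aclopExecWithHandle(handle, numInputs, inputs, numOutputs, outputs, stream);
    }
    aclopDestroyHandle(handle);
    return ret;
}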
- * @retval OtherValues Failure - * - * @see aclopCompile - */ -ACL_FUNC_VISIBILITY aclError aclopCreateKernel(const char *opType, - const char *kernelId, - const char *kernelName, - void *binData, - int binSize, - aclopEngineType enginetype, - aclDataDeallocator deallocator); - - -/** - * @ingroup AscendCL - * @brief create kernel - * - * @param numInputs [IN] number of inputs - * @param inputDesc [IN] pointer to array of input tensor descriptions - * @param numOutputs [IN] number of outputs - * @param outputDesc [IN] pointer to array of output tensor descriptions - * @param opAttr [IN] pointer to instance of aclopAttr - * @param aclopKernelDesc [IN] pointer to instance of aclopKernelDesc - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -typedef aclError (*aclopCompileFunc)(int numInputs, - const aclTensorDesc *const inputDesc[], - int numOutputs, - const aclTensorDesc *const outputDesc[], - const aclopAttr *opAttr, - aclopKernelDesc *aclopKernelDesc); - -/** - * @ingroup AscendCL - * @brief register compile function - * - * @param opType [IN] op type - * @param func [IN] compile function - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclopUnregisterCompileFunc - */ -ACL_FUNC_VISIBILITY aclError aclopRegisterCompileFunc(const char *opType, aclopCompileFunc func); - -/** - * @ingroup AscendCL - * @brief unregister compile function - * - * @param opType [IN] op type - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopUnregisterCompileFunc(const char *opType); - -/** - * @ingroup AscendCL - * @brief set kernel args - * - * @param kernelDesc [IN] pointer to instance of aclopKernelDesc - * @param kernelId [IN] kernel id - * @param blockDim [IN] block dim - * @param args [IN] args - * @param argSize [IN] size in bytes of args - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetKernelArgs(aclopKernelDesc *kernelDesc, - const char *kernelId, - uint32_t blockDim, - const void *args, - uint32_t argSize); - -/** - * @ingroup AscendCL - * @brief set workspace sizes - * - * @param kernelDesc [IN] pointer to instance of aclopKernelDesc - * @param numWorkspaces [IN] number of workspaces - * @param workspaceSizes [IN] pointer to array of sizes of workspaces - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetKernelWorkspaceSizes(aclopKernelDesc *kernelDesc, int numWorkspaces, - size_t *workspaceSizes); - -/** - * @ingroup AscendCL - * @brief compile op with dynamic shape - * - * @param opType [IN] op type - * @param numInputs [IN] number of inputs - * @param inputDesc [IN] pointer to array of input tensor descriptions - * @param numOutputs [IN] number of outputs - * @param outputDesc [IN] pointer to array of output tensor descriptions - * @param attr [IN] pointer to instance of aclopAttr. - * may pass nullptr if the op has no attribute - * - * @retval ACL_SUCCESS The function is successfully executed. 
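The registration pair above lets a caller plug in its own kernel selection for an op type: the callback receives the I/O descriptions and fills the aclopKernelDesc with a kernel id, block dim, args and workspace sizes. A sketch of a callback matching the aclopCompileFunc typedef; the kernel id, the args layout and the op type are placeholders and assume the corresponding kernel binary was registered earlier with aclopCreateKernel:

#include <string.h>
#include "acl/acl_op.h"

/* placeholder args structure for this sketch; a real one must match the kernel */
typedef struct { uint64_t inputAddr; uint64_t outputAddr; } MyKernelArgs;

static aclError MyCompileFunc(int numInputs, const aclTensorDesc *const inputDesc[],
                              int numOutputs, const aclTensorDesc *const outputDesc[],
                              const aclopAttr *opAttr, aclopKernelDesc *kernelDesc)
{
    (void)numInputs; (void)inputDesc; (void)numOutputs; (void)outputDesc; (void)opAttr;
    MyKernelArgs args;
    memset(&args, 0, sizeof(args));
    aclError ret = aclopSetKernelArgs(kernelDesc, "my_add_kernel_0", 1U,
                                      &args, (uint32_t)sizeof(args));
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    size_t workspaceSizes[1] = {0};
    return aclopSetKernelWorkspaceSizes(kernelDesc, 1, workspaceSizes);
}

/* registration, typically done once at start-up */
static aclError RegisterMyCompiler(void)
{
    return aclopRegisterCompileFunc("Add", MyCompileFunc);
}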
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopUpdateParams(const char *opType, - int numInputs, - const aclTensorDesc *const inputDesc[], - int numOutputs, - const aclTensorDesc *const outputDesc[], - const aclopAttr *attr); - -/** - * @ingroup AscendCL - * @brief inferShape the specified operator synchronously - * - * @param opType [IN] type of op - * @param numInputs [IN] number of inputs - * @param inputDesc [IN] pointer to array of input tensor descriptions - * @param inputs [IN] pointer to array of input buffers - * @param numOutputs [IN] number of outputs - * @param outputDesc [OUT] pointer to array of output tensor descriptions - * @param attr [IN] pointer to instance of aclopAttr. - * may pass nullptr if the op has no attribute - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopInferShape(const char *opType, - int numInputs, - aclTensorDesc *inputDesc[], - aclDataBuffer *inputs[], - int numOutputs, - aclTensorDesc *outputDesc[], - aclopAttr *attr); - - -#ifdef __cplusplus -} -#endif - -#endif // INC_EXTERNAL_ACL_ACL_OP_H_ diff --git a/inc/external/acl/acl_op_compiler.h b/inc/external/acl/acl_op_compiler.h deleted file mode 100644 index 65809749e..000000000 --- a/inc/external/acl/acl_op_compiler.h +++ /dev/null @@ -1,126 +0,0 @@ -/** -* @file acl_op_compiler.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ -#ifndef INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_ -#define INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_ - -#include "acl_base.h" -#include "acl_op.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef enum aclCompileType { - ACL_COMPILE_SYS, - ACL_COMPILE_UNREGISTERED -} aclopCompileType; - -typedef enum { - ACL_PRECISION_MODE, - ACL_AICORE_NUM, - ACL_AUTO_TUNE_MODE, - ACL_OP_SELECT_IMPL_MODE, - ACL_OPTYPELIST_FOR_IMPLMODE, - ACL_OP_DEBUG_LEVEL, - ACL_DEBUG_DIR, - ACL_OP_COMPILER_CACHE_MODE, - ACL_OP_COMPILER_CACHE_DIR, - ACL_OP_PERFORMANCE_MODE -} aclCompileOpt; - -typedef enum aclCompileFlag { - ACL_OP_COMPILE_DEFAULT, - ACL_OP_COMPILE_FUZZ -} aclOpCompileFlag; - -/** - * @ingroup AscendCL - * @brief compile op - * - * @param opType [IN] op type - * @param numInputs [IN] number of inputs - * @param inputDesc [IN] pointer to array of input tensor descriptions - * @param numOutputs [IN] number of outputs - * @param outputDesc [IN] pointer to array of output tensor descriptions - * @param attr [IN] pointer to instance of aclopAttr. - * may pass nullptr if the op has no attribute - * @param engineType [IN] engine type - * @param compileFlag [IN] compile flag - * @param opPath [IN] path of op - * - * @retval ACL_SUCCESS The function is successfully executed. 
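The compile option enums above and the entry points declared just below (aclSetCompileopt, aclopCompileAndExecute) are typically combined as in the sketch that follows. The precision-mode value string and the descriptor/buffer setup, which mirrors the earlier execute sketch, are assumptions:

#include "acl/acl_op_compiler.h"

static aclError CompileAndRun(int numInputs, const aclTensorDesc *const inDesc[],
                              const aclDataBuffer *const inputs[],
                              int numOutputs, const aclTensorDesc *const outDesc[],
                              aclDataBuffer *const outputs[], aclrtStream stream)
{
    /* assumed option value; other precision modes exist */
    aclError ret = aclSetCompileopt(ACL_PRECISION_MODE, "allow_fp32_to_fp16");
    if (ret != ACL_SUCCESS) {
        return ret;
    }
    /* with ACL_COMPILE_SYS the opPath argument is left NULL here */
    return aclopCompileAndExecute("Add", numInputs, inDesc, inputs,
                                  numOutputs, outDesc, outputs,
                                  NULL /* no attributes */, ACL_ENGINE_SYS,
                                  ACL_COMPILE_SYS, NULL, stream);
}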
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopCompile(const char *opType, - int numInputs, - const aclTensorDesc *const inputDesc[], - int numOutputs, - const aclTensorDesc *const outputDesc[], - const aclopAttr *attr, - aclopEngineType engineType, - aclopCompileType compileFlag, - const char *opPath); - -/** - * @ingroup AscendCL - * @brief compile and execute op - * - * @param opType [IN] op type - * @param numInputs [IN] number of inputs - * @param inputDesc [IN] pointer to array of input tensor descriptions - * @param inputs [IN] pointer to array of input buffers - * @param numOutputs [IN] number of outputs - * @param outputDesc [IN] pointer to array of output tensor descriptions - * @param outputs [IN] pointer to array of outputs buffers - * @param attr [IN] pointer to instance of aclopAttr. - * may pass nullptr if the op has no attribute - * @param engineType [IN] engine type - * @param compileFlag [IN] compile flag - * @param opPath [IN] path of op - * @param stream [IN] stream handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopCompileAndExecute(const char *opType, - int numInputs, const aclTensorDesc *const inputDesc[], const aclDataBuffer *const inputs[], - int numOutputs, const aclTensorDesc *const outputDesc[], aclDataBuffer *const outputs[], - const aclopAttr *attr, aclopEngineType engineType, aclopCompileType compileFlag, - const char *opPath, aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief set compile option - * - * @param aclCompileOpt [IN] compile option - * @param value [IN] pointer for the option value - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclSetCompileopt(aclCompileOpt opt, const char *value); - -/** - * @ingroup AscendCL - * @brief set compile flag - * - * @param flag [IN] compile flag, ACL_OP_COMPILE_DEFAULT means compile with default mode - * ACL_OP_COMPILE_FUZZ means compile with fuzz mode - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclopSetCompileFlag(aclOpCompileFlag flag); - -#ifdef __cplusplus -} -#endif - -#endif // INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_ diff --git a/inc/external/acl/acl_rt.h b/inc/external/acl/acl_rt.h deleted file mode 100644 index 38dfc2528..000000000 --- a/inc/external/acl/acl_rt.h +++ /dev/null @@ -1,1084 +0,0 @@ -/** -* @file acl_rt.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
-*/ - -#ifndef INC_EXTERNAL_ACL_ACL_RT_H_ -#define INC_EXTERNAL_ACL_ACL_RT_H_ - -#include -#include -#include "acl_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define ACL_EVENT_TIME_LINE 0x00000008u - -typedef enum aclrtRunMode { - ACL_DEVICE, - ACL_HOST, -} aclrtRunMode; - -typedef enum aclrtTsId { - ACL_TS_ID_AICORE = 0, - ACL_TS_ID_AIVECTOR = 1, - ACL_TS_ID_RESERVED = 2, -} aclrtTsId; - -typedef enum aclrtEventStatus { - ACL_EVENT_STATUS_COMPLETE = 0, - ACL_EVENT_STATUS_NOT_READY = 1, - ACL_EVENT_STATUS_RESERVED = 2, -} aclrtEventStatus; - -typedef enum aclrtEventWaitStatus { - ACL_EVENT_WAIT_STATUS_COMPLETE = 0, - ACL_EVENT_WAIT_STATUS_NOT_READY = 1, - ACL_EVENT_WAIT_STATUS_RESERVED = 0xffff, -} aclrtEventWaitStatus; - -typedef enum aclrtCallbackBlockType { - ACL_CALLBACK_NO_BLOCK, - ACL_CALLBACK_BLOCK, -} aclrtCallbackBlockType; - -typedef enum aclrtMemcpyKind { - ACL_MEMCPY_HOST_TO_HOST, - ACL_MEMCPY_HOST_TO_DEVICE, - ACL_MEMCPY_DEVICE_TO_HOST, - ACL_MEMCPY_DEVICE_TO_DEVICE, -} aclrtMemcpyKind; - -typedef enum aclrtMemMallocPolicy { - ACL_MEM_MALLOC_HUGE_FIRST, - ACL_MEM_MALLOC_HUGE_ONLY, - ACL_MEM_MALLOC_NORMAL_ONLY, - ACL_MEM_MALLOC_HUGE_FIRST_P2P, - ACL_MEM_MALLOC_HUGE_ONLY_P2P, - ACL_MEM_MALLOC_NORMAL_ONLY_P2P, -} aclrtMemMallocPolicy; - -typedef enum aclrtMemAttr { - ACL_DDR_MEM, - ACL_HBM_MEM, - ACL_DDR_MEM_HUGE, - ACL_DDR_MEM_NORMAL, - ACL_HBM_MEM_HUGE, - ACL_HBM_MEM_NORMAL, - ACL_DDR_MEM_P2P_HUGE, - ACL_DDR_MEM_P2P_NORMAL, - ACL_HBM_MEM_P2P_HUGE, - ACL_HBM_MEM_P2P_NORMAL, -} aclrtMemAttr; - -typedef enum aclrtGroupAttr { - ACL_GROUP_AICORE_INT, - ACL_GROUP_AIV_INT, - ACL_GROUP_AIC_INT, - ACL_GROUP_SDMANUM_INT, - ACL_GROUP_ASQNUM_INT, - ACL_GROUP_GROUPID_INT -} aclrtGroupAttr; - -typedef enum aclrtFloatOverflowMode { - ACL_RT_OVERFLOW_MODE_SATURATION = 0, - ACL_RT_OVERFLOW_MODE_INFNAN, - ACL_RT_OVERFLOW_MODE_UNDEF, -} aclrtFloatOverflowMode; - -typedef struct tagRtGroupInfo aclrtGroupInfo; - -typedef struct rtExceptionInfo aclrtExceptionInfo; - -typedef void (*aclrtCallback)(void *userData); - -typedef void (*aclrtExceptionInfoCallback)(aclrtExceptionInfo *exceptionInfo); - -/** - * @ingroup AscendCL - * @brief Set a callback function to handle exception information - * - * @param callback [IN] callback function to handle exception information - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSetExceptionInfoCallback(aclrtExceptionInfoCallback callback); - -/** - * @ingroup AscendCL - * @brief Get task id from exception information - * - * @param info [IN] pointer of exception information - * - * @retval The task id from exception information - * @retval 0xFFFFFFFF if info is null - */ -ACL_FUNC_VISIBILITY uint32_t aclrtGetTaskIdFromExceptionInfo(const aclrtExceptionInfo *info); - -/** - * @ingroup AscendCL - * @brief Get stream id from exception information - * - * @param info [IN] pointer of exception information - * - * @retval The stream id from exception information - * @retval 0xFFFFFFFF if info is null - */ -ACL_FUNC_VISIBILITY uint32_t aclrtGetStreamIdFromExceptionInfo(const aclrtExceptionInfo *info); - -/** - * @ingroup AscendCL - * @brief Get thread id from exception information - * - * @param info [IN] pointer of exception information - * - * @retval The thread id of fail task - * @retval 0xFFFFFFFF if info is null - */ -ACL_FUNC_VISIBILITY uint32_t aclrtGetThreadIdFromExceptionInfo(const aclrtExceptionInfo *info); - -/** - * @ingroup AscendCL - * @brief Get device id from exception information - * - * @param info [IN] pointer of exception information - * - * @retval The thread id of fail task - * @retval 0xFFFFFFFF if info is null - */ -ACL_FUNC_VISIBILITY uint32_t aclrtGetDeviceIdFromExceptionInfo(const aclrtExceptionInfo *info); - -/** - * @ingroup AscendCL - * @brief The thread that handles the callback function on the Stream - * - * @param threadId [IN] thread ID - * @param stream [IN] stream handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSubscribeReport(uint64_t threadId, aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief Add a callback function to be executed on the host - * to the task queue of the Stream - * - * @param fn [IN] Specify the callback function to be added - * The function prototype of the callback function is: - * typedef void (*aclrtCallback)(void *userData); - * @param userData [IN] User data to be passed to the callback function - * @param blockType [IN] callback block type - * @param stream [IN] stream handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtLaunchCallback(aclrtCallback fn, void *userData, aclrtCallbackBlockType blockType, - aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief After waiting for a specified time, trigger callback processing - * - * @par Function - * The thread processing callback specified by - * the aclrtSubscribeReport interface - * - * @param timeout [IN] timeout value - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtSubscribeReport - */ -ACL_FUNC_VISIBILITY aclError aclrtProcessReport(int32_t timeout); - -/** - * @ingroup AscendCL - * @brief Cancel thread registration, - * the callback function on the specified Stream - * is no longer processed by the specified thread - * - * @param threadId [IN] thread ID - * @param stream [IN] stream handle - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtUnSubscribeReport(uint64_t threadId, aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief create context and associates it with the calling thread - * - * @par Function - * The following use cases are supported: - * @li If you don't call the aclrtCreateContext interface - * to explicitly create the context, - * the system will use the default context, which is implicitly created - * when the aclrtSetDevice interface is called. - * @li If multiple contexts are created in a process - * (there is no limit on the number of contexts), - * the current thread can only use one of them at the same time. - * It is recommended to explicitly specify the context of the current thread - * through the aclrtSetCurrentContext interface to increase. - * the maintainability of the program. - * - * @param context [OUT] point to the created context - * @param deviceId [IN] device to create context on - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtSetDevice | aclrtSetCurrentContext - */ -ACL_FUNC_VISIBILITY aclError aclrtCreateContext(aclrtContext *context, int32_t deviceId); - -/** - * @ingroup AscendCL - * @brief destroy context instance - * - * @par Function - * Can only destroy context created through aclrtCreateContext interface - * - * @param context [IN] the context to destroy - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtCreateContext - */ -ACL_FUNC_VISIBILITY aclError aclrtDestroyContext(aclrtContext context); - -/** - * @ingroup AscendCL - * @brief set the context of the thread - * - * @par Function - * The following scenarios are supported: - * @li If the aclrtCreateContext interface is called in a thread to explicitly - * create a Context (for example: ctx1), the thread's Context can be specified - * without calling the aclrtSetCurrentContext interface. - * The system uses ctx1 as the context of thread1 by default. - * @li If the aclrtCreateContext interface is not explicitly created, - * the system uses the default context as the context of the thread. - * At this time, the aclrtDestroyContext interface cannot be used to release - * the default context. - * @li If the aclrtSetCurrentContext interface is called multiple times to - * set the thread's Context, the last one prevails. - * - * @par Restriction - * @li If the cevice corresponding to the context set for the thread - * has been reset, you cannot set the context as the context of the thread, - * otherwise a business exception will result. - * @li It is recommended to use the context created in a thread. - * If the aclrtCreateContext interface is called in thread A to create a context, - * and the context is used in thread B, - * the user must guarantee the execution order of tasks in the same stream - * under the same context in two threads. - * - * @param context [IN] the current context of the thread - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see aclrtCreateContext | aclrtDestroyContext - */ -ACL_FUNC_VISIBILITY aclError aclrtSetCurrentContext(aclrtContext context); - -/** - * @ingroup AscendCL - * @brief get the context of the thread - * - * @par Function - * If the user calls the aclrtSetCurrentContext interface - * multiple times to set the context of the current thread, - * then the last set context is obtained - * - * @param context [OUT] the current context of the thread - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtSetCurrentContext - */ -ACL_FUNC_VISIBILITY aclError aclrtGetCurrentContext(aclrtContext *context); - -/** - * @ingroup AscendCL - * @brief Specify the device to use for the operation - * implicitly create the default context and the default stream - * - * @par Function - * The following use cases are supported: - * @li Device can be specified in the process or thread. - * If you call the aclrtSetDevice interface multiple - * times to specify the same device, - * you only need to call the aclrtResetDevice interface to reset the device. - * @li The same device can be specified for operation - * in different processes or threads. - * @li Device is specified in a process, - * and multiple threads in the process can share this device to explicitly - * create a Context (aclrtCreateContext interface). - * @li In multi-device scenarios, you can switch to other devices - * through the aclrtSetDevice interface in the process. - * - * @param deviceId [IN] the device id - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtResetDevice |aclrtCreateContext - */ -ACL_FUNC_VISIBILITY aclError aclrtSetDevice(int32_t deviceId); - -/** - * @ingroup AscendCL - * @brief Reset the current operating Device and free resources on the device, - * including the default context, the default stream, - * and all streams created under the default context, - * and synchronizes the interface. - * If the task under the default context or stream has not been completed, - * the system will wait for the task to complete before releasing it. - * - * @par Restriction - * @li The Context, Stream, and Event that are explicitly created - * on the device to be reset. Before resetting, - * it is recommended to follow the following interface calling sequence, - * otherwise business abnormalities may be caused. - * @li Interface calling sequence: - * call aclrtDestroyEvent interface to release Event or - * call aclrtDestroyStream interface to release explicitly created Stream-> - * call aclrtDestroyContext to release explicitly created Context-> - * call aclrtResetDevice interface - * - * @param deviceId [IN] the device id - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtResetDevice(int32_t deviceId); - -/** - * @ingroup AscendCL - * @brief get target device of current thread - * - * @param deviceId [OUT] the device id - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtGetDevice(int32_t *deviceId); - -/** - * @ingroup AscendCL - * @brief get target side - * - * @param runMode [OUT] the run mode - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtGetRunMode(aclrtRunMode *runMode); - -/** - * @ingroup AscendCL - * @brief Wait for compute device to finish - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSynchronizeDevice(void); - -/** - * @ingroup AscendCL - * @brief Set Scheduling TS - * - * @param tsId [IN] the ts id - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSetTsDevice(aclrtTsId tsId); - -/** - * @ingroup AscendCL - * @brief get total device number. - * - * @param count [OUT] the device number - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtGetDeviceCount(uint32_t *count); - -/** - * @ingroup AscendCL - * @brief create event instance - * - * @param event [OUT] created event - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtCreateEvent(aclrtEvent *event); - -/** - * @ingroup AscendCL - * @brief create event instance with flag - * - * @param event [OUT] created event - * @param flag [IN] event flag - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtCreateEventWithFlag(aclrtEvent *event, uint32_t flag); - -/** - * @ingroup AscendCL - * @brief destroy event instance - * - * @par Function - * Only events created through the aclrtCreateEvent interface can be - * destroyed, synchronous interfaces. When destroying an event, - * the user must ensure that the tasks involved in the aclrtSynchronizeEvent - * interface or the aclrtStreamWaitEvent interface are completed before - * they are destroyed. - * - * @param event [IN] event to destroy - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtCreateEvent | aclrtSynchronizeEvent | aclrtStreamWaitEvent - */ -ACL_FUNC_VISIBILITY aclError aclrtDestroyEvent(aclrtEvent event); - -/** - * @ingroup AscendCL - * @brief Record an Event in the Stream - * - * @param event [IN] event to record - * @param stream [IN] stream handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtRecordEvent(aclrtEvent event, aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief Reset an event - * - * @par Function - * Users need to make sure to wait for the tasks in the Stream - * to complete before resetting the Event - * - * @param event [IN] event to reset - * @param stream [IN] stream handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtResetEvent(aclrtEvent event, aclrtStream stream); - - /** - * @ingroup AscendCL - * @brief Queries an event's status - * - * @param event [IN] event to query - * @param status [OUT] event status - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtQueryEvent(aclrtEvent event, aclrtEventStatus *status); - -/** -* @ingroup AscendCL -* @brief Queries an event's wait-status -* -* @param event [IN] event to query -* @param status [OUT] event wait-status -* -* @retval ACL_SUCCESS The function is successfully executed. 
-* @retval OtherValues Failure -*/ -ACL_FUNC_VISIBILITY aclError aclrtQueryEventWaitStatus(aclrtEvent event, aclrtEventWaitStatus *status); - -/** - * @ingroup AscendCL - * @brief Block Host Running, wait event to be complete - * - * @param event [IN] event to wait - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSynchronizeEvent(aclrtEvent event); - -/** - * @ingroup AscendCL - * @brief computes the elapsed time between events. - * - * @param ms [OUT] time between start and end in ms - * @param start [IN] starting event - * @param end [IN] ending event - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtCreateEvent | aclrtRecordEvent | aclrtSynchronizeStream - */ -ACL_FUNC_VISIBILITY aclError aclrtEventElapsedTime(float *ms, aclrtEvent startEvent, aclrtEvent endEvent); - -/** - * @ingroup AscendCL - * @brief alloc memory on device - * - * @par Function - * alloc for size linear memory on device - * and return a pointer to allocated memory by *devPtr - * - * @par Restriction - * @li The memory requested by the aclrtMalloc interface needs to be released - * through the aclrtFree interface. - * @li Before calling the media data processing interface, - * if you need to apply memory on the device to store input or output data, - * you need to call acldvppMalloc to apply for memory. - * - * @param devPtr [OUT] pointer to pointer to allocated memory on device - * @param size [IN] alloc memory size - * @param policy [IN] memory alloc policy - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtFree | acldvppMalloc | aclrtMallocCached - */ -ACL_FUNC_VISIBILITY aclError aclrtMalloc(void **devPtr, - size_t size, - aclrtMemMallocPolicy policy); - -/** - * @ingroup AscendCL - * @brief allocate memory on device with cache - * - * @par Function - * alloc for size linear memory on device - * and return a pointer to allocated memory by *devPtr - * - * @par Restriction - * @li The memory requested by the aclrtMallocCached interface needs to be released - * through the aclrtFree interface. - * - * @param devPtr [OUT] pointer to pointer to allocated memory on device - * @param size [IN] alloc memory size - * @param policy [IN] memory alloc policy - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtFree | aclrtMalloc - */ -ACL_FUNC_VISIBILITY aclError aclrtMallocCached(void **devPtr, - size_t size, - aclrtMemMallocPolicy policy); - -/** - * @ingroup AscendCL - * @brief flush cache data to ddr - * - * @param devPtr [IN] the pointer that flush data to ddr - * @param size [IN] flush size - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtMemFlush(void *devPtr, size_t size); - -/** - * @ingroup AscendCL - * @brief invalidate cache data - * - * @param devPtr [IN] pointer to invalidate cache data - * @param size [IN] invalidate size - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtMemInvalidate(void *devPtr, size_t size); - -/** - * @ingroup AscendCL - * @brief free device memory - * - * @par Function - * can only free memory allocated through the aclrtMalloc interface - * - * @param devPtr [IN] Pointer to memory to be freed - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtMalloc - */ -ACL_FUNC_VISIBILITY aclError aclrtFree(void *devPtr); - -/** - * @ingroup AscendCL - * @brief alloc memory on host - * - * @par Restriction - * @li The requested memory cannot be used in the Device - * and needs to be explicitly copied to the Device. - * @li The memory requested by the aclrtMallocHost interface - * needs to be released through the aclrtFreeHost interface. - * - * @param hostPtr [OUT] pointer to pointer to allocated memory on the host - * @param size [IN] alloc memory size - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtFreeHost - */ -ACL_FUNC_VISIBILITY aclError aclrtMallocHost(void **hostPtr, size_t size); - -/** - * @ingroup AscendCL - * @brief free host memory - * - * @par Function - * can only free memory allocated through the aclrtMallocHost interface - * - * @param hostPtr [IN] free memory pointer - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtMallocHost - */ -ACL_FUNC_VISIBILITY aclError aclrtFreeHost(void *hostPtr); - -/** - * @ingroup AscendCL - * @brief synchronous memory replication between host and device - * - * @param dst [IN] destination address pointer - * @param destMax [IN] Max length of the destination address memory - * @param src [IN] source address pointer - * @param count [IN] the length of byte to copy - * @param kind [IN] memcpy type - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtMemcpy(void *dst, - size_t destMax, - const void *src, - size_t count, - aclrtMemcpyKind kind); - -/** - * @ingroup AscendCL - * @brief Initialize memory and set contents of memory to specified value - * - * @par Function - * The memory to be initialized is on the Host or device side, - * and the system determines whether - * it is host or device according to the address - * - * @param devPtr [IN] Starting address of memory - * @param maxCount [IN] Max length of destination address memory - * @param value [IN] Set value - * @param count [IN] The length of memory - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtMemset(void *devPtr, size_t maxCount, int32_t value, size_t count); - -/** - * @ingroup AscendCL - * @brief Asynchronous memory replication between Host and Device - * - * @par Function - * After calling this interface, - * be sure to call the aclrtSynchronizeStream interface to ensure that - * the task of memory replication has been completed - * - * @par Restriction - * @li For on-chip Device-to-Device memory copy, - * both the source and destination addresses must be 64-byte aligned - * - * @param dst [IN] destination address pointer - * @param destMax [IN] Max length of destination address memory - * @param src [IN] source address pointer - * @param count [IN] the number of byte to copy - * @param kind [IN] memcpy type - * @param stream [IN] asynchronized task stream - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtSynchronizeStream - */ -ACL_FUNC_VISIBILITY aclError aclrtMemcpyAsync(void *dst, - size_t destMax, - const void *src, - size_t count, - aclrtMemcpyKind kind, - aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief synchronous memory replication of two-dimensional matrix between host and device - * - * @param dst [IN] destination address pointer - * @param dpitch [IN] pitch of destination memory - * @param src [IN] source address pointer - * @param spitch [IN] pitch of source memory - * @param width [IN] width of matrix transfer - * @param height [IN] height of matrix transfer - * @param kind [IN] memcpy type - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtMemcpy2d(void *dst, - size_t dpitch, - const void *src, - size_t spitch, - size_t width, - size_t height, - aclrtMemcpyKind kind); - -/** - * @ingroup AscendCL - * @brief asynchronous memory replication of two-dimensional matrix between host and device - * - * @param dst [IN] destination address pointer - * @param dpitch [IN] pitch of destination memory - * @param src [IN] source address pointer - * @param spitch [IN] pitch of source memory - * @param width [IN] width of matrix transfer - * @param height [IN] height of matrix transfer - * @param kind [IN] memcpy type - * @param stream [IN] asynchronized task stream - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtMemcpy2dAsync(void *dst, - size_t dpitch, - const void *src, - size_t spitch, - size_t width, - size_t height, - aclrtMemcpyKind kind, - aclrtStream stream); - -/** -* @ingroup AscendCL -* @brief Asynchronous initialize memory -* and set contents of memory to specified value async -* -* @par Function - * The memory to be initialized is on the Host or device side, - * and the system determines whether - * it is host or device according to the address - * -* @param devPtr [IN] destination address pointer -* @param maxCount [IN] Max length of destination address memory -* @param value [IN] set value -* @param count [IN] the number of byte to set -* @param stream [IN] asynchronized task stream -* -* @retval ACL_SUCCESS The function is successfully executed. 
-* @retval OtherValues Failure -* -* @see aclrtSynchronizeStream -*/ -ACL_FUNC_VISIBILITY aclError aclrtMemsetAsync(void *devPtr, - size_t maxCount, - int32_t value, - size_t count, - aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief create stream instance - * - * @param stream [OUT] the created stream - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtCreateStream(aclrtStream *stream); - -/** - * @ingroup AscendCL - * @brief destroy stream instance - * - * @par Function - * Can only destroy streams created through the aclrtCreateStream interface - * - * @par Restriction - * Before calling the aclrtDestroyStream interface to destroy - * the specified Stream, you need to call the aclrtSynchronizeStream interface - * to ensure that the tasks in the Stream have been completed. - * - * @param stream [IN] the stream to destroy - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtCreateStream | aclrtSynchronizeStream - */ -ACL_FUNC_VISIBILITY aclError aclrtDestroyStream(aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief block the host until all tasks - * in the specified stream have completed - * - * @param stream [IN] the stream to wait - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSynchronizeStream(aclrtStream stream); - -/** - * @ingroup AscendCL - * @brief Blocks the operation of the specified Stream until - * the specified Event is completed. - * Support for multiple streams waiting for the same event. - * - * @param stream [IN] the wait stream If using thedefault Stream, set NULL - * @param event [IN] the event to wait - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtStreamWaitEvent(aclrtStream stream, aclrtEvent event); - -/** - * @ingroup AscendCL - * @brief set group - * - * @par Function - * set the task to the corresponding group - * - * @param groupId [IN] group id - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtGetGroupCount | aclrtGetAllGroupInfo | aclrtGetGroupInfoDetail - */ -ACL_FUNC_VISIBILITY aclError aclrtSetGroup(int32_t groupId); - -/** - * @ingroup AscendCL - * @brief get the number of group - * - * @par Function - * get the number of group. if the number of group is zero, - * it means that group is not supported or group is not created. - * - * @param count [OUT] the number of group - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - */ -ACL_FUNC_VISIBILITY aclError aclrtGetGroupCount(uint32_t *count); - -/** - * @ingroup AscendCL - * @brief create group information - * - * @retval null for failed. - * @retval OtherValues success. - * - * @see aclrtDestroyGroupInfo - */ -ACL_FUNC_VISIBILITY aclrtGroupInfo *aclrtCreateGroupInfo(); - -/** - * @ingroup AscendCL - * @brief destroy group information - * - * @param groupInfo [IN] pointer to group information - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see aclrtCreateGroupInfo - */ -ACL_FUNC_VISIBILITY aclError aclrtDestroyGroupInfo(aclrtGroupInfo *groupInfo); - -/** - * @ingroup AscendCL - * @brief get all group information - * - * @param groupInfo [OUT] pointer to group information - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtGetGroupCount - */ -ACL_FUNC_VISIBILITY aclError aclrtGetAllGroupInfo(aclrtGroupInfo *groupInfo); - -/** - * @ingroup AscendCL - * @brief get detail information of group - * - * @param groupInfo [IN] pointer to group information - * @param groupIndex [IN] group index value - * @param attr [IN] group attribute - * @param attrValue [OUT] pointer to attribute value - * @param valueLen [IN] length of attribute value - * @param paramRetSize [OUT] pointer to real length of attribute value - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtGetGroupCount | aclrtGetAllGroupInfo - */ -ACL_FUNC_VISIBILITY aclError aclrtGetGroupInfoDetail(const aclrtGroupInfo *groupInfo, - int32_t groupIndex, - aclrtGroupAttr attr, - void *attrValue, - size_t valueLen, - size_t *paramRetSize); - -/** - * @ingroup AscendCL - * @brief checking whether current device and peer device support the p2p feature - * - * @param canAccessPeer [OUT] pointer to save the checking result - * @param deviceId [IN] current device id - * @param peerDeviceId [IN] peer device id - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtDeviceEnablePeerAccess | aclrtDeviceDisablePeerAccess - */ -ACL_FUNC_VISIBILITY aclError aclrtDeviceCanAccessPeer(int32_t *canAccessPeer, int32_t deviceId, int32_t peerDeviceId); - -/** - * @ingroup AscendCL - * @brief enable the peer device to support the p2p feature - * - * @param peerDeviceId [IN] the peer device id - * @param flags [IN] reserved field, now it must be zero - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtDeviceCanAccessPeer | aclrtDeviceDisablePeerAccess - */ -ACL_FUNC_VISIBILITY aclError aclrtDeviceEnablePeerAccess(int32_t peerDeviceId, uint32_t flags); - -/** - * @ingroup AscendCL - * @brief disable the peer device to support the p2p function - * - * @param peerDeviceId [IN] the peer device id - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see aclrtDeviceCanAccessPeer | aclrtDeviceEnablePeerAccess - */ -ACL_FUNC_VISIBILITY aclError aclrtDeviceDisablePeerAccess(int32_t peerDeviceId); - -/** - * @ingroup AscendCL - * @brief Obtain the free memory and total memory of specified attribute. - * the specified memory include normal memory and huge memory. - * - * @param attr [IN] the memory attribute of specified device - * @param free [OUT] the free memory of specified device - * @param total [OUT] the total memory of specified device. - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtGetMemInfo(aclrtMemAttr attr, size_t *free, size_t *total); - -/** - * @ingroup AscendCL - * @brief Set the timeout interval for waitting of op - * - * @param timeout [IN] op wait timeout - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSetOpWaitTimeout(uint32_t timeout); - -/** - * @ingroup AscendCL - * @brief enable or disable overflow switch on some stream - * @param stream [IN] set overflow switch on this stream - * @param flag [IN] 0 : disable 1 : enable - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSetStreamOverflowSwitch(aclrtStream stream, uint32_t flag); - -/** - * @ingroup AscendCL - * @brief get overflow switch on some stream - * @param stream [IN] get overflow switch on this stream - * @param flag [OUT] current overflow switch, 0 : disable others : enable - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtGetStreamOverflowSwitch(aclrtStream stream, uint32_t *flag); - -/** - * @ingroup AscendCL - * @brief set saturation mode - * @param mode [IN] target saturation mode - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtSetDeviceSatMode(aclrtFloatOverflowMode mode); - -/** - * @ingroup AscendCL - * @brief get saturation mode - * @param mode [OUT] get saturation mode - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError aclrtGetDeviceSatMode(aclrtFloatOverflowMode *mode); - -#ifdef __cplusplus -} -#endif - -#endif // INC_EXTERNAL_ACL_ACL_RT_H_ - diff --git a/inc/external/acl/acl_tdt.h b/inc/external/acl/acl_tdt.h deleted file mode 100644 index 4c7eeee83..000000000 --- a/inc/external/acl/acl_tdt.h +++ /dev/null @@ -1,307 +0,0 @@ -/** -* @file acl_tdt.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ - -#ifndef INC_EXTERNAL_ACL_ACL_TDT_H_ -#define INC_EXTERNAL_ACL_ACL_TDT_H_ - -#include "acl/acl_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -enum acltdtTensorType { - ACL_TENSOR_DATA_UNDEFINED = -1, - ACL_TENSOR_DATA_TENSOR, - ACL_TENSOR_DATA_END_OF_SEQUENCE, - ACL_TENSOR_DATA_ABNORMAL -}; - -typedef struct acltdtDataItem acltdtDataItem; -typedef struct acltdtDataset acltdtDataset; -typedef struct acltdtChannelHandle acltdtChannelHandle; - -/** - * @ingroup AscendCL - * @brief Get tensor type from item - * - * @param dataItem [IN] pointer to the data item - * - * @retval Tensor type. - * @retval ACL_DT_UNDEFINED if dataItem is null - */ -ACL_FUNC_VISIBILITY acltdtTensorType acltdtGetTensorTypeFromItem(const acltdtDataItem *dataItem); - -/** - * @ingroup AscendCL - * @brief Get data type from item - * - * @param dataItem [IN] pointer to the data item - * - * @retval Data type. 
- * @retval ACL_DT_UNDEFINED if dataItem is null - */ -ACL_FUNC_VISIBILITY aclDataType acltdtGetDataTypeFromItem(const acltdtDataItem *dataItem); - -/** - * @ingroup AscendCL - * @brief Get data address from item - * - * @param dataItem [IN] pointer to data item - * - * @retval null for failed - * @retval OtherValues success -*/ -ACL_FUNC_VISIBILITY void *acltdtGetDataAddrFromItem(const acltdtDataItem *dataItem); - -/** - * @ingroup AscendCL - * @brief Get data size from item - * - * @param dataItem [IN] pointer to data item - * - * @retval 0 for failed - * @retval OtherValues success -*/ -ACL_FUNC_VISIBILITY size_t acltdtGetDataSizeFromItem(const acltdtDataItem *dataItem); - -/** - * @ingroup AscendCL - * @brief Get dim's number from item - * - * @param dataItem [IN] pointer to data item - * - * @retval 0 for failed - * @retval OtherValues success -*/ -ACL_FUNC_VISIBILITY size_t acltdtGetDimNumFromItem(const acltdtDataItem *dataItem); - -/** - * @ingroup AscendCL - * @brief Get dims from item - * - * @param dataItem [IN] the struct of data item - * @param dims [IN|OUT] pointer to the dims of dataTtem - * @param dimNum [IN] the size of the dims - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError acltdtGetDimsFromItem(const acltdtDataItem *dataItem, int64_t *dims, size_t dimNum); - -/** - * @ingroup AscendCL - * @brief Create the struct of data item - * - * @param tdtType [IN] Tdt tensor type - * @param dims [IN] pointer of tdtDataItem's dims - * @param dimNum [IN] Dim number - * @param dataType [IN] Data type - * @param data [IN] Data pointer - * @param size [IN] Data size - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtDestroyDataItem - */ -ACL_FUNC_VISIBILITY acltdtDataItem *acltdtCreateDataItem(acltdtTensorType tdtType, - const int64_t *dims, - size_t dimNum, - aclDataType dataType, - void *data, - size_t size); - -/** - * @ingroup AscendCL - * @brief Destroy the struct of data item - * - * @param dataItem [IN] pointer to the data item - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateDataItem - */ -ACL_FUNC_VISIBILITY aclError acltdtDestroyDataItem(acltdtDataItem *dataItem); - -/** - * @ingroup AscendCL - * @brief Create the tdt dataset - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtDestroyDataset - */ -ACL_FUNC_VISIBILITY acltdtDataset *acltdtCreateDataset(); - -/** - * @ingroup AscendCL - * @brief Destroy the tdt dataset - * - * @param dataset [IN] pointer to the dataset - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateDataset - */ -ACL_FUNC_VISIBILITY aclError acltdtDestroyDataset(acltdtDataset *dataset); - -/** - * @ingroup AscendCL - * @brief Get the data item - * - * @param dataset [IN] pointer to the dataset - * @param index [IN] index of the dataset - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtAddDataItem - */ -ACL_FUNC_VISIBILITY acltdtDataItem *acltdtGetDataItem(const acltdtDataset *dataset, size_t index); - -/** - * @ingroup AscendCL - * @brief Get the data item - * - * @param dataset [OUT] pointer to the dataset - * @param dataItem [IN] pointer to the data item - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see acltdtGetDataItem - */ -ACL_FUNC_VISIBILITY aclError acltdtAddDataItem(acltdtDataset *dataset, acltdtDataItem *dataItem); - -/** - * @ingroup AscendCL - * @brief Get the size of dataset - * - * @param dataset [IN] pointer to the dataset - * - * @retval 0 for failed - * @retval OtherValues success - */ -ACL_FUNC_VISIBILITY size_t acltdtGetDatasetSize(const acltdtDataset *dataset); - -/** - * @ingroup AscendCL - * @brief Stop the channel - * - * @param handle [IN] pointer to the channel handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateChannel | acltdtDestroyChannel - */ -ACL_FUNC_VISIBILITY aclError acltdtStopChannel(acltdtChannelHandle *handle); - -/** - * @ingroup AscendCL - * @brief Create the channel - * - * @param deviceId [IN] the device id - * @param name [IN] the name of channel - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtStopChannel | acltdtDestroyChannel - */ -ACL_FUNC_VISIBILITY acltdtChannelHandle *acltdtCreateChannel(uint32_t deviceId, const char *name); - -/** - * @ingroup AscendCL - * @brief Create the channel with max size - * - * @param deviceId [IN] the device id - * @param name [IN] the name of channel - * @param capacity [IN] the capacity of channel - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtDestroyChannel - */ -ACL_FUNC_VISIBILITY acltdtChannelHandle *acltdtCreateChannelWithCapacity(uint32_t deviceId, - const char *name, - size_t capacity); - -/** - * @ingroup AscendCL - * @brief Destroy the channel - * - * @param handle [IN] pointer to the channel handle - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateChannel | acltdtStopChannel - */ -ACL_FUNC_VISIBILITY aclError acltdtDestroyChannel(acltdtChannelHandle *handle); - -/** - * @ingroup AscendCL - * @brief Send tensor to device - * - * @param handle [IN] pointer to the channel handle - * @param dataset [IN] pointer to the dataset - * @param timeout [IN] to be reserved, now it must be -1 - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtReceiveTensor - */ -ACL_FUNC_VISIBILITY aclError acltdtSendTensor(const acltdtChannelHandle *handle, - const acltdtDataset *dataset, - int32_t timeout); - -/** - * @ingroup AscendCL - * @brief Receive tensor from device - * - * @param handle [IN] pointer to the channel handle - * @param dataset [OUT] pointer to the dataset - * @param timeout [IN] to be reserved, now it must be -1 - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtSendTensor - */ -ACL_FUNC_VISIBILITY aclError acltdtReceiveTensor(const acltdtChannelHandle *handle, - acltdtDataset *dataset, - int32_t timeout); - -/** - * @ingroup AscendCL - * @brief query the size of the channel - * - * @param handle [IN] pointer to the channel handle - * @param size [OUT] current size of this channel - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - */ -ACL_FUNC_VISIBILITY aclError acltdtQueryChannelSize(const acltdtChannelHandle *handle, size_t *size); - -#ifdef __cplusplus -} -#endif - -#endif //INC_EXTERNAL_ACL_ACL_TDT_H_ - diff --git a/inc/external/acl/acl_tdt_queue.h b/inc/external/acl/acl_tdt_queue.h deleted file mode 100644 index c751f29ea..000000000 --- a/inc/external/acl/acl_tdt_queue.h +++ /dev/null @@ -1,445 +0,0 @@ -/** -* @file acl_tdt_queue.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2020-2021. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ - -#ifndef INC_EXTERNAL_ACL_ACL_TDT_QUEUE_H_ -#define INC_EXTERNAL_ACL_ACL_TDT_QUEUE_H_ - -#include "acl/acl_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define ACL_TDT_QUEUE_PERMISSION_MANAGE 1 -#define ACL_TDT_QUEUE_PERMISSION_DEQUEUE 2 -#define ACL_TDT_QUEUE_PERMISSION_ENQUEUE 4 - -typedef void *acltdtBuf; -typedef struct tagMemQueueAttr acltdtQueueAttr; -typedef struct acltdtQueueRouteList acltdtQueueRouteList; -typedef struct acltdtQueueRouteQueryInfo acltdtQueueRouteQueryInfo; -typedef struct acltdtQueueRoute acltdtQueueRoute; - -typedef enum { - ACL_TDT_QUEUE_NAME_PTR = 0, - ACL_TDT_QUEUE_DEPTH_UINT32 -} acltdtQueueAttrType; - -typedef enum { - ACL_TDT_QUEUE_ROUTE_SRC_UINT32 = 0, - ACL_TDT_QUEUE_ROUTE_DST_UINT32, - ACL_TDT_QUEUE_ROUTE_STATUS_INT32 -} acltdtQueueRouteParamType; - -typedef enum { - ACL_TDT_QUEUE_ROUTE_QUERY_SRC = 0, - ACL_TDT_QUEUE_ROUTE_QUERY_DST, - ACL_TDT_QUEUE_ROUTE_QUERY_SRC_AND_DST -} acltdtQueueRouteQueryMode; - -typedef enum { - ACL_TDT_QUEUE_ROUTE_QUERY_MODE_ENUM = 0, - ACL_TDT_QUEUE_ROUTE_QUERY_SRC_ID_UINT32, - ACL_TDT_QUEUE_ROUTE_QUERY_DST_ID_UINT32 -} acltdtQueueRouteQueryInfoParamType; - -/** - * @ingroup AscendCL - * @brief create queue - * - * @param attr [IN] pointer to the queue attr - * @param qid [OUT] pointer to the qid - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtDestroyQueue - */ -ACL_FUNC_VISIBILITY aclError acltdtCreateQueue(const acltdtQueueAttr *attr, uint32_t *qid); - -/** - * @ingroup AscendCL - * @brief destroy queue - * - * @param qid [IN] qid which to be destroyed - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateQueue - */ -ACL_FUNC_VISIBILITY aclError acltdtDestroyQueue(uint32_t qid); - -/** - * @ingroup AscendCL - * @brief enqueue function - * - * @param qid [IN] qid - * @param buf [IN] acltdtBuf - * @param timeout [IN] timeout - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtDequeue - */ -ACL_FUNC_VISIBILITY aclError acltdtEnqueue(uint32_t qid, acltdtBuf buf, int32_t timeout); - -/** - * @ingroup AscendCL - * @brief dequeue function - * - * @param qid [IN] qid - * @param buf [OUT] pointer to the acltdtBuf - * @param timeout [IN] timeout - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see acltdtEnqueue - */ -ACL_FUNC_VISIBILITY aclError acltdtDequeue(uint32_t qid, acltdtBuf *buf, int32_t timeout); - -/** - * @ingroup AscendCL - * @brief grant queue to other process - * - * @param qid [IN] qid - * @param pid [IN] pid of dst process - * @param permission [IN] permission of queue - * @param timeout [IN] timeout - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see ACL_TDT_QUEUE_PERMISSION_MANAGE | ACL_TDT_QUEUE_PERMISSION_DEQUEUE | ACL_TDT_QUEUE_PERMISSION_ENQUEUE - */ -ACL_FUNC_VISIBILITY aclError acltdtGrantQueue(uint32_t qid, int32_t pid, uint32_t permission, int32_t timeout); - -/** - * @ingroup AscendCL - * @brief attach queue in current process - * - * @param qid [IN] qid - * @param timeout [IN] timeout - * @param permission [OUT] permission of queue - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtGrantQueue - */ -ACL_FUNC_VISIBILITY aclError acltdtAttachQueue(uint32_t qid, int32_t timeout, uint32_t *permission); - -/** - * @ingroup AscendCL - * @brief bind queue routes - * - * @param qRouteList [IN|OUT] pointer to the route list - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError acltdtBindQueueRoutes(acltdtQueueRouteList *qRouteList); - -/** - * @ingroup AscendCL - * @brief unbind queue routes - * - * @param qRouteList [IN|OUT] pointer to the route list - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError acltdtUnbindQueueRoutes(acltdtQueueRouteList *qRouteList); - -/** - * @ingroup AscendCL - * @brief query queue routes according to query mode - * - * @param queryInfo [IN] pointer to the queue route query info - * @param qRouteList [IN|OUT] pointer to the route list - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ACL_FUNC_VISIBILITY aclError acltdtQueryQueueRoutes(const acltdtQueueRouteQueryInfo *queryInfo, - acltdtQueueRouteList *qRouteList); - -/** - * @ingroup AscendCL - * @brief alloc acltdtBuf - * - * @param size [IN] alloc buf size - * @param buf [OUT] pointer to the acltdtBuf - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtFreeBuf - */ -ACL_FUNC_VISIBILITY aclError acltdtAllocBuf(size_t size, acltdtBuf *buf); - -/** - * @ingroup AscendCL - * @brief free acltdtBuf - * - * @param buf [IN] pointer to the acltdtBuf - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtAllocBuf - */ -ACL_FUNC_VISIBILITY aclError acltdtFreeBuf(acltdtBuf buf); - -/** - * @ingroup AscendCL - * @brief get data buf address - * - * @param buf [IN] acltdtBuf - * @param dataPtr [OUT] pointer to the data ptr which is acquired from acltdtBuf - * @param size [OUT] pointer to the size - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see acltdtAllocBuf - */ -ACL_FUNC_VISIBILITY aclError acltdtGetBufData(const acltdtBuf buf, void **dataPtr, size_t *size); - -/** - * @ingroup AscendCL - * @brief Create the queue attr - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtDestroyQueueAttr - */ -ACL_FUNC_VISIBILITY acltdtQueueAttr *acltdtCreateQueueAttr(); - -/** - * @ingroup AscendCL - * @brief Destroy the queue attr - * - * @param attr [IN] pointer to the queue attr - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateQueueAttr - */ -ACL_FUNC_VISIBILITY aclError acltdtDestroyQueueAttr(const acltdtQueueAttr *attr); - -/** - * @ingroup AscendCL - * @brief Set parameter for queue attr - * - * @param attr [IN|OUT] pointer to the queue attr - * @param type [IN] parameter type - * @param len [IN] parameter length - * @param param [IN] pointer to parameter value - * - * @retval ACL_SUCCESS for success, other for failure - * - * @see acltdtCreateQueueAttr - */ -ACL_FUNC_VISIBILITY aclError acltdtSetQueueAttr(acltdtQueueAttr *attr, - acltdtQueueAttrType type, - size_t len, - const void *param); - -/** - * @ingroup AscendCL - * - * @brief Get parameter for queue attr. - * - * @param attr [IN] pointer to the queue attr - * @param type [IN] parameter type - * @param len [IN] parameter length - * @param paramRetSize [OUT] pointer to parameter real length - * @param param [OUT] pointer to parameter value - * - * @retval ACL_SUCCESS for success, other for failure - * - * @see acltdtCreateQueueAttr - */ -ACL_FUNC_VISIBILITY aclError acltdtGetQueueAttr(const acltdtQueueAttr *attr, - acltdtQueueAttrType type, - size_t len, - size_t *paramRetSize, - void *param); - -/** - * @ingroup AscendCL - * @brief Create the queue route - * - * @param srcId [IN] src id of queue route - * @param dstId [IN] dst id of queue route - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtDestroyQueueRoute - */ -ACL_FUNC_VISIBILITY acltdtQueueRoute* acltdtCreateQueueRoute(uint32_t srcId, uint32_t dstId); - -/** - * @ingroup AscendCL - * @brief Destroy the queue attr - * - * @param route [IN] pointer to the queue route - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateQueueRoute - */ -ACL_FUNC_VISIBILITY aclError acltdtDestroyQueueRoute(const acltdtQueueRoute *route); - -/** - * @ingroup AscendCL - * - * @brief Get parameter for queue route. - * - * @param route [IN] pointer to the queue route - * @param type [IN] parameter type - * @param len [IN] parameter length - * @param paramRetSize [OUT] pointer to parameter real length - * @param param [OUT] pointer to parameter value - * - * @retval ACL_SUCCESS for success, other for failure - * - * @see acltdtCreateQueueRoute - */ -ACL_FUNC_VISIBILITY aclError acltdtGetQueueRouteParam(const acltdtQueueRoute *route, - acltdtQueueRouteParamType type, - size_t len, - size_t *paramRetSize, - void *param); - -/** - * @ingroup AscendCL - * @brief Create the queue route list - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtDestroyQueueRouteList - */ -ACL_FUNC_VISIBILITY acltdtQueueRouteList* acltdtCreateQueueRouteList(); - -/** - * @ingroup AscendCL - * @brief Destroy the queue route list - * - * @param routeList [IN] pointer to the queue route list - * - * @retval ACL_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - * - * @see acltdtCreateQueueRouteList - */ -ACL_FUNC_VISIBILITY aclError acltdtDestroyQueueRouteList(const acltdtQueueRouteList *routeList); - -/** - * @ingroup AscendCL - * @brief add queue route to the route list - * - * @param routeList [IN|OUT] pointer to the queue route list - * @param route [IN] pointer to the queue route - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateQueueRouteList | acltdtCreateQueueRoute - * - */ -ACL_FUNC_VISIBILITY aclError acltdtAddQueueRoute(acltdtQueueRouteList *routeList, const acltdtQueueRoute *route); - -/** - * @ingroup AscendCL - * @brief get queue route from route list - * - * @param routeList [IN] pointer to the queue route list - * @param index [IN] index of queue route in route list - * @param route [IN|OUT] pointer to the queue route - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateQueueRouteList | acltdtCreateQueueRoute - * - */ -ACL_FUNC_VISIBILITY aclError acltdtGetQueueRoute(const acltdtQueueRouteList *routeList, - size_t index, - acltdtQueueRoute *route); - -/** - * @ingroup AscendCL - * @brief get queue route num from route list - * - * @param routeList [IN] pointer to the queue route list - * - * @retval the number of queue route - * - */ -ACL_FUNC_VISIBILITY size_t acltdtGetQueueRouteNum(const acltdtQueueRouteList *routeList); - -/** - * @ingroup AscendCL - * @brief Create the queue route query info - * - * @retval null for failed - * @retval OtherValues success - * - * @see acltdtDestroyQueueRouteQueryInfo - */ -ACL_FUNC_VISIBILITY acltdtQueueRouteQueryInfo* acltdtCreateQueueRouteQueryInfo(); - -/** - * @ingroup AscendCL - * @brief Destroy the queue route query info - * - * @param info [IN] pointer to the queue route info - * - * @retval ACL_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - * - * @see acltdtCreateQueueRouteQueryInfo - * - */ -ACL_FUNC_VISIBILITY aclError acltdtDestroyQueueRouteQueryInfo(const acltdtQueueRouteQueryInfo *info); - -/** - * @ingroup AscendCL - * @brief Set parameter for queue route info - * - * @param attr [IN|OUT] pointer to the queue route info - * @param type [IN] parameter type - * @param len [IN] parameter length - * @param param [IN] pointer to parameter value - * - * @retval ACL_SUCCESS for success, other for failure - * - * @see acltdtCreateQueueRouteQueryInfo - */ -ACL_FUNC_VISIBILITY aclError acltdtSetQueueRouteQueryInfo(acltdtQueueRouteQueryInfo *param, - acltdtQueueRouteQueryInfoParamType type, - size_t len, - const void *value); - - -#ifdef __cplusplus -} -#endif - -#endif // INC_EXTERNAL_ACL_ACL_TDT_QUEUE_H_ \ No newline at end of file diff --git a/inc/external/acl/error_codes/ge_error_codes.h b/inc/external/acl/error_codes/ge_error_codes.h deleted file mode 100644 index cafc5a648..000000000 --- a/inc/external/acl/error_codes/ge_error_codes.h +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GE_GE_ERROR_CODES_H_ -#define INC_EXTERNAL_GE_GE_ERROR_CODES_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include - -#ifdef __cplusplus -extern "C" { -#endif -static const uint32_t ACL_ERROR_GE_PARAM_INVALID = 145000; -static const uint32_t ACL_ERROR_GE_EXEC_NOT_INIT = 145001; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID = 145002; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ID_INVALID = 145003; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID = 145006; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID = 145007; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID = 145008; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED = 145009; -static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID = 145011; -static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID = 145012; -static const uint32_t ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID = 145013; -static const uint32_t ACL_ERROR_GE_AIPP_BATCH_EMPTY = 145014; -static const uint32_t ACL_ERROR_GE_AIPP_NOT_EXIST = 145015; -static const uint32_t ACL_ERROR_GE_AIPP_MODE_INVALID = 145016; -static const uint32_t ACL_ERROR_GE_OP_TASK_TYPE_INVALID = 145017; -static const uint32_t ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID = 145018; -static const uint32_t ACL_ERROR_GE_PLGMGR_PATH_INVALID = 145019; -static const uint32_t ACL_ERROR_GE_FORMAT_INVALID = 145020; -static const uint32_t ACL_ERROR_GE_SHAPE_INVALID = 145021; -static const uint32_t ACL_ERROR_GE_DATATYPE_INVALID = 145022; -static const uint32_t ACL_ERROR_GE_MEMORY_ALLOCATION = 245000; -static const uint32_t ACL_ERROR_GE_MEMORY_OPERATE_FAILED = 245001; -static const uint32_t ACL_ERROR_GE_INTERNAL_ERROR = 545000; -static const uint32_t ACL_ERROR_GE_LOAD_MODEL = 545001; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED = 545002; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED = 545003; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED = 545004; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED = 545005; -static const uint32_t ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA = 545006; -static const uint32_t ACL_ERROR_GE_COMMAND_HANDLE = 545007; -static const uint32_t ACL_ERROR_GE_GET_TENSOR_INFO = 545008; -static const uint32_t ACL_ERROR_GE_UNLOAD_MODEL = 545009; - -#ifdef __cplusplus -} // namespace ge -#endif -#endif // INC_EXTERNAL_GE_GE_ERROR_CODES_H_ diff --git a/inc/external/acl/error_codes/rt_error_codes.h b/inc/external/acl/error_codes/rt_error_codes.h deleted file mode 100644 index 9fb940693..000000000 --- a/inc/external/acl/error_codes/rt_error_codes.h +++ /dev/null @@ -1,105 +0,0 @@ -/** -* @file rt_error_codes.h -* -* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
-*/ - -#ifndef __INC_EXTERNEL_RT_ERROR_CODES_H__ -#define __INC_EXTERNEL_RT_ERROR_CODES_H__ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -static const int32_t ACL_RT_SUCCESS = 0; // success - -static const int32_t ACL_ERROR_RT_PARAM_INVALID = 107000; // param invalid -static const int32_t ACL_ERROR_RT_INVALID_DEVICEID = 107001; // invalid device id -static const int32_t ACL_ERROR_RT_CONTEXT_NULL = 107002; // current context null -static const int32_t ACL_ERROR_RT_STREAM_CONTEXT = 107003; // stream not in current context -static const int32_t ACL_ERROR_RT_MODEL_CONTEXT = 107004; // model not in current context -static const int32_t ACL_ERROR_RT_STREAM_MODEL = 107005; // stream not in model -static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_INVALID = 107006; // event timestamp invalid -static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_REVERSAL = 107007; // event timestamp reversal -static const int32_t ACL_ERROR_RT_ADDR_UNALIGNED = 107008; // memory address unaligned -static const int32_t ACL_ERROR_RT_FILE_OPEN = 107009; // open file failed -static const int32_t ACL_ERROR_RT_FILE_WRITE = 107010; // write file failed -static const int32_t ACL_ERROR_RT_STREAM_SUBSCRIBE = 107011; // error subscribe stream -static const int32_t ACL_ERROR_RT_THREAD_SUBSCRIBE = 107012; // error subscribe thread -static const int32_t ACL_ERROR_RT_GROUP_NOT_SET = 107013; // group not set -static const int32_t ACL_ERROR_RT_GROUP_NOT_CREATE = 107014; // group not create -static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015; // callback not register to stream -static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016; // invalid memory type -static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017; // invalid handle -static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018; // invalid malloc type - -static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000; // feature not support -static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001; // memory allocation error -static const int32_t ACL_ERROR_RT_MEMORY_FREE = 207002; // memory free error -static const int32_t ACL_ERROR_RT_AICORE_OVER_FLOW = 207003; // aicore over flow -static const int32_t ACL_ERROR_RT_NO_DEVICE = 207004; // no device -static const int32_t ACL_ERROR_RT_RESOURCE_ALLOC_FAIL = 207005; // resource alloc fail -static const int32_t ACL_ERROR_RT_NO_PERMISSION = 207006; // no permission -static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007; // no event resource -static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008; // no stream resource -static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009; // no notify resource -static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010; // no model resource -static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011; // no cdq resource -static const int32_t ACL_ERROR_RT_OVER_LIMIT = 207012; // over limit -static const int32_t ACL_ERROR_RT_QUEUE_EMPTY = 207013; // queue is empty -static const int32_t ACL_ERROR_RT_QUEUE_FULL = 207014; // queue is full -static const int32_t ACL_ERROR_RT_REPEATED_INIT = 207015; // repeated init - -static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000; // runtime internal error -static const int32_t ACL_ERROR_RT_TS_ERROR = 507001; // ts internel error -static const int32_t ACL_ERROR_RT_STREAM_TASK_FULL = 507002; // task full in stream -static const int32_t ACL_ERROR_RT_STREAM_TASK_EMPTY = 507003; // task empty in stream -static const int32_t ACL_ERROR_RT_STREAM_NOT_COMPLETE = 507004; // stream not complete -static const 
int32_t ACL_ERROR_RT_END_OF_SEQUENCE = 507005; // end of sequence -static const int32_t ACL_ERROR_RT_EVENT_NOT_COMPLETE = 507006; // event not complete -static const int32_t ACL_ERROR_RT_CONTEXT_RELEASE_ERROR = 507007; // context release error -static const int32_t ACL_ERROR_RT_SOC_VERSION = 507008; // soc version error -static const int32_t ACL_ERROR_RT_TASK_TYPE_NOT_SUPPORT = 507009; // task type not support -static const int32_t ACL_ERROR_RT_LOST_HEARTBEAT = 507010; // ts lost heartbeat -static const int32_t ACL_ERROR_RT_MODEL_EXECUTE = 507011; // model execute failed -static const int32_t ACL_ERROR_RT_REPORT_TIMEOUT = 507012; // report timeout -static const int32_t ACL_ERROR_RT_SYS_DMA = 507013; // sys dma error -static const int32_t ACL_ERROR_RT_AICORE_TIMEOUT = 507014; // aicore timeout -static const int32_t ACL_ERROR_RT_AICORE_EXCEPTION = 507015; // aicore exception -static const int32_t ACL_ERROR_RT_AICORE_TRAP_EXCEPTION = 507016; // aicore trap exception -static const int32_t ACL_ERROR_RT_AICPU_TIMEOUT = 507017; // aicpu timeout -static const int32_t ACL_ERROR_RT_AICPU_EXCEPTION = 507018; // aicpu exception -static const int32_t ACL_ERROR_RT_AICPU_DATADUMP_RSP_ERR = 507019; // aicpu datadump response error -static const int32_t ACL_ERROR_RT_AICPU_MODEL_RSP_ERR = 507020; // aicpu model operate response error -static const int32_t ACL_ERROR_RT_PROFILING_ERROR = 507021; // profiling error -static const int32_t ACL_ERROR_RT_IPC_ERROR = 507022; // ipc error -static const int32_t ACL_ERROR_RT_MODEL_ABORT_NORMAL = 507023; // model abort normal -static const int32_t ACL_ERROR_RT_KERNEL_UNREGISTERING = 507024; // kernel unregistering -static const int32_t ACL_ERROR_RT_RINGBUFFER_NOT_INIT = 507025; // ringbuffer not init -static const int32_t ACL_ERROR_RT_RINGBUFFER_NO_DATA = 507026; // ringbuffer no data -static const int32_t ACL_ERROR_RT_KERNEL_LOOKUP = 507027; // kernel lookup error -static const int32_t ACL_ERROR_RT_KERNEL_DUPLICATE = 507028; // kernel register duplicate -static const int32_t ACL_ERROR_RT_DEBUG_REGISTER_FAIL = 507029; // debug register failed -static const int32_t ACL_ERROR_RT_DEBUG_UNREGISTER_FAIL = 507030; // debug unregister failed -static const int32_t ACL_ERROR_RT_LABEL_CONTEXT = 507031; // label not in current context -static const int32_t ACL_ERROR_RT_PROGRAM_USE_OUT = 507032; // program register num use out -static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033; // device setup error -static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034; // vector core timeout -static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035; // vector core exception -static const int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036; // vector core trap exception - -static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899; // drv internal error -static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900; // aicpu internal error -static const int32_t ACL_ERROR_RT_SOCKET_CLOSE = 507901; // hdc disconnect - -#ifdef __cplusplus -} -#endif - -#endif // __INC_EXTERNEL_RT_ERROR_CODES_H__ diff --git a/inc/external/hccl/hccl_types.h b/inc/external/hccl/hccl_types.h deleted file mode 100644 index de982588a..000000000 --- a/inc/external/hccl/hccl_types.h +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef HCCL_TYPES_H_ -#define HCCL_TYPES_H_ - -#include - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -/** - * @brief HCCL functions return value definition - */ -typedef enum { - HCCL_SUCCESS = 0, /**< success */ - HCCL_E_PARA = 1, /**< parameter error */ - HCCL_E_PTR = 2, /**< empty pointer */ - HCCL_E_MEMORY = 3, /**< memory error */ - HCCL_E_INTERNAL = 4, /**< internal error */ - HCCL_E_NOT_SUPPORT = 5, /**< not support feature */ - HCCL_E_NOT_FOUND = 6, /**< not found specific resource */ - HCCL_E_UNAVAIL = 7, /**< resource unavailable */ - HCCL_E_SYSCALL = 8, /**< call system interface error */ - HCCL_E_TIMEOUT = 9, /**< timeout */ - HCCL_E_OPEN_FILE_FAILURE = 10, /**< open file fail */ - HCCL_E_TCP_CONNECT = 11, /**< tcp connect fail */ - HCCL_E_ROCE_CONNECT = 12, /**< roce connect fail */ - HCCL_E_TCP_TRANSFER = 13, /**< tcp transfer fail */ - HCCL_E_ROCE_TRANSFER = 14, /**< roce transfer fail */ - HCCL_E_RUNTIME = 15, /**< call runtime api fail */ - HCCL_E_DRV = 16, /**< call driver api fail */ - HCCL_E_PROFILING = 17, /**< call profiling api fail */ - HCCL_E_CCE = 18, /**< call cce api fail */ - HCCL_E_NETWORK = 19, /**< call network api fail */ - HCCL_E_RESERVED /**< reserved */ -} HcclResult; - -/** - * @brief handle to HCCL communicator - */ -typedef void *HcclComm; - -/** - * @brief HCCL Reduction opperation - */ -typedef enum { - HCCL_REDUCE_SUM = 0, /**< sum */ - HCCL_REDUCE_PROD = 1, /**< prod */ - HCCL_REDUCE_MAX = 2, /**< max */ - HCCL_REDUCE_MIN = 3, /**< min */ - HCCL_REDUCE_RESERVED /**< reserved */ -} HcclReduceOp; - -/** - * @brief HCCL data type - */ -typedef enum { - HCCL_DATA_TYPE_INT8 = 0, /**< int8 */ - HCCL_DATA_TYPE_INT16 = 1, /**< int16 */ - HCCL_DATA_TYPE_INT32 = 2, /**< int32 */ - HCCL_DATA_TYPE_FP16 = 3, /**< fp16 */ - HCCL_DATA_TYPE_FP32 = 4, /**< fp32 */ - HCCL_DATA_TYPE_INT64 = 5, /**< int64 */ - HCCL_DATA_TYPE_UINT64 = 6, /**< uint64 */ - HCCL_DATA_TYPE_RESERVED /**< reserved */ -} HcclDataType; - -const uint32_t HCCL_ROOT_INFO_BYTES = 4108; // 4108: root info length - -/** - * @brief HCCL root info - */ -typedef struct HcclRootInfoDef { - char internal[HCCL_ROOT_INFO_BYTES]; -} HcclRootInfo; - -#ifdef __cplusplus -} -#endif -#endif - diff --git a/inc/graphengine/inc/external/ge/ge_api.h b/inc/graphengine/inc/external/ge/ge_api.h deleted file mode 100644 index c2cbe7947..000000000 --- a/inc/graphengine/inc/external/ge/ge_api.h +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_EXTERNAL_GE_GE_API_H_ -#define INC_EXTERNAL_GE_GE_API_H_ - -#include -#include -#include - -#include "ge/ge_api_error_codes.h" -#include "ge/ge_api_types.h" -#include "graph/graph.h" -#include "graph/tensor.h" - -namespace ge { -typedef uint32_t (*pCallBackFunc)(uint32_t graph_id, const std::map ¶ms_list); - -namespace session { -typedef uint32_t (*pCallBackFunc)(uint32_t graph_id, const std::map ¶ms_list); -} - -// Initialize GE -ATTRIBUTED_DEPRECATED(GE_FUNC_VISIBILITY Status GEInitialize(const std::map &)) -GE_FUNC_VISIBILITY Status GEInitialize(const std::map &options); - -GE_FUNC_VISIBILITY Status GEInitialize(const std::map &options); - -// Finalize GE, release all resources -GE_FUNC_VISIBILITY Status GEFinalize(); - -GE_FUNC_VISIBILITY std::string GEGetErrorMsg(); - -GE_FUNC_VISIBILITY std::string GEGetWarningMsg(); - -class GE_FUNC_VISIBILITY Session { - public: - ATTRIBUTED_DEPRECATED(Session(const std::map &)) - explicit Session(const std::map &options); - - explicit Session(const std::map &options); - - ~Session(); - - /// - /// @ingroup client - /// @brief add a graph with a specific graphId - /// @param [in] graphId graph id - /// @return Status result of function - /// - Status AddGraph(uint32_t graphId, const Graph &graph); - - /// - /// @ingroup client - /// @brief add a graph with a specific graphId and graphOptions - /// @param [in] graphId graph id - /// @param [in] graph the graph - /// @param [in] options graph options - /// @return Status result of function - /// - ATTRIBUTED_DEPRECATED(Status AddGraph(uint32_t, const Graph &, const std::map &)) - Status AddGraph(uint32_t graphId, const Graph &graph, const std::map &options); - - /// - /// @ingroup client - /// @brief add a graph with a specific graphId and graphOptions - /// @param [in] graphId graph id - /// @param [in] graph the graph - /// @param [in] options graph options - /// @return Status result of function - /// - Status AddGraph(uint32_t graphId, const Graph &graph, const std::map &options); - - /// - /// @ingroup client - /// @brief add a copy graph with a specific graphId - /// @param [in] graphId graph id - /// @param [in] graph the graph - /// @return Status result of function - /// - Status AddGraphWithCopy(uint32_t graph_id, const Graph &graph); - - /// - /// @ingroup client - /// @brief add a copy graph with a specific graphId and graphOptions - /// @param [in] graphId graph id - /// @param [in] graph the graph - /// @param [in] options graph options - /// @return Status result of function - /// - Status AddGraphWithCopy(uint32_t graph_id, const Graph &graph, const std::map &options); - - /// - /// @ingroup ge_graph - /// @brief remove a graph of the session with specific session id - /// @param [in] graphId graph id - /// @return Status result of function - /// - Status RemoveGraph(uint32_t graphId); - - /// - /// @ingroup ge_graph - /// @brief run a graph of the session with specific session id - /// @param [in] graphId graph id - /// @param [in] inputs input data - /// @param [out] outputs output data - /// @return Status result of function - /// - Status RunGraph(uint32_t graphId, const std::vector &inputs, std::vector &outputs); - - /// - /// @ingroup ge_graph - /// @brief run a graph of the session with specific session id and specific stream asynchronously - /// @param [in] graph_id graph id - /// @param [in] stream specific stream - /// @param [in] inputs input data - /// @param [out] outputs output data - /// @return Status result of function - /// - Status 
RunGraphWithStreamAsync(uint32_t graph_id, void *stream, const std::vector &inputs, - std::vector &outputs); - - /// - /// @ingroup ge_graph - /// @brief build graph in the session with specific session id - /// @param [in] graphId: graph id - /// @param [in] inputs: input data - /// @return Status result of function - /// - Status BuildGraph(uint32_t graphId, const std::vector &inputs); - - Status BuildGraph(uint32_t graphId, const std::vector &inputs); /*lint !e148*/ - - /// - /// @ingroup ge_graph - /// @brief run graph in the session with specific session id asynchronously - /// @param [in] graphId: graph id - /// @param [in] inputs: input data - /// @param [out] callback: callback while runing graph has been finished. - /// The callback function will not be checked. - /// Please ensure that the implementation of the function is trusted. - /// @return Status result of function - /// - Status RunGraphAsync(uint32_t graphId, const std::vector &inputs, RunAsyncCallback callback); - - /// - /// @ingroup ge_graph - /// @brief get variables in the session with specific session id - /// @param [in] var_names: variable names - /// @param [out] var_values: variable values - /// @return Status result of function - /// - ATTRIBUTED_DEPRECATED(Status GetVariables(const std::vector &, std::vector &)) - Status GetVariables(const std::vector &var_names, std::vector &var_values); - - /// - /// @ingroup ge_graph - /// @brief get variables in the session with specific session id - /// @param [in] var_names: variable names - /// @param [out] var_values: variable values - /// @return Status result of function - /// - Status GetVariables(const std::vector &var_names, std::vector &var_values); - - /// - /// @ingroup ge_graph - /// @brief register callback func with specific summary or checkpoint by users - /// @param [in] key: func key - /// @param [in] callback: callback specific summary or checkpoint. - /// The callback function will not be checked. - /// Please ensure that the implementation of the function is trusted. - /// @return Status result of function - /// - ATTRIBUTED_DEPRECATED(Status RegisterCallBackFunc(const char *, const session::pCallBackFunc &)) - Status RegisterCallBackFunc(const std::string &key, const pCallBackFunc &callback); - - Status RegisterCallBackFunc(const char *key, const session::pCallBackFunc &callback); - - bool IsGraphNeedRebuild(uint32_t graphId); - - private: - uint64_t sessionId_; -}; -} // namespace ge - -#endif // INC_EXTERNAL_GE_GE_API_H_ diff --git a/inc/graphengine/inc/external/ge/ge_api_error_codes.h b/inc/graphengine/inc/external/ge/ge_api_error_codes.h deleted file mode 100644 index d0d7981e7..000000000 --- a/inc/graphengine/inc/external/ge/ge_api_error_codes.h +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_EXTERNAL_GE_GE_API_ERROR_CODES_H_ -#define INC_EXTERNAL_GE_GE_API_ERROR_CODES_H_ - -#include -#include -#include "ge_error_codes.h" - -namespace ge { -#ifdef __GNUC__ -#define ATTRIBUTED_DEPRECATED(replacement) __attribute__((deprecated("Please use " #replacement " instead."))) -#else -#define ATTRIBUTED_DEPRECATED(replacement) __declspec(deprecated("Please use " #replacement " instead.")) -#endif - -class GE_FUNC_VISIBILITY StatusFactory { - public: - static StatusFactory *Instance() { - static StatusFactory instance; - return &instance; - } - - void RegisterErrorNo(uint32_t err, const std::string &desc) { - // Avoid repeated addition - if (err_desc_.find(err) != err_desc_.end()) { - return; - } - err_desc_[err] = desc; - } - - void RegisterErrorNo(uint32_t err, const char *desc) { - if (desc == nullptr) { - return; - } - std::string error_desc = desc; - if (err_desc_.find(err) != err_desc_.end()) { - return; - } - err_desc_[err] = error_desc; - } - - std::string GetErrDesc(uint32_t err) { - auto iter_find = err_desc_.find(err); - if (iter_find == err_desc_.end()) { - return ""; - } - return iter_find->second; - } - - protected: - StatusFactory() {} - ~StatusFactory() {} - - private: - std::map err_desc_; -}; - -class GE_FUNC_VISIBILITY ErrorNoRegisterar { - public: - ErrorNoRegisterar(uint32_t err, const std::string &desc) { StatusFactory::Instance()->RegisterErrorNo(err, desc); } - ErrorNoRegisterar(uint32_t err, const char *desc) { StatusFactory::Instance()->RegisterErrorNo(err, desc); } - ~ErrorNoRegisterar() {} -}; - -// Code compose(4 byte), runtime: 2 bit, type: 2 bit, level: 3 bit, sysid: 8 bit, modid: 5 bit, value: 12 bit -#define GE_ERRORNO(runtime, type, level, sysid, modid, name, value, desc) \ - constexpr ge::Status name = \ - ((0xFF & (static_cast(runtime))) << 30) | ((0xFF & (static_cast(type))) << 28) | \ - ((0xFF & (static_cast(level))) << 25) | ((0xFF & (static_cast(sysid))) << 17) | \ - ((0xFF & (static_cast(modid))) << 12) | (0x0FFF & (static_cast(value))); \ - const ErrorNoRegisterar g_##name##_errorno(name, desc); - -#define GE_ERRORNO_EXTERNAL(name, desc) const ErrorNoRegisterar g_##name##_errorno(name, desc); - -using Status = uint32_t; - -// General error code -GE_ERRORNO(0, 0, 0, 0, 0, SUCCESS, 0, "success"); -GE_ERRORNO(0b11, 0b11, 0b111, 0xFF, 0b11111, FAILED, 0xFFF, "failed"); /*lint !e401*/ - -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_PARAM_INVALID, "Parameter invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_NOT_INIT, "GE executor not initialized yet."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "Model file path invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, "Model id invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "Data size of model invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID, "Model addr invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Queue id of model invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED, "The model loaded repeatedly."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, "Dynamic input size invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "Dynamic batch size invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_BATCH_EMPTY, "AIPP batch parameter empty."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_NOT_EXIST, "AIPP parameter not exist."); 
-GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_MODE_INVALID, "AIPP mode invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_OP_TASK_TYPE_INVALID, "Task type invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID, "Kernel type invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_PLGMGR_PATH_INVALID, "Plugin path is invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_FORMAT_INVALID, "Format is invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_SHAPE_INVALID, "Shape is invalid."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DATATYPE_INVALID, "Datatype is invalid."); - -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_MEMORY_ALLOCATION, "Memory allocation error."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate memory."); - -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_INTERNAL_ERROR, "Internal error."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_LOAD_MODEL, "Load model error."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED, "Failed to load model partition."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED, "Failed to load weight partition."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED, "Failed to load task partition."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED, "Failed to load op kernel partition."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA, "Failed to release the model data."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_COMMAND_HANDLE, "Command handle error."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_GET_TENSOR_INFO, "Get tensor info error."); -GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_UNLOAD_MODEL, "Load model error."); - -} // namespace ge - -#endif // INC_EXTERNAL_GE_GE_API_ERROR_CODES_H_ diff --git a/inc/graphengine/inc/external/ge/ge_api_types.h b/inc/graphengine/inc/external/ge/ge_api_types.h deleted file mode 100644 index c2c9e6f87..000000000 --- a/inc/graphengine/inc/external/ge/ge_api_types.h +++ /dev/null @@ -1,512 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_EXTERNAL_GE_GE_API_TYPES_H_ -#define INC_EXTERNAL_GE_GE_API_TYPES_H_ - -#include -#include -#include -#include -#include -#include -#include "graph/tensor.h" - -namespace ge { -// Option key: graph run mode -const char *const OPTION_GRAPH_RUN_MODE = "ge.graphRunMode"; - -// Option key: topo sorting mode -const char *const OPTION_TOPO_SORTING_MODE = "ge.topoSortingMode"; -// Option key: ome init -const char *const OPTION_EXEC_SESSION_ID = "ge.exec.sessionId"; -const char *const OPTION_EXEC_DEVICE_ID = "ge.exec.deviceId"; -const char *const OPTION_EXEC_JOB_ID = "ge.exec.jobId"; -const char *const OPTION_EXEC_IS_USEHCOM = "ge.exec.isUseHcom"; -const char *const OPTION_EXEC_IS_USEHVD = "ge.exec.isUseHvd"; -const char *const OPTION_EXEC_RANK_ID = "ge.exec.rankId"; -const char *const OPTION_EXEC_POD_NAME = "ge.exec.podName"; -const char *const OPTION_EXEC_DEPLOY_MODE = "ge.exec.deployMode"; -const char *const OPTION_EXEC_RANK_TABLE_FILE = "ge.exec.rankTableFile"; -const char *const GE_AICPU_FLAG = "ge.aicpuFlag"; -const char *const OPTION_EXEC_EXTERN_PLUGIN_PATH = "ge.soLoadPath"; - -const std::string OPTION_EXEC_CM_CHIEF_IP = "ge.cmChiefIp"; -const std::string OPTION_EXEC_CM_CHIEF_PORT = "ge.cmChiefPort"; -const std::string OPTION_EXEC_CM_CHIEF_DEVICE = "ge.cmChiefWorkerDevice"; -const std::string OPTION_EXEC_CM_WORKER_IP = "ge.cmWorkerIp"; -const std::string OPTION_EXEC_CM_WORKER_SIZE = "ge.cmWorkerSize"; - -// Dump flag and para -const char *const OPTION_EXEC_ENABLE_DUMP = "ge.exec.enableDump"; -const char *const OPTION_EXEC_DUMP_PATH = "ge.exec.dumpPath"; -const char *const OPTION_EXEC_DUMP_STEP = "ge.exec.dumpStep"; -const char *const OPTION_EXEC_DUMP_MODE = "ge.exec.dumpMode"; -const char *const OPTION_EXEC_ENABLE_DUMP_DEBUG = "ge.exec.enableDumpDebug"; -const char *const OPTION_EXEC_DUMP_DEBUG_MODE = "ge.exec.dumpDebugMode"; -const char *const OPTION_EXEC_ENABLE_INCRE_BUILD = "ge.exec.enableIncreBuild"; -const char *const OPTION_EXEC_INCRE_BUILD_CACHE_PATH = "ge.exec.increBuildCachePath"; -const char *const OPTION_EXEC_ENABLE_EXCEPTION_DUMP = "ge.exec.enable_exception_dump"; -const char *const OPTION_EXEC_ENABLE_SCOPE_FUSION_PASSES = "ge.exec.enableScopeFusionPasses"; -const char *const OPTION_EXEC_PROFILING_FPPONIT_OPTIONS = "ge.exec.profilingFpPointOptions"; -const char *const OPTION_EXEC_PROFILING_BPPONIT_OPTIONS = "ge.exec.profilingBpPointOptions"; -// profiling flag -const char *const OPTION_EXEC_PROFILING_MODE = "ge.exec.profilingMode"; -const char *const OPTION_EXEC_PROFILING_OPTIONS = "ge.exec.profilingOptions"; -// Hccl flag, if ge.exec.hcclFlag =1, it means load plugin for opskernel, else:ge.exec.hcclFlag =0 -const char *const OPTION_EXEC_HCCL_FLAG = "ge.exec.hcclFlag"; -const char *const OPTION_EXEC_ATOMIC_FLAG = "ge.exec.enable_atomic"; -const char *const OPTION_EXEC_DISABLE_REUSED_MEMORY = "ge.exec.disableReuseMemory"; -const char *const OPTION_EXEC_ENABLE_TAILING_OPTIMIZATION = "ge.exec.isTailingOptimization"; -// Dynamic input flag. 
ge.exec.dynamicInput=1, means enable dynaimc input, -// ge.exec.dynamicGraphExecuteMode, dynamic_execute[default] -const char *const OPTION_EXEC_DYNAMIC_INPUT = "ge.exec.dynamicInput"; -const char *const OPTION_EXEC_DYNAMIC_EXECUTE_MODE = "ge.exec.dynamicGraphExecuteMode"; -const char *const OPTION_EXEC_DATA_INPUTS_SHAPE_RANGE = "ge.exec.dataInputsShapeRange"; -const char *const OPTION_EXEC_GRAPH_EXEC_TIMEOUT = "ge.exec.graphExecTimeout"; -const char *const OPTION_EXEC_LOGICAL_DEVICE_CLUSTER_DEPLOY_MODE = "ge.exec.logicalDeviceClusterDeployMode"; -const char *const OPTION_EXEC_LOGICAL_DEVICE_ID = "ge.exec.logicalDeviceId"; -const char *const OPTION_EXEC_MODEL_DEPLOY_MODE = "ge.exec.modelDeployMode"; -const char *const OPTION_EXEC_MODEL_DEPLOY_DEVICELIST = "ge.exec.modelDeployDevicelist"; - -// Option key: memory init -const char *const GRAPH_MEMORY_MAX_SIZE = "ge.graphMemoryMaxSize"; -const char *const VARIABLE_MEMORY_MAX_SIZE = "ge.variableMemoryMaxSize"; -namespace configure_option { -const char *const STREAM_NUM = "ge.streamNum"; -const char *const HEAD_STREAM = "ge.headStream"; -const char *const PERF_LEVEL = "ge.perfLevel"; -const char *const ENCRYPT_MODE = "ge.encryptMode"; -const char *const EK_FILE = "ge.ekFile"; -const char *const CERT_FILE = "ge.certFile"; -const char *const HW_KEY_FILE = "ge.hwKeyFile"; -const char *const PRIVATE_KEY_FILE = "ge.privateKeyFile"; -const char *const FRAMEWORK_TYPE = "ge.frameworkType"; -const char *const CALIBRATION_CONF_FILE = "ge.calibrationConfFile"; -const char *const INSERT_OP_FILE = "ge.insertOpFile"; -const char *const OUTPUT_NODE_NAME = "ge.outputNodeName"; -const char *const COMPRESS_FLAG = "ge.compressFlag"; -const char *const PRECISION_MODE = "ge.exec.precision_mode"; -const char *const PRECISION_MODE_V2 = "ge.exec.precision_mode_v2"; -const char *const SINGLE_OP_FLAG = "ge.exec.single_op"; -const char *const TRAIN_FLAG = "ge.trainFlag"; -const char *const RUN_FLAG = "ge.runFlag"; -const char *const LOCAL_FMKOP_FLAG = "ge.enabledLocalFmkop"; -const char *const TBE_PLUGIN_PATH_FLAG = "ge.TBE_plugin_path"; -const char *const DDK_VERSION_FLAG = "ge.DDK_version"; -const char *const GE_FE_FLAG = "ge.feFlag"; -const char *const STREAM_MAX_PARALLEL_NUM = "ge.streamMaxParallelNum"; -const char *const OUTPUT_DATATYPE = "ge.outputDatatype"; -const char *const OP_SELECT_IMPL_MODE = "ge.opSelectImplmode"; -const char *const OPTYPELIST_FOR_IMPLMODE = "ge.optypelistForImplmode"; -const char *const HCOM_PARALLEL = "ge.hcomParallel"; -const char *const SOC_VERSION = "ge.socVersion"; -const char *const CORE_TYPE = "ge.engineType"; -const char *const AICORE_NUM = "ge.aicoreNum"; -const char *const L1_FUSION = "ge.l1Fusion"; -const char *const BUFFER_OPTIMIZE = "ge.bufferOptimize"; -const char *const ENABLE_SMALL_CHANNEL = "ge.enableSmallChannel"; -const char *const ENABLE_COMPRESS_WEIGHT = "ge.enableCompressWeight"; -const char *const FUSION_SWITCH_FILE = "ge.fusionSwitchFile"; -const char *const SAVE_ORIGINAL_MODEL = "ge.saveOriginalModel"; -const char *const ORIGINAL_MODEL_FILE = "ge.originalModelFile"; -const char *const INPUT_FP16_NODES = "ge.INPUT_NODES_SET_FP16"; -const char *const OP_DEBUG_LEVEL = "ge.opDebugLevel"; -const char *const PERFORMANCE_MODE = "ge.performance_mode"; -const char *const SHAPE_GENERALIZED_BUILD_MODE = "ge.shape_generalized_build_mode"; -const char *const MODIFY_MIXLIST = "ge.exec.modify_mixlist"; -const char *const OP_PRECISION_MODE = "ge.exec.op_precision_mode"; -const char *const HCCL_TIMEOUT = 
"ge.exec.hcclExecuteTimeOut"; -const char *const OP_WAIT_TIMEOUT = "ge.exec.opWaitTimeout"; -const char *const OP_EXECUTE_TIMEOUT = "ge.exec.opExecuteTimeout"; -const char *const ATOMIC_CLEAN_POLICY = "ge.exec.atomicCleanPolicy"; -const char *const MEMORY_OPTIMIZATION_POLICY = "ge.exec.memoryOptimizationPolicy"; -} // namespace configure_option -// Configure stream num by Session constructor options param, -// its value should be int32_t type, default value is "1" -const std::string STREAM_NUM = "ge.streamNum"; - -// Configure add head stream to model. -// its value should be "0" or "1", default value is "0" -const std::string HEAD_STREAM = "ge.headStream"; - -// Configure perf level by Session constructor options param, -// its value please see enum PerfLevel, default value is "4" -const std::string PERF_LEVEL = "ge.perfLevel"; - -// Configure encrypt mode by Session constructor options param, -// its value should be int32_t type, default value is "-1" -const std::string ENCRYPT_MODE = "ge.encryptMode"; - -// configure ek file by Session constructor options param, -// its value should be file path, default value is "" -const std::string EK_FILE = "ge.ekFile"; - -// Configure cert file by Session constructor options param, -// its value should be file path, default value is "" -const std::string CERT_FILE = "ge.certFile"; - -// Configure hw key file by Session constructor options param, -// its value should be file path, default value is "" -const std::string HW_KEY_FILE = "ge.hwKeyFile"; - -// Configure private file by Session constructor options param, -// its value should be file path, default value is "" -const std::string PRIVATE_KEY_FILE = "ge.privateKeyFile"; - -// Configure framework type by Session constructor options param, -// its value please see enum FrameworkType, default value is "3" -const std::string FRAMEWORK_TYPE = "ge.frameworkType"; - -// Configure calibration info file by Session constructor options param, -// its value should be file path, default value is "" -const std::string CALIBRATION_CONF_FILE = "ge.calibrationConfFile"; - -// Configure insert op info file by Session constructor options param, -// its value should be file path, default value is "" -const std::string INSERT_OP_FILE = "ge.insertOpFile"; - -// Configure output node name by Session constructor options param, -// its value should be std::string type, default value is "" -const std::string OUTPUT_NODE_NAME = "ge.outputNodeName"; - -// Configure weight compress flag by Session constructor options param, -// its value should be "0" or "1", default value is "0" -const std::string COMPRESS_FLAG = "ge.compressFlag"; - -const std::string PRECISION_MODE = "ge.exec.precision_mode"; - -const std::string PRECISION_MODE_V2 = "ge.exec.precision_mode_v2"; - -const std::string TUNE_DEVICE_IDS = "ge.exec.tuneDeviceIds"; - -// Configure single op flag for FE -// its value should be "0" or "1", default value is "0" -const std::string SINGLE_OP_FLAG = "ge.exec.single_op"; - -// Configure train flag by Session constructor options param, -// its value should be "0" or "1", default value is "0" -const std::string TRAIN_FLAG = "ge.trainFlag"; - -// Configure run flag by Session constructor options param, -// its value should be "0" or "1", default value is "0" -const std::string RUN_FLAG = "ge.runFlag"; - -// Configure run flag by Session constructor options param, -// its value should be "0" or "1", default value is "0" -// this option is to enable local framework op feature -const std::string LOCAL_FMKOP_FLAG = 
"ge.enabledLocalFmkop"; - -// Configure run flag by Session constructor options param, -// its value should be a path -// this option is to obtain the TBE op plugin path -const std::string TBE_PLUGIN_PATH_FLAG = "ge.TBE_plugin_path"; - -// Configure run flag by Session constructor options param, -// its value should be a path -// this option is to obtain the DDK Version info -const std::string DDK_VERSION_FLAG = "ge.DDK_version"; - -// Configure run flag by Session constructor options param, -// its value should be a path -// this option is to obtain fe flag -const std::string GE_FE_FLAG = "ge.feFlag"; - -// Configure stream max parallel num only by Session constructor options param, -// its value should be stream:int, such as "DNN_V100:2,DNN_HCCL:3", -// default value is "1", such as "DNN_V100:1,DNN_HCCL:1" -// this option is to obtain stream max parallel num -const std::string STREAM_MAX_PARALLEL_NUM = "ge.streamMaxParallelNum"; - -// congigure outputDatatype to setting net output type -const std::string OUTPUT_DATATYPE = "ge.outputDatatype"; - -// congigure opSelectImplmode to setting op select implmode -const std::string OP_SELECT_IMPL_MODE = "ge.opSelectImplmode"; - -// congigure optypelist_for_implmode to setting which op use implmode -const std::string OPTYPELIST_FOR_IMPLMODE = "ge.optypelistForImplmode"; - -// configure whether to enable hcom parallel by session constructor options param, -// its value should be "0" or "1", default value is "0" -const std::string HCOM_PARALLEL = "ge.hcomParallel"; - -// configure whether to use dynamic batch size -const char *const kDynamicBatchSize = "ge.dynamicBatchSize"; - -// configure threshold of fusion data size for communication op -const std::string FUSION_TENSOR_SIZE = "ge.fusionTensorSize"; - -const std::string INPUT_SHAPE = "ge.inputShape"; - -const std::string DYNAMIC_NODE_TYPE = "ge.dynamicNodeType"; -// configure whether to use dynamic image size -const char *const kDynamicImageSize = "ge.dynamicImageSize"; - -// Configure whether to use dynamic dims -const char *const kDynamicDims = "ge.dynamicDims"; - -// Configure soc version , example: "Ascend310" -const std::string SOC_VERSION = "ge.socVersion"; - -// Configure core type "VectorEngine", default value is "AIcoreEngine" -const std::string CORE_TYPE = "ge.engineType"; - -// Configure AICORE NUM -const std::string AICORE_NUM = "ge.aicoreNum"; - -// Configure L1FUSION -const std::string L1_FUSION = "ge.l1Fusion"; - -// Configure l1,l2,and others optimize option -const std::string BUFFER_OPTIMIZE = "ge.bufferOptimize"; - -// Configure Small Channel flag -const std::string ENABLE_SMALL_CHANNEL = "ge.enableSmallChannel"; - -// Configure Compress Weight flag -const std::string ENABLE_COMPRESS_WEIGHT = "ge.enableCompressWeight"; - -// Configure fusion switch file path -const std::string FUSION_SWITCH_FILE = "ge.fusionSwitchFile"; - -// Save original model -const std::string SAVE_ORIGINAL_MODEL = "ge.saveOriginalModel"; - -// Save original model file name -const std::string ORIGINAL_MODEL_FILE = "ge.originalModelFile"; - -const char *const OPTION_GE_MAX_DUMP_FILE_NUM = "ge.maxDumpFileNum"; -const char *const OPTION_GE_MAX_DUMP_FILE_SIZE = "ge.maxDumpFileSize"; -const char *const OPTION_GE_MAX_DUMP_OP_NUM = "ge.maxDumpOpNum"; - -// Configure for print op pass -// Its value should be "0" or "1", default value is "1" -const char *const ENABLE_PRINT_OP_PASS = "ge.enablePrintOpPass"; - -// Configure operator compilation path -// Its value should be file path, default value is "./" -const char 
*const DEBUG_DIR = "ge.debugDir"; - -// Configure operator compiler cache path -// Its value should be file path, default value is "./" -const char *const OP_COMPILER_CACHE_DIR = "ge.op_compiler_cache_dir"; - -// Configure operator compiler cache mode -// Its value should be "disable", "enable" or "force", default value is "disable" -const char *const OP_COMPILER_CACHE_MODE = "ge.op_compiler_cache_mode"; - -// Configure whether to use single stream. -// Its value should be "true" or "false", default value is "false" -const char *const ENABLE_SINGLE_STREAM = "ge.enableSingleStream"; - -// Configure input fp16 nodes -const std::string INPUT_FP16_NODES = "ge.INPUT_NODES_SET_FP16"; - -// Configure debug level, its value should be 0(default), 1 or 2. -// 0: close debug; 1: open TBE compiler; 2: open ccec compiler -const std::string OP_DEBUG_LEVEL = "ge.opDebugLevel"; - -// Configure model bank path -const std::string MDL_BANK_PATH_FLAG = "ge.mdl_bank_path"; - -// Configure display_model_info flag -const std::string DISPLAY_MODEL_INFO = "ge.display_model_info"; - -// Configure op bank path -const std::string OP_BANK_PATH_FLAG = "ge.op_bank_path"; -const std::string OP_BANK_UPDATE_FLAG = "ge.op_bank_update"; - -// Configure for fix hcombroadcast format. -// when config model multi, broadcast format should be fixed -// 0: data multi; 1: model multi; -const std::string HCOM_MULTI_MODE = "ge.hcomMultiMode"; - -// atc and ir option -const char *const INPUT_SHAPE_RANGE = "input_shape_range"; - -// Configure express high compile performance or high execute performance -// normal: no need to compile, used saved .o files directly -// high: need to recompile, high execute performance mode -const std::string PERFORMANCE_MODE = "ge.performance_mode"; - -// For selecting the mode of shape generalization when build graph. -// shape_generalized: Shape will be generalized during graph build. -// shape_precise: Shape will not be generalized, use precise shape. 
-const std::string SHAPE_GENERALIZED_BUILD_MODE = "ge.shape_generalized_build_mode"; - -const std::string MODIFY_MIXLIST = "ge.exec.modify_mixlist"; - -const std::string OP_PRECISION_MODE = "ge.exec.op_precision_mode"; - -const std::string OP_WAIT_TIMEOUT = "ge.exec.opWaitTimeout"; - -const std::string OP_EXECUTE_TIMEOUT = "ge.exec.opExecuteTimeout"; - -const std::string HCCL_TIMEOUT = "ge.exec.hcclExecuteTimeOut"; - -const std::string ATOMIC_CLEAN_POLICY = "ge.exec.atomicCleanPolicy"; - -const std::string MEMORY_OPTIMIZATION_POLICY = "ge.exec.memoryOptimizationPolicy"; - -// Graph run mode -enum GraphRunMode { PREDICTION = 0, TRAIN }; -// Topo sorting mode -enum class TopoSortingMode { BFS = 0, DFS = 1 }; -// Input/Output tensor info -struct InputTensorInfo { - uint32_t data_type; // data type - std::vector dims; // shape description - void *data; // tensor data - int64_t length; // tensor length -}; - -struct OutputTensorInfo { - uint32_t data_type; // data type - std::vector dims; // shape description - std::unique_ptr data; // tensor data - int64_t length; // tensor length - OutputTensorInfo() : data_type(0), dims({}), data(nullptr), length(0) {} - OutputTensorInfo(OutputTensorInfo &&out) - : data_type(out.data_type), dims(out.dims), data(std::move(out.data)), length(out.length) {} - - OutputTensorInfo &operator=(OutputTensorInfo &&out) { - if (this != &out) { - data_type = out.data_type; - dims = out.dims; - data = std::move(out.data); - length = out.length; - } - return *this; - } - OutputTensorInfo(const OutputTensorInfo &) = delete; - OutputTensorInfo &operator=(const OutputTensorInfo &) = delete; -}; - -using Status = uint32_t; -using RunAsyncCallback = std::function &)>; - -// for ir build -namespace ir_option { -static const char *const INPUT_FORMAT = "input_format"; -static const char *const INPUT_SHAPE = "input_shape"; -static const char *const INPUT_SHAPE_RANGE = ge::INPUT_SHAPE_RANGE; -static const char *const OP_NAME_MAP = "op_name_map"; -static const char *const IS_DYNAMIC_INPUT = "is_dynamic_input"; -static const char *const IS_INPUT_ADJUST_HW_LAYOUT = "is_input_adjust_hw_layout"; -static const char *const IS_OUTPUT_ADJUST_HW_LAYOUT = "is_output_adjust_hw_layout"; -static const char *const ENABLE_SCOPE_FUSION_PASSES = "enable_scope_fusion_passes"; -static const char *const OUTPUT = "output"; -static const char *const DYNAMIC_BATCH_SIZE = kDynamicBatchSize; -static const char *const DYNAMIC_IMAGE_SIZE = kDynamicImageSize; -static const char *const DYNAMIC_DIMS = kDynamicDims; -static const char *const INSERT_OP_FILE = ge::INSERT_OP_FILE.c_str(); -static const char *const PRECISION_MODE = ge::PRECISION_MODE.c_str(); -static const char *const PRECISION_MODE_V2 = ge::PRECISION_MODE_V2.c_str(); -static const char *const TUNE_DEVICE_IDS = ge::TUNE_DEVICE_IDS.c_str(); -static const char *const EXEC_DISABLE_REUSED_MEMORY = ge::OPTION_EXEC_DISABLE_REUSED_MEMORY; -static const char *const CORE_TYPE = ge::CORE_TYPE.c_str(); -static const char *const SOC_VERSION = ge::SOC_VERSION.c_str(); -static const char *const ENABLE_SINGLE_STREAM = ge::ENABLE_SINGLE_STREAM; -static const char *const AICORE_NUM = ge::AICORE_NUM.c_str(); -static const char *const FUSION_SWITCH_FILE = ge::FUSION_SWITCH_FILE.c_str(); -static const char *const ENABLE_SMALL_CHANNEL = ge::ENABLE_SMALL_CHANNEL.c_str(); -static const char *const OP_SELECT_IMPL_MODE = ge::OP_SELECT_IMPL_MODE.c_str(); -static const char *const OUTPUT_TYPE = ge::OUTPUT_DATATYPE.c_str(); -static const char *const BUFFER_OPTIMIZE = 
ge::BUFFER_OPTIMIZE.c_str(); -static const char *const ENABLE_COMPRESS_WEIGHT = ge::ENABLE_COMPRESS_WEIGHT.c_str(); -static const char *const COMPRESS_WEIGHT_CONF = "compress_weight_conf"; -static const char *const OUT_NODES = ge::OUTPUT_NODE_NAME.c_str(); -static const char *const INPUT_FP16_NODES = ge::INPUT_FP16_NODES.c_str(); -static const char *const LOG_LEVEL = "log"; -static const char *const OPTYPELIST_FOR_IMPLMODE = ge::OPTYPELIST_FOR_IMPLMODE.c_str(); -static const char *const DEBUG_DIR = ge::DEBUG_DIR; -static const char *const OP_COMPILER_CACHE_DIR = ge::OP_COMPILER_CACHE_DIR; -static const char *const OP_COMPILER_CACHE_MODE = ge::OP_COMPILER_CACHE_MODE; -static const char *const MDL_BANK_PATH = ge::MDL_BANK_PATH_FLAG.c_str(); -static const char *const OP_BANK_PATH = ge::OP_BANK_PATH_FLAG.c_str(); -static const char *const OP_BANK_UPDATE = ge::OP_BANK_UPDATE_FLAG.c_str(); -static const char *const OP_DEBUG_LEVEL = ge::OP_DEBUG_LEVEL.c_str(); -static const char *const PERFORMANCE_MODE = ge::PERFORMANCE_MODE.c_str(); -static const char *const SHAPE_GENERALIZED_BUILD_MODE = ge::SHAPE_GENERALIZED_BUILD_MODE.c_str(); -static const char *const MODIFY_MIXLIST = ge::MODIFY_MIXLIST.c_str(); -static const char *const OP_PRECISION_MODE = ge::OP_PRECISION_MODE.c_str(); -static const char *const HCCL_TIMEOUT = ge::HCCL_TIMEOUT.c_str(); -static const char *const OP_WAIT_TIMEOUT = ge::OP_WAIT_TIMEOUT.c_str(); -static const char *const OP_EXECUTE_TIMEOUT = ge::OP_EXECUTE_TIMEOUT.c_str(); -static const char *const ATOMIC_CLEAN_POLICY = ge::ATOMIC_CLEAN_POLICY.c_str(); - -// for interface: aclgrphBuildModel -#ifdef __GNUC__ -const std::set ir_builder_suppported_options = {INPUT_FORMAT, - INPUT_SHAPE, - INPUT_SHAPE_RANGE, - OP_NAME_MAP, - DYNAMIC_BATCH_SIZE, - DYNAMIC_IMAGE_SIZE, - DYNAMIC_DIMS, - INSERT_OP_FILE, - OP_PRECISION_MODE, - PRECISION_MODE, - PRECISION_MODE_V2, - TUNE_DEVICE_IDS, - EXEC_DISABLE_REUSED_MEMORY, - OUTPUT_TYPE, - OUT_NODES, - INPUT_FP16_NODES, - LOG_LEVEL, - OP_DEBUG_LEVEL, - DEBUG_DIR, - OP_COMPILER_CACHE_DIR, - OP_COMPILER_CACHE_MODE, - MDL_BANK_PATH, - OP_BANK_PATH, - OP_BANK_UPDATE, - PERFORMANCE_MODE, - SHAPE_GENERALIZED_BUILD_MODE, - MODIFY_MIXLIST}; - -// for interface: aclgrphParse -const std::set ir_parser_suppported_options = { - INPUT_FP16_NODES, IS_INPUT_ADJUST_HW_LAYOUT, IS_OUTPUT_ADJUST_HW_LAYOUT, OUTPUT, - OUT_NODES, ENABLE_SCOPE_FUSION_PASSES}; - -// for interface: aclgrphBuildInitialize -const std::set global_options = {CORE_TYPE, - SOC_VERSION, - BUFFER_OPTIMIZE, - ENABLE_COMPRESS_WEIGHT, - COMPRESS_WEIGHT_CONF, - PRECISION_MODE, - PRECISION_MODE_V2, - TUNE_DEVICE_IDS, - EXEC_DISABLE_REUSED_MEMORY, - ENABLE_SINGLE_STREAM, - AICORE_NUM, - FUSION_SWITCH_FILE, - ENABLE_SMALL_CHANNEL, - OP_SELECT_IMPL_MODE, - OPTYPELIST_FOR_IMPLMODE, - OP_DEBUG_LEVEL, - DEBUG_DIR, - OP_COMPILER_CACHE_DIR, - OP_COMPILER_CACHE_MODE, - MODIFY_MIXLIST}; -#endif -} // namespace ir_option -} // namespace ge - -#endif // INC_EXTERNAL_GE_GE_API_TYPES_H_ diff --git a/inc/graphengine/inc/external/ge/ge_error_codes.h b/inc/graphengine/inc/external/ge/ge_error_codes.h deleted file mode 100644 index cafc5a648..000000000 --- a/inc/graphengine/inc/external/ge/ge_error_codes.h +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GE_GE_ERROR_CODES_H_ -#define INC_EXTERNAL_GE_GE_ERROR_CODES_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include - -#ifdef __cplusplus -extern "C" { -#endif -static const uint32_t ACL_ERROR_GE_PARAM_INVALID = 145000; -static const uint32_t ACL_ERROR_GE_EXEC_NOT_INIT = 145001; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID = 145002; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ID_INVALID = 145003; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID = 145006; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID = 145007; -static const uint32_t ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID = 145008; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED = 145009; -static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID = 145011; -static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID = 145012; -static const uint32_t ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID = 145013; -static const uint32_t ACL_ERROR_GE_AIPP_BATCH_EMPTY = 145014; -static const uint32_t ACL_ERROR_GE_AIPP_NOT_EXIST = 145015; -static const uint32_t ACL_ERROR_GE_AIPP_MODE_INVALID = 145016; -static const uint32_t ACL_ERROR_GE_OP_TASK_TYPE_INVALID = 145017; -static const uint32_t ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID = 145018; -static const uint32_t ACL_ERROR_GE_PLGMGR_PATH_INVALID = 145019; -static const uint32_t ACL_ERROR_GE_FORMAT_INVALID = 145020; -static const uint32_t ACL_ERROR_GE_SHAPE_INVALID = 145021; -static const uint32_t ACL_ERROR_GE_DATATYPE_INVALID = 145022; -static const uint32_t ACL_ERROR_GE_MEMORY_ALLOCATION = 245000; -static const uint32_t ACL_ERROR_GE_MEMORY_OPERATE_FAILED = 245001; -static const uint32_t ACL_ERROR_GE_INTERNAL_ERROR = 545000; -static const uint32_t ACL_ERROR_GE_LOAD_MODEL = 545001; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED = 545002; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED = 545003; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED = 545004; -static const uint32_t ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED = 545005; -static const uint32_t ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA = 545006; -static const uint32_t ACL_ERROR_GE_COMMAND_HANDLE = 545007; -static const uint32_t ACL_ERROR_GE_GET_TENSOR_INFO = 545008; -static const uint32_t ACL_ERROR_GE_UNLOAD_MODEL = 545009; - -#ifdef __cplusplus -} // namespace ge -#endif -#endif // INC_EXTERNAL_GE_GE_ERROR_CODES_H_ diff --git a/inc/graphengine/inc/external/ge/ge_ir_build.h b/inc/graphengine/inc/external/ge/ge_ir_build.h deleted file mode 100644 index 04e059a1f..000000000 --- a/inc/graphengine/inc/external/ge/ge_ir_build.h +++ /dev/null @@ -1,159 +0,0 @@ -/** -* Copyright 2020 Huawei Technologies Co., Ltd - -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at - -* http://www.apache.org/licenses/LICENSE-2.0 - -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -#ifndef INC_EXTERNAL_GE_IR_BUILD_H_ -#define INC_EXTERNAL_GE_IR_BUILD_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include -#include -#include -#include "graph/graph.h" -#include "graph/ge_error_codes.h" - -namespace { -const int IR_MAJOR_VERSION = 1; -const int IR_MINOR_VERSION = 0; -const int IR_PATCH_VERSION = 0; -} // namespace - -namespace ge { - -struct ModelBufferData { - std::shared_ptr data = nullptr; - uint64_t length; -}; - -enum aclgrphAttrType { ATTR_TYPE_KEEP_DTYPE = 0, ATTR_TYPE_WEIGHT_COMPRESS }; - -/** - * @ingroup AscendCL - * @brief build model.Notice the model is stored in buffer - * - * @param global_options[IN] global init params for build - * @retval GRAPH_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ATTRIBUTED_DEPRECATED(GE_FUNC_VISIBILITY graphStatus aclgrphBuildInitialize(std::map &)) -GE_FUNC_VISIBILITY graphStatus aclgrphBuildInitialize(std::map global_options); - -GE_FUNC_VISIBILITY graphStatus aclgrphBuildInitialize(std::map &global_options); - -/** - * @ingroup AscendCL - * @brief build model.Notice the model is stored in buffer - * - */ -GE_FUNC_VISIBILITY void aclgrphBuildFinalize(); - -/** - * @ingroup AscendCL - * @brief build model.Notice the model is stored in buffer - * - * @param graph[IN] the graph ready to build - * @param options[IN] options used for build - * @param model[OUT] builded model - * @retval GRAPH_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ATTRIBUTED_DEPRECATED(GE_FUNC_VISIBILITY graphStatus aclgrphBuildModel(const ge::Graph &, - const std::map &, - ModelBufferData &)) -GE_FUNC_VISIBILITY graphStatus aclgrphBuildModel(const ge::Graph &graph, - const std::map &build_options, - ModelBufferData &model); - -GE_FUNC_VISIBILITY graphStatus aclgrphBuildModel(const ge::Graph &graph, - const std::map &build_options, - ModelBufferData &model); - -/** - * @ingroup AscendCL - * @brief save model buffer to file - * - * @param output_file[IN] the file path to be saved - * @param model[IN] model buffer data - * @retval GRAPH_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -ATTRIBUTED_DEPRECATED(GE_FUNC_VISIBILITY graphStatus aclgrphSaveModel(const char *, const ModelBufferData &)) -GE_FUNC_VISIBILITY graphStatus aclgrphSaveModel(const string &output_file, const ModelBufferData &model); - -GE_FUNC_VISIBILITY graphStatus aclgrphSaveModel(const char *output_file, const ModelBufferData &model); - -/** - * @ingroup AscendCL - * @brief query IR interface version - * - * @param major_version[OUT] IR interface major version - * @param minor_version[OUT] IR interface minor version - * @param patch_version[OUT] IR interface patch version - * @retval GRAPH_SUCCESS The function is successfully executed. 
- * @retval OtherValues Failure - */ -GE_FUNC_VISIBILITY graphStatus aclgrphGetIRVersion(int *major_version, int *minor_version, int *patch_version); - -/** - * @ingroup AscendCL - * @brief dump graph - * - * @param graph[IN] the graph ready to build - * @param file[IN] file path - * @param file[IN] file path string len - * @retval GRAPH_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -GE_FUNC_VISIBILITY graphStatus aclgrphDumpGraph(const ge::Graph &graph, const char *file, const size_t len); - -/** - * @ingroup AscendCL - * @brief create single op graph - * - * @param op_type[IN] the op_type - * @param inputs[IN] the inputdesc - * @param outputs[IN] the outputdesc - * @param graph[OUT] the graph - * @retval GRAPH_SUCCESS The function is successfully executed. - * @retval OtherValues Failure - */ -GE_FUNC_VISIBILITY graphStatus aclgrphGenerateForOp(const AscendString &op_type, const std::vector &inputs, - const std::vector &outputs, Graph &graph); - -/** - * @name aclgrphSetOpAttr - * @brief set attribute for operators in the configuration file - * @param graph [IN/OUT] compute graph - * @param attr_type [In] attribute type - * @param cfg_path [IN] the config file path - * @return graphStatus - */ -GE_FUNC_VISIBILITY graphStatus aclgrphSetOpAttr(Graph &graph, aclgrphAttrType attr_type, const char *cfg_path); - -}; // namespace ge -#endif // INC_EXTERNAL_GE_IR_BUILD_H_ diff --git a/inc/graphengine/inc/framework/common/aicpu_op.h b/inc/graphengine/inc/framework/common/aicpu_op.h deleted file mode 100644 index 850ceca34..000000000 --- a/inc/graphengine/inc/framework/common/aicpu_op.h +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_AICPU_OP_H_ -#define INC_FRAMEWORK_COMMON_AICPU_OP_H_ - -#include "cce/customize.h" - -#endif // INC_FRAMEWORK_COMMON_AICPU_OP_H_ diff --git a/inc/graphengine/inc/framework/common/debug/ge_log.h b/inc/graphengine/inc/framework/common/debug/ge_log.h deleted file mode 100644 index 754712f3e..000000000 --- a/inc/graphengine/inc/framework/common/debug/ge_log.h +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_FRAMEWORK_COMMON_DEBUG_GE_LOG_H_ -#define INC_FRAMEWORK_COMMON_DEBUG_GE_LOG_H_ - -#include - -#include "framework/common/ge_inner_error_codes.h" -#include "common/util/error_manager/error_manager.h" -#include "toolchain/slog.h" -#ifdef __GNUC__ -#include -#include -#else -#include "mmpa/mmpa_api.h" -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#define GE_MODULE_NAME static_cast(GE) - -// trace status of log -enum TraceStatus { TRACE_INIT = 0, TRACE_RUNNING, TRACE_WAITING, TRACE_STOP }; - -class GE_FUNC_VISIBILITY GeLog { - public: - static uint64_t GetTid() { -#ifdef __GNUC__ - uint64_t tid = static_cast(syscall(__NR_gettid)); -#else - uint64_t tid = static_cast(GetCurrentThreadId()); -#endif - return tid; - } -}; - -inline bool IsLogEnable(int module_name, int log_level) { - int32_t enable = CheckLogLevel(module_name, log_level); - // 1:enable, 0:disable - return (enable == 1); -} - -#define GELOGE(ERROR_CODE, fmt, ...) \ - dlog_error(GE_MODULE_NAME, "%lu %s: ErrorNo: %d(%s) %s" fmt, GeLog::GetTid(), __FUNCTION__, ERROR_CODE, \ - ((GE_GET_ERRORNO_STR(ERROR_CODE)).c_str()), ErrorManager::GetInstance().GetLogHeader().c_str(), \ - ##__VA_ARGS__) -#define GELOGW(fmt, ...) \ - if (IsLogEnable(GE_MODULE_NAME, DLOG_WARN)) \ - dlog_warn(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__) -#define GELOGI(fmt, ...) \ - if (IsLogEnable(GE_MODULE_NAME, DLOG_INFO)) \ - dlog_info(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__) -#define GELOGD(fmt, ...) \ - if (IsLogEnable(GE_MODULE_NAME, DLOG_DEBUG)) \ - dlog_debug(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__) - -#define GEEVENT(fmt, ...) dlog_event(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__) - -#define GELOGT(VALUE, fmt, ...) \ - do { \ - TraceStatus stat = VALUE; \ - const char *const TraceStatStr[] = {"INIT", "RUNNING", "WAITING", "STOP"}; \ - int idx = static_cast(stat); \ - char *k = const_cast("status"); \ - char *v = const_cast(TraceStatStr[idx]); \ - KeyValue kv = {k, v}; \ - DlogWithKV(static_cast(GE_MODULE_NAME), DLOG_TRACE, &kv, 1, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, \ - ##__VA_ARGS__); \ - } while (0) - -#define GE_LOG_ERROR(MOD_NAME, ERROR_CODE, fmt, ...) \ - dlog_error(MOD_NAME, "%lu %s: ErrorNo: %d(%s) " fmt, GeLog::GetTid(), __FUNCTION__, ERROR_CODE, \ - ((GE_GET_ERRORNO_STR(ERROR_CODE)).c_str()), ##__VA_ARGS__) - -// print memory when it is greater than 1KB. -#define GE_PRINT_DYNAMIC_MEMORY(FUNC, PURPOSE, SIZE) \ - do { \ - if ((SIZE) > 1024) { \ - GELOGI("MallocMemory, func=%s, size=%zu, purpose=%s", (#FUNC), static_cast(SIZE), (PURPOSE)); \ - } \ - } while (0); -#ifdef __cplusplus -} -#endif -#endif // INC_FRAMEWORK_COMMON_DEBUG_GE_LOG_H_ diff --git a/inc/graphengine/inc/framework/common/debug/log.h b/inc/graphengine/inc/framework/common/debug/log.h deleted file mode 100644 index f06faa1b9..000000000 --- a/inc/graphengine/inc/framework/common/debug/log.h +++ /dev/null @@ -1,294 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
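// The GELOG* macros above (from the deleted ge_log.h) prepend the thread id and function
// name, and GELOGE additionally expands the error number into its registered description.
// A hedged usage sketch: CheckDeviceId() is a hypothetical caller, and ge::PARAM_INVALID /
// ge::SUCCESS come from the companion error-code headers deleted later in this patch.
#include <cstdint>
#include "framework/common/debug/ge_log.h"
#include "framework/common/ge_inner_error_codes.h"

ge::Status CheckDeviceId(int32_t device_id) {
  GELOGD("checking device id %d", device_id);  // emitted only when DLOG_DEBUG is enabled
  if (device_id < 0) {
    GELOGE(ge::PARAM_INVALID, "device id %d is invalid", device_id);
    return ge::PARAM_INVALID;
  }
  GELOGI("device id %d is usable", device_id);
  return ge::SUCCESS;
}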
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_DEBUG_LOG_H_ -#define INC_FRAMEWORK_COMMON_DEBUG_LOG_H_ - -#include -#include -#include - -#include "runtime/rt.h" -#include "common/string_util.h" -#include "common/util.h" -#include "common/util/error_manager/error_manager.h" -#include "framework/common/debug/ge_log.h" -#include "ge/ge_api_error_codes.h" - -#if !defined(__ANDROID__) && !defined(ANDROID) -#define DOMI_LOGE(fmt, ...) GE_LOG_ERROR(GE_MODULE_NAME, ge::FAILED, fmt, ##__VA_ARGS__) -#else -#include -#if defined(BUILD_VERSION_PERF) -#define DOMI_LOGE(fmt, ...) -#else -// The Android system has strict log control. Do not modify the log. -#define DOMI_LOGE(fmt, ...) \ - __android_log_print(ANDROID_LOG_ERROR, "NPU_FMK", "%s %s(%d)::" #fmt, __FILE__, __FUNCTION__, __LINE__, ##__VA_ARGS__) -#endif -#endif - -// ge marco -#define GE_LOGI_IF(condition, ...) \ - if ((condition)) { \ - GELOGI(__VA_ARGS__); \ - } - -#define GE_LOGW_IF(condition, ...) \ - if ((condition)) { \ - GELOGW(__VA_ARGS__); \ - } - -#define GE_LOGE_IF(condition, ...) \ - if ((condition)) { \ - DOMI_LOGE(__VA_ARGS__); \ - } - -// If expr is not SUCCESS, print the log and return the same value -#define GE_CHK_STATUS_RET(expr, ...) \ - do { \ - const ge::Status _status = (expr); \ - if (_status != ge::SUCCESS) { \ - DOMI_LOGE(__VA_ARGS__); \ - return _status; \ - } \ - } while (0); - -// If expr is not SUCCESS, print the log and do not execute return -#define GE_CHK_STATUS(expr, ...) \ - do { \ - const ge::Status _status = (expr); \ - if (_status != ge::SUCCESS) { \ - DOMI_LOGE(__VA_ARGS__); \ - } \ - } while (0); - -// If expr is not SUCCESS, return the same value -#define GE_CHK_STATUS_RET_NOLOG(expr) \ - do { \ - const ge::Status _status = (expr); \ - if (_status != ge::SUCCESS) { \ - return _status; \ - } \ - } while (0); - -// If expr is not GRAPH_SUCCESS, print the log and return FAILED -#define GE_CHK_GRAPH_STATUS_RET(expr, ...) \ - do { \ - if ((expr) != ge::GRAPH_SUCCESS) { \ - REPORT_CALL_ERROR("E19999", "Operator graph failed"); \ - DOMI_LOGE(__VA_ARGS__); \ - return FAILED; \ - } \ - } while (0); - -// If expr is not SUCCESS, print the log and execute a custom statement -#define GE_CHK_STATUS_EXEC(expr, exec_expr, ...) \ - do { \ - const ge::Status _status = (expr); \ - GE_CHK_BOOL_EXEC(_status == SUCCESS, exec_expr, __VA_ARGS__); \ - } while (0); - -// If expr is not true, print the log and return the specified status -#define GE_CHK_BOOL_RET_STATUS(expr, _status, ...) \ - do { \ - bool b = (expr); \ - if (!b) { \ - REPORT_INNER_ERROR("E19999", __VA_ARGS__); \ - GELOGE(_status, __VA_ARGS__); \ - return _status; \ - } \ - } while (0); - -// If expr is not true, print the log and return the specified status -#define GE_CHK_BOOL_RET_STATUS_NOLOG(expr, _status, ...) \ - do { \ - bool b = (expr); \ - if (!b) { \ - return _status; \ - } \ - } while (0); - -// If expr is not true, print the log and execute a custom statement -#define GE_CHK_BOOL_EXEC(expr, exec_expr, ...) 
\ - { \ - bool b = (expr); \ - if (!b) { \ - DOMI_LOGE(__VA_ARGS__); \ - exec_expr; \ - } \ - } - -// If expr is not true, print the log and execute a custom statement -#define GE_CHK_BOOL_EXEC_WARN(expr, exec_expr, ...) \ - { \ - bool b = (expr); \ - if (!b) { \ - GELOGW(__VA_ARGS__); \ - exec_expr; \ - } \ - } -// If expr is not true, print the log and execute a custom statement -#define GE_CHK_BOOL_EXEC_INFO(expr, exec_expr, ...) \ - { \ - bool b = (expr); \ - if (!b) { \ - GELOGI(__VA_ARGS__); \ - exec_expr; \ - } \ - } - -// If expr is not true, print the log and execute a custom statement -#define GE_CHK_BOOL_TRUE_EXEC_INFO(expr, exec_expr, ...) \ - { \ - bool b = (expr); \ - if (b) { \ - GELOGI(__VA_ARGS__); \ - exec_expr; \ - } \ - } - -// If expr is true, print logs and execute custom statements -#define GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(expr, exec_expr, ...) \ - { \ - bool b = (expr); \ - if (b) { \ - DOMI_LOGE(__VA_ARGS__); \ - exec_expr; \ - } \ - } -// If expr is true, print the Information log and execute a custom statement -#define GE_CHK_TRUE_EXEC_INFO(expr, exec_expr, ...) \ - { \ - bool b = (expr); \ - if (b) { \ - GELOGI(__VA_ARGS__); \ - exec_expr; \ - } \ - } - -// If expr is not SUCCESS, print the log and execute the expression + return -#define GE_CHK_BOOL_TRUE_RET_VOID(expr, exec_expr, ...) \ - { \ - bool b = (expr); \ - if (b) { \ - DOMI_LOGE(__VA_ARGS__); \ - exec_expr; \ - return; \ - } \ - } - -// If expr is not SUCCESS, print the log and execute the expression + return _status -#define GE_CHK_BOOL_TRUE_EXEC_RET_STATUS(expr, _status, exec_expr, ...) \ - { \ - bool b = (expr); \ - if (b) { \ - REPORT_INNER_ERROR("E19999", __VA_ARGS__); \ - DOMI_LOGE(__VA_ARGS__); \ - exec_expr; \ - return _status; \ - } \ - } - -// If expr is not true, execute a custom statement -#define GE_CHK_BOOL_EXEC_NOLOG(expr, exec_expr) \ - { \ - bool b = (expr); \ - if (!b) { \ - exec_expr; \ - } \ - } - -// -----------------runtime related macro definitions------------------------------- -// If expr is not RT_ERROR_NONE, print the log -#define GE_CHK_RT(expr) \ - do { \ - rtError_t _rt_ret = (expr); \ - if (_rt_ret != RT_ERROR_NONE) { \ - DOMI_LOGE("Call rt api failed, ret: 0x%X", _rt_ret); \ - } \ - } while (0); - -// If expr is not RT_ERROR_NONE, print the log and execute the exec_expr expression -#define GE_CHK_RT_EXEC(expr, exec_expr) \ - { \ - rtError_t _rt_ret = (expr); \ - if (_rt_ret != RT_ERROR_NONE) { \ - DOMI_LOGE("Call rt api failed, ret: 0x%X", _rt_ret); \ - exec_expr; \ - } \ - } - -// If expr is not RT_ERROR_NONE, print the log and return -#define GE_CHK_RT_RET(expr) \ - do { \ - rtError_t _rt_ret = (expr); \ - if (_rt_ret != RT_ERROR_NONE) { \ - REPORT_CALL_ERROR("E19999", "Call %s fail, ret: 0x%X", #expr, _rt_ret); \ - DOMI_LOGE("Call rt api failed, ret: 0x%X", _rt_ret); \ - return RT_ERROR_TO_GE_STATUS(_rt_ret); \ - } \ - } while (0); - -// If expr is true, execute exec_expr without printing logs -#define GE_IF_BOOL_EXEC(expr, exec_expr) \ - { \ - if (expr) { \ - exec_expr; \ - } \ - } - -// If make_shared is abnormal, print the log and execute the statement -#define GE_MAKE_SHARED(exec_expr0, exec_expr1) \ - try { \ - exec_expr0; \ - } catch (const std::bad_alloc &) { \ - DOMI_LOGE("Make shared failed"); \ - exec_expr1; \ - } - -#define GE_ERRORLOG_AND_ERRORMSG(_status, errormsg) \ - { \ - GELOGE(_status, "[Check][InnerData]%s", errormsg); \ - REPORT_INNER_ERROR("E19999", "%s", errormsg); \ - } - -#define GE_WARNINGLOG_AND_ERRORMSG(errormsg) \ - { \ - GELOGW("%s", 
errormsg); \ - ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"}, {errormsg}); \ - } - -#define GE_CHK_LOG_AND_ERRORMSG(expr, _status, errormsg) \ - do { \ - bool b = (expr); \ - if (!b) { \ - GELOGE(_status, "%s", errormsg); \ - ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"}, {errormsg}); \ - return _status; \ - } \ - } while (0) - -template -GE_FUNC_VISIBILITY std::string FmtToStr(const T &t) { - std::string fmt; - std::stringstream st; - st << "[" << t << "]"; - fmt = st.str(); - return fmt; -} - -#endif // INC_FRAMEWORK_COMMON_DEBUG_LOG_H_ diff --git a/inc/graphengine/inc/framework/common/fmk_error_codes.h b/inc/graphengine/inc/framework/common/fmk_error_codes.h deleted file mode 100644 index e910e3463..000000000 --- a/inc/graphengine/inc/framework/common/fmk_error_codes.h +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_FMK_ERROR_CODES_H_ -#define INC_FRAMEWORK_COMMON_FMK_ERROR_CODES_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include -#include - -#include "framework/common/fmk_types.h" -#include "register/register_error_codes.h" - -// Each module uses the following four macros to define error codes: -#define DECLARE_ERRORNO_OMG(name, value) DECLARE_ERRORNO(SYSID_FWK, MODID_OMG, name, value) -#define DECLARE_ERRORNO_OME(name, value) DECLARE_ERRORNO(SYSID_FWK, MODID_OME, name, value) -#define DECLARE_ERRORNO_CALIBRATION(name, value) DECLARE_ERRORNO(SYSID_FWK, MODID_CALIBRATION, name, value) - -#define DEF_ERRORNO(name, desc) const ErrorNoRegisterar g_##name##_errorno(name, desc); - -// Interface for Obtaining Error Code Description -#define GET_ERRORNO_STR(value) domi::StatusFactory::Instance()->GetErrDesc(value) - -const int MODID_OMG = 1; // OMG module ID -const int MODID_OME = 2; // OME module ID -const int MODID_CALIBRATION = 3; // Calibration module ID - -namespace domi { -class GE_FUNC_VISIBILITY StatusFactory { - public: - static StatusFactory *Instance(); - - void RegisterErrorNo(uint32_t err, const std::string &desc); - - std::string GetErrDesc(uint32_t err); - - protected: - StatusFactory() {} - ~StatusFactory() {} - - private: - std::map err_desc_; -}; - -class GE_FUNC_VISIBILITY ErrorNoRegisterar { - public: - ErrorNoRegisterar(uint32_t err, const std::string &desc) { StatusFactory::Instance()->RegisterErrorNo(err, desc); } - ~ErrorNoRegisterar() {} -}; - -// Common errocode -DECLARE_ERRORNO_COMMON(MEMALLOC_FAILED, 0); // 50331648 -DECLARE_ERRORNO_COMMON(CCE_FAILED, 2); // 50331650 -DECLARE_ERRORNO_COMMON(RT_FAILED, 3); // 50331651 -DECLARE_ERRORNO_COMMON(INTERNAL_ERROR, 4); // 50331652 -DECLARE_ERRORNO_COMMON(CSEC_ERROR, 5); // 50331653 
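// The GE_CHK_* macros above (from the deleted debug/log.h) are how GraphEngine code logs
// and propagates ge::Status and rtError_t failures without hand-written if/return blocks.
// A hedged sketch: SetUpDevice() and PrepareInput() are hypothetical, and rtSetDevice()
// stands in for any runtime call wrapped by GE_CHK_RT_RET.
#include <cstdint>
#include "framework/common/debug/log.h"
#include "runtime/rt.h"

extern ge::Status PrepareInput(int32_t device_id);  // assumed to exist elsewhere

ge::Status SetUpDevice(int32_t device_id) {
  // Reject bad arguments: reports the error and returns PARAM_INVALID on failure.
  GE_CHK_BOOL_RET_STATUS(device_id >= 0, ge::PARAM_INVALID, "device_id %d is negative", device_id);

  // Propagate a ge::Status unchanged (with an error log) if it is not SUCCESS.
  GE_CHK_STATUS_RET(PrepareInput(device_id), "Prepare input failed, device_id=%d", device_id);

  // Wrap a runtime call; a non RT_ERROR_NONE code is converted via RT_ERROR_TO_GE_STATUS.
  GE_CHK_RT_RET(rtSetDevice(device_id));

  return ge::SUCCESS;
}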
-DECLARE_ERRORNO_COMMON(TEE_ERROR, 6); // 50331653 -DECLARE_ERRORNO_COMMON(UNSUPPORTED, 100); -DECLARE_ERRORNO_COMMON(OUT_OF_MEMORY, 101); - -// Omg errorcode -DECLARE_ERRORNO_OMG(PARSE_MODEL_FAILED, 0); -DECLARE_ERRORNO_OMG(PARSE_WEIGHTS_FAILED, 1); -DECLARE_ERRORNO_OMG(NOT_INITIALIZED, 2); -DECLARE_ERRORNO_OMG(TIMEOUT, 3); - -// Ome errorcode -DECLARE_ERRORNO_OME(MODEL_NOT_READY, 0); -DECLARE_ERRORNO_OME(PUSH_DATA_FAILED, 1); -DECLARE_ERRORNO_OME(DATA_QUEUE_ISFULL, 2); -} // namespace domi - -#endif // INC_FRAMEWORK_COMMON_FMK_ERROR_CODES_H_ diff --git a/inc/graphengine/inc/framework/common/fmk_types.h b/inc/graphengine/inc/framework/common/fmk_types.h deleted file mode 100644 index f84390dac..000000000 --- a/inc/graphengine/inc/framework/common/fmk_types.h +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_FMK_TYPES_H_ -#define INC_FRAMEWORK_COMMON_FMK_TYPES_H_ - -#include "graph/types.h" -#include "register/register_types.h" - -#endif // INC_FRAMEWORK_COMMON_FMK_TYPES_H_ diff --git a/inc/graphengine/inc/framework/common/ge_compiler_options.h b/inc/graphengine/inc/framework/common/ge_compiler_options.h deleted file mode 100644 index 5c9473466..000000000 --- a/inc/graphengine/inc/framework/common/ge_compiler_options.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_GE_COMPILER_OPTIONS_H_ -#define INC_FRAMEWORK_COMMON_GE_COMPILER_OPTIONS_H_ - -namespace ge { -#ifdef __GNUC__ -#define GE_ATTRIBUTE_UNUSED __attribute__((unused)) -#define GE_FUNCTION_IDENTIFIER __PRETTY_FUNCTION__ -#define GE_BUILTIN_PREFETCH(args_addr) __builtin_prefetch(args_addr) -#else -#define GE_ATTRIBUTE_UNUSED -#define GE_FUNCTION_IDENTIFIER __FUNCSIG__ -#define GE_BUILTIN_PREFETCH(args_addr) -#endif -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_GE_COMPILER_OPTIONS_H_ \ No newline at end of file diff --git a/inc/graphengine/inc/framework/common/ge_format_util.h b/inc/graphengine/inc/framework/common/ge_format_util.h deleted file mode 100644 index dfceefb89..000000000 --- a/inc/graphengine/inc/framework/common/ge_format_util.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
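// The StatusFactory / ErrorNoRegisterar pair above keeps a number-to-description table for
// the domi error codes, and GET_ERRORNO_STR() reads it back. A small sketch of that round
// trip: registration normally happens at static-init time via DEF_ERRORNO, and
// DescribeParseFailure() and the description text are illustrative only.
#include <iostream>
#include "framework/common/fmk_error_codes.h"

void DescribeParseFailure() {
  domi::StatusFactory::Instance()->RegisterErrorNo(domi::PARSE_MODEL_FAILED, "Parse model failed.");
  // GET_ERRORNO_STR(v) expands to domi::StatusFactory::Instance()->GetErrDesc(v).
  std::cout << GET_ERRORNO_STR(domi::PARSE_MODEL_FAILED) << std::endl;
}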
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_GE_FORMAT_UTIL_H_ -#define INC_FRAMEWORK_COMMON_GE_FORMAT_UTIL_H_ - -#include - -#include "common/ge_inner_error_codes.h" -#include "graph/tensor.h" - -namespace ge { -class GE_FUNC_VISIBILITY GeFormatUtil { - public: - /// - /// @name TransShape - /// @brief transform the shape of tensor according to destination format - /// @param [in] src_desc source tensor desc - /// @param [in] dst_format destination format - /// @param [out] dst_shape destination shape - /// @return Status - /// - static Status TransShape(const TensorDesc &src_desc, Format dst_format, std::vector &dst_shape); -}; -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_GE_FORMAT_UTIL_H_ diff --git a/inc/graphengine/inc/framework/common/ge_inner_error_codes.h b/inc/graphengine/inc/framework/common/ge_inner_error_codes.h deleted file mode 100644 index 3697a5261..000000000 --- a/inc/graphengine/inc/framework/common/ge_inner_error_codes.h +++ /dev/null @@ -1,319 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
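// GeFormatUtil::TransShape() above re-expresses a tensor shape in the layout implied by a
// destination format. A hedged sketch: ToNC1HWC0Shape() is hypothetical, and FORMAT_NC1HWC0
// is one of the formats declared in the external graph/types.h, used here only as an
// example target.
#include <vector>
#include "framework/common/ge_format_util.h"
#include "graph/tensor.h"

ge::Status ToNC1HWC0Shape(const ge::TensorDesc &src_desc, std::vector<int64_t> &dst_shape) {
  // On success dst_shape holds the dimensions reordered for the 5D NC1HWC0 layout.
  return ge::GeFormatUtil::TransShape(src_desc, ge::FORMAT_NC1HWC0, dst_shape);
}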
- */ - -/*lint -e* */ -#ifndef INC_FRAMEWORK_COMMON_GE_INNER_ERROR_CODES_H_ -#define INC_FRAMEWORK_COMMON_GE_INNER_ERROR_CODES_H_ - -#include -#include -#include "ge/ge_api_error_codes.h" - -namespace ge { -// System ID -enum SystemIdType { SYSID_GE = 8 }; -// Runtime location -enum LogRuntime { - RT_HOST = 0b01, - RT_DEVICE = 0b10, -}; - -// Sub model -enum SubModuleId { - COMMON_MODULE = 0, - CLIENT_MODULE = 1, - INIT_MODULE = 2, - SESSION_MODULE = 3, - GRAPH_MODULE = 4, - ENGINE_MODULE = 5, - OPS_MODULE = 6, - PLUGIN_MODULE = 7, - RUNTIME_MODULE = 8, - EXECUTOR_MODULE = 9, - GENERATOR_MODULE = 10, -}; - -// Error code type -enum ErrorCodeType { - ERROR_CODE = 0b01, - EXCEPTION_CODE = 0b10, -}; - -// Error level -enum ErrorLevel { - COMMON_LEVEL = 0b000, - SUGGESTION_LEVEL = 0b001, - MINOR_LEVEL = 0b010, - MAJOR_LEVEL = 0b011, - CRITICAL_LEVEL = 0b100, -}; - -// Each module defines error codes using the following macros -#define GE_ERRORNO_COMMON(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, COMMON_MODULE, name, value, desc) -#define GE_ERRORNO_CLIENT(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, CLIENT_MODULE, name, value, desc) -#define GE_ERRORNO_INIT(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, INIT_MODULE, name, value, desc) -#define GE_ERRORNO_SESSION(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, SESSION_MODULE, name, value, desc) -#define GE_ERRORNO_GRAPH(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, GRAPH_MODULE, name, value, desc) -#define GE_ERRORNO_ENGINE(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, ENGINE_MODULE, name, value, desc) -#define GE_ERRORNO_OPS(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, OPS_MODULE, name, value, desc) -#define GE_ERRORNO_PLUGIN(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, PLUGIN_MODULE, name, value, desc) -#define GE_ERRORNO_RUNTIME(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, RUNTIME_MODULE, name, value, desc) -#define GE_ERRORNO_EXECUTOR(name, value, desc) \ - GE_ERRORNO(RT_DEVICE, ERROR_CODE, COMMON_LEVEL, SYSID_GE, EXECUTOR_MODULE, name, value, desc) -#define GE_ERRORNO_GENERATOR(name, value, desc) \ - GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, GENERATOR_MODULE, name, value, desc) - -// Get error code description -#define GE_GET_ERRORNO_STR(value) ge::StatusFactory::Instance()->GetErrDesc(value) - -// Common module error code definition -GE_ERRORNO_COMMON(MEMALLOC_FAILED, 0, "Failed to allocate memory!"); // 1343225856 -GE_ERRORNO_COMMON(PARAM_INVALID, 1, "Parameter's invalid!"); // 1343225857 -GE_ERRORNO_COMMON(CCE_FAILED, 2, "Failed to call CCE API!"); // 1343225858 -GE_ERRORNO_COMMON(RT_FAILED, 3, "Failed to call runtime API!"); // 1343225859 -GE_ERRORNO_COMMON(INTERNAL_ERROR, 4, "Internal errors"); // 1343225860 -GE_ERRORNO_COMMON(CSEC_ERROR, 5, "Failed to call libc_sec API!"); // 1343225861 -GE_ERRORNO_COMMON(TEE_ERROR, 6, "Failed to call tee API!"); // 1343225862 -GE_ERRORNO_COMMON(END_OF_SEQUENCE, 7, "End of sequence!"); // 1343225863 -GE_ERRORNO_COMMON(PATH_INVALID, 8, "Path is invalid!"); // 1343225864 - -// Error code for plugin manager -GE_ERRORNO_COMMON(GE_PLGMGR_PATH_INVALID, 30, "Path is invalid!"); // 1343225886 -GE_ERRORNO_COMMON(GE_PLGMGR_SO_NOT_EXIST, 31, "Failed to find any valid so file!"); // 1343225887 
-GE_ERRORNO_COMMON(GE_PLGMGR_FUNC_NOT_EXIST, 32, "Failed to find any function!"); // 1343225888 -GE_ERRORNO_COMMON(GE_PLGMGR_INVOKE_FAILED, 33, "Failed to invoke any function!"); // 1343225889 - -GE_ERRORNO_COMMON(UNSUPPORTED, 100, "Parameter's unsupported!"); - -GE_ERRORNO_COMMON(OUT_OF_MEMORY, 101, "Out of memory!"); - -// Client module error code definition -GE_ERRORNO_CLIENT(GE_CLI_INIT_FAILED, 1, "GEInitialize Failed."); // 1343229953 -GE_ERRORNO_CLIENT(GE_CLI_FINAL_FAILED, 2, "GEFinalize Failed."); // 1343229954 -GE_ERRORNO_CLIENT(GE_CLI_SESS_CONSTRUCT_FAILED, 3, "Session constructor Failed."); // 1343229955 -GE_ERRORNO_CLIENT(GE_CLI_SESS_DESTROY_FAILED, 4, "Session destructor Failed."); // 1343229956 -GE_ERRORNO_CLIENT(GE_CLI_SESS_ADD_FAILED, 5, "Session AddGraph Failed."); // 1343229957 -GE_ERRORNO_CLIENT(GE_CLI_SESS_ADD_GRAPH_FAILED, 6, - "Session AddGraph Failed converting protobuf GraphProto."); // 1343229958 -GE_ERRORNO_CLIENT(GE_CLI_SESS_REMOVE_FAILED, 7, "Session RemoveGraph Failed."); // 1343229959 -GE_ERRORNO_CLIENT(GE_CLI_SESS_RUN_FAILED, 8, "Session RunGraph Failed."); // 1343229960 -GE_ERRORNO_CLIENT(GE_CLI_SESS_RUN_TENSOR_FAILED, 9, - "Session RunGraph Failed converting protobuf TensorProto."); // 1343229961 -GE_ERRORNO_CLIENT(GE_CLI_GE_ALREADY_INITIALIZED, 10, "GE is already initialized."); // 1343229962 -GE_ERRORNO_CLIENT(GE_CLI_GE_NOT_INITIALIZED, 11, "GE is not yet initialized or is finalized."); // 1343229963 - -// Init module error code definition -GE_ERRORNO_INIT(GE_MULTI_INIT, 0, "Multiple initializations are not supported."); // 1343234048 -GE_ERRORNO_INIT(GE_FINALIZE_NOT_INIT, 1, "Finalize is not allowed before initialization."); // 1343234049 -GE_ERRORNO_INIT(GE_MULTI_FINALIZE, 2, "Multiple finalizations are not supported."); // 1343234050 -GE_ERRORNO_INIT(GE_PROF_MULTI_INIT, 3, "Multiple profiling initializations are not supported."); // 1343234051 -GE_ERRORNO_INIT(GE_PROF_NOT_INIT, 4, "Profing initializations have not been done."); // 1343234052 -GE_ERRORNO_INIT(GE_PROF_MODE_CONFLICT, 5, - "Profiling command mode which is preferred is running, the api mode will not work."); // 1343234053 - -// Session module error code definition -GE_ERRORNO_SESSION(GE_SESS_INIT_FAILED, 0, "Failed to initialize session."); // 1343238144 -GE_ERRORNO_SESSION(GE_SESS_ALREADY_RUNNING, 1, "Session already running,not support parallel run."); // 1343238145 -GE_ERRORNO_SESSION(GE_SESS_GRAPH_NOT_EXIST, 2, "Graph ID not exist."); // 1343238146 -GE_ERRORNO_SESSION(GE_SESS_GRAPH_ALREADY_EXIST, 3, "Graph ID already exist."); // 1343238147 -GE_ERRORNO_SESSION(GE_SESS_GRAPH_IS_RUNNING, 4, "Graph is running."); // 1343238148 -GE_ERRORNO_SESSION(GE_SESSION_NOT_EXIST, 5, "Can not find session with specific session id."); // 1343238149 -GE_ERRORNO_SESSION(GE_SESSION_MANAGER_NOT_INIT, 6, "Session manager has not been initialized."); // 1343238150 - -// Graph module error code definition -GE_ERRORNO_GRAPH(GE_GRAPH_INIT_FAILED, 0, "Failed to initialize graph."); // 1343242240 -GE_ERRORNO_GRAPH(GE_GRAPH_ALREADY_RUNNING, 1, "graph already running,not support parallel run."); // 1343242241 -GE_ERRORNO_GRAPH(GE_GRAPH_GRAPH_NOT_EXIST, 2, "graph ID not exist."); // 1343242242 -GE_ERRORNO_GRAPH(GE_GRAPH_GRAPH_ALREADY_EXIST, 3, "Graph ID already exist."); // 1343242243 -GE_ERRORNO_GRAPH(GE_GRAPH_GRAPH_IS_RUNNING, 4, "Graph is running."); // 1343242244 -GE_ERRORNO_GRAPH(GE_GRAPH_MALLOC_FAILED, 5, "Graph malloc failed."); // 1343242245 -GE_ERRORNO_GRAPH(GE_GRAPH_FREE_FAILED, 6, "Graph FREE failed."); // 
1343242246 -GE_ERRORNO_GRAPH(GE_GRAPH_NOT_MALLOC_BUFFER, 7, "Graph FREE failed, not malloc buffer."); // 1343242247 -GE_ERRORNO_GRAPH(GE_GRAPH_PARAM_NULLPTR, 8, "Graph param is NULL."); // 1343242248 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, 9, "Get computeGraph by graphNode failed."); // 1343242249 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_RUN_GRAPH_NODE_NULL, 10, "Run graph node is null."); // 1343242250 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_RUN_GRAPH_INVALID, 11, "Get computeGraph by graphNode failed."); // 1343242251 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_INSERT_DYN_OP_FAILED, 12, "Graph which insert dynamic op failed."); // 1343242252 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_PREPROCESS_FAILED, 13, "Graph preprocess failed."); // 1343242253 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_GRAPH_FUSION_FAILED, 14, "Graph fusion failed."); // 1343242254 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_CALIBRATION_FAILED, 16, "Calibration failed."); // 1343242256 -GE_ERRORNO_GRAPH(GE_GRAPH_SUBGRAPH_NUM_ZERO, 17, "Graph partition success, but subGraph num is 0."); // 1343242257 -GE_ERRORNO_GRAPH(GE_GRAPH_SUBGRAPH_ENGINENAME_REPEATED, 18, "Graph subGraph engine name is repeated."); // 1343242258 -GE_ERRORNO_GRAPH(GE_GRAPH_GET_IN_OUT_FAILED, 19, "OME GetInputOutputDescInfo failed."); // 1343242259 -GE_ERRORNO_GRAPH(GE_GRAPH_DATA_INPUT_FAILED, 20, "OME DataInput failed."); // 1343242260 -GE_ERRORNO_GRAPH(GE_GRAPH_EXECUTE_FAILED, 21, "Execute graph failed."); // 1343242261 -GE_ERRORNO_GRAPH(GE_GRAPH_DUPLICATE_ENGINE, 22, "Duplicate engine."); // 1343242262 -GE_ERRORNO_GRAPH(GE_GRAPH_EMPTY_SUBGRAPH, 23, "Empty sub graph info."); // 1343242263 -GE_ERRORNO_GRAPH(GE_GRAPH_EXECUTE_NOT_INIT, 24, "Call SetCondition first."); // 1343242264 -GE_ERRORNO_GRAPH(GE_GRAPH_PREPARE_FAILED, 25, "Prepare failed."); // 1343242265 -GE_ERRORNO_GRAPH(GE_GRAPH_SERIALIZE_FAILED, 26, "OMG SerializeModelDef failed."); // 1343242266 -GE_ERRORNO_GRAPH(GE_GRAPH_SAVE_FAILED, 27, "OMG SaveModel failed."); // 1343242267 -GE_ERRORNO_GRAPH(GE_GRAPH_PRERUN_FAILED, 28, "PreRun failed."); // 1343242268 -GE_ERRORNO_GRAPH(GE_GRAPH_SUBGRAPH_ID_INVALID, 29, "Graph subGraph id is invalid."); // 1343242269 -GE_ERRORNO_GRAPH(GE_GRAPH_INFERSHAPE_FAILED, 30, "Prepare Graph infershape failed"); // 1343242270 -GE_ERRORNO_GRAPH(GE_GRAPH_ISNULL, 31, "RunGraph input compute graph is NULL."); // 1343242271 -GE_ERRORNO_GRAPH(GE_GRAPH_SYNC_MODEL_FAILED, 32, "Graph SyncExecuteModel failed."); // 1343242272 -GE_ERRORNO_GRAPH(GE_GRAPH_RUNGRAPH_FAILED, 33, "Graph RunGraph failed."); // 1343242273 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_PARSE_DYN_OP_FAILED, 34, "Parse dynamic node config file failed"); // 1343242274 -GE_ERRORNO_GRAPH(GE_GRAPH_MULTI_SUBGRAPH_BUILD, 35, "Save model with multiple sub graph"); // 1343242275 -GE_ERRORNO_GRAPH(GE_GRAPH_GRAPH_NODE_NULL, 36, "Graph get graph node failed."); // 1343242276 -GE_ERRORNO_GRAPH(GE_GRAPH_NOT_INIT, 37, "Graph do not init."); // 1343242277 -GE_ERRORNO_GRAPH(GE_GRAPH_NULL_INPUT, 38, "input graph is null"); // 1343242278 -GE_ERRORNO_GRAPH(GE_GRAPH_TOPO_SORT_FAILED, 39, "topological sorting an partition failed"); // 1343242279 -GE_ERRORNO_GRAPH(GE_GRAPH_EMPTY_PARTITION, 40, "accessing an empty partition"); // 1343242280 -GE_ERRORNO_GRAPH(GE_GRAPH_UNSUPPORTED, 41, "unsupported feature in partition"); // 1343242281 -GE_ERRORNO_GRAPH(GE_GRAPH_ASSIGN_ENGINE_FAILED, 42, "assign engine failed"); // 1343242282 -GE_ERRORNO_GRAPH(GE_GRAPH_ADD_PLC_END_FAILED, 43, "add placeholder end node failed"); // 1343242283 
-GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_PARSE_OUT_NODE_FAILED, 44, "Parse out node failed."); // 1343242284 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIMIZE_INSERT_OP_PARSE_FAILED, 45, - "OMG parse dynamic node config file failed."); // 1343242285 -GE_ERRORNO_GRAPH(GE_GRAPH_SAVE_WEIGHTS_FAILED, 46, "OMG Save Weights to Model failed."); // 1343242286 -GE_ERRORNO_GRAPH(GE_GRAPH_EMPTY_STRING_NAME, 47, "Empty string name."); // 1343242287 -GE_ERRORNO_GRAPH(GE_GRAPH_EMPTY_VARIABLE_TENSOR_TABLE, 48, "Empty variable-tensor table."); // 1343242288 -GE_ERRORNO_GRAPH(GE_GRAPH_VARIABLE_ALREADY_EXIST, 49, "Variable already exist."); // 1343242289 -GE_ERRORNO_GRAPH(GE_GRAPH_VARIABLE_DOES_NOT_EXIST, 50, "Variable does not exist."); // 1343242290 -GE_ERRORNO_GRAPH(GE_GRAPH_OPTIONS_INVALID, 51, "Client session options is invalid."); // 1343242291 -GE_ERRORNO_GRAPH(GE_GRAPH_NO_OUTPUT_DESC_INFO, 52, "No output desc info."); // 1343242292 -GE_ERRORNO_GRAPH(GE_GRAPH_OUTPUT_DESCINFO_TENSOR_NUM_MISMATCH, 53, - "Number of output descinfo and tensor mismatch."); // 1343242293 -GE_ERRORNO_GRAPH(GE_GRAPH_FILENAMEPREFIX_INVALID, 54, "Graph Save Model fileNamePrefix is invalid."); // 1343242294 -GE_ERRORNO_GRAPH(GE_GRAPH_NOT_BUILT, 55, "Graph is not built before SaveModel."); // 1343242295 -GE_ERRORNO_GRAPH(GE_GRAPH_SAVEMODEL_FAILED, 56, "Graph SaveModel failed."); // 1343242296 -GE_ERRORNO_GRAPH(GE_GRAPH_MEMORY_ALLOC_FAILED, 57, "Failed allocating memory for model file header."); // 1343242297 -GE_ERRORNO_GRAPH(GE_GRAPH_NODE_SEARCHER_REMOVE_GRAPH_FAILED, 58, "Failed remove graph in node seacher."); // 1343242298 -GE_ERRORNO_GRAPH(GE_GRAPH_NODE_SEARCHER_ADD_GRAPH_FAILED, 59, "Failed add graph in node seacher."); // 1343242299 -GE_ERRORNO_GRAPH(GE_GRAPH_NODE_SEARCHER_GET_GRAPH_REBUILD_FAILED, 60, - "Failed add graph in node seacher."); // 1343242300 -GE_ERRORNO_GRAPH(GE_GRAPH_NODE_SEARCHER_SET_GRAPH_FINISH_REBUILD_GRAPH_FAILED, 61, - "Failed set graph finish rebuild in node searcher."); // 1343242301 -GE_ERRORNO_GRAPH(GE_GRAPH_VARIABLE_OP_PASS_FAILED, 62, "Failed to run variable pass."); // 1343242302 - -// Engine_manager module error code definition -GE_ERRORNO_ENGINE(GE_ENG_INIT_FAILED, 0, "Failed to initialize engine."); // 1343246336 -GE_ERRORNO_ENGINE(GE_ENG_FINALIZE_FAILED, 1, "Engine finalize failed."); // 1343246337 -GE_ERRORNO_ENGINE(GE_ENG_MEMTYPE_ERROR, 2, "Memory type HBM is necessary when engine is in device"); // 1343246338 - -// Optimize errocode -GE_ERRORNO_GRAPH(TO_BE_DELETED, 63, "The node of the graph to be deleted."); // 1343242303 -GE_ERRORNO_GRAPH(NOT_CHANGED, 64, "The node of the graph no changed."); // 1343242304 - -// Ops module error code definition -GE_ERRORNO_OPS(GE_OPS_KERNEL_STORE_INIT_FAILED, 0, "Failed to initialize OpsKernelInfoStore."); // 1343250432 -GE_ERRORNO_OPS(GE_OPS_GRAPH_OPTIMIZER_INIT_FAILED, 1, "Failed to initialize GraphOptimizer."); // 1343250433 -GE_ERRORNO_OPS(GE_OPS_KERNEL_INFO_NOT_EXIST, 2, "OpsKernelInfo not exist."); // 1343250434 -GE_ERRORNO_OPS(GE_OPS_KERNEL_STORE_NOT_EXIST, 3, "OpsKernelInfoStore not exist."); // 1343250435 -GE_ERRORNO_OPS(GE_OPS_CALC_RUNNING_PARAM_FAILED, 4, "Failed to CalcOpRunningParam."); // 1343250436 -GE_ERRORNO_OPS(GE_OPS_GENERATE_TASK_FAILED, 5, "Failed to GenerateTask."); // 1343250437 -GE_ERRORNO_OPS(GE_OPS_OPTIMIZE_ORIGINAL_GRAPH_FAILED, 6, "Failed to OptimizeOriginalGraph."); // 1343250438 -GE_ERRORNO_OPS(GE_OPS_OPTIMIZE_FUSED_GRAPH_FAILED, 7, "Failed to OptimizeFusedGraph."); // 1343250439 -GE_ERRORNO_OPS(GE_OPS_ENGINE_IS_NOT_REGISTERED, 8, "Engine is 
not registered."); // 1343250440 -GE_ERRORNO_OPS(GE_OPS_GET_NO_VALID_SO, 9, - "There is no valid so about OpsKernelInfoStore or GraphOptimizer."); // 1343250441 -GE_ERRORNO_OPS(GE_OPS_GET_OPTIMIZE_BY_ENGINE_FAILED, 10, "Failed to get graphOptimizer by name."); // 1343250442 -GE_ERRORNO_OPS(GE_OPS_GET_OPTIMIZE_BY_PRIORITY_FAILED, 11, "Failed to get graphOptimizer by priority."); // 1343250443 -GE_ERRORNO_OPS(GE_OPS_LOAD_GE_OPTIMIZER_FAILED, 12, "Failed to load ge graphOptimizer."); // 1343250444 - -// Runtime module error code definition -GE_ERRORNO_RUNTIME(GE_RTI_DEVICE_ID_INVALID, 1, "device id is invalid"); -GE_ERRORNO_RUNTIME(GE_RTI_DEVICE_NOT_READY, 2, "set device failed, device not ready"); -GE_ERRORNO_RUNTIME(GE_RTI_MEMALLOC_FAILED, 3, "malloc memory failed"); -GE_ERRORNO_RUNTIME(GE_RTI_MODEL_NOT_LOADED, 4, "model has not been loaded"); -GE_ERRORNO_RUNTIME(GE_RTI_THREAD_POOL_IS_NULL, 5, "model excute failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_CCE_CREATE_HANDLE_FAILED, 6, "cce create handle failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_CCE_SET_STREAM_FAILED, 7, "cce set stream failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_CREATE_RTMODEL_FAILED, 8, "call runtime create rtModel failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_CREATE_STREAM_FAILED, 9, "call runtime create stream failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_BIND_STREAM_FAILED, 10, "call runtime bind stream to model failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_CREATE_LABLE_FAILED, 11, "call runtime create lable failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MODEL_LOAD_COMPLETE_FAILED, 12, "call runtime model load complete failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MODEL_GET_TASK_ID_FAILED, 14, "call runtime get task id failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_KERNEL_LAUNCH_FAILED, 13, "call runtime kernel launch failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_KERNEL_LAUNCHEX_FAILED, 15, "call runtime kernel launchex failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_KERNEL_FUSION_START_FAILED, 16, "call runtime kernel fusion start failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_KERNEL_FUSION_END_FAILED, 17, "call runtime kernel fusion end failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_LABEL_SET_FAILED, 18, "call runtime lable set failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_LABLE_GOTO_FAILED, 19, "call runtime lable goto failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_LABLE_SWITCH_FAILED, 20, "call runtime lable switch failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MEM_ALLOC_MANAGED_FAILED, 21, "call runtime mem alloc managed failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MEM_FREE_MANAGED_FAILED, 22, "call runtime mem free managed failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_FREE_FAILED, 23, "call runtime free failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_STREAM_SYNC_FAILED, 24, "call runtime sync stream failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MODEL_EXCUTE_FAILED, 25, "call runtime model excute failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MEM_ASYNC_FAILED, 26, "call runtime mem async failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MEM_ALLOC_HOST_FAILED, 27, "call runtime alloc host memory failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MEM_FREE_HOST_FAILED, 28, "call runtime free host memory failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MEM_ALLOC_DEVICE_FAILED, 29, "call runtime alloc device memory failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_MEM_FREE_DEVICE_FAILED, 30, "call runtime free device memory failed"); 
-GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_FLUSH_CACHE_FAILED, 31, "call runtime flush cache failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_UNBIND_STREAM_FAILED, 32, "unbind rtstream from rtmodel failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_DESTORY_STREAM_FAILED, 33, "destory stream failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_DESTORY_LABEL_FAILED, 34, "destory label failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_DESTORY_MODEL_FAILED, 35, "destory model failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_CCE_TRANS_TENSOR_FAILED, 36, "call cce transfer tensor descriptor failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_CCE_TRANS_FILTER_FAILED, 37, "call cce transfer filter descriptor failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_CCE_UPDATE_KERNEL_ARGS_FAILED, 38, "call cce update kernel args failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_CCE_DESTORY_HANDLE_FAILED, 39, "destory handle failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_CREATE_EVENT_FAILED, 40, "call rutime create event failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_EVENT_RECORD_FAILED, 41, "call rutime event record failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_STREAM_WAIT_EVENT_FAILED, 42, "call rutime stream wait event failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_HCCL_BROADCAST_FAILED, 43, "call hccl hcom broadcast failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_HCCL_ALL_GATHER_FAILED, 44, "call hccl hcom all gather failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_HCCL_ALL_REDUCE_FAILED, 45, "call hccl hcom all reduce failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_RUNTIME_DESTORY_EVENT_FAILED, 46, "destory rt event failed"); -GE_ERRORNO_RUNTIME(GE_RTI_CALL_HCCL_REDUCE_SCATTER_FAILED, 47, "call hccl hcom reduce scatter failed"); - -// Executor module error code definition -GE_ERRORNO_EXECUTOR(GE_EXEC_NOT_INIT, 1, "GE Executor is not yet initialized."); -GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_PATH_INVALID, 2, "Model file path is invalid."); -GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_KEY_PATH_INVALID, 3, "Key file path of model is invalid."); -GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_ID_INVALID, 4, "Model id is invalid."); -GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_DATA_SIZE_INVALID, 5, "Data size of model is invalid."); -GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_PARTITION_NUM_INVALID, 6, "Partition number of model is invalid."); -GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_QUEUE_ID_INVALID, 7, "Queue id of model is invalid."); -GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION, 8, "Model does not support encryption."); -GE_ERRORNO_EXECUTOR(GE_EXEC_READ_MODEL_FILE_FAILED, 9, "Failed to read model file."); -GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_MODEL_REPEATED, 10, "The model is loaded repeatedly."); -GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_MODEL_PARTITION_FAILED, 11, "Failed to load model partition."); -GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED, 12, "Failed to load weight partition."); -GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_TASK_PARTITION_FAILED, 13, "Failed to load task partition."); -GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_KERNEL_PARTITION_FAILED, 14, "Failed to load kernel partition."); -GE_ERRORNO_EXECUTOR(GE_EXEC_ALLOC_FEATURE_MAP_MEM_FAILED, 15, "Failed to allocate feature map memory."); -GE_ERRORNO_EXECUTOR(GE_EXEC_ALLOC_WEIGHT_MEM_FAILED, 16, "Failed to allocate weight memory."); -GE_ERRORNO_EXECUTOR(GE_EXEC_ALLOC_VAR_MEM_FAILED, 17, "Failed to allocate variable memory."); -GE_ERRORNO_EXECUTOR(GE_AIPP_NOT_EXIST, 18, "GE AIPP is not exist."); -GE_ERRORNO_EXECUTOR(GE_DYNAMIC_AIPP_NOT_SUPPORT_QUERY, 19, "GE Dynamic AIPP is not support to query temporarily."); 
-GE_ERRORNO_EXECUTOR(GE_EXEC_ALLOC_P2P_MEM_FAILED, 20, "Failed to allocate P2P memory"); - -// Generator module error code definition -GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_INIT_FAILED, 1, "Graph manager initialize failed."); -GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, 2, "Graph manager add graph failed."); -GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, 3, "Graph manager build graph failed."); -GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_FINALIZE_FAILED, 4, "Graph manager finalize failed."); -GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_SAVE_MODEL_FAILED, 5, "Graph manager save model failed."); - -#define RT_ERROR_TO_GE_STATUS(RT_ERROR) static_cast(RT_ERROR) -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_GE_INNER_ERROR_CODES_H_ diff --git a/inc/graphengine/inc/framework/common/ge_types.h b/inc/graphengine/inc/framework/common/ge_types.h deleted file mode 100644 index 64231b8cb..000000000 --- a/inc/graphengine/inc/framework/common/ge_types.h +++ /dev/null @@ -1,303 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_GE_TYPES_H_ -#define INC_FRAMEWORK_COMMON_GE_TYPES_H_ - -#include - -#include -#include - -#include "framework/common/fmk_error_codes.h" -#include "ge/ge_api_error_codes.h" -#include "external/graph/types.h" -#include "external/ge/ge_api_types.h" - -namespace ge { -enum RuntimeType { HOST = 0, DEVICE = 1 }; - -enum PerfLevel { GEN_TASK_WITH_FUSION = -1, GEN_TASK_WITHOUT_L2FUSION = 3, GEN_TASK_WITHOUT_FUSION = 4 }; - -enum FrameworkType { - CAFFE = 0, - MINDSPORE = 1, - TENSORFLOW = 3, - ANDROID_NN, - ONNX, -}; - -const std::map kFwkTypeToStr = { - {"0", "Caffe"}, {"1", "MindSpore"}, {"3", "TensorFlow"}, {"4", "Android_NN"}, {"5", "Onnx"}}; - -enum OpEngineType { - ENGINE_SYS = 0, // default engine - ENGINE_AICORE = 1, - ENGINE_VECTOR = 2, - ENGINE_AICUBE = 3, // not support - ENGINE_AIVECTOR = 4 // not support -}; - -enum InputAippType { DATA_WITHOUT_AIPP = 0, DATA_WITH_STATIC_AIPP, DATA_WITH_DYNAMIC_AIPP, DYNAMIC_AIPP_NODE }; - -const char *const GE_ENGINE_ATTR_MEM_TYPE_HBM = "HBM"; -const char *const GE_OPTION_EXEC_PLACEMENT = "ge.exec.placement"; - -// profiling data -const std::string kTaskTypeAicore = "AI_CORE"; -const std::string kTaskTypeAicpu = "AI_CPU"; -const std::string kTaskTypeInvalid = "TASK_TYPE_INVALID"; - -// dynamic execute mode -const char *const kLazyRecompile = "lazy_recompile"; - -// Data cache, including data address and length -struct DataBuffer { - public: - void *data; // Data address - uint64_t length; // Data length - bool isDataSupportMemShare = false; - uint32_t placement = 0; - DataBuffer(void *dataIn, uint64_t len, bool isSupportMemShare, uint32_t placement = 0) - : data(dataIn), length(len), isDataSupportMemShare(isSupportMemShare), placement(placement) {} - - DataBuffer() : data(nullptr), length(0), isDataSupportMemShare(false) {} -}; - -/// -/// @ingroup domi_ome -/// 
@brief External input data -/// -struct InputData { - uint32_t index; // Index of input data - uint32_t timestamp; // Data creation time - uint32_t timeout; // Processing timeout - uint32_t model_id; // Model ID required for data processing - uint64_t request_id = 0; // Request ID - std::vector blobs; // Actual input data, currently only supports one input - bool is_dynamic_batch = false; // Whether is dynamic batch size scene, default:false - std::string batch_label; // Gear used for current inference in dynamic batch scene - std::vector> shapes; // Input shapes -}; - -/// Output result structure definition -struct OutputData { - uint32_t index; // Index of input data - uint32_t model_id; // The model ID corresponding to the processing result - /// Output data cache, arranged in sequence of output operators. - /// If the operator has multiple outputs, - /// the data buffer order of the operator is the same as that defined in the - /// offline model - std::vector blobs; -}; - -// The definition of command data structure -struct Command { - std::string cmd_type; // Command type - std::vector cmd_params; // Command params - uint64_t module_index; // prof module -}; - -// The definition of I/O shape description -struct ShapeDescription { - int64_t num = 0; - int64_t channel = 0; - int64_t height = 0; - int64_t width = 0; - std::vector dims; - std::vector> shape_ranges; -}; - -// Definition of input and output description information -struct InputOutputDescInfo { - std::string name; - uint64_t size; - uint32_t data_type; - ShapeDescription shape_info; -}; - -// Definition of model io dims -struct InputOutputDims { - std::string name; - size_t dim_num; - uint32_t size; - std::vector dims; -}; - -// Definition of model io dims -struct OriginInputInfo { - Format format; - DataType data_type; - uint32_t dim_num; -}; - -// The structure of AIPP info -struct AippConfigInfo { - int8_t aipp_mode; - int8_t input_format; - int32_t src_image_size_w; - int32_t src_image_size_h; - int8_t crop; - int32_t load_start_pos_w; - int32_t load_start_pos_h; - int32_t crop_size_w; - int32_t crop_size_h; - int8_t resize; - int32_t resize_output_w; - int32_t resize_output_h; - int8_t padding; - int32_t left_padding_size; - int32_t right_padding_size; - int32_t top_padding_size; - int32_t bottom_padding_size; - int8_t csc_switch; - int8_t rbuv_swap_switch; - int8_t ax_swap_switch; - int8_t single_line_mode; - int32_t matrix_r0c0; - int32_t matrix_r0c1; - int32_t matrix_r0c2; - int32_t matrix_r1c0; - int32_t matrix_r1c1; - int32_t matrix_r1c2; - int32_t matrix_r2c0; - int32_t matrix_r2c1; - int32_t matrix_r2c2; - int32_t output_bias_0; - int32_t output_bias_1; - int32_t output_bias_2; - int32_t input_bias_0; - int32_t input_bias_1; - int32_t input_bias_2; - int32_t mean_chn_0; - int32_t mean_chn_1; - int32_t mean_chn_2; - int32_t mean_chn_3; - float min_chn_0; - float min_chn_1; - float min_chn_2; - float min_chn_3; - float var_reci_chn_0; - float var_reci_chn_1; - float var_reci_chn_2; - float var_reci_chn_3; - int8_t support_rotation; - uint32_t related_input_rank; - uint32_t max_src_image_size; -}; - -// The structure of offline Modeldata -struct ModelData { - void *model_data = nullptr; // Model binary data start addr - uint32_t model_len = 0; // Model binary data length - int32_t priority = 0; // Model priority - std::string key; // Key path for encrypt model, Empty for unencrypt - std::string om_name; // om file name, used for data dump -}; - -// The definition of Model information -struct ModelInfo { - uint32_t 
version = 0; - std::string name; - bool is_encrypt = 0; // 0:unencrypt, 1:encrypt - std::vector input_desc; - std::vector output_desc; - uint8_t reserved[3] = {0}; // 3-byte reserved field -}; - -// Asynchronous callback interface, implemented by the caller -class GE_FUNC_VISIBILITY ModelListener { - public: - virtual ~ModelListener() {} - /// - /// @brief Asynchronous callback interface - /// @param [in] model_id Model ID of the callback - /// @param [in] data_index Index of the input_data - /// @param [in] resultCode Execution results - /// - virtual Status OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t result_code, - std::vector &outputs) = 0; -}; - -// OMM configuration item -struct Options { - int64_t session_id; - int32_t device_id; - std::string job_id; - bool isUseHcom; - bool isUseHvd; - bool deployMode; - bool isAICPUMode; - bool enable_atomic; - std::string podName; - int64_t rankId; - std::string rankTableFile; - int32_t ge_hccl_flag = 0; - int32_t physical_device_id; - std::string profiling_mode; - std::string profiling_options; -}; - -// Profiling info of task -struct TaskDescInfo { - std::string model_name; - std::string op_name; - std::string op_type; - uint32_t block_dim; - uint32_t task_id; - uint32_t stream_id; - std::string shape_type; - int64_t cur_iter_num; - std::string task_type; - std::vector input_format; - std::vector> input_shape; - std::vector input_data_type; - std::vector output_format; - std::vector> output_shape; - std::vector output_data_type; -}; - -struct OpDescInfo { - std::string op_name; - std::string op_type; - uint32_t task_id; - uint32_t stream_id; - std::vector input_format; - std::vector> input_shape; - std::vector input_data_type; - std::vector input_addrs; - std::vector input_size; - std::vector output_format; - std::vector> output_shape; - std::vector output_data_type; - std::vector output_addrs; - std::vector output_size; -}; -struct ModelDumpConfig { - std::string model_name; - std::vector layers; -}; - -struct DumpConfig { - std::string dump_path; - std::string dump_mode; - std::string dump_status; - std::string dump_op_switch; - std::string dump_debug; - std::vector dump_list; -}; -} // namespace ge -#endif // INC_FRAMEWORK_COMMON_GE_TYPES_H_ diff --git a/inc/graphengine/inc/framework/common/helper/model_helper.h b/inc/graphengine/inc/framework/common/helper/model_helper.h deleted file mode 100644 index e25d5d6fe..000000000 --- a/inc/graphengine/inc/framework/common/helper/model_helper.h +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
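// DataBuffer and InputData above (from the deleted ge_types.h) are the plain containers
// callers hand to the executor. A minimal sketch of wrapping one existing buffer:
// MakeSingleInput() is hypothetical, the pointer, length and model id are placeholders,
// and ownership of the memory stays with the caller.
#include <cstdint>
#include "framework/common/ge_types.h"

ge::InputData MakeSingleInput(void *buffer, uint64_t buffer_len, uint32_t model_id) {
  ge::InputData input;
  input.index = 0U;           // first (and only) input of this request
  input.timestamp = 0U;
  input.timeout = 0U;         // no processing timeout
  input.model_id = model_id;  // model expected to consume the data

  // DataBuffer(void *dataIn, uint64_t len, bool isSupportMemShare, uint32_t placement = 0)
  input.blobs.emplace_back(buffer, buffer_len, false);
  return input;
}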
- */ - -#ifndef INC_FRAMEWORK_COMMON_HELPER_MODEL_HELPER_H_ -#define INC_FRAMEWORK_COMMON_HELPER_MODEL_HELPER_H_ - -#include -#include - -#include "common/fmk_types.h" -#include "common/helper/om_file_helper.h" -#include "common/types.h" -#include "graph/model.h" -#include "model/ge_model.h" -#include "model/ge_root_model.h" - -namespace ge { -class GE_FUNC_VISIBILITY ModelHelper { - public: - ModelHelper() = default; - ~ModelHelper(); - - Status SaveToOmModel(const GeModelPtr &ge_model, const SaveParam &save_param, const std::string &output_file, - ge::ModelBufferData &model); - Status SaveToOmRootModel(const GeRootModelPtr &ge_root_model, const SaveParam &save_param, const string &output_file, - ModelBufferData &model, bool is_unknown_shape); - Status SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::string &output_file); - Status LoadModel(const ge::ModelData &model_data); - Status LoadRootModel(const ge::ModelData &model_data); - Status GetModelBufferData(ge::ModelBufferData &model); - - const ModelFileHeader *GetFileHeader() const { return file_header_; } - - GeModelPtr GetGeModel(); - GeRootModelPtr GetGeRootModel(); - void SetSaveMode(bool val) { is_offline_ = val; } - bool GetSaveMode(void) const { return is_offline_; } - bool GetModelType() const { return is_unknown_shape_model_; }; - - Status GetBaseNameFromFileName(const std::string &file_name, std::string &base_name); - Status GetModelNameFromMergedGraphName(const std::string &graph_name, std::string &model_name); - - private: - bool is_assign_model_ = false; - bool is_offline_ = true; - bool is_unknown_shape_model_ = false; - ModelFileHeader *file_header_ = nullptr; - // Encrypted model need delete temp model and unencrypted model need not delete model - uint8_t *model_addr_tmp_ = nullptr; - uint32_t model_len_tmp_ = 0; - GeModelPtr model_; - GeRootModelPtr root_model_; - - ModelHelper(const ModelHelper &); - ModelHelper &operator=(const ModelHelper &); - Status GenerateGeModel(OmFileLoadHelper &om_load_helper); - Status GenerateGeRootModel(OmFileLoadHelper &om_load_helper); - Status LoadModelData(OmFileLoadHelper &om_load_helper); - void SetModelToGeModel(GeModelPtr &ge_model, Model &model); - Status LoadModelData(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); - Status LoadWeights(OmFileLoadHelper &om_load_helper); - Status LoadWeights(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); - Status LoadTask(OmFileLoadHelper &om_load_helper); - Status LoadTask(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); - Status LoadTBEKernelStore(OmFileLoadHelper &om_load_helper); - Status LoadTBEKernelStore(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); - Status LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper); - Status LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); - Status ReleaseLocalModelData() noexcept; - Status SaveModelPartition(std::shared_ptr &om_file_save_helper, ModelPartitionType type, - const uint8_t *data, size_t size, size_t model_index); - Status SaveModelDef(shared_ptr &om_file_save_helper, const GeModelPtr &ge_model, - Buffer &model_buffer, size_t model_index = 0); - Status SaveSizeToModelDef(const GeModelPtr &ge_model); - Status SaveModelWeights(shared_ptr &om_file_save_helper, const GeModelPtr &ge_model, - size_t model_index = 0); - Status SaveModelTbeKernel(shared_ptr &om_file_save_helper, const GeModelPtr &ge_model, - size_t 
model_index = 0); - Status SaveModelCustAICPU(shared_ptr &om_file_save_helper, const GeModelPtr &ge_model, - size_t model_index = 0); - Status SaveModelTaskDef(shared_ptr &om_file_save_helper, const GeModelPtr &ge_model, - Buffer &task_buffer, size_t model_index = 0); - Status SaveModelHeader(shared_ptr &om_file_save_helper, const GeModelPtr &ge_model, - size_t model_num = 1); - Status SaveAllModelPartiton(shared_ptr &om_file_save_helper, const GeModelPtr &ge_model, - Buffer &model_buffer, Buffer &task_buffer, size_t model_index = 0); -}; -} // namespace ge -#endif // INC_FRAMEWORK_COMMON_HELPER_MODEL_HELPER_H_ diff --git a/inc/graphengine/inc/framework/common/helper/om_file_helper.h b/inc/graphengine/inc/framework/common/helper/om_file_helper.h deleted file mode 100644 index 34509b393..000000000 --- a/inc/graphengine/inc/framework/common/helper/om_file_helper.h +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_HELPER_OM_FILE_HELPER_H_ -#define INC_FRAMEWORK_COMMON_HELPER_OM_FILE_HELPER_H_ - -#include -#include - -#include "external/ge/ge_ir_build.h" -#include "framework/common/fmk_types.h" -#include "framework/common/types.h" -#include "framework/common/ge_types.h" - -using ProcParam = struct PROC_PARAM; -using std::string; -using std::vector; - -namespace ge { -struct ModelPartition { - ModelPartitionType type; - uint8_t *data = 0; - uint32_t size = 0; -}; - -struct OmFileContext { - std::vector partition_datas_; - std::vector partition_table_; - uint32_t model_data_len_ = 0; -}; - -struct SaveParam { - int32_t encode_mode; - std::string ek_file; - std::string cert_file; - std::string hw_key_file; - std::string pri_key_file; - std::string model_name; -}; - -class GE_FUNC_VISIBILITY OmFileLoadHelper { - public: - Status Init(const ge::ModelData &model); - - Status Init(uint8_t *model_data, const uint32_t model_data_size); - - Status Init(uint8_t *model_data, const uint32_t model_data_size, uint32_t model_num); - - Status GetModelPartition(ModelPartitionType type, ModelPartition &partition); - - Status GetModelPartition(ModelPartitionType type, ModelPartition &partition, size_t model_index); - - OmFileContext context_; - - vector model_contexts_; - - private: - Status CheckModelValid(const ge::ModelData &model) const; - - Status LoadModelPartitionTable(uint8_t *model_data, const uint32_t model_data_size); - - Status LoadModelPartitionTable(uint8_t *model_data, const uint32_t model_data_size, uint32_t model_num); - - bool is_inited_{false}; -}; - -class GE_FUNC_VISIBILITY OmFileSaveHelper { - public: - ModelFileHeader &GetModelFileHeader() { return model_header_; } - - uint32_t GetModelDataSize() const { return context_.model_data_len_; } - - ModelPartitionTable *GetPartitionTable(); - - Status AddPartition(ModelPartition &partition); - - Status AddPartition(ModelPartition &partition, size_t cur_index); - - const std::vector &GetModelPartitions() const; - - 
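// ModelHelper above wraps parsing of a serialized om model (file header, model def, weights,
// task and kernel partitions) into an in-memory GeModel. A hedged load sketch:
// LoadOmIntoGeModel() is hypothetical, the caller is assumed to have read the .om bytes into
// ge::ModelData (model_data / model_len) beforehand, and GeModelPtr comes from the ge_model.h
// header pulled in by model_helper.h.
#include "framework/common/helper/model_helper.h"
#include "framework/common/ge_types.h"

ge::Status LoadOmIntoGeModel(const ge::ModelData &model_data, ge::GeModelPtr &ge_model) {
  ge::ModelHelper helper;
  const ge::Status ret = helper.LoadModel(model_data);  // checks the header and splits partitions
  if (ret != ge::SUCCESS) {
    return ret;
  }
  ge_model = helper.GetGeModel();  // deserialized in-memory model
  return ge::SUCCESS;
}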
Status SaveModel(const SaveParam &save_param, const char *target_file, ge::ModelBufferData &model, - bool is_offline = true); - - Status SaveModelToFile(const char *output_file, ge::ModelBufferData &model, bool is_offline = true); - - vector model_contexts_; - - ModelFileHeader model_header_; - OmFileContext context_; - - ModelPartitionTable *GetPartitionTable(size_t cur_ctx_index); - - Status SaveRootModel(const SaveParam &save_param, const char *output_file, ModelBufferData &model, bool is_offline); -}; -} // namespace ge -#endif // INC_FRAMEWORK_COMMON_HELPER_OM_FILE_HELPER_H_ diff --git a/inc/graphengine/inc/framework/common/l2_cache_optimize.h b/inc/graphengine/inc/framework/common/l2_cache_optimize.h deleted file mode 100644 index fdb1c8b5b..000000000 --- a/inc/graphengine/inc/framework/common/l2_cache_optimize.h +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_L2_CACHE_OPTIMIZE_H_ -#define INC_FRAMEWORK_COMMON_L2_CACHE_OPTIMIZE_H_ - -#include - -#include -#include -#include -#include - -#include "common/types.h" -#include "common/util.h" -#include "graph/compute_graph.h" - -using std::vector; - -namespace ge { -// Size of RC memory alignment, 2M -constexpr size_t ALIGN_SIZE = 2097152; - -constexpr uint32_t RC_VALUE_DEFAULT = 1; -constexpr uint32_t RC_VALUE_MAX = 32; - -// RC data type classification -enum RCType { - RC_DEFAULT, // Such as temporary workspace memory of operator, variable (including global and local variable) - RC_HCOM, // Output of gradient aggregation, RC value should be set to 0 - RC_L2LOSS, // Parameter of L2 loss operator, RC value should be set to 0 - RC_INPUTOUTPUT, // Input and output tensor of operator, RC value is returned by FE calculation - RC_WEIGHTS, // The weight, fp16, RC value used by FP/BP operator should be set to 1 or the actual access numbers - RC_DW, // The gradient data DW and RC value output by BP operator - // should be set to 1 or the actual access numbers - RC_ARGS // Args of FlowTable, actual access numbers -}; - -enum MemType { INPUT_TENSOR, OUTPUT_TENSOR, WEIGHT, WORKSPACE }; - -// Memory usage information < node, type, number > -struct NodeInfo { - string nodeName; - MemType memType; - size_t index; -}; - -// Memory block RC value -struct RCMemoryBlock { - RCType type; // RC type - size_t blockSize; // memory block size - size_t headOffset; // Start offset from base address - size_t tailOffset; // End offset from base address - uint32_t rcCount; // RC value - NodeInfo nodeInfo; // Input and output indexes of node objects to which RC belongs -}; - -// L2Cache optimizer -class GE_FUNC_VISIBILITY L2CacheOptimize { - public: - explicit L2CacheOptimize(ge::ComputeGraphPtr &graph); - ~L2CacheOptimize(); - - // Collect the information L2Cache Memory optimization - Status Gath(); - - private: - ge::ComputeGraphPtr graph_; - - // Save RC block information list - vector weightRCs; - vector opRCs; - - // Extract RC 
information generated by FE from compiled graph - void RetirveRCinfo(); - - // Take the maximum common divisor of RC values for the duplicate address - void Merge(vector &blocks); - - // The RC information is aligned with the 2m address - void Align(vector &blocks); - - // Weight of l2loss operator, output of gradient aggregation output, RC value set to 0 - void HandleOutputZeroRC(RCType type, ge::NodePtr node, vector &outputList, vector &blocks); - - // Processing operator input Tensor's RC - void HandOPInput(ge::NodePtr node, vector &inputList, vector &blocks); - - // Processing operator output Tensor's RC - void HandOPoutput(ge::NodePtr node, vector &outputList, vector &blocks); - - // maximum common divisor - uint32_t Measure(uint32_t x, uint32_t y) { - if (x == 0 || y == 0) return RC_VALUE_DEFAULT; - uint32_t z = y; - while (x % y != 0) { - z = x % y; - x = y; - y = z; - } - return z; - } - - bool Contain(const RCMemoryBlock &l_block, const RCMemoryBlock &r_block); - bool Cross(const RCMemoryBlock &l_block, const RCMemoryBlock &r_block); - bool Connect(const RCMemoryBlock &l_block, const RCMemoryBlock &r_block); -}; -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_L2_CACHE_OPTIMIZE_H_ \ No newline at end of file diff --git a/inc/graphengine/inc/framework/common/op/attr_value_util.h b/inc/graphengine/inc/framework/common/op/attr_value_util.h deleted file mode 100644 index 28d48c1d1..000000000 --- a/inc/graphengine/inc/framework/common/op/attr_value_util.h +++ /dev/null @@ -1,174 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
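The Merge step above collapses blocks that alias the same address by taking the greatest common divisor of their RC counts; Measure is Euclid's algorithm written with a scratch variable. A standalone sketch of that rule, independent of the GE types:

#include <cstdint>

// Merged RC count of two aliasing blocks is gcd(rc_a, rc_b); a zero count
// falls back to RC_VALUE_DEFAULT (1), matching the header above.
static uint32_t MergedRcCount(uint32_t rc_a, uint32_t rc_b) {
  if ((rc_a == 0U) || (rc_b == 0U)) {
    return 1U;
  }
  while (rc_b != 0U) {
    const uint32_t r = rc_a % rc_b;
    rc_a = rc_b;
    rc_b = r;
  }
  return rc_a;
}
// For example, MergedRcCount(8U, 12U) == 4U and MergedRcCount(32U, 1U) == 1U.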
- */ - -#ifndef INC_FRAMEWORK_COMMON_OP_ATTR_VALUE_UTIL_H_ -#define INC_FRAMEWORK_COMMON_OP_ATTR_VALUE_UTIL_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include -#include -#include - -#include "graph/debug/ge_attr_define.h" -#include "proto/om.pb.h" - -using domi::AttrDef; -using domi::AttrDef_ListValue; -using domi::ModelDef; -using domi::NamedAttrs; -using domi::OpDef; - -namespace ge { -using AttrDefMap = ::google::protobuf::Map<::std::string, ::domi::AttrDef>; -using AttrDefPair = ::google::protobuf::MapPair; - -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, AttrDef &attr, OpDef *opdef); -// DEFINE_ADD_ATTR_VALUE -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const std::string &value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const char *value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttr(const char *key, const char *value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const uint32_t value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const int32_t value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const int64_t value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const float value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const double value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const bool value, AttrDefMap *attrs); - -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const AttrDef_ListValue &value, AttrDefMap *attrs); - -// DEFINE_ADD_ATTR_VALUE -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const std::string &value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const char *value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttr(const char *key, const char *value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const uint32_t value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const int32_t value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const int64_t value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const float value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const double value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const bool value, OpDef *opdef); - -GE_FUNC_VISIBILITY void AddOpAttr(const std::string &key, const AttrDef_ListValue &value, OpDef *opdef); - -GE_FUNC_VISIBILITY void AddOpBytesAttr(const std::string &key, const void *value, size_t size, OpDef *opdef); - -// DEFINE_ADD_ATTR_VALUE_LIST -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const double value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const float value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const uint32_t value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const int32_t value, AttrDefMap *attrs); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const std::string value, AttrDefMap *attrs); 
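A short sketch of how the setter overloads above might be used to attach attributes to a domi::OpDef before serialization; the attribute names and values are purely illustrative, not GE-defined keys, and the include path follows the one used by ge_op_utils.h.

#include "common/op/attr_value_util.h"

// Illustrative only: hypothetical attribute keys on a caller-owned OpDef.
void FillExampleAttrs(domi::OpDef *op_def) {
  ge::AddOpAttr("alpha", 1.0f, op_def);                      // float overload
  ge::AddOpAttr("pad_mode", "SAME", op_def);                 // const char * overload
  ge::AddOpAttr("group", static_cast<int64_t>(1), op_def);   // int64_t overload
}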
-GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const double value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const float value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const uint32_t value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const int32_t value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const bool value, OpDef *opdef); -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const int64_t value, OpDef *opdef); - -GE_FUNC_VISIBILITY void AddOpAttrList(const std::string &key, const std::string &value, OpDef *opdef); - -GE_FUNC_VISIBILITY bool GetOpAttr(const std::string &key, std::string *value, const OpDef *opdef); -GE_FUNC_VISIBILITY bool GetOpAttr(const std::string &key, int32_t *value, const OpDef *opdef); -GE_FUNC_VISIBILITY bool GetOpAttr(const std::string &key, int64_t *value, const OpDef *opdef); -GE_FUNC_VISIBILITY bool GetOpAttr(const std::string &key, uint32_t *value, const OpDef *opdef); -GE_FUNC_VISIBILITY bool GetOpAttr(const std::string &key, float *value, const OpDef *opdef); -GE_FUNC_VISIBILITY bool GetOpAttr(const std::string &key, double *value, const OpDef *opdef); -GE_FUNC_VISIBILITY bool GetOpAttr(const std::string &key, bool *value, const OpDef *opdef); -GE_FUNC_VISIBILITY bool GetOpAttr(const std::string &key, AttrDef_ListValue *value, const OpDef *opdef); - -GE_FUNC_VISIBILITY uint32_t GetOpAttrListSize(const std::string &key, std::string value, const OpDef *opdef); -GE_FUNC_VISIBILITY uint32_t GetOpAttrListSize(const std::string &key, int32_t value, const OpDef *opdef); -GE_FUNC_VISIBILITY uint32_t GetOpAttrListSize(const std::string &key, int64_t value, const OpDef *opdef); -GE_FUNC_VISIBILITY uint32_t GetOpAttrListSize(const std::string &key, uint32_t value, const OpDef *opdef); -GE_FUNC_VISIBILITY uint32_t GetOpAttrListSize(const std::string &key, float value, const OpDef *opdef); -GE_FUNC_VISIBILITY uint32_t GetOpAttrListSize(const std::string &key, double value, const OpDef *opdef); -GE_FUNC_VISIBILITY uint32_t GetOpAttrListSize(const std::string &key, bool value, const OpDef *opdef); - -GE_FUNC_VISIBILITY bool GetBytesAttr(const std::string &key, std::string *value, const OpDef *opdef); -GE_FUNC_VISIBILITY bool GetBytesAttr(const std::string &key, std::string *value, const ModelDef *model_def); - -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const std::string &value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const char *value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const char *key, const char *value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const uint32_t value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const int32_t value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const int64_t value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const float value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const double value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const bool value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, const void *value, size_t size, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttr(const std::string &key, 
const AttrDef_ListValue &value, ModelDef *model_def); - -GE_FUNC_VISIBILITY void AddModelAttrList(const std::string &key, const double value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttrList(const std::string &key, const float value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttrList(const std::string &key, const uint32_t value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttrList(const std::string &key, const int32_t value, ModelDef *model_def); -GE_FUNC_VISIBILITY void AddModelAttrList(const std::string &key, const std::string &value, ModelDef *model_def); - -GE_FUNC_VISIBILITY bool GetModelAttr(const std::string &key, std::string *value, const ModelDef *model_def); -GE_FUNC_VISIBILITY bool GetModelAttr(const std::string &key, int32_t *value, const ModelDef *model_def); -GE_FUNC_VISIBILITY bool GetModelAttr(const std::string &key, int64_t *value, const ModelDef *model_def); -GE_FUNC_VISIBILITY bool GetModelAttr(const std::string &key, uint32_t *value, const ModelDef *model_def); -GE_FUNC_VISIBILITY bool GetModelAttr(const std::string &key, float *value, const ModelDef *model_def); -GE_FUNC_VISIBILITY bool GetModelAttr(const std::string &key, double *value, const ModelDef *model_def); -GE_FUNC_VISIBILITY bool GetModelAttr(const std::string &key, bool *value, const ModelDef *model_def); -GE_FUNC_VISIBILITY bool GetModelAttr(const std::string &key, AttrDef_ListValue *value, const ModelDef *model_def); - -GE_FUNC_VISIBILITY bool HasOpAttr(const OpDef *opdef, const std::string &attr_name); - -GE_FUNC_VISIBILITY void SetAttrDef(const std::string &value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrDef(const char *value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrDef(const uint32_t value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrDef(const int32_t value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrDef(const float value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrDef(const double value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrDef(const bool value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrList(const std::string &value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrList(const bool value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrList(const float value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrList(const double value, AttrDef *out); -GE_FUNC_VISIBILITY void SetAttrList(const uint32_t value, AttrDef *out); - -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, std::string *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, int32_t *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, int64_t *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, uint32_t *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, float *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, double *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, bool *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, AttrDef_ListValue *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, NamedAttrs *&value, AttrDefMap *attr); -GE_FUNC_VISIBILITY bool GetAttrDefValue(const std::string &key, const NamedAttrs *&value, const AttrDefMap &attr); - -GE_FUNC_VISIBILITY bool GetAttrDefListValue(const 
std::string &key, int idx, int32_t *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefListValue(const std::string &key, int idx, uint32_t *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefListValue(const std::string &key, int idx, float *value, const AttrDefMap &attr); -GE_FUNC_VISIBILITY bool GetAttrDefListValue(const std::string &key, int idx, double *value, const AttrDefMap &attr); -} - -#endif // INC_FRAMEWORK_COMMON_OP_ATTR_VALUE_UTIL_H_ diff --git a/inc/graphengine/inc/framework/common/op/ge_op_utils.h b/inc/graphengine/inc/framework/common/op/ge_op_utils.h deleted file mode 100644 index bc965d13d..000000000 --- a/inc/graphengine/inc/framework/common/op/ge_op_utils.h +++ /dev/null @@ -1,149 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_OP_GE_OP_UTILS_H_ -#define INC_FRAMEWORK_COMMON_OP_GE_OP_UTILS_H_ - -#include -#include - -#include "common/op/attr_value_util.h" -#include "register/register_types.h" -#include "register/register_error_codes.h" -#include "common/util.h" -#include "graph/attr_value.h" -#include "graph/ge_tensor.h" -#include "graph/node.h" -#include "graph/op_desc.h" -#include "proto/insert_op.pb.h" - -namespace ge { -using domi::Status; - -// Add Sub Mul -GE_FUNC_VISIBILITY extern const uint32_t ADD_INPUT_NUM; -GE_FUNC_VISIBILITY extern const uint32_t SUB_INPUT_NUM; -GE_FUNC_VISIBILITY extern const uint32_t MUL_INPUT_NUM; - -// Permute -GE_FUNC_VISIBILITY extern const int32_t PERMUTE_ORDER_NUM; - -// Ssd PriroBox -GE_FUNC_VISIBILITY extern const double SSD_PRIORBOX_ASPECT_RATIO_VALUE; - -GE_FUNC_VISIBILITY extern const uint32_t STRIDEDSLICE_INPUT_NUM; - -// Switch -GE_FUNC_VISIBILITY extern const uint32_t SWITCH_INPUT_NUM; -GE_FUNC_VISIBILITY extern const uint32_t SWITCH_OUTPUT_NUM; -GE_FUNC_VISIBILITY extern const uint32_t SWITCH_FALSE_OUTPUT; -GE_FUNC_VISIBILITY extern const uint32_t SWITCH_TRUE_OUTPUT; -GE_FUNC_VISIBILITY extern const uint32_t SWITCH_DATA_INPUT; -GE_FUNC_VISIBILITY extern const uint32_t SWITCH_PRED_INPUT; - -// Merge -GE_FUNC_VISIBILITY extern const uint32_t MERGE_DATA_OUTPUT; -GE_FUNC_VISIBILITY extern const uint32_t MERGE_INDEX_OUTPUT; - -// FunctionOp -GE_FUNC_VISIBILITY extern const uint32_t IF_COND_INPUT; -GE_FUNC_VISIBILITY extern const uint32_t FOR_START_INPUT; -GE_FUNC_VISIBILITY extern const uint32_t FOR_LIMIT_INPUT; -GE_FUNC_VISIBILITY extern const uint32_t FOR_DELTA_INPUT; -GE_FUNC_VISIBILITY extern const uint32_t FOR_DATA_INPUT; - -GE_FUNC_VISIBILITY extern const int NORMAL_TENSOR_SIZE; - -class GE_FUNC_VISIBILITY OpUtils { - public: - /// - /// @ingroup domi_ome - /// @brief Check whether check_value is in [min_enum_value, max_enum_value] - /// @return true Within - /// @return false out of range - // - static inline bool CheckEnumValid(int32_t check_value, int32_t min_enum_value, int32_t max_enum_value) { - return check_value < min_enum_value ? false : (check_value >= max_enum_value ? 
false : true); - } - - /// - /// @ingroup domi_omg - /// @brief Determine whether to manually calculate the tensor size based on the values of format and dim - /// @param [in] format, Format information of the tensor - /// @param [in] real_dim_cnt, Tensor dim - /// @return true Manually calculate the size based on dim and datatype - /// @return false skip - /// - static bool IsComputDimsSize(const int32_t format, const uint32_t real_dim_cnt); - - /// - /// @brief Extract AIPP parameters from AttrDefMap and splice them - /// @param [in] aipp_attr attr of operator - /// @param [out] aipp_params aipp parameters - /// @return enum of tagCCAippInputFormat - /// - static Status ConvertAippParams(const GeAttrValue::NamedAttrs &aipp_attr, domi::AippOpParams *aipp_params); - static Status TransferDim(const std::vector &dim, std::vector &dim_vector); - template - static void SliceData(const std::vector &input, int64_t chunk_size, std::vector &output, - int64_t begin, int64_t out_dim, int64_t stride); - template - static Status SetDataByDataType(size_t out_size, const std::vector &chunk_input, - const std::vector &chunk_output, GeTensor *output); - template - static Status SetOutputSliceDataByDataType(void *data, int64_t data_size, const std::vector &input_dims, - const std::vector &begin, const std::vector &output_dims, - ge::GeTensor *output, const std::vector &stride); - static Status SetOutputSliceData(void *data, int64_t data_size, int32_t data_type, std::vector &input_dims, - std::vector &begin, std::vector &output_dims, ge::GeTensor *output, - std::vector &stride); - - /// - /// @ingroup domi_omg - /// @brief Convert the convolutional weight data from [h, w, c, k] to [k, c, h, w] - /// @param [in] input Weight data in HWCK format - /// @param [in] H value of H dimension - /// @param [in] W value of W dimension - /// @param [in] C value of C dimension - /// @param [in] K value of K dimension - /// @param [out] output Data pointer after conversion. The format is KCHW. - /// - static void TransDataHWCK2KCHW(const void *input, int64_t H, int64_t W, int64_t C, int64_t K, void **output); - /// - /// @ingroup domi_omg - /// @brief Converts the convolutional weight data from [k, c, h, w] to [h, w, c, k]. - /// @param [in] input Weight data in HWCK format - /// @param [in] K value of K dimension - /// @param [in] C value of C dimension - /// @param [in] H value of H dimension - /// @param [in] W value of W dimension - /// @param [out] output Data pointer after conversion. 
The format is HWCK - /// - static void TransDataKCHW2HWCK(const void *input, int64_t K, int64_t C, int64_t H, int64_t W, void *output); - - static vector GetWeights(const ge::Node &node); - static vector GetWeights(ge::ConstNodePtr node); - static vector MutableWeights(const ge::Node &node); - static vector MutableWeights(const ge::NodePtr node); - static Status SetWeights(ge::Node &node, const vector &weights); - static Status SetWeights(ge::NodePtr node, const vector &weights); - static Status GetShapeDataFromConstTensor(const ConstGeTensorPtr &tensor, DataType type, std::vector &dims); - - private: - static uint32_t GetRealDimCnt(const GeTensorDesc &tensor_desc); -}; -} // namespace ge -#endif // INC_FRAMEWORK_COMMON_OP_GE_OP_UTILS_H_ diff --git a/inc/graphengine/inc/framework/common/op/op_parser_util.h b/inc/graphengine/inc/framework/common/op/op_parser_util.h deleted file mode 100644 index 43254ca95..000000000 --- a/inc/graphengine/inc/framework/common/op/op_parser_util.h +++ /dev/null @@ -1,419 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_OP_OP_PARSER_UTIL_H_ -#define INC_FRAMEWORK_COMMON_OP_OP_PARSER_UTIL_H_ - -#include -#include -#include - -namespace ge { -// general -const float DEFAULT_ALPHA_VALUE = 1.0; -const float DEFAULT_BETA_VALUE = 0.0; -const uint32_t NORMAL_INPUT_NUM = 1; -const uint32_t NORMAL_OUTPUT_NUM = 1; -const uint32_t NORMAL_WORKSPACE_NUM = 0; -const int32_t NORMAL_1D_DIM_NUM = 1; -const int32_t NORMAL_SCALE_DIM_NUM = 0; -const int NORMAL_TENSOR_SIZE = 4; -const uint32_t DEFAULT_REAL_DIM_CNT = 4; - -// const -const uint32_t CONST_OP_INPUT_NUM = 0; -const uint32_t CONST_OP_NORMAL_WEIGHT_SIZE = 1; - -// MatMul -const uint32_t MATMUL_INPUT_NUM = 2; - -// ActivationGrad -const int32_t ACTIVATIONGRAD_INPUT_NUM = 2; - -// FusedBatchNorm -const int32_t FUSED_BATCH_NORM_WORKSPACE_NUM = 1; -const int32_t FUSED_BATCH_NORM_INPUT_NUM = 5; -const int32_t FUSED_BATCH_NORM_OUTPUT_NUM = 5; -// FusedBatchNormGrad -const int32_t FUSEDBATCHNORMGRAD_WORKSPACE_NUM = 1; -const int32_t FUSEDBATCHNORMGRAD_INPUT_NUM = 5; -const int32_t FUSEDBATCHNORMGRAD_OUTPUT_NUM = 3; - -// conv -const uint32_t CONVOLUTION_WORKSPACE_NUM = 1; -const uint32_t CONVOLUTION_PAD_SIZE = 4; -const uint32_t CONVOLUTION_STRIDE_SIZE = 2; -const uint32_t CONVOLUTION_DILATION_SIZE = 2; -const int32_t CONVOLUTION_ADJ_SIZE = 2; -const int32_t CONVOLUTION_TARGET_SHAPE_SIZE = 2; - -// ConvGradFilter -const uint32_t CONVGRADFILTER_WORKSPACE_NUM = 1; -const uint32_t CONVGRADFILTER_INPUT_NUM = 3; - -// Pooling -const uint32_t POOLING_WINDOW_SIZE = 2; -const uint32_t POOLING_STRIDE_SIZE = 2; -const uint32_t POOLING_PAD_SIZE = 4; - -// Add Sub Mul -const uint32_t ADD_INPUT_NUM = 2; -const uint32_t SUB_INPUT_NUM = 2; -const uint32_t MUL_INPUT_NUM = 2; -const uint32_t DIV_INPUT_NUM = 2; -const uint32_t ADD_WORKSPACE_NUM = 1; -const uint32_t SUB_WORKSPACE_NUM = 1; -const uint32_t MUL_WORKSPACE_NUM = 1; -const 
uint32_t DIV_WORKSPACE_NUM = 1; - -const int32_t DEFAULT_AXIS_VALUE = -1; - -const int32_t RESHAPE_AXIS_DEFAULT_VALUE = 0; -const int32_t RESHAPE_NUM_AXES_DEFAULT_VALUE = -1; -const uint32_t RESHAPE_WORKSPACE_NUM = 1; - -const uint32_t FLATTEN_WORKSPACE_NUM = 1; - -const int32_t CONCAT_MIN_INPUT_SIZE = 1; -const int32_t CONCAT_DEFAULT_AXIS = 1; -const uint32_t CONCAT_WORKSPACE_NUM = 1; - -// The value for LRN parameters -const uint32_t LRN_DEFAULT_NORM_REGION = 0; -const float LRN_DEFAULT_K = 1.0; -const uint32_t LRN_DEFAULT_LOCAL_SIZE = 5; -const float LRN_DEFAULT_ALPHA = 1.0; -const float LRN_DEFAULT_BETA = 0.75; - -/// -/// @ingroup domi_common -/// @brief roipooling default value -/// -const uint32_t ROIPOOLING_DEFAULT_POOLED_H = 0; -const uint32_t ROIPOOLING_DEFAULT_POOLED_W = 0; -const float ROIPOOLING_DEFAULT_SPATIAL_SCALE = 1; -const int32_t ROIPOOLING_DEFAULT_SAMPLING_RATIO = -1; - -// DetectionOutput -const int32_t DETECTIONOUTPUT_INPUT_SIZE = 3; -const int32_t DETECTIONOUTPUT_OUTPUT_SIZE = 2; -const int32_t DETECTIONOUTPUT_WORKSPACE_NUM = 1; -const int DETECTIONOUTPUT_CLASS_NUM = 20; // Number of background categories -const int DETECTIONOUTPUT_NUM_CLASSES_DEFAULT_VALUE = 21; -const float DETECTIONOUTPUT_NMS_THRESHOLD_DEFAULT_VALUE = 0.3; -const float DETECTIONOUTPUT_CONFIDENCE_THRESHOLD_DEFAULT_VALUE = 0.8; - -// Proposal -const int32_t PROPOSAL_INPUT_SIZE = 3; -const int32_t PROPOSAL_OUTPUT_MAX_SIZE = 2; -const int32_t PROPOSAL_WORKSPACE_NUM = 1; -const float PROPOSAL_BASE_SIZE_DEFAULT_VALUE = 16; -const float PROPOSAL_RATIO_DIM_0_DEFAULT_VALUE = 0.5; -const float PROPOSAL_RATIO_DIM_1_DEFAULT_VALUE = 1; -const float PROPOSAL_RATIO_DIM_2_DEFAULT_VALUE = 2; -const float PROPOSAL_SCALE_DIM_0_DEFAULT_VALUE = 8; -const float PROPOSAL_SCALE_DIM_1_DEFAULT_VALUE = 16; -const float PROPOSAL_SCALE_DIM_2_DEFAULT_VALUE = 32; -const float PROPOSAL_MIN_SIZE_DEFAULT_VALUE = 16; -const int PROPOSAL_PRE_NMS_TOPN_DEFAULT_VALUE = 6000; -const int PROPOSAL_POST_NMS_TOPN_DEFAULT_VALUE = 304; -const float PROPOSAL_NMS_THRESH_DEFAULT_VALUE = 0.7; -const float PROPOSAL_FILTER_THRESH_DEFAULT_VALUE = 0; - -// TVM OP -const uint32_t DEFAULT_KERNEL_BLOCK_DIM = 1; - -// Softmax -const int32_t SOFTMAX_WORKSPACE_NUM = 1; - -// SoftmaxCrossEntropy -const int32_t SOFTMAXCROSSENTROPY_INPUT_NUM = 2; -const int32_t SOFTMAXCROSSENTROPY_OUTPUT_NUM = 2; - -// Permute -const int32_t PERMUTE_INPUT_NUM = 1; -const int32_t PERMUTE_OUTPUT_NUM = 1; -const int32_t PERMUTE_WORKSPACE_NUM = 1; -const int32_t PERMUTE_ORDER_NUM = 4; - -// Ssd normalize -const int SSD_NORMALIZE_INPUT_SIZE = 1; -const float SSD_NORMALIZE_EPS_DEFAULT_VALUE = 2e-7; - -// SsdPriroBox -const int32_t SSD_PRIOR_BOX_WORKSPACE_NUM = 1; -const int32_t SSD_PRIOR_BOX_INPUT_NUM = 2; -const bool SSD_PRIOR_BOX_FLIP_VALUE = true; -const bool SSD_PRIOR_BOX_CLIP_VALUE = false; -const double SSD_PRIOR_BOX_ASPECT_OFFSET_VALUE = 0.5; -const double SSD_PRIORBOX_VARIANCE_VALUE = 0.1; -const double SSD_PRIORBOX_VARIANCE_SIZE_ONE = 1; -const double SSD_PRIORBOX_VARIANCE_SIZE_FOUR = 4; -const double SSD_PRIORBOX_ASPECT_RATIO_VALUE = 1.0; -const int SSD_PRIOR_BOX_CODETYPE_CORNER_VALUE = 1; -const int SSD_PRIOR_BOX_CODETYPE_CENTER_SIZE_VALUE = 2; -const int SSD_PRIOR_BOX_CODETYPE_CORNER_SIZE_VALUE = 3; - -// Ssd DetectionOutput -const int32_t SSD_DETECTIONOUTPUT_INPUT_SIZE = 3; -const int32_t SSD_DETECTIONOUTPUT_INPUT_SIZE_AFTER_FUSION = 2; -const int32_t SSD_DETECTIONOUTPUT_OUTPUT_SIZE = 2; -const int32_t SSD_DETECTIONOUTPUT_OUTPUT_SIZE_AFTER_FUSION = 3; -const int32_t 
SSD_DETECTIONOUTPUT_WORKSPACE_NUM = 1; -const int32_t SSD_DETECTIONOUTPUT_WORKSPACE_NUM_AFTER_FUSION = 0; -const bool SSD_DETECTIONOUTPUT_SHARED_LOCATION_DEFAULT_VALUE = true; -const int32_t SSD_DETECTIONOUTPUT_BACKGROUND_LABEL_ID_DEFAULT_VALUE = 0; -const float SSD_DETECTIONOUTPUT_NMS_THRESHOLD_DEFAULT_VALUE = 0.3; -const int32_t SSD_DETECTIONOUTPUT_TOP_K_DEFAULT_VALUE = 200; -const float SSD_DETECTIONOUTPUT_ETA_DEFAULT_VALUE = 1.0; -const int32_t SSD_DETECTIONOUTPUT_KEEP_TOP_K_DEFAULT_VALUE = 200; -const bool SSD_DETECTIONOUTPUT_VARIANCE_ENCODED_IN_TARGET_DEFAULT_VALUE = false; -const float SSD_DETECTIONOUTPUT_CONFIDENCE_THRESHOLD_DEFAULT_VALUE = 0.1; - -// Refinedet DetectionOutput -const int32_t REFINEDET_DETECTIONOUTPUT_INPUT_SIZE = 5; -const int32_t REFINEDET_DETECTIONOUTPUT_INPUT_SIZE_AFTER_FUSION = 2; -const int32_t REFINEDET_DETECTIONOUTPUT_OUTPUT_SIZE = 2; -const int32_t REFINEDET_DETECTIONOUTPUT_OUTPUT_SIZE_AFTER_FUSION = 3; -const int32_t REFINEDET_DETECTIONOUTPUT_WORKSPACE_NUM = 1; -const bool REFINEDET_DETECTIONOUTPUT_SHARED_LOCATION_DEFAULT_VALUE = true; -const int32_t REFINEDET_DETECTIONOUTPUT_BACKGROUND_LABEL_ID_DEFAULT_VALUE = 0; -const float REFINEDET_DETECTIONOUTPUT_NMS_THRESHOLD_DEFAULT_VALUE = 0.3; -const int32_t REFINEDET_DETECTIONOUTPUT_TOP_K_DEFAULT_VALUE = 200; -const float REFINEDET_DETECTIONOUTPUT_ETA_DEFAULT_VALUE = 1.0; -const bool REFINEDET_DETECTIONOUTPUT_VARIANCE_ENCODED_IN_TARGET_DEFAULT_VALUE = false; -const int32_t REFINEDET_DETECTIONOUTPUT_KEEP_TOP_K_DEFAULT_VALUE = 200; -const float REFINEDET_DETECTIONOUTPUT_CONFIDENCE_THRESHOLD_DEFAULT_VALUE = 0.1; -const float REFINEDET_DETECTIONOUTPUT_OBJECTNESS_SCORE_DEFAULT_VALUE = 0; - -// Channel axpy -const int32_t CHANNEL_AXPY_INPUT_NUM = 3; -const int32_t CHANNEL_AXPY_INPUT_DIM_SIZE = 4; -const int32_t CHANNEL_AXPY_WORKSPACE_NUM = 1; - -// Psroi pooling -const int PSROI_POOLING_INPUT_COUNT = 2; -const int PSROI_POOLING_WORKSPACE_NUM = 1; - -// MaxPoolWithArgmax -const uint32_t MAX_POOL_WITH_ARGMAX_OUTPUT_NUM = 2; -const uint32_t MAX_POOL_GRAD_WITH_ARGMAX_INPUT_NUM = 3; - -// AvgPoolGrad -const uint32_t AVG_POOL_GRAD_INPUT_NUM = 2; - -// ROIAlign -const int32_t ROIALIGN_INPUT_SIZE = 2; -const int32_t ROIALIGN_WORKSPACE_NUM = 1; -const int32_t ROIALIGN_DEFAULT_POOLED_H = 1; -const int32_t ROIALIGN_DEFAULT_POOLED_W = 1; - -// Correlation -const uint32_t CORRELATION_INPUT_NUM = 2; -const int CORRELATION_WORKSPACE_NUM = 1; - -// Detectionpostprocess -const int32_t POSTPROCESS_INPUT_SIZE = 4; -const int32_t POSTPROCESS_OUTPUT_SIZE = 2; -const int32_t POSTPROCESS_WORKSPACE_NUM = 1; -const uint32_t POSTPROCESS_CLS_NUM_DEFAULT_VALUE = 12; -const uint32_t POSTPROCESS_POST_NMS_TOPN_DEFAULT_VALUE = 100; -const float POSTPROCESS_NMS_THRESH_DEFAULT_VALUE = 0.3; -const float POSTPROCESS_CONF_THRESH_DEFAULT_VALUE = 0.5; -const float POSTPROCESS_BBOX_REG_WEIGHT_DIM_DEFAULT_VALUE = 1.0; -const int32_t POSTPROCESS_BBOX_REG_WEIGHT_SIZE_DEFAULT_VALUE = 4; - -// Split -const int32_t SPLIT_INPUT_NUM = 2; -const int32_t SPLIT_DEFAULT_AXIS_VALUE = 1; -const int32_t SPLIT_MIN_OUTPUT_SIZE = 1; - -const uint32_t STRIDEDSLICE_INPUT_NUM = 4; -// Slice -const int32_t SLICE_INPUT_NUM = 3; -const int32_t SLICE_WEIGHT_NUM = 2; - -// GatherNd -const int32_t GATHERND_INPUT_NUM = 2; -// ArgMax -const int32_t ARGMAX_INPUT_NUM = 2; -const int32_t ARGMAX_REAL_INPUT_NUM = 1; - -// HighWay -const int32_t HIGHWAY_INPUT_NUM = 4; -const int32_t HIGHWAY_WORKSPACE_NUM = 1; -// RealDiv -const int32_t REALDIV_INPUT_NUM = 2; - -// Range -const int32_t 
RANGE_INPUT_NUM = 3; -const int32_t RANGE_OUTPUT_NUM = 1; -const int32_t RANGE_INPUT_DIM_SIZE = 0; - -// Pad -const int32_t PAD_WEIGHT_NUM = 1; -const int32_t PAD_DIM_SIZE = 2; -const int32_t PAD_DIM0 = 4; -const int32_t PAD_DIM1 = 2; -const int32_t PAD_WEIGHT_WITH_CONSTANT_NUM = 2; -const int32_t PAD_CONSTATNT_DEFAULT_VALUE = 0; -const int32_t PAD_PADDINGS_SIZE = 8; - -// Tile -const int32_t TILE_WEIGHT_NUM = 1; -const int32_t TILE_MULTIPLES_DIM_SIZE = 1; - -// DecodeBbox -const int32_t DECODE_BBOX_INPUT_NUM = 2; - -// GenerateRpnProposals -const int32_t GENERATE_RPN_PROPOSAL_INPUT_SIZE = 2; -const int32_t GENERATE_RPN_PROPOSAL_OUTPUT_SIZE = 3; - -// Decode_BBox -const int32_t DECODE_BBOX_INPUT_SIZE = 2; -const int32_t DEFAULT_DECODE_CLIP_VALUE = 0; - -// FastRcnnPredictions -const int32_t FASTRCNN_PREDICTIONS_INPUT_SIZE = 2; -const int32_t FASTRCNN_PREDICTIONS_OUTPUT_SIZE = 4; - -const int32_t CLIP_BOXES_INPUT_NUM = 1; -const int32_t CLIP_BOXES_WEIGHT_SIZE = 1; -const int32_t CLIP_BOXES_WEIGHT_ITEM_SIZE = 2; -const int32_t CLIP_BOXES_OUTPUT_NUM = 1; - -const int32_t FLOORDIV_INPUT_NUM = 2; -// Mean -const int32_t MEAN_WEIGHT_SIZE = 1; -const int32_t MEAN_WEIGHT_DIM_SIZE = 1; -const int32_t MEAN_WEIGHT_DIM = 2; -const int32_t MEAN_FIRST_AXIS = 2; -const int32_t MEAN_SECOND_AXIS = 3; -const int32_t MEAN_STRIDE_PLACE_HOLD = 1; -// Switch -const uint32_t SWITCH_INPUT_NUM = 2; -const uint32_t SWITCH_OUTPUT_NUM = 2; -// Merge -const uint32_t MERGE_INPUT_NUM = 2; -// Greater -const uint32_t GREATER_OUTPUT_NUM = 1; -const uint32_t GREATER_INPUT_NUM = 0; -const uint32_t GREATER_WEIGHT_NUM = 2; - -// Yolo region -const uint32_t YOLO_REGION_OUTPUT_NUM = 3; -const uint32_t YOLO_REGION_WORKSPACE_NUM = 1; -const uint32_t YOLO_REGION_COORDS = 4; -const uint32_t YOLO_REGION_CLASSES = 20; -const uint32_t YOLO_REGION_BOXES = 1; -const bool YOLO_REGION_BACKGROUND = false; -const bool YOLO_REGION_SOFTMAX = false; -const bool YOLO_REGION_SOFTMAX_TREE = false; - -// Yolo detectionoutput -const uint32_t YOLO_DETECTIONOUTPUT_INPUT_SIZE = 4; -const uint32_t YOLO_DETECTIONOUTPUT_OUTPUT_SIZE = 2; -const uint32_t YOLO_DETECTION_OUTPUT_WORKSPACE_NUM = 1; -const uint32_t YOLO_DETECTION_OUTPUT_CLASSES = 20; -const uint32_t YOLO_DETECTION_OUTPUT_BOXES_V2 = 5; -const uint32_t YOLO_DETECTION_OUTPUT_BOXES_V3 = 3; -const bool YOLO_DETECTION_OUTPUT_RELATIVE = true; -const float YOLO_DETECTION_OUTPUT_OBJECTNESS_THRESHOLD = 0.5; -const float YOLO_DETECTION_OUTPUT_CLASS_THRESHOLD = 0.5; -const uint32_t YOLO_DETECTION_OUTPUT_POST_TOP_K = UINT_MAX; -const float YOLO_DETECTION_OUTPUT_NMS_THRESHOLD = 0; -const float YOLO_DETECTION_OUTPUT_IOU_THRESHOLD_DECAY = 1.0; -const float YOLO_DETECTION_OUTPUT_COOR_SCALE_FACTOR = 1.0; - -// Reorg -const int32_t REORG_DEFAULT_STRIDE = 2; -const uint32_t REORG_INPUT_COUNT = 1; -// Reshape -const int32_t RESHAPE_INPUT_NUM = 2; -// Maximum -const int32_t MAXIMUM_INPUT_NUM = 2; - -// Spatialtf -const int32_t SPATIALTF_WORKSPACE_NUM = 1; - -const int32_t REVERSE_DEFAULT_AXIS = 1; -// Crop -const int32_t CROP_AXIS = 2; -const int32_t CROP_INPUT_NUM = 2; - -// ConvGradInput -const uint32_t CONVGRADINPUT_WORKSPACE_NUM = 1; -const uint32_t CONVGRADINPUT_INPUT_NUM = 3; - -// RNN -const uint32_t RNN_WORKSPACE_NUM = 1; - -// Cropandresize -const int32_t CROPANDRESIZE_WEIGHT_NUM = 1; -const int32_t CROPANDRESIZE_CROP_DIM_SIZE = 1; -const int32_t CROP_DIM0 = 2; - -// Attention decoder weight index -const uint32_t ATTENTION_DECODER_WEIGHT_ATTENW0 = 0; -const uint32_t 
ATTENTION_DECODER_WEIGHT_ATTENTION0_KERNEL = 1; -const uint32_t ATTENTION_DECODER_WEIGHT_ATTNOUTPUTPROJECTION_KERNEL = 2; -const uint32_t ATTENTION_DECODER_WEIGHT_ATTENTION_DECODER_KERNEL = 3; -const uint32_t ATTENTION_DECODER_WEIGHT_CELL0_GATES_KERNEL = 4; -const uint32_t ATTENTION_DECODER_WEIGHT_CELL0_CANDIDATE_KERNEL = 5; -const uint32_t ATTENTION_DECODER_WEIGHT_CELL1_GATES_KERNEL = 6; -const uint32_t ATTENTION_DECODER_WEIGHT_CELL1_CANDIDATE_KERNEL = 7; -const uint32_t ATTENTION_DECODER_WEIGHT_ATTENTION0_BIAS = 8; -const uint32_t ATTENTION_DECODER_WEIGHT_ATTNOUTPUTPROJECTION_BIAS = 9; -const uint32_t ATTENTION_DECODER_WEIGHT_ATTENTION_DECODER_BIAS = 10; -const uint32_t ATTENTION_DECODER_WEIGHT_CELL0_GATES_BIAS = 11; -const uint32_t ATTENTION_DECODER_WEIGHT_CELL0_CANDIDATE_BIAS = 12; -const uint32_t ATTENTION_DECODER_WEIGHT_CELL1_GATES_BIAS = 13; -const uint32_t ATTENTION_DECODER_WEIGHT_CELL1_CANDIDATE_BIAS = 14; -const uint32_t ATTENTION_DECODER_WEIGHT_EMBEDDING = 15; -const uint32_t ATTENTION_DECODER_WEIGHT_ATTENVA = 16; -const uint32_t ATTENTION_DECODER_WEIGHT_DECODER_INITIAL = 17; -// Attention decoder weight size -const uint32_t ATTENTION_DECODER_WEIGHT_SIZE = 18; - -const uint32_t ATTENTION_DECODER_INPUT_SIZE = 2; -const uint32_t ATTENTION_DECODER_WORKSPACE_NUM = 1; -const uint32_t ATTENTION_DECODER_INPUT_DECODER_INPUTS = 0; -const uint32_t ATTENTION_DECODER_INPUT_DECODER_INITIAL_HIDDEN = 1; - -const int ATTENTION_DECODER_ALGO_NORMAL = 0; -const int ATTENTION_DECODER_SYMBOLS = 10000; -const int ATTENTION_DECODER_EMBEDDING_SIZE = 128; -const int ATTENTION_DECODER_ATTENTION_NUM_HIDDEN = 256; -const int ATTENTION_DECODER_DECODER_NUM_HIDDEN = 128; -const int ATTENTION_DECODER_DECODER_NUM_LAYERS = 2; -const int ATTENTION_DECODER_RNN_UNBIDIRECTIONAL = 0; -const int ATTENTION_DECODER_SEQLEN_VALUE = 57; -const int ATTENTION_DECODER_GRU = 3; - -// Logicaland -const int32_t LOGICAL_AND_INPUT_NUM = 2; -const int32_t EQUAL_INPUT_NUM = 2; - -static const int32_t OP_WEIGHT_MEM_BASE_OFFSET = 512; - -// MultiShape -const uint32_t MULTI_SHAPE_INPUT_NUM = 2; - -// Shufflechannel -const uint32_t SHUFFLECHANNEL_DEFAULT_GROUP = 1; -} // namespace ge -#endif // INC_FRAMEWORK_COMMON_OP_OP_PARSER_UTIL_H_ diff --git a/inc/graphengine/inc/framework/common/op_types.h b/inc/graphengine/inc/framework/common/op_types.h deleted file mode 100644 index fa41c1b6c..000000000 --- a/inc/graphengine/inc/framework/common/op_types.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_FRAMEWORK_COMMON_OP_TYPES_H_ -#define INC_FRAMEWORK_COMMON_OP_TYPES_H_ - -#include -#include - -namespace ge { -class GE_FUNC_VISIBILITY OpTypeContainer { - public: - static OpTypeContainer *Instance() { - static OpTypeContainer instance; - return &instance; - } - ~OpTypeContainer() = default; - - void Register(const std::string &op_type) { op_type_list_.insert(op_type); } - - bool IsExisting(const std::string &op_type) { - auto iter_find = op_type_list_.find(op_type); - return iter_find != op_type_list_.end(); - } - - protected: - OpTypeContainer() {} - - private: - std::set op_type_list_; -}; - -class GE_FUNC_VISIBILITY OpTypeRegistrar { - public: - explicit OpTypeRegistrar(const std::string &op_type) { OpTypeContainer::Instance()->Register(op_type); } - ~OpTypeRegistrar() {} -}; - -#define REGISTER_OPTYPE_DECLARE(var_name, str_name) \ - FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *var_name; - -#define REGISTER_OPTYPE_DEFINE(var_name, str_name) \ - const char *var_name = str_name; \ - const OpTypeRegistrar g_##var_name##_reg(str_name); - -#define IS_OPTYPE_EXISTING(str_name) (OpTypeContainer::Instance()->IsExisting(str_name)) -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_OP_TYPES_H_ diff --git a/inc/graphengine/inc/framework/common/profiling/ge_profiling.h b/inc/graphengine/inc/framework/common/profiling/ge_profiling.h deleted file mode 100644 index 7017aca32..000000000 --- a/inc/graphengine/inc/framework/common/profiling/ge_profiling.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
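A sketch of how the DECLARE/DEFINE pair above is meant to be split between a header and a single source file; MY_CUSTOM_OP is a hypothetical name, and the FMK_FUNC_*_VISIBILITY macros used by REGISTER_OPTYPE_DECLARE are assumed to be visible (op_types.h's own includes are elided in this diff).

#include "framework/common/op_types.h"

namespace ge {
REGISTER_OPTYPE_DECLARE(MY_CUSTOM_OP, "MyCustomOp");  // normally placed in a header
REGISTER_OPTYPE_DEFINE(MY_CUSTOM_OP, "MyCustomOp");   // placed in exactly one .cc; registers at static init

bool IsMyCustomOpKnown() {
  return IS_OPTYPE_EXISTING("MyCustomOp");  // true once the defining translation unit is linked in
}
}  // namespace ge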
- */ - -#ifndef INC_FRAMEWORK_COMMON_GE_PROFILING_H_ -#define INC_FRAMEWORK_COMMON_GE_PROFILING_H_ - -#include "ge/ge_api_error_codes.h" -#include "toolchain/prof_callback.h" - -const int MAX_DEV_NUM = 64; - -enum ProfCommandHandleType { - kProfCommandhandleInit = 0, - kProfCommandhandleStart, - kProfCommandhandleStop, - kProfCommandhandleFinalize, - kProfCommandhandleModelSubscribe, - kProfCommandhandleModelUnsubscribe -}; - -struct ProfCommandHandleData { - uint64_t profSwitch; - uint32_t devNums; // length of device id list - uint32_t devIdList[MAX_DEV_NUM]; - uint32_t modelId; -}; - -GE_FUNC_VISIBILITY ge::Status RegProfCtrlCallback(MsprofCtrlCallback func); -GE_FUNC_VISIBILITY ge::Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func); -GE_FUNC_VISIBILITY ge::Status RegProfReporterCallback(MsprofReporterCallback func); -GE_FUNC_VISIBILITY ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t len); - -#endif // INC_FRAMEWORK_COMMON_GE_PROFILING_H_ diff --git a/inc/graphengine/inc/framework/common/profiling/ge_runner_profiling.h b/inc/graphengine/inc/framework/common/profiling/ge_runner_profiling.h deleted file mode 100644 index 011797a37..000000000 --- a/inc/graphengine/inc/framework/common/profiling/ge_runner_profiling.h +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_GE_RUNNER_PROFILING_H_ -#define INC_FRAMEWORK_COMMON_GE_RUNNER_PROFILING_H_ - -#include "profiling/ge_profiling.h" - -GE_FUNC_VISIBILITY bool IsInitialize(); - -#endif // INC_FRAMEWORK_COMMON_GE_RUNNER_PROFILING_H_ diff --git a/inc/graphengine/inc/framework/common/scope_guard.h b/inc/graphengine/inc/framework/common/scope_guard.h deleted file mode 100644 index ac66c6aa8..000000000 --- a/inc/graphengine/inc/framework/common/scope_guard.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
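A hedged sketch of issuing a profiling start command through the handle declared above; the meaning of profSwitch and of modelId for a plain start command are assumptions, and the include path follows ge_runner_profiling.h.

#include "profiling/ge_profiling.h"

// Illustrative only: start profiling on device 0 with a caller-supplied switch mask.
ge::Status StartProfilingOnDevice0(const uint64_t prof_switch) {
  ProfCommandHandleData data{};
  data.profSwitch = prof_switch;  // bitmask owned by the profiling subsystem (placeholder)
  data.devNums = 1U;              // one valid entry in devIdList
  data.devIdList[0] = 0U;
  data.modelId = 0U;              // assumed unused for a plain start command
  return ProfCommandHandle(kProfCommandhandleStart, &data, static_cast<uint32_t>(sizeof(data)));
}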
- */ - -#ifndef INC_FRAMEWORK_COMMON_SCOPE_GUARD_H_ -#define INC_FRAMEWORK_COMMON_SCOPE_GUARD_H_ - -#include -#include - -/// Usage: -/// Acquire Resource 1 -/// MAKE_GUARD([&] { Release Resource 1 }) -/// Acquire Resource 2 -// MAKE_GUARD([&] { Release Resource 2 }) -#define GE_MAKE_GUARD(var, callback) ::ge::ScopeGuard make_guard_##var(callback) -#define GE_DISMISS_GUARD(var) make_guard_##var.Dismiss() - -namespace ge { -class GE_FUNC_VISIBILITY ScopeGuard { - public: - // Noncopyable - ScopeGuard(ScopeGuard const &) = delete; - ScopeGuard &operator=(ScopeGuard const &) = delete; - - explicit ScopeGuard(const std::function &on_exit_scope) : on_exit_scope_(on_exit_scope), dismissed_(false) {} - - ~ScopeGuard() { - if (!dismissed_) { - if (on_exit_scope_ != nullptr) { - try { - on_exit_scope_(); - } catch (std::bad_function_call &e) { } - catch (...) { } - } - } - } - - void Dismiss() { dismissed_ = true; } - - private: - std::function on_exit_scope_; - bool dismissed_; -}; -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_SCOPE_GUARD_H_ diff --git a/inc/graphengine/inc/framework/common/string_util.h b/inc/graphengine/inc/framework/common/string_util.h deleted file mode 100644 index c1216d90a..000000000 --- a/inc/graphengine/inc/framework/common/string_util.h +++ /dev/null @@ -1,174 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
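A sketch of the usage pattern described in the header comment above, pairing an acquisition with GE_MAKE_GUARD so the release runs on every exit path; the std::function parameter's template argument is elided in this diff and assumed to be void(), and the include path is assumed.

#include "framework/common/scope_guard.h"

#include <cstdio>

// Illustrative only: the guard closes the file on any return path below.
bool ReadHeaderBytes(const char *path) {
  FILE *fp = std::fopen(path, "rb");
  if (fp == nullptr) {
    return false;
  }
  GE_MAKE_GUARD(close_fp, [&fp]() { (void)std::fclose(fp); });

  char magic[4] = {};
  if (std::fread(magic, 1U, sizeof(magic), fp) != sizeof(magic)) {
    return false;  // guard still closes fp
  }
  return true;     // call GE_DISMISS_GUARD(close_fp) instead if ownership is handed off
}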
- */ - -#ifndef INC_FRAMEWORK_COMMON_STRING_UTIL_H_ -#define INC_FRAMEWORK_COMMON_STRING_UTIL_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include -#include - -#include -#include -#include -#include -#include - -namespace ge { -class GE_FUNC_VISIBILITY StringUtils { - public: - static std::string &Ltrim(std::string &s) { -#if __cplusplus >= 201103L - (void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](const int32_t c) { return std::isspace(c) == 0; })); -#else - (void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), - std::not1(std::ptr_fun(std::isspace)))); -#endif - return s; - } - // lint -esym(551,*) - static std::string &Rtrim(std::string &s) { /*lint !e618*/ -#if __cplusplus >= 201103L - (void)s.erase(std::find_if(s.rbegin(), s.rend(), [](const int32_t c) { return std::isspace(c) == 0; }).base(), - s.end()); -#else - (void)s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), - s.end()); -#endif - return s; - } - // lint -esym(551,*) - /// - /// @ingroup domi_common - /// @brief delete spaces at the beginning and end of a string - /// @param [in] string to be trimmed - /// @return string after trim - /// - static std::string &Trim(std::string &s) { return Ltrim(Rtrim(s)); } - - /// - /// @ingroup domi_common - /// @brief string splitting - /// @param [in] str string to be trimmed - /// @param [in] delim separator - /// @return string array after segmentation - /// - static std::vector Split(const std::string &str, char delim) { - std::vector elems; - - if (str.empty()) { - elems.emplace_back(""); - return elems; - } - - std::stringstream ss(str); - std::string item; - - while (getline(ss, item, delim)) { - elems.push_back(item); - } - - auto str_size = str.size(); - if (str_size > 0 && str[str_size - 1] == delim) { - elems.emplace_back(""); - } - - return elems; - } - /// - /// @ingroup domi_common - /// @brief obtain the file name - /// @param [in] s path name - /// @return file name - /// - static std::string GetFileName(std::string &s) { - if (s.empty()) { - return ""; - } - std::vector files = StringUtils::Split(s, '/'); - - return files.empty() ? "" : files[files.size() - 1]; - } - /// - /// @ingroup domi_common - /// @brief full replacement - /// @link - /// @param [in] str str string to be replaced - /// @param [in] old_value old Characters Before Replacement - /// @param [in] new_value new Characters Before Replacement - /// @return string after replacement - /// - static std::string ReplaceAll(std::string str, const std::string &old_value, const std::string &new_value) { - std::string::size_type cur_pos = 0; - std::string::size_type old_length = old_value.length(); - std::string::size_type new_length = new_value.length(); - // cycle replace - for (; cur_pos != std::string::npos; cur_pos += new_length) { - if ((cur_pos = str.find(old_value, cur_pos)) != std::string::npos) { - (void)str.replace(cur_pos, old_length, new_value); - } else { - break; - } - } - return str; - } - - /// - /// @ingroup domi_common - /// @brief checks whether a character string starts with a character string (prefix) - /// @link - /// @param [in] str string to be compared - /// @param [in] str_x prefix - /// @return if the value is a prefix, true is returned. 
Otherwise, false is returned - /// - static bool StartWith(const std::string &str, const std::string str_x) { - return ((str.size() >= str_x.size()) && (str.compare(0, str_x.size(), str_x) == 0)); - } - - /// - /// @ingroup domi_common - /// @brief format string - /// @link - /// @param [in] format specifies the character string format - /// @param [in] ... format Filling Content - /// @return formatted string - /// - static std::string FormatString(const char *format, ...) { - const uint32_t MAX_BUFFER_LEN = 1024; // the stack memory plint check result must be less than 1024 - va_list args; - va_start(args, format); - char buffer[MAX_BUFFER_LEN] = {0}; - int32_t ret = vsnprintf_s(buffer, MAX_BUFFER_LEN, MAX_BUFFER_LEN - 1, format, args); - va_end(args); - return ret > 0 ? buffer : ""; - } -}; -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_STRING_UTIL_H_ diff --git a/inc/graphengine/inc/framework/common/taskdown_common.h b/inc/graphengine/inc/framework/common/taskdown_common.h deleted file mode 100644 index 090e7e260..000000000 --- a/inc/graphengine/inc/framework/common/taskdown_common.h +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_TASKDOWN_COMMON_H_ -#define INC_FRAMEWORK_COMMON_TASKDOWN_COMMON_H_ - -#include "runtime/rt.h" - -namespace ge { - -const int CC_FUSION_OP_MAX = 32; - -typedef enum tagCcStatus { - CC_STATUS_SUCCESS = 0, /**< succ */ - CC_STATUS_NOT_INITIALIZED = 1, /**< not init */ - CC_STATUS_ALLOC_FAILED = 2, /**< alloc mem failed */ - CC_STATUS_BAD_PARAM = 3, /**< para check failed */ - CC_STATUS_INTERNAL_ERROR = 4, /**< internal error */ - CC_STATUS_KERNEL_ERROR = 5, /**< kernel error */ - CC_STATUS_RUNTIME_ERROR = 6, /**< runtime error */ - CC_STATUS_NOT_SUPPORTED = 7, /**< unsupport error */ - CC_STATUS_INVALID_VALUE = 7, /**< invalid value error for blas*/ - CC_STATUS_RESERVED /**< just for check */ -} ccStatus_t; - -typedef enum tagccKernelType { - CCE_AI_CORE = 0, /* cce aicore */ - CCE_AI_CPU = 1, /* cce aicpu */ - TE = 2, /* te operator*/ - CUSTOMIZED = 3, /* customized operator */ - TE_AI_CORE = 4, /* te aicore operator*/ - TE_AI_CPU = 5, /* te aicpu operator */ - AI_CPU = 6, /* aicpu */ - CUST_AI_CPU = 7, /* custom aicpu*/ - INVALID = 8, /* unknown kernel type */ -} ccKernelType; - -typedef struct tagOpContext { - ccKernelType kernelType; - uint32_t opId; - uint32_t kernelFuncId; - uint32_t opIndex; - uint32_t opCount; - uint32_t opIndex2[CC_FUSION_OP_MAX]; - bool isFlowtable; - uint16_t *argsOffset; - uint32_t argsCount; - uint64_t genDataBaseAddr; - uint64_t genDataBaseSize; - uint64_t genWeightBaseAddr; - uint64_t genWeightBaseSize; - uint64_t genVariableBaseAddr; - uint64_t genVariableBaseSize; - uint64_t l2ctrlSize; -} ccOpContext; -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_TASKDOWN_COMMON_H_ diff --git a/inc/graphengine/inc/framework/common/types.h b/inc/graphengine/inc/framework/common/types.h deleted file mode 
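A sketch of the string helpers above applied to a "key:value;key:value" option string; the sample keys are illustrative, Split's elided return type is assumed to be std::vector<std::string>, and the include path is assumed.

#include "framework/common/string_util.h"

#include <string>
#include <vector>

// Illustrative only: split, trim, and prefix-check each "key:value" entry.
void ParseOptions(const std::string &raw) {
  std::vector<std::string> entries = ge::StringUtils::Split(raw, ';');
  for (std::string &entry : entries) {
    (void)ge::StringUtils::Trim(entry);  // strips surrounding whitespace in place
    if (ge::StringUtils::StartWith(entry, "model_name:")) {
      const std::string value = ge::StringUtils::ReplaceAll(entry, "model_name:", "");
      // ... use value ...
      (void)value;
    }
  }
}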
100644 index 91759b8f9..000000000 --- a/inc/graphengine/inc/framework/common/types.h +++ /dev/null @@ -1,1115 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_COMMON_TYPES_H_ -#define INC_FRAMEWORK_COMMON_TYPES_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "framework/common/fmk_error_codes.h" -#include "framework/common/fmk_types.h" -#include "framework/common/op_types.h" -#include "register/register_types.h" - -#if !defined(__ANDROID__) && !defined(ANDROID) -#define DOMI_DYNAMIC_CAST static_cast -#define DOMI_DYNAMIC_POINTER_CAST std::static_pointer_cast -#else -#define DOMI_DYNAMIC_CAST static_cast -#define DOMI_DYNAMIC_POINTER_CAST std::static_pointer_cast -#endif - -namespace ge { -// dump -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMP_MODEL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMP_ALL_MODEL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMP_STATUS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMP_LAYER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMP_FILE_PATH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMP_MODE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string OP_DEBUG_AICORE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string OP_DEBUG_ATOMIC; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string OP_DEBUG_ALL; - -// Supported public properties name -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROP_OME_START_TIME; // Start time -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROP_OME_DUMP_PATH; // Dump path -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROP_OME_LOG_PATH; // Log path - -// Profile-related constants -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CCE_PROFILE_ON; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CCE_PROFILE_OFF; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string OME_PROFILE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string CCE_PROFILE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string RTS_PROFILE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROFILER_JOBCTX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROFILER_TARGET_PATH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string RTS_PROFILE_PATH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROFILE_STOP_KEY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROFILE_STOP_VALUE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::map 
PROFILE_COMPONENT_MAP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROFILE_CONFIG; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string PROFILE_MODEL_ID; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string MODEL_ATTR_TASKS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string MODEL_ATTR_TASK_GEN_BASE_ADDR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string MODEL_ATTR_TASK_GEN_WEIGHT_ADDR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string MODEL_ATTR_FUSION_MODEL_DEF; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int MODEL_MAX_SIZE; // Max size of 2 GB minus 1 byte. -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint64_t FILE_HEADER_MAX_SIZE; // Max size of 3 GB. - -#if !defined(__ANDROID__) && !defined(ANDROID) -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint64_t ALLOC_MEMORY_MAX_SIZE; // Max size of 8 GB. -#else -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint64_t ALLOC_MEMORY_MAX_SIZE; // Max size of 512M. -#endif - -template -static std::pair flip_pair(const std::pair &p) { - return std::pair(p.second, p.first); -} - -template -static std::map flip_map(std::map src) { - std::map dst; - std::transform(src.begin(), src.end(), std::inserter(dst, dst.begin()), flip_pair); - return dst; -} - -REGISTER_OPTYPE_DECLARE(DATA, "Data"); -REGISTER_OPTYPE_DECLARE(AIPPDATA, "AippData"); -REGISTER_OPTYPE_DECLARE(CONVOLUTION, "Convolution"); -REGISTER_OPTYPE_DECLARE(CORRELATION, "Correlation"); -REGISTER_OPTYPE_DECLARE(CORRELATIONV2, "Correlation_V2"); -REGISTER_OPTYPE_DECLARE(DECONVOLUTION, "Deconvolution"); -REGISTER_OPTYPE_DECLARE(POOLING, "Pooling"); -REGISTER_OPTYPE_DECLARE(ELTWISE, "Eltwise"); -REGISTER_OPTYPE_DECLARE(RELU, "ReLU"); -REGISTER_OPTYPE_DECLARE(RELU6, "ReLU6"); -REGISTER_OPTYPE_DECLARE(SIGMOID, "Sigmoid"); -REGISTER_OPTYPE_DECLARE(ABSVAL, "AbsVal"); -REGISTER_OPTYPE_DECLARE(TANH, "TanH"); -REGISTER_OPTYPE_DECLARE(PRELU, "PReLU"); -REGISTER_OPTYPE_DECLARE(BATCHNORM, "BatchNorm"); -REGISTER_OPTYPE_DECLARE(FUSIONBATCHNORM, "FusionBatchNorm"); -REGISTER_OPTYPE_DECLARE(SCALE, "Scale"); -REGISTER_OPTYPE_DECLARE(FULL_CONNECTION, "FullConnection"); -REGISTER_OPTYPE_DECLARE(SOFTMAX, "Softmax"); -REGISTER_OPTYPE_DECLARE(PLUS, "Plus"); -REGISTER_OPTYPE_DECLARE(ACTIVATION, "Activation"); -REGISTER_OPTYPE_DECLARE(FLATTEN, "Flatten"); -REGISTER_OPTYPE_DECLARE(ADD, "Add"); -REGISTER_OPTYPE_DECLARE(SUB, "Sub"); -REGISTER_OPTYPE_DECLARE(MUL, "Mul"); -REGISTER_OPTYPE_DECLARE(MATMUL, "MatMul"); -REGISTER_OPTYPE_DECLARE(RSQRT, "Rsqrt"); -REGISTER_OPTYPE_DECLARE(BIASADD, "BiasAdd"); -REGISTER_OPTYPE_DECLARE(RESHAPE, "Reshape"); -REGISTER_OPTYPE_DECLARE(REFORMAT, "ReFormat"); -REGISTER_OPTYPE_DECLARE(DEPCONVOLUTION, "ConvolutionDepthwise"); -REGISTER_OPTYPE_DECLARE(DROPOUT, "Dropout"); -REGISTER_OPTYPE_DECLARE(DROPOUTDOMASK, "DropOutDoMask"); -REGISTER_OPTYPE_DECLARE(DROPOUTDOMASKV3, "DropOutDoMaskV3"); -REGISTER_OPTYPE_DECLARE(DROPOUTDOMASKV3D, "DropOutDoMaskV3D"); -REGISTER_OPTYPE_DECLARE(DROPOUTGENMASK, "DropOutGenMask"); -REGISTER_OPTYPE_DECLARE(CONCAT, "Concat"); -REGISTER_OPTYPE_DECLARE(ROIPOOLING, "ROIPooling"); -REGISTER_OPTYPE_DECLARE(PROPOSAL, "Proposal"); -REGISTER_OPTYPE_DECLARE(FSRDETECTIONOUTPUT, "FSRDetectionOutput"); -REGISTER_OPTYPE_DECLARE(DETECTIONPOSTPROCESS, "Detectpostprocess"); -REGISTER_OPTYPE_DECLARE(LRN, "LRN"); -REGISTER_OPTYPE_DECLARE(TRANSDATA, "TransData"); 
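As a worked illustration of the flip_pair/flip_map helpers declared just above (their template parameter lists did not survive in this hunk and are assumed here to be <typename T, typename U>, inferred from the bodies), the following self-contained sketch restates them and shows the intended use: turning a name-to-id map into an id-to-name map.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>
#include <string>

// Presumed shape of the helpers above: swap key and value of every map entry.
template <typename T, typename U>
static std::pair<U, T> flip_pair(const std::pair<T, U> &p) {
  return std::pair<U, T>(p.second, p.first);
}

template <typename T, typename U>
static std::map<U, T> flip_map(std::map<T, U> src) {
  std::map<U, T> dst;
  std::transform(src.begin(), src.end(), std::inserter(dst, dst.begin()), flip_pair<T, U>);
  return dst;
}

int main() {
  const std::map<std::string, uint32_t> name_to_id = {{"Data", 1U}, {"Cast", 2U}};
  const std::map<uint32_t, std::string> id_to_name = flip_map(name_to_id);
  std::cout << id_to_name.at(2U) << std::endl;  // prints "Cast"
  return 0;
}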
-REGISTER_OPTYPE_DECLARE(PERMUTE, "Permute"); -REGISTER_OPTYPE_DECLARE(SSDNORMALIZE, "SSDNormalize"); -REGISTER_OPTYPE_DECLARE(SSDPRIORBOX, "SSDPriorBox"); -REGISTER_OPTYPE_DECLARE(NETOUTPUT, "NetOutput"); -REGISTER_OPTYPE_DECLARE(SSDDETECTIONOUTPUT, "SSDDetectionOutput"); -REGISTER_OPTYPE_DECLARE(REFINEDETDETECTIONOUTPUT, "RefinedetDetectionOutput"); -REGISTER_OPTYPE_DECLARE(CHANNELAXPY, "ChannelAxpy"); -REGISTER_OPTYPE_DECLARE(PSROIPOOLING, "PSROIPooling"); -REGISTER_OPTYPE_DECLARE(POWER, "Power"); -REGISTER_OPTYPE_DECLARE(POW, "Pow"); -REGISTER_OPTYPE_DECLARE(ROIALIGN, "ROIAlign"); -REGISTER_OPTYPE_DECLARE(PYTHON, "Python"); -REGISTER_OPTYPE_DECLARE(FREESPACEEXTRACT, "FreespaceExtract"); -REGISTER_OPTYPE_DECLARE(SPATIALTF, "SpatialTransform"); -REGISTER_OPTYPE_DECLARE(SHAPE, "Shape"); -REGISTER_OPTYPE_DECLARE(SHAPEN, "ShapeN"); -REGISTER_OPTYPE_DECLARE(ARGMAX, "ArgMax"); -REGISTER_OPTYPE_DECLARE(GATHERND, "GatherNd"); -REGISTER_OPTYPE_DECLARE(GATHER, "Gather"); -REGISTER_OPTYPE_DECLARE(REALDIV, "RealDiv"); -REGISTER_OPTYPE_DECLARE(PACK, "Pack"); -REGISTER_OPTYPE_DECLARE(SLICE, "Slice"); -REGISTER_OPTYPE_DECLARE(SLICED, "SliceD"); -REGISTER_OPTYPE_DECLARE(FLOORDIV, "FloorDiv"); -REGISTER_OPTYPE_DECLARE(SQUEEZE, "Squeeze"); -REGISTER_OPTYPE_DECLARE(UNSQUEEZE, "Unsqueeze"); -REGISTER_OPTYPE_DECLARE(STRIDEDSLICE, "StridedSlice"); -REGISTER_OPTYPE_DECLARE(RANGE, "Range"); -REGISTER_OPTYPE_DECLARE(RPNPROPOSALS, "GenerateRpnProposals"); -REGISTER_OPTYPE_DECLARE(DECODEBBOX, "DecodeBBox"); -REGISTER_OPTYPE_DECLARE(PAD, "Pad"); -REGISTER_OPTYPE_DECLARE(PADV2, "PadV2"); -REGISTER_OPTYPE_DECLARE(MIRRORPAD, "MirrorPad"); -REGISTER_OPTYPE_DECLARE(TILE, "Tile"); -REGISTER_OPTYPE_DECLARE(SIZE, "Size"); -REGISTER_OPTYPE_DECLARE(CLIPBOXES, "Clipboxes"); -REGISTER_OPTYPE_DECLARE(FASTRCNNPREDICTIONS, "FastrcnnPredictions"); -REGISTER_OPTYPE_DECLARE(SPLIT, "Split"); -REGISTER_OPTYPE_DECLARE(SPLITV, "SplitV"); -REGISTER_OPTYPE_DECLARE(EXPANDDIMS, "ExpandDims"); -REGISTER_OPTYPE_DECLARE(EMPTY, "Empty"); -REGISTER_OPTYPE_DECLARE(MEAN, "Mean"); -REGISTER_OPTYPE_DECLARE(GREATER, "Greater"); -REGISTER_OPTYPE_DECLARE(SWITCH, "Switch"); -REGISTER_OPTYPE_DECLARE(SWITCHN, "SwitchN"); -REGISTER_OPTYPE_DECLARE(REFSWITCH, "RefSwitch"); -REGISTER_OPTYPE_DECLARE(MERGE, "Merge"); -REGISTER_OPTYPE_DECLARE(REFMERGE, "RefMerge"); -REGISTER_OPTYPE_DECLARE(ENTER, "Enter"); -REGISTER_OPTYPE_DECLARE(REFENTER, "RefEnter"); -REGISTER_OPTYPE_DECLARE(LOOPCOND, "LoopCond"); -REGISTER_OPTYPE_DECLARE(NEXTITERATION, "NextIteration"); -REGISTER_OPTYPE_DECLARE(REFNEXTITERATION, "RefNextIteration"); -REGISTER_OPTYPE_DECLARE(EXIT, "Exit"); -REGISTER_OPTYPE_DECLARE(REFEXIT, "RefExit"); -REGISTER_OPTYPE_DECLARE(CONTROLTRIGGER, "ControlTrigger"); -REGISTER_OPTYPE_DECLARE(SYMBOLICGRADIENT, "SymbolicGradient"); -REGISTER_OPTYPE_DECLARE(REMOTECALL, "RemoteCall"); -REGISTER_OPTYPE_DECLARE(_IF, "_If"); -REGISTER_OPTYPE_DECLARE(STATELESSIF, "StatelessIf"); -REGISTER_OPTYPE_DECLARE(IF, "If"); -REGISTER_OPTYPE_DECLARE(CASE, "Case"); -REGISTER_OPTYPE_DECLARE(_WHILE, "_While"); -REGISTER_OPTYPE_DECLARE(WHILE, "While"); -REGISTER_OPTYPE_DECLARE(STATELESSWHILE, "StatelessWhile"); -REGISTER_OPTYPE_DECLARE(FOR, "For"); -REGISTER_OPTYPE_DECLARE(PARTITIONEDCALL, "PartitionedCall"); -REGISTER_OPTYPE_DECLARE(STATEFULPARTITIONEDCALL, "StatefulPartitionedCall"); -REGISTER_OPTYPE_DECLARE(FAKEPARAM, "FakeParam"); -REGISTER_OPTYPE_DECLARE(TRANSPOSE, "Transpose"); -REGISTER_OPTYPE_DECLARE(TRANSPOSED, "TransposeD"); -REGISTER_OPTYPE_DECLARE(CAST, "Cast"); 
-REGISTER_OPTYPE_DECLARE(REGION, "Region"); -REGISTER_OPTYPE_DECLARE(YOLO, "Yolo"); -REGISTER_OPTYPE_DECLARE(YOLODETECTIONOUTPUT, "YoloDetectionOutput"); -REGISTER_OPTYPE_DECLARE(FILL, "Fill"); -REGISTER_OPTYPE_DECLARE(RANK, "Rank"); -REGISTER_OPTYPE_DECLARE(REVERSE, "Reverse"); -REGISTER_OPTYPE_DECLARE(UNPACK, "Unpack"); -REGISTER_OPTYPE_DECLARE(YOLO2REORG, "Yolo2Reorg"); -REGISTER_OPTYPE_DECLARE(REDUCESUM, "ReduceSum"); -REGISTER_OPTYPE_DECLARE(SUM, "Sum"); -REGISTER_OPTYPE_DECLARE(CONSTANT, "Const"); -REGISTER_OPTYPE_DECLARE(RESIZEBILINEAR, "ResizeBilinear"); -REGISTER_OPTYPE_DECLARE(RESIZEBILINEARGRAD, "ResizeBilinearGrad"); -REGISTER_OPTYPE_DECLARE(MAXIMUM, "Maximum"); -REGISTER_OPTYPE_DECLARE(FRAMEWORKOP, "FrameworkOp"); -REGISTER_OPTYPE_DECLARE(ARG, "_Arg"); -REGISTER_OPTYPE_DECLARE(FUSEDBATCHNORMGRAD, "FusedBatchNormGrad"); -REGISTER_OPTYPE_DECLARE(LSTM, "LSTM"); -REGISTER_OPTYPE_DECLARE(HIGHWAY, "HighWay"); -REGISTER_OPTYPE_DECLARE(RNN, "RNN"); -REGISTER_OPTYPE_DECLARE(ATTENTIONDECODER, "AttentionDecoder"); -REGISTER_OPTYPE_DECLARE(LOGICAL_NOT, "LogicalNot"); -REGISTER_OPTYPE_DECLARE(LOGICAL_AND, "LogicalAnd"); -REGISTER_OPTYPE_DECLARE(LOGICAL_OR, "LogicalOr"); -REGISTER_OPTYPE_DECLARE(EQUAL, "Equal"); -REGISTER_OPTYPE_DECLARE(NOTEQUAL, "NotEqual"); -REGISTER_OPTYPE_DECLARE(INTERP, "Interp"); -REGISTER_OPTYPE_DECLARE(SHUFFLECHANNEL, "ShuffleChannel"); -REGISTER_OPTYPE_DECLARE(AIPP, "Aipp"); -REGISTER_OPTYPE_DECLARE(MULTISHAPE, "MultiShape"); -REGISTER_OPTYPE_DECLARE(RECIPROCAL, "Reciprocal"); -REGISTER_OPTYPE_DECLARE(SELU, "Selu"); -REGISTER_OPTYPE_DECLARE(ELU, "Elu"); -REGISTER_OPTYPE_DECLARE(ACOSH, "Acosh"); -REGISTER_OPTYPE_DECLARE(ASINH, "Asinh"); -REGISTER_OPTYPE_DECLARE(MINIMUM, "Minimum"); -REGISTER_OPTYPE_DECLARE(CLIP, "Clip"); -REGISTER_OPTYPE_DECLARE(L2NORMALIZE, "L2Normalize"); -REGISTER_OPTYPE_DECLARE(CROPANDRESIZE, "CropAndResize"); -REGISTER_OPTYPE_DECLARE(UNUSEDCONST, "UnusedConst"); -REGISTER_OPTYPE_DECLARE(SPARSETODENSE, "SparseToDense"); -REGISTER_OPTYPE_DECLARE(NONMAXSUPPRESSION, "NonMaxSuppression"); -REGISTER_OPTYPE_DECLARE(TOPKV2, "TopKV2"); -REGISTER_OPTYPE_DECLARE(INVERTPERMUTATION, "InvertPermutation"); -REGISTER_OPTYPE_DECLARE(MULTINOMIAL, "Multinomial"); -REGISTER_OPTYPE_DECLARE(REVERSESEQUENCE, "ReverseSequence"); -REGISTER_OPTYPE_DECLARE(REDUCEPROD, "ReduceProd"); -REGISTER_OPTYPE_DECLARE(REDUCEMAX, "ReduceMax"); -REGISTER_OPTYPE_DECLARE(REDUCEMIN, "ReduceMin"); -REGISTER_OPTYPE_DECLARE(EXTRACTIMAGEPATCHES, "ExtractImagePatches"); -REGISTER_OPTYPE_DECLARE(SQRT, "Sqrt"); -REGISTER_OPTYPE_DECLARE(REDUCEALL, "ReduceAll"); -REGISTER_OPTYPE_DECLARE(RESIZENEARESTNEIGHBOR, "ResizeNearestNeighbor"); -REGISTER_OPTYPE_DECLARE(SPACETOBATCHND, "SpaceToBatchND"); -REGISTER_OPTYPE_DECLARE(BATCHTOSPACEND, "BatchToSpaceND"); -REGISTER_OPTYPE_DECLARE(ASSERT, "Assert"); -REGISTER_OPTYPE_DECLARE(GREATEREQUAL, "GreaterEqual"); -REGISTER_OPTYPE_DECLARE(FLOOR, "Floor"); -REGISTER_OPTYPE_DECLARE(RANDOMUNIFORM, "RandomUniform"); -REGISTER_OPTYPE_DECLARE(BATCHMATMUL, "BatchMatMul"); -REGISTER_OPTYPE_DECLARE(LESSEQUAL, "LessEqual"); -REGISTER_OPTYPE_DECLARE(ONEHOT, "OneHot"); -REGISTER_OPTYPE_DECLARE(LAYERNORM, "LayerNorm"); -REGISTER_OPTYPE_DECLARE(SPACETODEPTH, "SpaceToDepth"); -REGISTER_OPTYPE_DECLARE(DEPTHTOSPACE, "DepthToSpace"); -REGISTER_OPTYPE_DECLARE(RINT, "Rint"); -REGISTER_OPTYPE_DECLARE(ATAN, "Atan"); -REGISTER_OPTYPE_DECLARE(ATAN2, "Atan2"); -REGISTER_OPTYPE_DECLARE(ATANH, "Atanh"); -REGISTER_OPTYPE_DECLARE(ACOS, "Acos"); -REGISTER_OPTYPE_DECLARE(ASIN, "Asin"); 
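The long REGISTER_OPTYPE_DECLARE list above (and continuing below) registers the canonical op-type strings used throughout GraphEngine. The macro expansion is not shown in this header; the sketch below assumes the usual declare/define pair, i.e. that REGISTER_OPTYPE_DECLARE(VAR, "Name") declares an extern string constant and a matching define in a source file supplies its value, so that consumer code can compare a node's type against the constant instead of a hard-coded literal.

#include <iostream>
#include <string>

namespace ge {
// Assumed expansion, for illustration only.
#define REGISTER_OPTYPE_DECLARE(var_name, str_name) extern const char *var_name;
#define REGISTER_OPTYPE_DEFINE(var_name, str_name) const char *var_name = str_name;
REGISTER_OPTYPE_DECLARE(CAST, "Cast");
REGISTER_OPTYPE_DEFINE(CAST, "Cast");
}  // namespace ge

// Typical consumer code: compare a node's type string against the registered constant.
bool IsCastNode(const std::string &node_type) { return node_type == ge::CAST; }

int main() {
  std::cout << std::boolalpha << IsCastNode("Cast") << std::endl;  // prints "true"
  return 0;
}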
-REGISTER_OPTYPE_DECLARE(NEG, "Neg"); -REGISTER_OPTYPE_DECLARE(LOG, "Log"); -REGISTER_OPTYPE_DECLARE(TAN, "Tan"); -REGISTER_OPTYPE_DECLARE(ROUND, "Round"); -REGISTER_OPTYPE_DECLARE(UPSAMPLE, "Upsample"); -REGISTER_OPTYPE_DECLARE(FLOORMOD, "FloorMod"); -REGISTER_OPTYPE_DECLARE(LESS, "Less"); -REGISTER_OPTYPE_DECLARE(ZEROSLIKE, "ZerosLike"); -REGISTER_OPTYPE_DECLARE(EXP, "Exp"); -REGISTER_OPTYPE_DECLARE(WHERE, "Where"); -REGISTER_OPTYPE_DECLARE(FAKEQUANTWITHMINMAXVARS, "FakeQuantWithMinMaxVars"); -REGISTER_OPTYPE_DECLARE(SOFTPLUS, "Softplus"); -REGISTER_OPTYPE_DECLARE(SOFTSIGN, "Softsign"); -REGISTER_OPTYPE_DECLARE(COSH, "Cosh"); -REGISTER_OPTYPE_DECLARE(SINH, "Sinh"); -REGISTER_OPTYPE_DECLARE(RETINAMULTIANCHORS, "RetinaMultiAnchor"); -REGISTER_OPTYPE_DECLARE(SQUAREDDIFFERENCE, "SquaredDifference"); -REGISTER_OPTYPE_DECLARE(REQUIREDSPACETOBATCHPADDINGS, "RequiredSpaceToBatchPaddings"); // for retinanet scope fusion -REGISTER_OPTYPE_DECLARE(SSDPOSTPROCESSOR, "SSDPostProcessor"); -REGISTER_OPTYPE_DECLARE(SSDANCHORGENERATOR, "SSDAnchorGenerator"); -REGISTER_OPTYPE_DECLARE(RETINANETBOXES, "RetinanetBoxes"); -REGISTER_OPTYPE_DECLARE(RETINANETCLIPPEDBOXES, "RetinanetClippedBoxes"); -REGISTER_OPTYPE_DECLARE(RETINANETFILTEREDDETECTIONS, "RetinanetFilteredDetections"); -REGISTER_OPTYPE_DECLARE(RETINANETPOSTPROCESSOR, "RetinanetPostProcessor"); -REGISTER_OPTYPE_DECLARE(RETINANETANCHORS, "RetinanetAnchors"); -REGISTER_OPTYPE_DECLARE(FASTERRCNNMAP, "FasterRCNNMap"); -REGISTER_OPTYPE_DECLARE(FASTERRCNNMAP1, "FasterRCNNMap1"); -REGISTER_OPTYPE_DECLARE(FASTERRCNNSECONDSTAGEPOSTPROCESSOR, "FasterRCNNSecondStagePostprocessor"); -REGISTER_OPTYPE_DECLARE(FASTERRCNNROIINTERPOOLING, "FasterRCNNROIInterPooling"); -REGISTER_OPTYPE_DECLARE(FASTERRCNNFIRSTSTAGEPOSTPROCESSOR, "FasterRCNNFirstStagePostprocessor"); -REGISTER_OPTYPE_DECLARE(FASTERRCNNGRIDANCHORGENERATOR, "FasterRCNNGridAnchorGenerator"); -REGISTER_OPTYPE_DECLARE(ROIINTERPOOLING, "ROIInterPooling"); -REGISTER_OPTYPE_DECLARE(FASTERRCNNCLIPTOWINDOW, "FasterRCNNClipToWindow"); -REGISTER_OPTYPE_DECLARE(EMBEDLOOKUP, "EmbedLookup"); -REGISTER_OPTYPE_DECLARE(HASHLOOKUP, "HashLookup"); -REGISTER_OPTYPE_DECLARE(LSH_PROJ, "LshProject"); -REGISTER_OPTYPE_DECLARE(SVDF, "SVDF"); -REGISTER_OPTYPE_DECLARE(IDENTITY, "Identity"); -REGISTER_OPTYPE_DECLARE(PLACEHOLDERWITHDEFAULT, "PlaceholderWithDefault"); -REGISTER_OPTYPE_DECLARE(IDENTITYN, "IdentityN"); -REGISTER_OPTYPE_DECLARE(GETSPAN, "GetSpan"); -REGISTER_OPTYPE_DECLARE(STOPGRADIENT, "StopGradient"); -REGISTER_OPTYPE_DECLARE(PREVENTGRADIENT, "PreventGradient"); -REGISTER_OPTYPE_DECLARE(GUARANTEECONST, "GuaranteeConst"); -REGISTER_OPTYPE_DECLARE(BROADCASTGRADIENTARGS, "BroadcastGradientArgs"); -REGISTER_OPTYPE_DECLARE(BROADCASTARGS, "BroadcastArgs"); -REGISTER_OPTYPE_DECLARE(CONCATV2, "ConcatV2"); -REGISTER_OPTYPE_DECLARE(CONCATOFFSET, "ConcatOffset"); -REGISTER_OPTYPE_DECLARE(LESSEQUAL, "LessEqual"); -REGISTER_OPTYPE_DECLARE(SELECT, "Select"); -REGISTER_OPTYPE_DECLARE(CONFUSIONMATRIX, "ConfusionMatrix"); -REGISTER_OPTYPE_DECLARE(PLACEHOLDER, "PlaceHolder"); -REGISTER_OPTYPE_DECLARE(END, "End"); -REGISTER_OPTYPE_DECLARE(BASICLSTMCELL, "BasicLSTMCell"); -REGISTER_OPTYPE_DECLARE(GETNEXT, "GetNext"); -REGISTER_OPTYPE_DECLARE(INITDATA, "InitData"); -REGISTER_OPTYPE_DECLARE(TRANSSHAPE, "TransShape") -REGISTER_OPTYPE_DECLARE(REFIDENTITY, "RefIdentity"); -REGISTER_OPTYPE_DECLARE(BITCAST, "Bitcast"); - -// ANN dedicated operator -REGISTER_OPTYPE_DECLARE(ANN_MEAN, "AnnMean"); -REGISTER_OPTYPE_DECLARE(ANN_CONVOLUTION, 
"AnnConvolution"); -REGISTER_OPTYPE_DECLARE(ANN_DEPCONVOLUTION, "AnnDepthConv"); -REGISTER_OPTYPE_DECLARE(ANN_FULLCONNECTION, "AnnFullConnection"); -REGISTER_OPTYPE_DECLARE(ANN_NETOUTPUT, "AnnNetOutput"); -REGISTER_OPTYPE_DECLARE(ANN_DATA, "AnnData"); -REGISTER_OPTYPE_DECLARE(ANN_RESHAPE, "AnnReshape"); -REGISTER_OPTYPE_DECLARE(ANN_ADD, "AnnAdd"); -REGISTER_OPTYPE_DECLARE(ANN_MUL, "AnnMul"); -REGISTER_OPTYPE_DECLARE(ANN_SUB, "AnnSub"); -REGISTER_OPTYPE_DECLARE(ANN_DIV, "AnnDiv"); -REGISTER_OPTYPE_DECLARE(ANN_DEQUANTIZE, "AnnDequant"); -REGISTER_OPTYPE_DECLARE(ANN_QUANTIZE, "AnnQuant"); -REGISTER_OPTYPE_DECLARE(ANN_PAD, "AnnPad"); -REGISTER_OPTYPE_DECLARE(ANN_RESIZE_BILINEAR, "AnnResizeBilinear"); - -// Training operator -REGISTER_OPTYPE_DECLARE(GATHERV2, "GatherV2"); -REGISTER_OPTYPE_DECLARE(CONVGRADFILTER, "Conv2DBackpropFilter"); -REGISTER_OPTYPE_DECLARE(CONV2D, "Conv2D"); -REGISTER_OPTYPE_DECLARE(CONV2DBACKPROPINPUT, "Conv2DBackpropInput"); -REGISTER_OPTYPE_DECLARE(FUSEDBATCHNORM, "FusedBatchNorm"); -REGISTER_OPTYPE_DECLARE(BIASADDGRAD, "BiasAddGrad"); -REGISTER_OPTYPE_DECLARE(ACTIVATIONGRAD, "ReluGrad"); -REGISTER_OPTYPE_DECLARE(MAXPOOLWITHARGMAX, "MaxPoolWithArgmax"); -REGISTER_OPTYPE_DECLARE(MAXPOOLGRADWITHARGMAX, "MaxPoolGradWithArgmax"); -REGISTER_OPTYPE_DECLARE(SPARSESOFTMAXCROSSENTROPYWITHLOGITS, "SparseSoftmaxCrossEntropyWithLogits"); -REGISTER_OPTYPE_DECLARE(SNAPSHOT, "Snapshot"); -REGISTER_OPTYPE_DECLARE(LAYERNORM, "LayerNorm"); -REGISTER_OPTYPE_DECLARE(HUBERLOSSGRAD, "HuberLossGrad"); -REGISTER_OPTYPE_DECLARE(HUBERLOSS, "HuberLoss"); -REGISTER_OPTYPE_DECLARE(NEGATIVE, "Negative"); -REGISTER_OPTYPE_DECLARE(SSDCAST, "SSDCast"); -REGISTER_OPTYPE_DECLARE(SSDSQUEEZEFUSION, "SsdSqueezeFusion"); -REGISTER_OPTYPE_DECLARE(SPARSESOFTMAXCROSSENTROPY, "SsdSparseSoftmaxCrossEntropy"); -REGISTER_OPTYPE_DECLARE(SPARSESOFTMAXCROSSENTROPYGRAD, "SsdSparseSoftmaxCrossEntropyGrad"); -REGISTER_OPTYPE_DECLARE(CONCATFIVE2FOUR, "ConcatFive2Four"); -REGISTER_OPTYPE_DECLARE(CONCATFOUR2FIVE, "ConcatFour2Five"); -REGISTER_OPTYPE_DECLARE(SSDREALDIVTILEMUL, "SSDRealdivTileMul"); -REGISTER_OPTYPE_DECLARE(SSDSUMMULREALDIVMEAN, "SSDSumMulRealdivMean"); - -REGISTER_OPTYPE_DECLARE(MEANGRAD, "MeanGrad"); -REGISTER_OPTYPE_DECLARE(TRANSLATE, "Translate"); -REGISTER_OPTYPE_DECLARE(ADDN, "AddN"); -REGISTER_OPTYPE_DECLARE(L2LOSS, "L2Loss"); -REGISTER_OPTYPE_DECLARE(MULTIPLY, "Multiply"); -REGISTER_OPTYPE_DECLARE(RELU6GRAD, "Relu6Grad"); -REGISTER_OPTYPE_DECLARE(AVGPOOLGRAD, "AvgPoolGrad"); -REGISTER_OPTYPE_DECLARE(DEPTHWISECONV2DBACKPROPFILTER, "DepthwiseConv2dNativeBackpropFilter"); -REGISTER_OPTYPE_DECLARE(DEPTHWISECONV2DBACKPORPINPUT, "DepthwiseConv2dNativeBackpropInput"); -REGISTER_OPTYPE_DECLARE(DEPTHWISECONV2DFORWARDNATIVE, "DepthwiseConv2dNative"); -REGISTER_OPTYPE_DECLARE(DROPOUTGRAD, "DropOutGrad"); -REGISTER_OPTYPE_DECLARE(APPLYRMSPROPMIXEDPRECISION, "apply_rms_prop_mixed_precision"); -REGISTER_OPTYPE_DECLARE(APPLYRMSPROP, "ApplyRMSProp"); -REGISTER_OPTYPE_DECLARE(LARS, "Lars"); -REGISTER_OPTYPE_DECLARE(DYNAMICSTITCH, "DynamicStitch"); - -// Variable sink related -REGISTER_OPTYPE_DECLARE(VARIABLEV2, "VariableV2"); -REGISTER_OPTYPE_DECLARE(VARHANDLEOP, "VarHandleOp"); -REGISTER_OPTYPE_DECLARE(TEMPORARYVARIABLE, "TemporaryVariable"); -REGISTER_OPTYPE_DECLARE(DESTROYTEMPORARYVARIABLE, "DestroyTemporaryVariable"); -REGISTER_OPTYPE_DECLARE(VARIABLE, "Variable"); - -REGISTER_OPTYPE_DECLARE(READVARIABLEOP, "ReadVariableOp"); - -REGISTER_OPTYPE_DECLARE(VARISINITIALIZEDOP, "VarIsInitializedOp"); 
-REGISTER_OPTYPE_DECLARE(ISVARIABLEINITIALIZED, "IsVariableInitialized"); - -REGISTER_OPTYPE_DECLARE(ASSIGN, "Assign"); -REGISTER_OPTYPE_DECLARE(ASSIGNVARIABLEOP, "AssignVariableOp"); - -REGISTER_OPTYPE_DECLARE(ASSIGNADD, "AssignAdd"); -REGISTER_OPTYPE_DECLARE(ASSIGNADDVARIABLEOP, "AssignAddVariableOp"); - -REGISTER_OPTYPE_DECLARE(ASSIGNSUB, "AssignSub"); -REGISTER_OPTYPE_DECLARE(ASSIGNSUBVARIABLEOP, "AssignSubVariableOp"); - -REGISTER_OPTYPE_DECLARE(APPLYMOMENTUM, "ApplyMomentum"); -REGISTER_OPTYPE_DECLARE(RESOURCEAPPLYMOMENTUM, "ResourceApplyMomentum"); -REGISTER_OPTYPE_DECLARE(SGD, "SGD"); -REGISTER_OPTYPE_DECLARE(NOOP, "NoOp"); -REGISTER_OPTYPE_DECLARE(LAYERNORMGRAD, "LayerNormGrad"); - -REGISTER_OPTYPE_DECLARE(SQUARE, "Square"); -REGISTER_OPTYPE_DECLARE(HCOMBROADCAST, "HcomBroadcast"); -REGISTER_OPTYPE_DECLARE(HCOMALLGATHER, "HcomAllGather"); -REGISTER_OPTYPE_DECLARE(HCOMALLREDUCE, "HcomAllReduce"); -REGISTER_OPTYPE_DECLARE(HCOMREDUCESCATTER, "HcomReduceScatter"); -REGISTER_OPTYPE_DECLARE(HCOMREDUCE, "HcomReduce"); -REGISTER_OPTYPE_DECLARE(HCOMSEND, "HcomSend"); -REGISTER_OPTYPE_DECLARE(HCOMRECEIVE, "HcomReceive"); -REGISTER_OPTYPE_DECLARE(HCOMREMOTEREAD, "HcomRemoteRead"); -REGISTER_OPTYPE_DECLARE(HCOMREMOTEREFREAD, "HcomRemoteRefRead"); -REGISTER_OPTYPE_DECLARE(HCOMREMOTEWRITE, "HcomRemoteWrite"); -REGISTER_OPTYPE_DECLARE(HCOMREMOTESCATTERWRITE, "HcomRemoteScatterWrite"); - -REGISTER_OPTYPE_DECLARE(VARASSIGN, "VarAssign"); -REGISTER_OPTYPE_DECLARE(VARISINITIALIZEDOP, "VarIsInitializedOp"); -REGISTER_OPTYPE_DECLARE(LogTimeStamp, "LogTimeStamp"); -REGISTER_OPTYPE_DECLARE(PARALLELCONCATSTART, "_ParallelConcatStart"); -REGISTER_OPTYPE_DECLARE(CONSTANTOP, "Constant"); -REGISTER_OPTYPE_DECLARE(STREAMSWITCH, "StreamSwitch"); -REGISTER_OPTYPE_DECLARE(STREAMSWITCHN, "StreamSwitchN"); -REGISTER_OPTYPE_DECLARE(STREAMACTIVE, "StreamActive"); -REGISTER_OPTYPE_DECLARE(MEMCPYASYNC, "MemcpyAsync"); -REGISTER_OPTYPE_DECLARE(MEMCPYADDRASYNC, "MemcpyAddrAsync"); -REGISTER_OPTYPE_DECLARE(STREAMMERGE, "StreamMerge"); -REGISTER_OPTYPE_DECLARE(ENDGRAPH, "EndGraph"); -REGISTER_OPTYPE_DECLARE(MODELEXIT, "ModelExit"); -REGISTER_OPTYPE_DECLARE(SEND, "Send"); -REGISTER_OPTYPE_DECLARE(RECV, "Recv"); -REGISTER_OPTYPE_DECLARE(ENDOFSEQUENCE, "EndOfSequence"); - -REGISTER_OPTYPE_DECLARE(LABELSET, "LabelSet"); -REGISTER_OPTYPE_DECLARE(LABELGOTO, "LabelGoto"); -REGISTER_OPTYPE_DECLARE(LABELGOTOEX, "LabelGotoEx"); -REGISTER_OPTYPE_DECLARE(LABELSWITCH, "LabelSwitch"); -REGISTER_OPTYPE_DECLARE(LABELSWITCHBYINDEX, "LabelSwitchByIndex"); - -REGISTER_OPTYPE_DECLARE(ATOMICADDRCLEAN, "AtomicAddrClean"); - -REGISTER_OPTYPE_DECLARE(ABS_GRAD, "AbsGrad"); -REGISTER_OPTYPE_DECLARE(ACCUMULATE_N_V2, "AccumulateNV2"); -REGISTER_OPTYPE_DECLARE(ACOS_GRAD, "AcosGrad"); -REGISTER_OPTYPE_DECLARE(ACOSH_GRAD, "AcoshGrad"); -REGISTER_OPTYPE_DECLARE(ANY, "Any"); -REGISTER_OPTYPE_DECLARE(APPROXIMATE_EQUAL, "ApproximateEqual"); -REGISTER_OPTYPE_DECLARE(ASIN_GRAD, "AsinGrad"); -REGISTER_OPTYPE_DECLARE(ASINH_GRAD, "AsinhGrad"); -REGISTER_OPTYPE_DECLARE(ATAN_GRAD, "AtanGrad"); -REGISTER_OPTYPE_DECLARE(BROADCAST_TO, "BroadcastTo"); -REGISTER_OPTYPE_DECLARE(ELU_GRAD, "EluGrad"); -REGISTER_OPTYPE_DECLARE(ADD_V2, "AddV2"); -REGISTER_OPTYPE_DECLARE(DATAFORMATDIMMAP, "DataFormatDimMap"); -REGISTER_OPTYPE_DECLARE(DATAFORMATVECPERMUTE, "DataFormatVecPermute"); -REGISTER_OPTYPE_DECLARE(BESSELI0e, "BesselI0e"); -REGISTER_OPTYPE_DECLARE(BESSELI1e, "BesselI1e"); -REGISTER_OPTYPE_DECLARE(DEQUANTIZE, "Dequantize"); -REGISTER_OPTYPE_DECLARE(APPLYADADELTA, 
"ApplyAdadelta"); -REGISTER_OPTYPE_DECLARE(APPLYADAGRAD, "ApplyAdagrad"); -REGISTER_OPTYPE_DECLARE(APPLYADAGRADDA, "ApplyAdagradDA"); -REGISTER_OPTYPE_DECLARE(APPLYADAM, "ApplyAdam"); -REGISTER_OPTYPE_DECLARE(APPLYADAMAX, "ApplyAdaMax"); -REGISTER_OPTYPE_DECLARE(APPLYADDSIGN, "ApplyAddSign"); -REGISTER_OPTYPE_DECLARE(APPLYCENTEREDRMSPROP, "ApplyCenteredRMSProp"); -REGISTER_OPTYPE_DECLARE(APPLYFTRL, "ApplyFtrl"); -REGISTER_OPTYPE_DECLARE(APPLYFTRLV2, "ApplyFtrlv2"); -REGISTER_OPTYPE_DECLARE(APPLYGRADIENTDESCENT, "ApplyGradientDescent"); -REGISTER_OPTYPE_DECLARE(APPLYPOWERSIGN, "ApplyPowerSign"); -REGISTER_OPTYPE_DECLARE(APPLYPROXIMALADAGRAD, "ApplyProximalAdagrad"); -REGISTER_OPTYPE_DECLARE(APPLYPROXIMALGRADIENTDESCENT, "ApplyProximalGradientDescent"); - -REGISTER_OPTYPE_DECLARE(FOCAL_LOSS, "FocalLoss"); -REGISTER_OPTYPE_DECLARE(FOCAL_LOSS_GRAD, "FocalLossGrad"); -REGISTER_OPTYPE_DECLARE(SMOOTHL1_LOSS, "SmoothL1Loss"); -REGISTER_OPTYPE_DECLARE(SMOOTHL1_LOSS_grad, "SmoothL1LossGrad"); -REGISTER_OPTYPE_DECLARE(REDUCEMEAN, "ReduceMean"); -REGISTER_OPTYPE_DECLARE(CONCAT_V2, "ConcatV2"); -REGISTER_OPTYPE_DECLARE(ONEHOT_V2, "OneHotV2"); -REGISTER_OPTYPE_DECLARE(SLICE_V2, "SliceV2"); -REGISTER_OPTYPE_DECLARE(TILE_V2, "TileV2"); -REGISTER_OPTYPE_DECLARE(SUM_V2, "SumV2"); -// Common operator type when operators have the same name -REGISTER_OPTYPE_DECLARE(DETECTIONOUTPUT, "DetectionOutput"); - -// custom operator -REGISTER_OPTYPE_DECLARE(CUSTOMOP, "CustomOp"); -REGISTER_OPTYPE_DECLARE(CUSTOMOP_NCHW, "CustomOpNchw"); -REGISTER_OPTYPE_DECLARE(CUSTOMOP_NHWC, "CustomOpNhwc"); -REGISTER_OPTYPE_DECLARE(CUSTOMOP_NC1HWC0, "CustomOpNc1hwc0"); - -// Depthwise 4d_2_6d,6d_2_4d -REGISTER_OPTYPE_DECLARE(DEPTHWISEWEIGHT4D26D, "depthwise_weight_4d_2_6d"); -REGISTER_OPTYPE_DECLARE(DEPTHWISEWEIGHT6D24D, "depthwise_weight_6d_2_4d"); - -REGISTER_OPTYPE_DECLARE(SQRTGRAD, "SqrtGrad"); -REGISTER_OPTYPE_DECLARE(SIGMOIDGRAD, "SigmoidGrad"); - -// Horovod operator -REGISTER_OPTYPE_DECLARE(HVDCALLBACKALLREDUCE, "HorovodAllreduce"); -REGISTER_OPTYPE_DECLARE(HVDCALLBACKALLGATHER, "HorovodAllgather"); -REGISTER_OPTYPE_DECLARE(HVDCALLBACKBROADCAST, "HorovodBroadcast"); -REGISTER_OPTYPE_DECLARE(HVDWAIT, "HorovodWait"); - -// aicpu op for online_infer dynamic_dims -REGISTER_OPTYPE_DECLARE(GETDYNAMICDIMS, "GetDynamicDims"); - -// profiling training trace node -REGISTER_OPTYPE_DECLARE(PROFILINGTRAININGTRACE, "ProfilingTrainingTrace"); - -enum InputMode { INPUT = 0, CONST_INPUT }; - -// Definition of the processing status enum of the process module -enum ModelProcessState { - INIT_STATE = 0, // init status - WAIT_EVENT_STATE, // Wait for the event status - IND_RSLT_STATE, // The model execution result is being output to the high level - STOPPED_STATE, // Model execution completed. The model enters this state after Model Manager::Stop - RESERVED_STATE, // reserved -}; - -// Indicates the enun definition of the execution mode of the access module -enum SysMode { - INFERENCE = 0, // Normal, that is, Inference mode - DEBUG, // Debug mode - TIME, // Model execution time mode, including the execution time of each OP - STOP, // STOP mode - RESET, // RESET mode - PERFORMANCE, // Impact of enabling the performance model: 1. 
The input data of the model is considered ready and does - // not need to be converted - ANDROID_DEBUG, // Exports Android platform computing data - RESERVED, // reserved -}; - -// @brief encryption type of the model file -enum ModelEncryptType { - UNENCRYPTED, // not encrypted - ENCRYPTED // encrypted -}; - -/// -/// @brief signature verification -/// -enum ModelCheckType { - CHECK, // signature verification - UNCHECK // no verification -}; - -/// -/// @brief dynamic input type -/// -enum DynamicInputType { - FIXED = 0, // default mode - DYNAMIC_BATCH = 1, - DYNAMIC_IMAGE = 2, - DYNAMIC_DIMS = 3 -}; - -/// -/// @brief magic number of the model file -/// -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_FILE_MAGIC_NUM; - -/// -/// @brief model header length -/// -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_FILE_HEAD_LEN; - -/// -/// @brief model name length -/// -static constexpr uint32_t MODEL_NAME_LENGTH = 32; - -/// -/// @brief length of user-defined information -/// -static constexpr uint32_t USER_DEFINE_INFO_LENGTH = 32; - -/// -/// @brief length of the model file signature -/// -static constexpr uint32_t MODEL_FILE_CHECKSUM_LENGTH = 64; - -/// -/// @brief length of the reserved field in the model file header -/// -static constexpr uint32_t MODEL_FILE_RESERVED_LENGTH = 75; - -/// -/// @ingroup domi_omg -/// @brief INPUT node type -/// -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string INPUT_TYPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMMY_DATA; - -/// -/// @ingroup domi_omg -/// @brief AIPP flag, indicating the aipp conv operator -/// -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string AIPP_CONV_FLAG; - -/// -/// @ingroup domi_omg -/// @brief AIPP flag, indicating the aipp data operator -/// -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string AIPP_DATA_FLAG; - -// flag of the Data operator, indicating that the input will be input to the dynamic AIPP operator -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string INPUT_TO_DYNAMIC_AIPP; - -// records the W dimension of the model input corresponding to the dynamic AIPP -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string AIPP_RELATED_DATA_DIM_W; - -// H dimension of the model input corresponding to the dynamic AIPP -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string AIPP_RELATED_DATA_DIM_H; - -// DATA node type -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DATA_TYPE; - -// DATA Operator Type -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string AIPP_DATA_TYPE; - -// framework Operator Type -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string FRAMEWORK_OP_TYPE; - -// DATA node type -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ANN_DATA_TYPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ANN_NETOUTPUT_TYPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ANN_DEPTHCONV_TYPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ANN_CONV_TYPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ANN_FC_TYPE; -// convolution node type -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_NET_OUTPUT; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string 
NODE_NAME_END_GRAPH; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_OP_DEBUG; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string OP_TYPE_OP_DEBUG; - -// convolution node type -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string OP_TYPE_CONVOLUTION; -// adds a convolutional node name for the hard AIPP -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string AIPP_CONV_OP_NAME; -// delimiter of operator configuration items -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string OP_CONF_DELIMITER; - -// op attr name -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_VALUE1; - -// op attr name, used to 6d_2_4d C channel -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_INPUT_CVALUE; - -// op attr name -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_VALUE1; - -// alpha default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const float ALPHA_DEFAULT_VALUE; - -// beta default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const float BETA_DEFAULT_VALUE; - -// coef default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const float COEF_DEFAULT_VALUE; - -// coef value of Relu6 -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const float RELU6_COEF; - -// stride default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t STRIDE_DEFAULT_VALUE; - -// pad default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t PAD_DEFAULT_VALUE; - -// dilation default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int DILATION_DEFAULT_VALUE; - -// kernel default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t KERNEL_DEFAULT_VALUE; - -// default conv Group Size -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t DEFAULT_CONV_GROUP; - -// default deconv adj -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t DEFAULT_DECONV_ADJ; - -// indicate num 1 -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NUM_ONE; - -// dim default size value -static const int32_t DIM_DEFAULT_SIZE = 4; - -// the shape of c must be the mutiply of 16 for depthwise -static const uint32_t DEPTHWISE_DIM_C_BASE_NUM = 16; - -// C1HWNCoC0 dim size -static const int32_t DIM_C1HWNCoC0_SIZE = 6; -// C1HWNCoC0 C0 value -static const int C1HWCOC_C0_VALUE = 16; -// spatial default dim size -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int32_t SPATIAL_DIM_DEFAULT_SIZE; - -// dim extension default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int32_t DIM_DEFAULT_VALUE; - -// the first item in the weight list of opdef is filter -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int32_t WEIGHT_FILTER_INDEX; - -// the second item in the weight list of opdef is bias. 
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int32_t WEIGHT_BIAS_INDEX; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int32_t TENSOR_ND_SUPPORT_SIZE; - -// default NCHW index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NCHW_DIM_N; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NCHW_DIM_C; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NCHW_DIM_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NCHW_DIM_W; - -// default C1HWNCoC0 index -static const uint32_t C1HWNCoC0_DIM_C1 = 0; -static const uint32_t C1HWNCoC0_DIM_H = 1; -static const uint32_t C1HWNCoC0_DIM_W = 2; -static const uint32_t C1HWNCoC0_DIM_N = 3; -static const uint32_t C1HWNCoC0_DIM_Co = 4; -static const uint32_t C1HWNCoC0_DIM_C0 = 5; - -// default KCHW index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t KCHW_DIM_K; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t KCHW_DIM_C; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t KCHW_DIM_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t KCHW_DIM_W; - -// default HWCK index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t HWCK_DIM_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t HWCK_DIM_W; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t HWCK_DIM_C; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t HWCK_DIM_K; - -// default NHWC index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NHWC_DIM_N; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NHWC_DIM_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NHWC_DIM_W; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t NHWC_DIM_C; - -// default CHWN index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CHWN_DIM_N; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CHWN_DIM_C; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CHWN_DIM_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CHWN_DIM_W; - -// default CHW index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CHW_DIM_C; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CHW_DIM_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t CHW_DIM_W; - -// default HWC index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t HWC_DIM_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t HWC_DIM_W; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t HWC_DIM_C; -// default Pad index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t PAD_H_HEAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t PAD_H_TAIL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t PAD_W_HEAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t PAD_W_TAIL; - -// default window index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t WINDOW_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t WINDOW_W; - -// default stride index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t STRIDE_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern 
const uint32_t STRIDE_W; - -// default dilation index -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t DILATION_H; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t DILATION_W; - -// the num of XRBG channel -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t XRGB_CHN_NUM; - -// default tensor format -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int DEFAULT_FORMAT; - -// default global pooling -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const bool DEFAULT_GLOBAL_POOLING; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_VERSION; // model version 1.0 - -// Number of inputs of the Eltwise operator -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const int ELTWISE_MIN_INPUT_SIZE; - -// flowctrl -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_STREAM_SWITCH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_STREAM_ACTIVE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_FLOWCTRL_LOOP_PER_ITER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_FLOWCTRL_LOOP_COND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_FLOWCTRL_LOOP_INCREMENT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_FLOWCTRL_LOOP_RESETVALUE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_FLOWCTRL_LOOP_ASSIGNADD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_FLOWCTRL_LOOP_ASSIGN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_ATOMIC_ADDR_CLEAN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t TRUE_STREAM_ID; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t STREAM_SWITCH_INPUT_NUM; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_GLOBAL_STEP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_GLOBAL_STEP_ASSIGNADD; - -static const int PLATFORM_VERSION_LEN = 20; - -// Definition of the file header of the model file -struct ModelFileHeader { - uint32_t magic = MODEL_FILE_MAGIC_NUM; // magic number of DOMI - uint32_t headsize = MODEL_FILE_HEAD_LEN; // length of the model header. The value is fixed at 256 - uint32_t version = MODEL_VERSION; // version 1.0 - uint8_t checksum[MODEL_FILE_CHECKSUM_LENGTH] = {0}; // signature - uint32_t length = 0; // Ciphertext length. In the non-encryption model, the length is the plaintext length. - uint8_t is_encrypt = ModelEncryptType::UNENCRYPTED; // whether encrypted 0:not encrypt, 1:encrypt - uint8_t is_checksum = ModelCheckType::CHECK; // whether to check the checksum - uint8_t modeltype = 0; // 0:IR model 1:standard model 2: OM Tiny model - uint8_t genmode = 0; // 0:offline generate 1:online generate - uint8_t name[MODEL_NAME_LENGTH] = {0}; // Model name, which contains 32 characters - uint32_t ops = 0; // Computing power (Kops) - uint8_t userdefineinfo[USER_DEFINE_INFO_LENGTH] = {0}; // User-defined information. 
The value contains 32 characters - uint32_t om_ir_version = 0; - uint32_t model_num = 0; - uint8_t platform_version[PLATFORM_VERSION_LEN] = {0}; - uint8_t platform_type = {0}; - uint8_t reserved[MODEL_FILE_RESERVED_LENGTH] = {0}; // Reserved field 75 -}; - -static constexpr uint8_t TARGET_TYPE_LTTE_8BIT = 0; -static constexpr uint8_t TARGET_TYPE_MINI_8BIT = 1; -static constexpr uint8_t TARGET_TYPE_TINY_8BIT = 2; - -static constexpr int32_t PARTITION_TYPE_MODEL_DEF = 0; -static constexpr int32_t PARTITION_TYPE_WEIGHTS = 1; -static constexpr int32_t PARTITION_TYPE_TASK_INFO = 2; - -// number of partitions in the current model -static constexpr uint32_t PARTITION_SIZE = 5; - -enum ModelPartitionType { MODEL_DEF = 0, WEIGHTS_DATA, TASK_INFO, TBE_KERNELS, CUST_AICPU_KERNELS }; - -struct ModelPartitionMemInfo { - ModelPartitionType type; - uint32_t mem_offset; - uint32_t mem_size; -}; - -struct ModelPartitionTable { - uint32_t num; - ModelPartitionMemInfo partition[0]; -}; - -#define SIZE_OF_MODEL_PARTITION_TABLE(table) (sizeof(ModelPartitionTable) + sizeof(ModelPartitionMemInfo) * (table).num) - -static constexpr int32_t PTHREAD_CREAT_SUCCESS = 0; // pthread_creat success - -// Filter format -typedef enum tagDomiFilterFormat { - DOMI_FILTER_KCHW, // KCHW - DOMI_FILTER_HWCK, // HWCK - DOMI_FILTER_RESERVED -} domiFilterFormat_t; - -// Const data trans type -typedef enum tagDomiConstDataTransType { - DOMI_CONST_DATA_NOT_CHANGE = 0, // No action is required - DOMI_CONST_DATA_TRANS_MATMUL, // The const input to MatMul and needs to be transposed - DOMI_CONST_DATA_RESERVED -} domiConstDataTransType_t; - -// mode of activation -typedef enum tagDomiActivationMode { - DOMI_ACTIVATION_SIGMOID = 0, // sigmoid - DOMI_ACTIVATION_RELU, // ReLU - DOMI_ACTIVATION_TANH, // tanh - DOMI_ACTIVATION_CLIPPED_RELU, // clipped ReLU - DOMI_ACTIVATION_ELU, // ELU - DOMI_ACTIVATION_LEAKY_RELU, - DOMI_ACTIVATION_ABS, // Abs - DOMI_ACTIVATION_RELU1, // relu1 - DOMI_ACTIVATION_SOFTSIGN, // softsign - DOMI_ACTIVATION_SOFTPLUS, // softplus - DOMI_ACTIVATION_HARDSIGMOID, // hardsigmoid - DOMI_ACTIVATION_THRESHOLD_RELU, // threshold - DOMI_ACTIVATION_SELU, // selu - DOMI_ACTIVATION_LINEAR, // linear - DOMI_ACTIVATION_RESERVED -} domiActivationMode_t; - -// mode of batchnorm -typedef enum tagDomiBatchNormMode { - DOMI_BATCHNORM_PER_ACTIVATION = 0, // bnScale, bnBias tensor dims are 1xCxHxW - DOMI_BATCHNORM_SPATIAL, // bnScale, bnBias tensor dims are 1xCx1x1 - DOMI_BATCHNORM_RESERVED -} domiBatchNormMode_t; - -// eltwise mode -typedef enum tagDomiEltwiseMode { - DOMI_ELTWISE_PROD = 0, // prod - DOMI_ELTWISE_SUM, // sum - DOMI_ELTWISE_MAX, // max - DOMI_ELTWISE_RESERVED -} domiEltwiseMode_t; - -// mode of padding -typedef enum tagDomiPaddingMode { - DOMI_PADDING_CEIL = 0, // Default padding mode - DOMI_PADDING_DIRECTASSIGN, // Default padding mode: NOTSET - DOMI_PADDING_VALID, // VALID padding mode - DOMI_PADDING_SAME, // Padding values of 0 are always used - DOMI_PADDING_CEIL_NEW, // Padding values of 0 are always used - DOMI_PADDING_VALID_NEW, // Padding values of 0 are always used - DOMI_PADDING_SAME_NEW, // Padding values of 0 are always used - DOMI_PADDING_RESERVED -} domiPaddingMode_t; - -// algorithm of convolution forward -typedef enum tagDomiConvolutionFwdAlgo { - DOMI_CONVOLUTION_FWD_ALGO_GEMM = 0, // matrix gemm algo - DOMI_CONVOLUTION_FWD_ALGO_WINOGRAD, // Winograd Transform algo - DOMI_CONVOLUTION_FWD_ALGO_GEMM_ACCU_FLOAT32, // accumulate in L0c with FP32 - DOMI_CONVOLUTION_FWD_ALGO_RESERVED -} 
domiConvolutionFwdAlgo_t; - -typedef enum tagDomiFullConnectFwdAlgo { - DOMI_FULLCONNECT_FWD_ALGO_HALF = 0, // accumulate in L0c with FP16 - DOMI_FULLCONNECT_FWD_ALGO_FLOAT32 // accumulate in L0c with FP32 -} domiFullConnectFwdAlgo_t; - -typedef enum tagDomiPooingFwdAlgo { - DOMI_POOLING_FWD_ALGO_HALF = 0, // accumulate in L0c with FP16 - DOMI_POOLING_FWD_ALGO_FLOAT32 // accumulate in L0c with FP32 -} domiPooingFwdAlgo_t; - -// mode of convolution -typedef enum tagDomiConvolutionMode { - DOMI_CONV_CONVOLUTION = 0, // math convolution - DOMI_CONV_CROSS_CORRELATION, // cross-correlation convolution - DOMI_CONV_DECONVOLUTION, // deconvolution, also named transposed convolution - DOMI_CONV_MODE_DEPTHWISE, // depthwise convolution - DOMI_CONV_MODE_RESERVED -} domiConvolutionMode_t; - -// softmax mode -typedef enum tagDomiSoftmaxMode { - DOMI_SOFTMAX_MODE_INSTANCE = 0, // compute the softmax over all C, H, W for each N - DOMI_SOFTMAX_MODE_CHANNEL, // compute the softmax over all C for each H, W, N - DOMI_SOFTMAX_MODE_HEIGHT, // compute the softmax over all H for each N, C, W - DOMI_SOFTMAX_MODE_WIDTH, // compute the softmax over all W for each N, C, H - DOMI_SOFTMAX_MODE_RESERVED -} domiSoftmaxMode_t; - -// softmax algorithm -typedef enum tagDomiSoftmaxAlgo { - DOMI_SOFTMAX_FAST = 0, // straightforward implementation - DOMI_SOFTMAX_ACCURATE, // subtract max from every point to avoid overflow - DOMI_SOFTMAX_LOG, // perform the Log softmax operation to avoid overflow - DOMI_SOFTMAX_ACCURATE_FP32, - DOMI_SOFTMAX_RESERVED -} domiSoftmaxAlgo_t; - -// algorithm of convolution backward -typedef enum tagDomiConvolutionBwdAlgo { - DOMI_CONVOLUTION_BWD_ALGO_GEMM = 0, // matrix gemm algo - DOMI_CONVOLUTION_BWD_ALGO_WINOGRAD, // Winograd Transform algo - DOMI_CONVOLUTION_BWD_ALGO_RESERVED -} domiConvolutionBwdAlgo_t; - -// mode of pooling -typedef enum tagDomiPoolingMode { - DOMI_POOLING_MAX = 0, // max pooling - DOMI_POOLING_AVG, // average pooling - DOMI_POOLING_L2, // L2 pooling - DOMI_POOLING_RESERVED -} domiPoolingMode_t; - -// propagate Nan -typedef enum tagDomiNanPropagation { - DOMI_NAN_NOT_PROPAGATE = 0, // Nan numbers are not propagated - DOMI_NAN_PROPAGATE, // Nan numbers are propagated - DOMI_NAN_PROPAGATE_RESERVED -} domiNanPropagation_t; - -// mode of cropandresize -typedef enum tagDomiCropAndResizeMode { - DOMI_RESIZE_METHOD_BILINEAR = 0, // resize bilinear - DOMI_RESIZE_METHOD_NEAREST, // resize nearest - DOMI_RESIZE_RESERVED -} domiCropAndResizeMode_t; - -// yolo version -typedef enum tagDomiYoloVersion { DOMI_YOLO_V2 = 1, DOMI_YOLO_V3, DOMI_YOLO_TRSERVED } domiYoloVersion_t; - -typedef enum tagDomiRNNScopePassType { - DOMI_STATIC_BIDIRECTIONAL_RNN_GENERAL_PASS = 0, - DOMI_DYNAMIC_BIDIRECTIONAL_RNN_GENERAL_PASS, - DOMI_DYNAMIC_BIDIRECTIONAL_RNN_BIDAF_PASS -} domiRNNScopePassType; - -// RNNDataLayout -typedef enum tagDomiRNNDataLayout { - DOMI_RNN_ND_TBX = 0, // data[max_time,batch_size,Xt] - DOMI_RNN_ND_BTX, // data[batch_size,max_time,Xt] - DOMI_RNN_5D_TX1BX, // data[max_time,Xt,1,batch_size,Xt] - DOMI_RNN_5D_BX1TX, // dataa[batch_size,Xt,1,max_time,Xt] - DOMI_RNN_4DTBX1, - DOMI_ENN_DL_RESERVED -} domiRNNDataLayout_t; - -// RNNInputMode -typedef enum tagDomiRNNInputMode { DOMI_RNN_LINEAR_INPUT = 0, DOMI_RNN_SKIP_INPUT } domiRNNInputMode_t; - -// RNNDirectionMode -typedef enum tagDomiRNNDirectionMode { DOMI_RNN_UNIDIRECTIONAL = 0, DOMI_RNN_BIDIRECTIONAL } domiDirectionMode_t; - -typedef enum tagDomiPoolingCeilMode { DOMI_POOLING_FLOOR = 0, DOMI_POOLING_CEIL } domiPoolingCeilMode_t; - -// 
RNNMode -typedef enum tagDomiRNNActivationMode { - DOMI_RNN_ACTIVATION_SIGMOID = 0, // sigmoid - DOMI_RNN_ACTIVATION_TANH, // tanh - DOMI_RNN_ACTIVATION_RELU, // ReLU - DOMI_RNN_ACTIVATION_RELU1, // ReLU1 - DOMI_RNN_ACTIVATION_RELU6, // ReLU6 - DOMI_RNN_ACTIVATION_RESERVED -} domiRNNActivationMode_t; - -typedef enum tagDomiRNNLSTMOutMode { - DOMI_RNN_LSTM_OUT_SEPARATE = 0, - DOMI_RNN_LSTM_OUT_CONCAT, - DOMI_RNN_LSTM_OUT_RESERVED -} domiRNNLSTMOutPutMode_t; -typedef enum tagDomiRNNLSTMStateOutMode { - DOMI_RNN_LSTM_STATE_OUT_SEPARATE = 0, - DOMI_RNN_LSTM_STATE_OUT_CONCAT_ALL, - DOMI_RNN_LSTM_STATE_OUT_RESERVED -} domiRNNLSTMStateOutMode_t; - -typedef enum tagDomiRNNMode { - DOMI_RNN_RELU = 0, - DOMI_RNN_TANH, - DOMI_LSTM, - DOMI_GRU, - DOMI_RNN_MODE_RESERVED -} domiRNNMode_t; - -typedef enum tagDomiResizeBilinearMode { - DOMI_RESIZE_OUTPUT_DIM_BY_ZOOM_FACTOR = 0, // Output dimension specified by zoom factor - DOMI_RESIZE_OUTPUT_DIM_BY_SHRINK_FACTOR, // specified by shrink factor - DOMI_RESIZE_OUTPUT_DIM_EXPLICIT, // specified explicitly - DOMI_RESIZE_OUTPUT_DIM_RESERVED -} domiResizeOutputDimMode_t; - -#pragma pack(1) // single-byte alignment -// DUMP file struct -struct FileHeader { - int32_t Version; // version - int32_t Output_Offset; // output offset address - char Reserved[24] = {0}; // 24 bytes reserved -}; - -struct BasicInfo { - struct FileHeader header; // file header - int32_t stream_id; // stread id - uint64_t start_time; // start time - uint64_t end_time; // end time - uint32_t input_size; // input memory size - uint32_t output_size; // output memory size - uint32_t weight_size; // weight Memory Size - uint32_t workspace_size; // workspace - uint32_t total_size; // total memory size -}; -#pragma pack() // Cancels single-byte alignment -enum class MemorySizeCalcType { NORMAL = 0, ALWAYS_EMPTY }; -} // namespace ge - -namespace domi { -/// @brief Data structure definition related to task sinking -enum BuildMode { - GEN_TASK_WITHOUT_L2FUSION = 3, // Carrying task data (L2 convergence function disabled) - GEN_TASK_WITHOUT_FUSION = 4, // Carrying task data (all convergence functions disabled) - GEN_TASK_WITH_FUSION = 5 // Carrying task data (with UB/L1/L2 enabled for all convergence functions) -}; -} // namespace domi - -#endif // INC_FRAMEWORK_COMMON_TYPES_H_ diff --git a/inc/graphengine/inc/framework/common/util.h b/inc/graphengine/inc/framework/common/util.h deleted file mode 100644 index bd84d0ace..000000000 --- a/inc/graphengine/inc/framework/common/util.h +++ /dev/null @@ -1,424 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
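Before moving on to util.h, a hypothetical walk over a serialized model blob laid out as described by the ModelFileHeader / ModelPartitionTable declarations in types.h above: a fixed-size file header (headsize is fixed at 256), then a partition table whose trailing flexible array holds num ModelPartitionMemInfo entries, then the partition payloads located by their mem_offset/mem_size fields. The struct layouts are restated locally in simplified form so the sketch stands alone; the exact offset semantics and helper names are illustrative assumptions, not the real OM parser.

#include <cstdint>
#include <cstdio>
#include <vector>

struct PartitionMemInfo {
  int32_t type;
  uint32_t mem_offset;
  uint32_t mem_size;
};

struct PartitionTable {
  uint32_t num;
  // PartitionMemInfo partition[0];  // flexible array member in the original header
};

// Mirrors SIZE_OF_MODEL_PARTITION_TABLE: table header plus num trailing entries.
static size_t PartitionTableSize(uint32_t num) {
  return sizeof(PartitionTable) + sizeof(PartitionMemInfo) * num;
}

void DumpPartitions(const std::vector<uint8_t> &model, size_t header_len) {
  if (model.size() < header_len + sizeof(PartitionTable)) {
    return;  // blob too small to contain a partition table
  }
  const uint8_t *table_base = model.data() + header_len;
  const auto *table = reinterpret_cast<const PartitionTable *>(table_base);
  const auto *entries = reinterpret_cast<const PartitionMemInfo *>(table_base + sizeof(PartitionTable));
  const size_t payload_base = header_len + PartitionTableSize(table->num);
  for (uint32_t i = 0U; i < table->num; ++i) {
    // Assumed here: each partition's data starts at payload_base + mem_offset and spans mem_size bytes.
    std::printf("partition %u: type=%d offset=%zu size=%u\n", i, entries[i].type,
                payload_base + entries[i].mem_offset, entries[i].mem_size);
  }
}

// e.g. DumpPartitions(blob, 256) when the header length matches MODEL_FILE_HEAD_LEN above.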
- */ - -#ifndef INC_FRAMEWORK_COMMON_UTIL_H_ -#define INC_FRAMEWORK_COMMON_UTIL_H_ - -#include -#include -#include -#include -#include -#include - -#include "framework/common/debug/ge_log.h" -#include "framework/common/debug/log.h" -#include "framework/common/scope_guard.h" -#include "framework/common/ge_inner_error_codes.h" -#include "mmpa/mmpa_api.h" - -#define GE_CHECK_POSITIVE_SIZE_RANGE(size) \ - do { \ - if (size <= 0) { \ - DOMI_LOGE("param[%s] is not a positive number", #size); \ - return PARAM_INVALID; \ - } \ - } while (0) - -#define CHECK_FALSE_EXEC(expr, exec_expr, ...) \ - { \ - bool b = (expr); \ - if (!b) { \ - exec_expr; \ - } \ - } - -// new ge marco -// Encapsulate common resource releases -#define GE_MAKE_GUARD_RTMEM(var) \ - GE_MAKE_GUARD(var, [&] { \ - if (var) GE_CHK_RT(rtFreeHost(var)); \ - }); - -#define GE_MAKE_GUARD_RTSTREAM(var) \ - GE_MAKE_GUARD(var, [&] { \ - if (var) GE_CHK_RT(rtStreamDestroy(var)); \ - }); - -// For propagating errors when calling a function. -#define GE_RETURN_IF_ERROR(expr) \ - do { \ - const ::ge::Status _status = (expr); \ - if (_status) return _status; \ - } while (0) - -#define GE_RETURN_WITH_LOG_IF_ERROR(expr, ...) \ - do { \ - const ::ge::Status _status = (expr); \ - if (_status) { \ - DOMI_LOGE(__VA_ARGS__); \ - return _status; \ - } \ - } while (0) - -// check whether the parameter is true. If it is, return FAILED and record the error log -#define GE_RETURN_WITH_LOG_IF_TRUE(condition, ...) \ - do { \ - if (condition) { \ - DOMI_LOGE(__VA_ARGS__); \ - return ge::FAILED; \ - } \ - } while (0) - -// Check if the parameter is false. If yes, return FAILED and record the error log -#define GE_RETURN_WITH_LOG_IF_FALSE(condition, ...) \ - do { \ - bool _condition = (condition); \ - if (!_condition) { \ - DOMI_LOGE(__VA_ARGS__); \ - return ge::FAILED; \ - } \ - } while (0) - -// Checks whether the parameter is true. If so, returns PARAM_INVALID and records the error log -#define GE_RT_PARAM_INVALID_WITH_LOG_IF_TRUE(condition, ...) \ - do { \ - if (condition) { \ - DOMI_LOGE(__VA_ARGS__); \ - return ge::PARAM_INVALID; \ - } \ - } while (0) - -// Check if the parameter is false. If yes, return PARAM_INVALID and record the error log -#define GE_RT_PARAM_INVALID_WITH_LOG_IF_FALSE(condition, ...) \ - do { \ - bool _condition = (condition); \ - if (!_condition) { \ - DOMI_LOGE(__VA_ARGS__); \ - return ge::PARAM_INVALID; \ - } \ - } while (0) - -// Check if the parameter is null. If yes, return PARAM_INVALID and record the error -#define GE_CHECK_NOTNULL(val) \ - do { \ - if (val == nullptr) { \ - REPORT_INNER_ERROR("E19999", "Param:%s is nullptr, check invalid", #val); \ - DOMI_LOGE("[Check][Param:%s]null is invalid.", #val); \ - return ge::PARAM_INVALID; \ - } \ - } while (0) - -// Check if the parameter is null. If yes, just return and record the error -#define GE_CHECK_NOTNULL_JUST_RETURN(val) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - return; \ - } \ - } while (0) - -// Check whether the parameter is null. If so, execute the exec_expr expression and record the error log -#define GE_CHECK_NOTNULL_EXEC(val, exec_expr) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - exec_expr; \ - } \ - } while (0) - -// Check whether the parameter is null. 
If yes, return directly and record the error log -#define GE_RT_VOID_CHECK_NOTNULL(val) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - return; \ - } \ - } while (0) - -// Check if the parameter is null. If yes, return false and record the error log -#define GE_RT_FALSE_CHECK_NOTNULL(val) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - return false; \ - } \ - } while (0) - -// Check if the parameter is out of bounds -#define GE_CHECK_SIZE(size) \ - do { \ - if (size == 0) { \ - DOMI_LOGE("param[%s] is out of range", #size); \ - return ge::PARAM_INVALID; \ - } \ - } while (0) - -// Check if the value on the left is greater than or equal to the value on the right -#define GE_CHECK_GE(lhs, rhs) \ - do { \ - if (lhs < rhs) { \ - DOMI_LOGE("param[%s] is less than[%s]", #lhs, #rhs); \ - return ge::PARAM_INVALID; \ - } \ - } while (0) - -// Check if the value on the left is less than or equal to the value on the right -#define GE_CHECK_LE(lhs, rhs) \ - do { \ - if (lhs > rhs) { \ - DOMI_LOGE("param[%s] is greater than[%s]", #lhs, #rhs); \ - return ge::PARAM_INVALID; \ - } \ - } while (0) - -#define GE_DELETE_NEW_SINGLE(var) \ - do { \ - if (var != nullptr) { \ - delete var; \ - var = nullptr; \ - } \ - } while (0) - -#define GE_DELETE_NEW_ARRAY(var) \ - do { \ - if (var != nullptr) { \ - delete[] var; \ - var = nullptr; \ - } \ - } while (0) - -#define GE_FREE_RT_LOG(addr) \ - do { \ - if (addr != nullptr) { \ - rtError_t error = rtFree(addr); \ - if (error != RT_ERROR_NONE) { \ - GELOGE(RT_FAILED, "Call rtFree failed, error: %#x", error); \ - } \ - addr = nullptr; \ - } \ - } while (0) - -/** - * @ingroup domi_common - * @brief version of om.proto file - */ -static constexpr int32_t OM_PROTO_VERSION = 2; - -/** - * Finding an Integer Ceiling Value Without Precision Loss - */ -#define CEIL(N, n) (((N) + (n)-1) / (n)) - -namespace ge { -using google::protobuf::Message; - -/// -/// @ingroup domi_common -/// @brief Maximum file path length -/// -const int32_t DOMI_MAX_PATH_LEN = 256; - -/// -/// @ingroup domi_common -/// @brief proto file in bianary format -/// @param [in] file path of proto file -/// @param [out] proto memory for storing the proto file -/// @return true success -/// @return false fail -/// -GE_FUNC_VISIBILITY bool ReadProtoFromBinaryFile(const char *file, Message *proto); - -/// -/// @ingroup domi_common -/// @brief Reads the proto structure from an array. -/// @param [in] data proto data to be read -/// @param [in] size proto data size -/// @param [out] proto Memory for storing the proto file -/// @return true success -/// @return false fail -/// -GE_FUNC_VISIBILITY bool ReadProtoFromArray(const void *data, int size, Message *proto); - -/// -/// @ingroup domi_proto -/// @brief Reads the proto file in the text format. -/// @param [in] file path of proto file -/// @param [out] message Memory for storing the proto file -/// @return true success -/// @return false fail -/// -GE_FUNC_VISIBILITY bool ReadProtoFromText(const char *file, google::protobuf::Message *message); - -GE_FUNC_VISIBILITY bool ReadProtoFromMem(const char *data, int size, google::protobuf::Message *message); - -/// -/// @ingroup: domi_common -/// @brief: get length of file -/// @param [in] input_file: path of file -/// @return long: File length. If the file length fails to be obtained, the value -1 is returned. 
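The status/null-check macros above are typically used at the top of functions that return ge::Status. A compilable sketch of that call pattern follows; the logging dependencies (DOMI_LOGE, REPORT_INNER_ERROR) are stubbed out and the Status values and function names are placeholders, so only the control flow mirrors the real macros.

#include <cstdint>
#include <cstdio>

namespace ge {
using Status = uint32_t;
constexpr Status SUCCESS = 0U;
constexpr Status PARAM_INVALID = 1U;
}  // namespace ge

#define DOMI_LOGE(...) std::fprintf(stderr, __VA_ARGS__)

#define GE_CHECK_NOTNULL(val)                    \
  do {                                           \
    if ((val) == nullptr) {                      \
      DOMI_LOGE("param[%s] is nullptr\n", #val); \
      return ge::PARAM_INVALID;                  \
    }                                            \
  } while (0)

#define GE_RETURN_IF_ERROR(expr)                \
  do {                                          \
    const ge::Status _status = (expr);          \
    if (_status != ge::SUCCESS) return _status; \
  } while (0)

ge::Status LoadStep(const int *config) { return (config == nullptr) ? ge::PARAM_INVALID : ge::SUCCESS; }

ge::Status LoadModel(const int *config) {
  GE_CHECK_NOTNULL(config);              // bail out with PARAM_INVALID on a null pointer
  GE_RETURN_IF_ERROR(LoadStep(config));  // propagate any non-success status upward
  return ge::SUCCESS;
}

int main() {
  int cfg = 0;
  return (LoadModel(&cfg) == ge::SUCCESS) ? 0 : 1;
}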
-/// -GE_FUNC_VISIBILITY extern long GetFileLength(const std::string &input_file); - -/// -/// @ingroup domi_common -/// @brief Reads all data from a binary file. -/// @param [in] file_name path of file -/// @param [out] buffer Output memory address, which needs to be released by the caller. -/// @param [out] length Output memory size -/// @return false fail -/// @return true success -/// -GE_FUNC_VISIBILITY bool ReadBytesFromBinaryFile(const char *file_name, char **buffer, int &length); - -GE_FUNC_VISIBILITY bool ReadBytesFromBinaryFile(const char *file_name, std::vector &buffer); - -/// -/// @ingroup domi_common -/// @brief Recursively Creating a Directory -/// @param [in] directory_path Path, which can be a multi-level directory. -/// @return 0 success -/// @return -1 fail -/// -GE_FUNC_VISIBILITY extern int CreateDirectory(const std::string &directory_path); - -/// -/// @ingroup domi_common -/// @brief Obtains the current time string. -/// @return Time character string in the format : %Y%m%d%H%M%S, eg: 20171011083555 -/// -GE_FUNC_VISIBILITY std::string CurrentTimeInStr(); - -/// -/// @ingroup domi_common -/// @brief onverts Vector of a number to a string. -/// @param [in] v Vector of a number -/// @return string -/// -template -GE_FUNC_VISIBILITY std::string ToString(std::vector &v) { - std::stringstream ss; - ss << "["; - for (T x : v) { - ss << x; - ss << ", "; - } - std::string strRet = - ss.str().substr(0, ss.str().length() - 2); // Delete the two extra characters at the end of the line. - strRet += "]"; - return strRet; -} - -/// -/// @ingroup domi_common -/// @brief Converts RepeatedField to String. -/// @param [in] rpd_field RepeatedField -/// @return string -/// -template -GE_FUNC_VISIBILITY std::string ToString(const google::protobuf::RepeatedField &rpd_field) { - std::stringstream ss; - ss << "["; - for (T x : rpd_field) { - ss << x; - ss << ", "; - } - std::string strRet = - ss.str().substr(0, ss.str().length() - 2); // Delete the two extra characters at the end of the line. - strRet += "]"; - return strRet; -} - -/// -/// @ingroup domi_common -/// @brief Obtains the absolute time (timestamp) of the current system. -/// @return Timestamp, in microseconds (US) -/// -/// -GE_FUNC_VISIBILITY uint64_t GetCurrentTimestamp(); - -/// -/// @ingroup domi_common -/// @brief Obtains the absolute time (timestamp) of the current system. -/// @return Timestamp, in seconds (US) -/// -/// -GE_FUNC_VISIBILITY uint32_t GetCurrentSecondTimestap(); - -/// -/// @ingroup domi_common -/// @brief Check whether the product of two int64 numbers exceeds the int64 range. -/// @param [in] a -/// @param [in] b -/// @return false: true: The result is within the normal int64 range. -/// -GE_FUNC_VISIBILITY bool CheckInt64MulOverflow(int64_t a, int64_t b); - -/// -/// @ingroup domi_common -/// @brief Absolute path for obtaining files. -/// @param [in] path of input file -/// @param [out] Absolute path of a file. If the absolute path cannot be obtained, an empty string is returned -/// -GE_FUNC_VISIBILITY std::string RealPath(const char *path); - -/// -/// @ingroup domi_common -/// @brief Check whether the specified input file path is valid. -/// 1. The specified path cannot be empty. -/// 2. The path can be converted to an absolute path. -/// 3. The file path exists and is readable. 
-/// @param [in] file_path path of input file -/// @param [out] result -/// -GE_FUNC_VISIBILITY bool CheckInputPathValid(const std::string &file_path, const std::string &atc_param = ""); - -/// -/// @ingroup domi_common -/// @brief Checks whether the specified output file path is valid. -/// @param [in] file_path path of output file -/// @param [out] result -/// -GE_FUNC_VISIBILITY bool CheckOutputPathValid(const std::string &file_path, const std::string &atc_param = ""); - -/// -/// @ingroup domi_common -/// @brief Check whether the file path meets the whitelist verification requirements. -/// @param [in] filePath file path -/// @param [out] result -/// -GE_FUNC_VISIBILITY bool ValidateStr(const std::string &filePath, const std::string &mode); - -/// -/// @ingroup domi_common -/// @brief Check whether the file is normal file. -/// @param [in] file_path file path -/// @param [out] result -/// -GE_FUNC_VISIBILITY bool IsValidFile(const char *file_path); - -/// -/// @ingroup domi_common -/// @brief Check path invalid -/// @param [in] path, path to be checked -/// @param [in] length, length of path -/// @return 0 success -/// @return -1 fail -/// -GE_FUNC_VISIBILITY Status CheckPath(const char *path, size_t length); -} // namespace ge - -#endif // INC_FRAMEWORK_COMMON_UTIL_H_ diff --git a/inc/graphengine/inc/framework/engine/dnnengine.h b/inc/graphengine/inc/framework/engine/dnnengine.h deleted file mode 100644 index 8a0f3b65b..000000000 --- a/inc/graphengine/inc/framework/engine/dnnengine.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_FRAMEWORK_ENGINE_DNNENGINE_H_ -#define INC_FRAMEWORK_ENGINE_DNNENGINE_H_ - -#include -#include -#include - -#include "common/ge_inner_error_codes.h" -#include "common/ge_types.h" -#include "graph/types.h" - -namespace ge { -enum PriorityEnum { - COST_0 = 0, - COST_1, - COST_2, - COST_3, - COST_9 = 9, - COST_10 = 10, -}; - -struct DNNEngineAttribute { - std::string engine_name; - std::vector mem_type; - uint32_t compute_cost; - enum RuntimeType runtime_type; // HOST, DEVICE - // If engine input format must be specific, set this attribute, else set FORMAT_RESERVED - Format engine_input_format; - Format engine_output_format; -}; - -class GE_FUNC_VISIBILITY DNNEngine { - public: - virtual ~DNNEngine() = default; - virtual Status Initialize(const std::map &options) = 0; - virtual Status Finalize() = 0; - virtual void GetAttributes(DNNEngineAttribute &attr) const = 0; -}; -} // namespace ge - -#endif // INC_FRAMEWORK_ENGINE_DNNENGINE_H_ diff --git a/inc/graphengine/inc/framework/executor/ge_executor.h b/inc/graphengine/inc/framework/executor/ge_executor.h deleted file mode 100644 index fcca561c9..000000000 --- a/inc/graphengine/inc/framework/executor/ge_executor.h +++ /dev/null @@ -1,298 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
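// [Editorial sketch, not part of the deleted header] A minimal concrete engine built on
// the DNNEngine interface declared above. The std::map<std::string, std::string> option
// type is an assumption (template arguments were lost in this listing), MyCustomEngine is
// a hypothetical name, and the attribute values are placeholders.
#include <map>
#include <string>

class MyCustomEngine : public ge::DNNEngine {
 public:
  ge::Status Initialize(const std::map<std::string, std::string> &options) override {
    (void)options;  // a real engine would parse its options here
    return ge::SUCCESS;
  }
  ge::Status Finalize() override { return ge::SUCCESS; }
  void GetAttributes(ge::DNNEngineAttribute &attr) const override {
    attr.engine_name = "MyCustomEngine";
    attr.compute_cost = ge::COST_2;                  // relative cost from PriorityEnum above
    attr.runtime_type = ge::DEVICE;                  // per the HOST/DEVICE note above
    attr.engine_input_format = ge::FORMAT_RESERVED;  // no specific input format required
    attr.engine_output_format = ge::FORMAT_RESERVED;
  }
};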
- */ - -#ifndef INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_ -#define INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_ - -#include -#include -#include - -#include "common/dynamic_aipp.h" -#include "common/ge_inner_error_codes.h" -#include "common/ge_types.h" -#include "common/types.h" -#include "graph/tensor.h" -#include "graph/ge_tensor.h" -#include "runtime/base.h" - -namespace ge { -class SingleOp; -class DynamicSingleOp; - -struct RunModelData { - uint32_t index; // Data index - uint32_t modelId; - std::vector blobs; // All input/output data buffer - uint32_t timestamp; // Data creation time - uint32_t timeout; // Processing timeout - uint64_t request_id = 0; // Request ID - uint64_t dynamic_batch_size = 0; // Dynamic batch size scene, set dynamic size, not supported by default:0 - uint64_t dynamic_image_height = 0; // Dynamic image size scene, set image height, not supported by default:0 - uint64_t dynamic_image_width = 0; // Dynamic image size scene, set image width, not supported by default:0 - std::vector dynamic_dims; // Dynamic dims scene, set dynamic dims, not supported by default:empty -}; - -class GE_FUNC_VISIBILITY GeExecutor { - public: - GeExecutor(); - ~GeExecutor() = default; - ge::Status Initialize(); - ge::Status Finalize(); - - ge::Status UnloadModel(uint32_t modelId); - - // Get input and output descriptor - ge::Status GetModelDescInfo(uint32_t model_id, std::vector &input_desc, - std::vector &output_desc, bool new_model_desc = false); - - /// - /// @ingroup ge - /// @brief Set dynamic batch size - /// @param [in] model_id: model id allocate from manager - /// @param [in] dynamic_input_addr: dynamic input addr created by user - /// @param [in] length: length of dynamic input addr - /// @param [in] batch_size: batch size entered by user in dynamic multi-batch scenario - /// @return execute result - /// - ge::Status SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t batch_size); - - /// - /// @ingroup ge - /// @brief Set dynamic image info - /// @param [in] model_id: model id allocate from manager - /// @param [in] dynamic_input_addr: dynamic input addr created by user - /// @param [in] length: length of dynamic input addr - /// @param [in] image_height: image height entered by user in dynamic multi-resolution scenario - /// @param [in] image_width: image width entered by user in dynamic multi-resolution scenario - /// @return execute result - /// - ge::Status SetDynamicImageSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t image_height, - uint64_t image_width); - - /// - /// @ingroup ge - /// @brief Set dynamic dims info - /// @param [in] model_id: model id allocate from manager - /// @param [in] dynamic_input_addr: dynamic input addr created by user - /// @param [in] length: length of dynamic input addr - /// @param [in] dynamic_dim_num: number of dynamic dimension - /// @param [in] dynamic_dims: array of dynamic dimensions - /// @return execute result - /// - ge::Status SetDynamicDims(uint32_t model_id, void *dynamic_input_addr, uint64_t length, - const std::vector &dynamic_dims); - - /// - /// @ingroup ge - /// @brief Get current dynamic dims info by combined dims - /// @param [in] model_id: model id allocate from manager - /// @param [in] dynamic_dims: cur gear dynamic dims value - /// @param [out] cur_dynamic_dims: current dynamic dims - /// @return execute result - /// - ge::Status GetCurDynamicDims(uint32_t model_id, const std::vector &dynamic_dims, - std::vector &cur_dynamic_dims); - - /// - /// @ingroup ge - /// 
@brief Get dynamic batch_info - /// @param [in] model_id - /// @param [out] batch_info - /// @param [out] dynamic_type - /// @return execute result - /// - ge::Status GetDynamicBatchInfo(uint32_t model_id, std::vector> &batch_info, - int32_t &dynamic_type); - - /// - /// @ingroup ge - /// @brief Get combined dynamic dims info - /// @param [in] model_id - /// @param [out] batch_info - /// @return execute result - /// - ge::Status GetCombinedDynamicDims(uint32_t model_id, std::vector> &batch_info); - - /// - /// @ingroup ge - /// @brief Get user designeate shape order - /// @param [in] model_id - /// @param [out] user_designate_shape_order - /// @return execute result - /// - ge::Status GetUserDesignateShapeOrder(uint32_t model_id, std::vector &user_designate_shape_order); - - ge::Status GetCurShape(const uint32_t model_id, std::vector &batch_info, int32_t &dynamic_type); - - /// - /// @ingroup ge - /// @brief Set dynamic image info - /// @param [in] model_id: model id allocate from manager - /// @param [in] dynamic_input_addr: dynamic input addr created by user - /// @param [in] length: length of dynamic input addr - /// @param [in] aippBatchPara: kAippDynamicBatchPara vector by user in dynamic aipp - /// @param [in] aippParms: kAippDynamicPara by user in dynamic aipp - /// @return execute result - /// - ge::Status SetDynamicAippData(uint32_t model_id, void *dynamic_input_addr, uint64_t length, - const std::vector &aippBatchPara, - const kAippDynamicPara &aippParms); - - ge::Status GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info); - - ge::Status GetOpAttr(uint32_t model_id, const std::string &op_name, const std::string &attr_name, - std::string &attr_value); - - ge::Status GetModelAttr(uint32_t model_id, std::vector &dynamic_output_shape_info); - - ge::Status GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index); - - ge::Status CommandHandle(const ge::Command &command); - - ge::Status SetDump(const DumpConfig &dump_config); - - /// - /// @ingroup ge - /// @brief Query model memory consuming interface - /// @param [in] model_id Offline model ID - /// @param [out] max_size Memory size - /// @return SUCCESS - /// @return FAILED - /// - ge::Status GetMaxUsedMemory(uint32_t model_id, uint32_t &max_size); - - /// - /// @ingroup ge - /// @brief Load data from model file to memory - /// @param [in] const std::string &path: Offline model file path - /// @param [out] ModelData &model_data: Offline model memory data - /// @return SUCCESS handle successfully / others handle failed - /// - ge::Status LoadDataFromFile(const std::string &path, ge::ModelData &model_data); - - /// - /// @ingroup ge - /// @brief Load model from offline model memory data - /// @param [in] ModelData &model_data: Offline model data - /// @param [in] void *dev_ptr: Input/Output memory address - /// @param [in] size_t mem_size: Input/Output memory length - /// @param [in] void *weight_ptr: Weight memory address - /// @param [in] size_t weight_size: Weight memory length - /// @param [out] uint32_t &model_id: Corresponding identification after model loading - /// @return SUCCESS handle successfully / others handle failed - /// - ge::Status LoadModelFromData(uint32_t &model_id, const ge::ModelData &model_data, void *dev_ptr, size_t mem_size, - void *weight_ptr, size_t weight_size); - - /// - /// @ingroup ge - /// @brief Load task list from ModelData with queue. - /// @param [out] model_id: model id allocate from manager. 
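// [Editorial sketch, not part of the deleted header] A minimal offline-model round trip
// with the GeExecutor methods of this class: LoadDataFromFile -> LoadModelFromData ->
// ExecModel (declared a little further below) -> UnloadModel. Passing null device/weight
// pointers and a null stream is an assumption here; error handling is reduced to early
// returns, and RunOfflineModel is a hypothetical helper name.
ge::Status RunOfflineModel(const std::string &om_path, const ge::RunModelData &inputs,
                           ge::RunModelData &outputs) {
  ge::GeExecutor executor;
  if (executor.Initialize() != ge::SUCCESS) {
    return ge::FAILED;
  }
  ge::ModelData model_data;
  if (executor.LoadDataFromFile(om_path, model_data) != ge::SUCCESS) {
    return ge::FAILED;
  }
  uint32_t model_id = 0U;
  if (executor.LoadModelFromData(model_id, model_data, nullptr, 0U, nullptr, 0U) != ge::SUCCESS) {
    return ge::FAILED;
  }
  const ge::Status ret = executor.ExecModel(model_id, nullptr, inputs, outputs);
  (void)executor.UnloadModel(model_id);
  (void)executor.Finalize();
  return ret;
}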
- /// @param [in] model_data: Model data load from offline model. - /// @param [in] input_queue_ids: input queue ids create from user. - /// @param [in] output_queue_ids: input queue ids create from user. - /// @return: 0 for success / others for fail - /// - ge::Status LoadModelWithQ(uint32_t &model_id, const ge::ModelData &model_data, - const std::vector &input_queue_ids, - const std::vector &output_queue_ids); - - /// - /// @ingroup ge - /// @brief Synchronous execution of offline model(Do not create thread) - /// @param [in] uint32_t model_id: Model ID to execute - /// @param [in] void* stream: stream to execute - /// @param [in] bool async_mode: is asynchronize mode. - /// @param [in] const domi::InputData *input_data: Model input data - /// @param [out] domi::OutputData *output_data: Model output data - /// @return SUCCESS handle successfully / others handle failed - /// - ge::Status ExecModel(uint32_t model_id, void *stream, const ge::RunModelData &input_data, - ge::RunModelData &output_data, bool async_mode = false); - - /// - /// @ingroup ge - /// @brief Synchronous execution of offline model(Do not create thread) - /// @param [in] uint32_t model_id: Model ID to execute - /// @param [in] void* stream: stream to execute - /// @param [in] bool async_mode: is asynchronize mode. - /// @param [in] const domi::InputData *input_data: Model input data - /// @param [in] const std::vector &input_desc: description of model input data - /// @param [out] domi::OutputData *output_data: Model output data - /// @param [out] std::vector &output_desc: description of model output data - /// @return SUCCESS handle successfully / others handle failed - /// - ge::Status ExecModel(uint32_t model_id, void *stream, const ge::RunModelData &run_input_data, - const std::vector &input_desc, ge::RunModelData &run_output_data, - std::vector &output_desc, bool async_mode = false); - - /// - /// @ingroup ge - /// @brief Get weight memory size from model file - /// @param [in] const std::string &path: Offline model file path - /// @param [out] size_t &mem_size Execution memory size - /// @param [out] size_t &weight_size Weight memory space size - /// @return SUCCESS handle successfully / others handle failed - /// - ge::Status GetMemAndWeightSize(const std::string &path, size_t &mem_size, size_t &weight_size); - - /// - /// @ingroup ge - /// @brief Get weight memory size from model file - /// @param [in] const void *model_data Offline model buffer - /// @param [in] size_t model_size Offline model buffer length - /// @param [out] size_t &mem_size Execution memory size - /// @param [out] size_t &weight_size Weight memory space size - /// @return SUCCESS handle successfully / others handle failed - /// - ge::Status GetMemAndWeightSize(const void *model_data, size_t model_size, size_t &mem_size, size_t &weight_size); - - static ge::Status LoadSingleOp(const std::string &modelName, const ge::ModelData &modelData, void *stream, - SingleOp **single_op); - - static ge::Status LoadSingleOpV2(const std::string &modelName, const ge::ModelData &modelData, void *stream, - SingleOp **single_op, const uint64_t model_id); - - static ge::Status ExecuteAsync(SingleOp *executor, const std::vector &inputs, - std::vector &outputs); - - static ge::Status LoadDynamicSingleOp(const std::string &model_name, const ge::ModelData &modelData, void *stream, - DynamicSingleOp **single_op); - - static ge::Status LoadDynamicSingleOpV2(const std::string &model_name, const ge::ModelData &modelData, void *stream, - DynamicSingleOp **single_op, const 
uint64_t model_id); - - static ge::Status ExecuteAsync(DynamicSingleOp *executor, const std::vector &input_desc, - const std::vector &inputs, std::vector &output_desc, - std::vector &outputs); - - static ge::Status ReleaseSingleOpResource(void *stream); - - static ge::Status GetDeviceIdByModelId(uint32_t model_id, uint32_t &device_id); - - ge::Status GetBatchInfoSize(uint32_t model_id, size_t &shape_count); - ge::Status GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info); - ge::Status GetAllAippInputOutputDims(uint32_t model_id, uint32_t index, std::vector &input_dims, - std::vector &output_dims); - ge::Status GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info); - - private: - static bool isInit_; -}; -} // namespace ge - -#endif // INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_ diff --git a/inc/graphengine/inc/framework/ge_runtime/davinci_model.h b/inc/graphengine/inc/framework/ge_runtime/davinci_model.h deleted file mode 100644 index 91e701591..000000000 --- a/inc/graphengine/inc/framework/ge_runtime/davinci_model.h +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_GE_RUNTIME_DAVINCI_MODEL_H_ -#define INC_FRAMEWORK_GE_RUNTIME_DAVINCI_MODEL_H_ - -#include -#include - -#include "ge_runtime/op_info.h" -#include "ge_runtime/task_info.h" - -namespace ge { -namespace model_runner { -class DavinciModel { - public: - DavinciModel(const std::vector> &task_info_list, - const std::vector> &data_info_list, - const std::vector> &output_info_list, - const std::vector> &constant_info_list, - const std::vector &variable_info_list, - const std::vector &wait_active_stream_list, - const std::vector &force_copy_stream_list, uint64_t mem_size = 0, uint64_t weight_size = 0, - uint64_t var_size = 0, uintptr_t logic_mem_base = 0, uintptr_t logic_weight_base = 0, - uintptr_t logic_var_base = 0, uint32_t stream_num = 0, uint32_t batch_num = 0, uint32_t event_num = 0, - int32_t priority = 0) - : task_info_list_(task_info_list), - data_info_list_(data_info_list), - output_info_list_(output_info_list), - constant_info_list_(constant_info_list), - variable_info_list_(variable_info_list), - wait_active_stream_list_(wait_active_stream_list), - force_copy_stream_list_(force_copy_stream_list), - mem_size_(mem_size), - weight_size_(weight_size), - var_size_(var_size), - logic_mem_base_(logic_mem_base), - logic_weight_base_(logic_weight_base), - logic_var_base_(logic_var_base), - stream_num_(stream_num), - batch_num_(batch_num), - event_num_(event_num), - priority_(priority) {} - ~DavinciModel() {} - - uint64_t GetMemSize() const { return mem_size_; } - uint64_t GetWeightSize() const { return weight_size_; } - uint64_t GetVarSize() const { return var_size_; } - - uintptr_t GetLogicMemBase() const { return logic_mem_base_; } - uintptr_t GetLogicWeightBase() const { return logic_weight_base_; } - uintptr_t GetLogicVarBase() const { 
return logic_var_base_; } - - uint32_t GetStreamNum() const { return stream_num_; } - uint32_t GetBatchNum() const { return batch_num_; } - uint32_t GetEventNum() const { return event_num_; } - - const std::vector &GetWaitActiveStreams() const { return wait_active_stream_list_; } - const std::vector &GetForceCopyStreams() const { return force_copy_stream_list_; } - - int32_t GetPriority() const { return priority_; } - - const std::vector> &GetTaskInfoList() const { return task_info_list_; } - const std::vector> &GetDataInfoList() const { return data_info_list_; } - const std::vector> &GetOutputInfoList() const { return output_info_list_; } - const std::vector> &GetConstantInfoList() const { return output_info_list_; } - const std::vector &GetVariableInfoList() const { return variable_info_list_; } - - private: - std::vector> task_info_list_; - std::vector> data_info_list_; - std::vector> output_info_list_; - std::vector> constant_info_list_; - std::vector variable_info_list_; - - std::vector wait_active_stream_list_; - std::vector force_copy_stream_list_; - - uint64_t mem_size_; - uint64_t weight_size_; - uint64_t var_size_; - - uintptr_t logic_mem_base_; - uintptr_t logic_weight_base_; - uintptr_t logic_var_base_; - - uint32_t stream_num_; - uint32_t batch_num_; - uint32_t event_num_; - - int32_t priority_; - - // Disable to copy constructor and assignment operator - DavinciModel &operator=(const DavinciModel &) = delete; - DavinciModel(const DavinciModel &) = delete; -}; -} // namespace model_runner -} // namespace ge - -#endif // INC_FRAMEWORK_GE_RUNTIME_DAVINCI_MODEL_H_ diff --git a/inc/graphengine/inc/framework/ge_runtime/model_runner.h b/inc/graphengine/inc/framework/ge_runtime/model_runner.h deleted file mode 100644 index e495dfdfd..000000000 --- a/inc/graphengine/inc/framework/ge_runtime/model_runner.h +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_FRAMEWORK_GE_RUNTIME_MODEL_RUNNER_H_ -#define INC_FRAMEWORK_GE_RUNTIME_MODEL_RUNNER_H_ - -#include -#include -#include - -#include "common/ge_inner_error_codes.h" -#include "common/ge_types.h" -#include "ge_runtime/davinci_model.h" - -namespace ge { -namespace model_runner { -class RuntimeModel; -using RuntimeInfo = std::tuple; -class ModelRunner { - public: - static ModelRunner &Instance(); - - bool LoadDavinciModel(uint32_t device_id, uint64_t session_id, uint32_t model_id, - std::shared_ptr davinci_model, std::shared_ptr listener); - - bool DistributeTask(uint32_t model_id); - - bool LoadModelComplete(uint32_t model_id); - - const std::vector &GetTaskIdList(uint32_t model_id) const; - - const std::vector &GetStreamIdList(uint32_t model_id) const; - - const std::map> &GetRuntimeInfoMap(uint32_t model_id) const; - - void *GetModelHandle(uint32_t model_id) const; - - bool UnloadModel(uint32_t model_id); - - bool RunModel(uint32_t model_id, const InputData &input_data, OutputData *output_data); - - bool GetInputOutputDescInfo(uint32_t model_id, bool zero_copy, std::vector *input_desc, - std::vector *output_desc, std::vector *input_format, - std::vector *output_format); - - private: - ModelRunner() = default; - ~ModelRunner() = default; - - std::unordered_map> runtime_models_; -}; -} // namespace model_runner -} // namespace ge - -#endif // INC_FRAMEWORK_GE_RUNTIME_MODEL_RUNNER_H_ diff --git a/inc/graphengine/inc/framework/ge_runtime/op_info.h b/inc/graphengine/inc/framework/ge_runtime/op_info.h deleted file mode 100644 index 22c16ed69..000000000 --- a/inc/graphengine/inc/framework/ge_runtime/op_info.h +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
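// [Editorial sketch, not part of the deleted header] Driving the ModelRunner singleton
// declared above. The element types of the shared_ptr parameters were lost in this
// listing; DavinciModel is taken from the header included above, and passing a null
// listener is an assumption. LoadAndRun is a hypothetical helper name.
bool LoadAndRun(uint32_t device_id, uint64_t session_id, uint32_t model_id,
                const std::shared_ptr<ge::model_runner::DavinciModel> &model,
                const ge::InputData &input, ge::OutputData *output) {
  auto &runner = ge::model_runner::ModelRunner::Instance();
  if (!runner.LoadDavinciModel(device_id, session_id, model_id, model, nullptr)) {
    return false;
  }
  if (!runner.DistributeTask(model_id) || !runner.LoadModelComplete(model_id)) {
    return false;
  }
  const bool ok = runner.RunModel(model_id, input, output);
  (void)runner.UnloadModel(model_id);
  return ok;
}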
- */ - -#ifndef INC_FRAMEWORK_GE_RUNTIME_OP_INFO_H_ -#define INC_FRAMEWORK_GE_RUNTIME_OP_INFO_H_ - -#include -#include -#include - -namespace ge { -namespace model_runner { -struct TensorInfo { - int64_t GetShapeSize() const { - int64_t res = 1; - if (dims.empty()) { - return 0; - } - for (auto dim : dims) { - res *= dim; - } - return res; - } - - int64_t GetDim(uint32_t index) { - if (index >= dims.size()) { - return 0; - } - return dims[index]; - } - - std::vector dims; - uint32_t datatype; - uint32_t format; - uint32_t real_dim_cnt; - uint32_t size; - bool is_output; -}; - -struct OpInfo { - uint32_t index; - std::string name; - std::string type; - bool var_is_broadcast; - std::vector input_addrs; - std::vector output_addrs; - std::vector input_tensors; - std::vector output_tensors; - std::vector weight_tensors; - std::vector src_name; - std::vector src_index; - std::string weight_data; -}; - -using TensorInfoPtr = std::shared_ptr; -using OpInfoPtr = std::shared_ptr; -} // namespace model_runner -} // namespace ge -#endif // INC_FRAMEWORK_GE_RUNTIME_OP_INFO_H_ diff --git a/inc/graphengine/inc/framework/ge_runtime/task_info.h b/inc/graphengine/inc/framework/ge_runtime/task_info.h deleted file mode 100644 index f59c6454e..000000000 --- a/inc/graphengine/inc/framework/ge_runtime/task_info.h +++ /dev/null @@ -1,405 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
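// [Editorial sketch, not part of the deleted header] Filling the TensorInfo struct defined
// above and using its accessors. The element type of `dims` is assumed to be int64_t
// (GetShapeSize() accumulates an int64_t product); datatype/format/size are left
// zero-initialized here rather than guessing real enum codes.
inline ge::model_runner::TensorInfo MakeExampleTensor() {
  ge::model_runner::TensorInfo info{};
  info.dims = {1, 3, 224, 224};
  info.real_dim_cnt = 4U;
  info.is_output = false;
  const int64_t elements = info.GetShapeSize();  // 1 * 3 * 224 * 224
  const int64_t height = info.GetDim(2U);        // 224; out-of-range indices return 0
  (void)elements;
  (void)height;
  return info;
}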
- */ - -#ifndef INC_FRAMEWORK_GE_RUNTIME_TASK_INFO_H_ -#define INC_FRAMEWORK_GE_RUNTIME_TASK_INFO_H_ - -#include -#include -#include -#include -#include - -#include "cce/taskdown_api.h" - -namespace ge { -namespace model_runner { -enum TaskInfoType { - CCE = 0, - TBE, - AICPU, - LABEL_SET, - LABEL_SWITCH, - LABEL_GOTO, - EVENT_RECORD, - EVENT_WAIT, - FUSION_START, - FUSION_END, - HCCL, - PROFILER_TRACE, - MEMCPY_ASYNC, - STREAM_SWITCH, - STREAM_ACTIVE, - // Insert new task type here - REVSERVED = 23 -}; - -class TaskInfo { - public: - virtual ~TaskInfo() {} - uint32_t stream_id() const { return stream_id_; } - TaskInfoType type() const { return type_; } - std::string op_name() const { return op_name_; } - bool dump_flag() const { return dump_flag_; } - - protected: - TaskInfo(const std::string &op_name, uint32_t stream_id, TaskInfoType type, bool dump_flag) - : op_name_(op_name), stream_id_(stream_id), type_(type), dump_flag_(dump_flag) {} - - private: - std::string op_name_; - uint32_t stream_id_; - TaskInfoType type_; - bool dump_flag_; -}; - -class CceTaskInfo : public TaskInfo { - public: - CceTaskInfo(const std::string &op_name, uint32_t stream_id, const cce::ccOpContext &ctx, const std::string &stub_func, - uint32_t block_dim, const std::vector &args, uint32_t args_size, - const std::vector &sm_desc, const std::vector &flow_table, - const std::vector &args_offset, bool is_flowtable) - : TaskInfo(op_name, stream_id, TaskInfoType::CCE, false), - ctx_(ctx), - stub_func_(stub_func), - block_dim_(block_dim), - args_(args), - args_size_(args_size), - sm_desc_(sm_desc), - flow_table_(flow_table), - args_offset_(args_offset), - is_flowtable_(is_flowtable) {} - ~CceTaskInfo() override {} - - cce::ccOpContext cc_context() const { return ctx_; } - std::string stub_func() const { return stub_func_; } - uint32_t block_dim() const { return block_dim_; } - const std::vector &args() const { return args_; } - uint32_t args_size() const { return args_size_; } - const std::vector &sm_desc() const { return sm_desc_; } - const std::vector &flow_table() const { return flow_table_; } - const std::vector &args_offset() const { return args_offset_; } - bool is_flowtable() const { return is_flowtable_; } - - private: - cce::ccOpContext ctx_; - std::string stub_func_; - uint32_t block_dim_; - std::vector args_; - uint32_t args_size_; - std::vector sm_desc_; - std::vector flow_table_; - std::vector args_offset_; - bool is_flowtable_; -}; - -class TbeTaskInfo : public TaskInfo { - public: - TbeTaskInfo(const std::string &op_name, uint32_t stream_id, const std::string &stub_func, uint32_t block_dim, - const std::vector &args, uint32_t args_size, const std::vector &sm_desc, void *binary, - uint32_t binary_size, const std::vector &meta_data, const std::vector &input_data_addrs, - const std::vector &output_data_addrs, const std::vector &workspace_addrs, bool dump_flag) - : TaskInfo(op_name, stream_id, TaskInfoType::TBE, dump_flag), - stub_func_(stub_func), - block_dim_(block_dim), - args_(args), - args_size_(args_size), - sm_desc_(sm_desc), - binary_(binary), - binary_size_(binary_size), - meta_data_(meta_data), - input_data_addrs_(input_data_addrs), - output_data_addrs_(output_data_addrs), - workspace_addrs_(workspace_addrs) {} - ~TbeTaskInfo() override {} - - const std::string &stub_func() const { return stub_func_; } - uint32_t block_dim() const { return block_dim_; } - const std::vector &args() const { return args_; } - uint32_t args_size() const { return args_size_; } - const std::vector &sm_desc() const { 
return sm_desc_; } - void *binary() const { return binary_; } - uint32_t binary_size() const { return binary_size_; } - const std::vector &meta_data() const { return meta_data_; } - const std::vector &input_data_addrs() const { return input_data_addrs_; } - const std::vector &output_data_addrs() const { return output_data_addrs_; } - const std::vector &workspace_addrs() const { return workspace_addrs_; } - - void SetBinary(void *binary, uint32_t binary_size) { - binary_ = binary; - binary_size_ = binary_size; - } - - private: - std::string stub_func_; - uint32_t block_dim_; - std::vector args_; - uint32_t args_size_; - std::vector sm_desc_; - void *binary_; - uint32_t binary_size_; - std::vector meta_data_; - std::vector input_data_addrs_; - std::vector output_data_addrs_; - std::vector workspace_addrs_; -}; - -class AicpuTaskInfo : public TaskInfo { - public: - AicpuTaskInfo(const std::string &op_name, uint32_t stream_id, const string &so_name, const std::string &kernel_name, - const std::string &node_def, const std::string &ext_info, const std::vector &input_data_addrs, - const std::vector &output_data_addrs, bool dump_flag) - : TaskInfo(op_name, stream_id, TaskInfoType::AICPU, dump_flag), - so_name_(so_name), - kernel_name_(kernel_name), - node_def_(node_def), - ext_info_(ext_info), - input_data_addrs_(input_data_addrs), - output_data_addrs_(output_data_addrs) {} - ~AicpuTaskInfo() override {} - - const std::string &so_name() const { return so_name_; } - const std::string &kernel_name() const { return kernel_name_; } - const std::string &node_def() const { return node_def_; } - const std::vector &input_data_addrs() const { return input_data_addrs_; } - const std::vector &output_data_addrs() const { return output_data_addrs_; } - const std::string &ext_info() const { return ext_info_; } - - private: - std::string so_name_; - std::string kernel_name_; - std::string node_def_; - std::string ext_info_; - std::vector input_data_addrs_; - std::vector output_data_addrs_; -}; - -class LabelSetTaskInfo : public TaskInfo { - public: - LabelSetTaskInfo(const std::string &op_name, uint32_t stream_id, uint32_t label_id) - : TaskInfo(op_name, stream_id, TaskInfoType::LABEL_SET, false), label_id_(label_id) {} - ~LabelSetTaskInfo() override {} - uint32_t label_id() const { return label_id_; } - - private: - uint32_t label_id_; -}; - -class LabelGotoTaskInfo : public TaskInfo { - public: - LabelGotoTaskInfo(const std::string &op_name, uint32_t stream_id, uint32_t label_id) - : TaskInfo(op_name, stream_id, TaskInfoType::LABEL_GOTO, false), label_id_(label_id) {} - ~LabelGotoTaskInfo() override {} - uint32_t label_id() const { return label_id_; } - - private: - uint32_t label_id_; -}; - -class LabelSwitchTaskInfo : public TaskInfo { - public: - LabelSwitchTaskInfo(const std::string &op_name, uint32_t stream_id, uint32_t label_size, - const std::vector &label_list, void *cond) - : TaskInfo(op_name, stream_id, TaskInfoType::LABEL_SWITCH, false), - label_size_(label_size), - label_list_(label_list), - cond_(cond) {} - ~LabelSwitchTaskInfo() override {} - uint32_t label_size() const { return label_size_; } - const std::vector &label_list() const { return label_list_; } - void *cond() const { return cond_; } - - private: - uint32_t label_size_; - std::vector label_list_; - void *cond_; -}; - -class EventTaskInfo : public TaskInfo { - public: - uint32_t event_id() const { return event_id_; } - - protected: - EventTaskInfo(const std::string &op_name, uint32_t stream_id, TaskInfoType type, uint32_t event_id) - : 
TaskInfo(op_name, stream_id, type, false), event_id_(event_id) {} - ~EventTaskInfo() override {} - - uint32_t event_id_; -}; - -class EventRecordTaskInfo : public EventTaskInfo { - public: - EventRecordTaskInfo(const std::string &op_name, uint32_t stream_id, uint32_t event_id) - : EventTaskInfo(op_name, stream_id, TaskInfoType::EVENT_RECORD, event_id) {} - ~EventRecordTaskInfo() override {} -}; - -class EventWaitTaskInfo : public EventTaskInfo { - public: - EventWaitTaskInfo(const std::string &op_name, uint32_t stream_id, uint32_t event_id) - : EventTaskInfo(op_name, stream_id, TaskInfoType::EVENT_WAIT, event_id) {} - ~EventWaitTaskInfo() override {} -}; - -class FusionStartTaskInfo : public TaskInfo { - public: - explicit FusionStartTaskInfo(const std::string &op_name, uint32_t stream_id) - : TaskInfo(op_name, stream_id, TaskInfoType::FUSION_START, false) {} - ~FusionStartTaskInfo() override {} -}; - -class FusionEndTaskInfo : public TaskInfo { - public: - explicit FusionEndTaskInfo(const std::string &op_name, uint32_t stream_id) - : TaskInfo(op_name, stream_id, TaskInfoType::FUSION_END, false) {} - ~FusionEndTaskInfo() override {} -}; - -class HcclTaskInfo : public TaskInfo { - public: - HcclTaskInfo(const std::string &op_name, uint32_t stream_id, const std::string hccl_type, void *input_data_addr, - void *output_data_addr, int64_t workspace_size, int64_t hccl_stream_num, - const std::vector &private_def, void *ops_kernel_store, int32_t count, int64_t root_id, - int64_t op_type, int64_t data_type, const std::string &group, bool dump_flag) - : TaskInfo(op_name, stream_id, TaskInfoType::HCCL, dump_flag), - hccl_type_(hccl_type), - input_data_addr_(input_data_addr), - output_data_addr_(output_data_addr), - workspace_size_(workspace_size), - hccl_stream_num_(hccl_stream_num), - private_def_(private_def), - ops_kernel_store_(ops_kernel_store), - count_(count), - root_id_(root_id), - op_type_(op_type), - data_type_(data_type), - group_(group) {} - ~HcclTaskInfo() override {} - - const std::string &hccl_type() const { return hccl_type_; } - void *input_data_addr() const { return input_data_addr_; } - void *output_data_addr() const { return output_data_addr_; } - int64_t workspace_size() const { return workspace_size_; } - int64_t hccl_stream_num() const { return hccl_stream_num_; } - const std::vector &private_def() const { return private_def_; } - void *ops_kernel_store() const { return ops_kernel_store_; } - int32_t count() const { return count_; } - int64_t root_id() const { return root_id_; } - int64_t op_type() const { return op_type_; } - int64_t data_type() const { return data_type_; } - const std::string &group() const { return group_; } - - private: - std::string hccl_type_; - void *input_data_addr_; - void *output_data_addr_; - int64_t workspace_size_; - int64_t hccl_stream_num_; - std::vector private_def_; - void *ops_kernel_store_; - int32_t count_; - int64_t root_id_; - int64_t op_type_; - int64_t data_type_; - std::string group_; -}; - -class ProfilerTraceTaskInfo : public TaskInfo { - public: - ProfilerTraceTaskInfo(const std::string &op_name, uint32_t stream_id, uint64_t log_id, bool notify, uint32_t flat) - : TaskInfo(op_name, stream_id, TaskInfoType::PROFILER_TRACE, false), - log_id_(log_id), - notify_(notify), - flat_(flat) {} - ~ProfilerTraceTaskInfo() override {} - - uint64_t log_id() const { return log_id_; } - bool notify() const { return notify_; } - uint32_t flat() const { return flat_; } - - private: - uint64_t log_id_; - bool notify_; - uint32_t flat_; -}; - -class 
MemcpyAsyncTaskInfo : public TaskInfo { - public: - MemcpyAsyncTaskInfo(const std::string &op_name, uint32_t stream_id, void *dst, uint64_t dst_max, void *src, - uint64_t count, uint32_t kind, bool dump_flag) - : TaskInfo(op_name, stream_id, TaskInfoType::MEMCPY_ASYNC, dump_flag), - dst_(dst), - dst_max_(dst_max), - src_(src), - count_(count), - kind_(kind) {} - ~MemcpyAsyncTaskInfo() override {} - - void *dst() const { return dst_; } - uint64_t dst_max() const { return dst_max_; } - void *src() const { return src_; } - uint64_t count() const { return count_; } - uint32_t kind() const { return kind_; } - - private: - void *dst_; - uint64_t dst_max_; - void *src_; - uint64_t count_; - int32_t kind_; -}; - -class StreamSwitchTaskInfo : public TaskInfo { - public: - StreamSwitchTaskInfo(const std::string &op_name, uint32_t stream_id, int64_t true_stream_id, void *input_addr, - void *value_addr, int64_t cond, int64_t data_type) - : TaskInfo(op_name, stream_id, TaskInfoType::STREAM_SWITCH, false), - true_stream_id_(true_stream_id), - input_addr_(input_addr), - value_addr_(value_addr), - cond_(cond), - data_type_(data_type) {} - ~StreamSwitchTaskInfo() override {} - - int64_t true_stream_id() const { return true_stream_id_; } - void *input_addr() const { return input_addr_; } - void *value_addr() const { return value_addr_; } - int64_t cond() const { return cond_; } - int64_t data_type() const { return data_type_; } - - private: - int64_t true_stream_id_; - void *input_addr_; - void *value_addr_; - int64_t cond_; - int64_t data_type_; -}; - -class StreamActiveTaskInfo : public TaskInfo { - public: - StreamActiveTaskInfo(const std::string &op_name, uint32_t stream_id, uint32_t active_stream_id) - : TaskInfo(op_name, stream_id, TaskInfoType::STREAM_ACTIVE, false), active_stream_id_(active_stream_id) {} - ~StreamActiveTaskInfo() override {} - - uint32_t active_stream_id() const { return active_stream_id_; } - - private: - uint32_t active_stream_id_; -}; -} // namespace model_runner -} // namespace ge - -#endif // INC_FRAMEWORK_GE_RUNTIME_TASK_INFO_H_ diff --git a/inc/graphengine/inc/framework/generator/ge_generator.h b/inc/graphengine/inc/framework/generator/ge_generator.h deleted file mode 100644 index 24f969ddf..000000000 --- a/inc/graphengine/inc/framework/generator/ge_generator.h +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
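// [Editorial sketch, not part of the deleted header] Building a tiny task list from the
// TaskInfo subclasses defined above (an async memcpy followed by a stream-active task).
// The stream ids and the memcpy `kind` value are placeholders; in real use they come from
// the runtime and the compiled model. MakeExampleTasks is a hypothetical helper name.
#include <memory>
#include <vector>

inline std::vector<std::shared_ptr<ge::model_runner::TaskInfo>> MakeExampleTasks(
    void *dst, void *src, uint64_t bytes) {
  std::vector<std::shared_ptr<ge::model_runner::TaskInfo>> tasks;
  tasks.emplace_back(std::make_shared<ge::model_runner::MemcpyAsyncTaskInfo>(
      "copy_input", 0U, dst, bytes, src, bytes, 1U, false));
  tasks.emplace_back(std::make_shared<ge::model_runner::StreamActiveTaskInfo>(
      "activate_branch", 0U, 1U));
  return tasks;
}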
- */ - -#ifndef INC_FRAMEWORK_GENERATOR_GE_GENERATOR_H_ -#define INC_FRAMEWORK_GENERATOR_GE_GENERATOR_H_ - -#include -#include -#include -#include -#include "ge/ge_ir_build.h" -#include "common/ge_inner_error_codes.h" -#include "common/ge_types.h" -#include "graph/ge_tensor.h" -#include "graph/graph.h" -#include "graph/op_desc.h" -#include "graph/detail/attributes_holder.h" -#include "omg/omg_inner_types.h" - -namespace ge { -class GeRootModel; -class GE_FUNC_VISIBILITY GeGenerator { - public: - static GeGenerator &GetInstance() { - static GeGenerator Instance; - return Instance; - } - GeGenerator() = default; - - ~GeGenerator() { (void)Finalize(); } - - GeGenerator(const GeGenerator &) = delete; - - GeGenerator &operator=(const GeGenerator &) = delete; - - Status Initialize(const std::map &options); - Status Initialize(const std::map &options, OmgContext &context); - - Status Finalize(); - - Status GenerateOfflineModel(const Graph &graph, const std::string &file_name_prefix, - const std::vector &inputs = std::vector()); - - Status GenerateOnlineModel(const Graph &graph, const vector &inputs, ge::ModelBufferData &model); - - Status GenerateInfershapeGraph(const Graph &graph); - - /// - /// @ingroup ge - /// @brief: Build single OP in Model. - /// @param [in] op_desc: the OP description. - /// @param [in] inputs: input tensors. - /// @param [in] outputs: output tensors. - /// @param [in] model_file_name: name of model file. - /// @param [in] compile_flag: op build flag, accurate build is 0, fuzz build is 1 - /// @return SUCCESS or FAILED - /// - Status BuildSingleOpModel(OpDescPtr &op_desc, const std::vector &inputs, - const std::vector &outputs, const std::string &model_file_name, - int32_t compile_flag = 0); - /// - /// @ingroup ge - /// @brief: Build single Op into model buff. - /// @param [in] op_desc: the OP description. - /// @param [in] inputs: input tensors. - /// @param [in] outputs: output tensors. - /// @param [in] engine_type: engine type. - /// @param [in] compile_flag: op build flag, accurate build is 0, fuzz build is 1 - /// @param [out] model_buff: model buff of op. - /// @return SUCCESS or FAILED - Status BuildSingleOpModel(OpDescPtr &op_desc, const vector &inputs, const vector &outputs, - OpEngineType engine_type, ModelBufferData &model_buff); - Status BuildSingleOpModel(OpDescPtr &op_desc, const vector &inputs, const vector &outputs, - OpEngineType engine_type, int32_t compile_flag, ModelBufferData &model_buff); - /// - /// @ingroup ge - /// @brief: Build single Op into model buff. - /// @param [in] op_desc: the OP description. - /// @param [in] inputs: input tensors. - /// @param [in] outputs: output tensors. - /// @param [in] graph_name: graph name. - /// @param [out] graph: graph of single op. 
- /// @return SUCCESS or FAILED - Status BuildSingleOpGraph(OpDescPtr &op_desc, const vector &inputs, const vector &outputs, - std::string graph_name, Graph &graph); - - private: - Status GenerateModel(const Graph &graph, const string &file_name_prefix, const vector &inputs, - ge::ModelBufferData &model, bool is_offline = true); - Status BuildSingleOp(OpDescPtr &op_desc, const vector &inputs, const vector &outputs, - const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff, - bool is_offline = true, int32_t compile_flag = 0); - bool CheckNoAicore(const ComputeGraphPtr &graph); - void RemoveConst(const vector &inputs, vector &outputs); - Status CheckForSingleOp(OpDescPtr &op_desc, const vector &inputs, const vector &outputs); - - using GeRootModelPtr = std::shared_ptr; - Status SetModelNameForDump(const GeRootModelPtr &ge_root_model); - - class Impl; - - std::shared_ptr impl_; -}; -} // namespace ge - -#endif // INC_FRAMEWORK_GENERATOR_GE_GENERATOR_H_ diff --git a/inc/graphengine/inc/framework/generator/generator_api.h b/inc/graphengine/inc/framework/generator/generator_api.h deleted file mode 100644 index 56b83a20e..000000000 --- a/inc/graphengine/inc/framework/generator/generator_api.h +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_GENERATOR_GENERATOR_API_H_ -#define INC_FRAMEWORK_GENERATOR_GENERATOR_API_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef uint32_t Status_t; - -typedef void *OpAttr_t; -typedef void *OpTensor_t; - -/// -/// @ingroup ge -/// @brief Generate offline model for the op. -/// @param [in] op_type: type name of the op. -/// @param [in] in_tensor: input description array (created by OpTensorCreate). -/// @param [in] in_num: number of in_tensor. -/// @param [in] out_tensor: output description array (created by OpTensorCreate). -/// @param [in] out_num: number of out_tensor. -/// @param [in] attr: the attributes of the op (created by OpAttrCreate). -/// @param [in] om_file: file name for the om to save. -/// @return 0 for success / others for fail -/// -GE_FUNC_VISIBILITY extern Status_t OpTaskGernerator(const char *op_type, const OpTensor_t *in_tensor, int in_num, - const OpTensor_t *out_tensor, int out_num, const OpAttr_t attr, - const char *om_file); - -/// -/// @ingroup ge -/// @brief Create Tensor Description. -/// @param [in] format: tensor format of the data. -/// @param [in] datatype: tensor type of the data. -/// @param [in] shape: tensor shape array. -/// @param [in] num: number of shape. 
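// [Editorial sketch, not part of the deleted header] Producing an offline model from an IR
// Graph with the GeGenerator class above. The std::map<std::string, std::string> option
// type is an assumption (template arguments were lost in this listing), and the exact
// on-disk file name derived from the prefix is not specified by the header.
ge::Status SaveOfflineModel(const ge::Graph &graph, const std::string &output_prefix) {
  ge::GeGenerator &generator = ge::GeGenerator::GetInstance();
  const std::map<std::string, std::string> options;  // build options; empty for defaults
  if (generator.Initialize(options) != ge::SUCCESS) {
    return ge::FAILED;
  }
  // Inputs use the default (empty) tensor list declared above.
  const ge::Status ret = generator.GenerateOfflineModel(graph, output_prefix);
  (void)generator.Finalize();
  return ret;
}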
-/// @return OpTensor_t for success / nullptr for failure -/// -GE_FUNC_VISIBILITY extern OpTensor_t OpTensorCreate(int format, int datatype, const int64_t *shape, int num); - -/// -/// @ingroup ge -/// @brief Destroy Tensor Description. -/// @param [in] OpTensor_t tensor: created by OpTensorCreate. -/// @param [out] none -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t OpTensorDestroy(OpTensor_t tensor); - -/// -/// @ingroup ge -/// @brief Create an attribute holder. -/// @param [in] none -/// @param [out] none -/// @return OpAttr_t for success / nullptr for failure. -/// -GE_FUNC_VISIBILITY extern OpAttr_t OpAttrCreate(); - -/// -/// @ingroup ge -/// @brief Destroy Attribute holder. -/// @param [in] OpAttr_t attr: created by OpAttrCreate. -/// @param [out] none -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t OpAttrDestroy(OpAttr_t attr); - -/// -/// @ingroup ge -/// @brief Set a boolean attribute to the attribute holder. -/// @param [in] attr: attribute holder (created by OpAttrCreate). -/// @param [in] name: attribute name (can`t be nullptr, end with '\0'). -/// @param [in] value: attributed value. -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t SetAttrBool(OpAttr_t attr, const char *name, bool value); - -/// -/// @ingroup ge -/// @brief Set an integer attribute to the attribute holder. -/// @param [in] attr: attribute holder (created by OpAttrCreate). -/// @param [in] name: attribute name (can`t be nullptr, end with '\0'). -/// @param [in] value: attribute value. -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t SetAttrInt(OpAttr_t attr, const char *name, int64_t value); - -/// -/// @ingroup ge -/// @brief Set a float attribute to the attribute holder. -/// @param [in] attr: attribute holder (created by OpAttrCreate). -/// @param [in] name: attribute name (can`t be nullptr, end with '\0'). -/// @param [in] value: attribute value. -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t SetAttrFloat(OpAttr_t attr, const char *name, float value); - -/// -/// @ingroup ge -/// @brief Set a string attribute to the attribute holder. -/// @param [in] attr: attribute holder (created by OpAttrCreate). -/// @param [in] name: attribute name (can`t be nullptr, end with '\0'). -/// @param [in] value: attribute value (can`t be nullptr, end with '\0'). -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t SetAttrString(OpAttr_t attr, const char *name, const char *value); - -/// -/// @ingroup ge -/// @brief Set a boolean array attribute to the attribute holder. -/// @param [in] attr: attribute holder (created by OpAttrCreate). -/// @param [in] name: attribute name (can`t be nullptr, end with '\0'). -/// @param [in] value: attribute value array. -/// @param [in] num: number of value array. -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t SetAttrBoolList(OpAttr_t attr, const char *name, const bool *value, int num); - -/// -/// @ingroup ge -/// @brief Set an integer array attribute to the attribute holder. -/// @param [in] attr: attribute holder (created by OpAttrCreate). -/// @param [in] name: attribute name (can`t be nullptr, end with '\0'). -/// @param [in] value: attribute value array. -/// @param [in] num: number of value array. -/// @return 0 for success / others for failure. 
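// [Editorial sketch, not part of the deleted header] Generating a single-op .om file
// through the C API above. The format/datatype integer codes, the "Relu" op type and the
// attribute name are placeholders; note that the exported symbol really is spelled
// OpTaskGernerator, as declared above.
Status_t BuildSingleOpOm(const char *om_path) {
  const int64_t shape[] = {1, 3, 224, 224};
  OpTensor_t input = OpTensorCreate(/*format=*/0, /*datatype=*/0, shape, 4);
  OpTensor_t output = OpTensorCreate(/*format=*/0, /*datatype=*/0, shape, 4);
  OpAttr_t attr = OpAttrCreate();
  (void)SetAttrInt(attr, "some_attr", 1);
  const Status_t ret = OpTaskGernerator("Relu", &input, 1, &output, 1, attr, om_path);
  (void)OpAttrDestroy(attr);
  (void)OpTensorDestroy(input);
  (void)OpTensorDestroy(output);
  return ret;
}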
-/// -GE_FUNC_VISIBILITY extern Status_t SetAttrIntList(OpAttr_t attr, const char *name, const int64_t *value, int num); - -/// -/// @ingroup ge -/// @brief Set a float array attribute to the attribute holder. -/// @param [in] attr: attribute holder (created by OpAttrCreate). -/// @param [in] name: attribute name (can`t be nullptr, end with '\0'). -/// @param [in] value: attribute value array. -/// @param [in] num: number of value array. -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t SetAttrFloatList(OpAttr_t attr, const char *name, const float *value, int num); - -/// -/// @ingroup ge -/// @brief Set a string array attribute to the attribute holder. -/// @param [in] attr: attribute holder (created by OpAttrCreate). -/// @param [in] name: attribute name (can`t be nullptr, end with '\0'). -/// @param [in] value: attribute value array (each value can`t be nullptr, end with '\0'). -/// @param [in] num: number of value array. -/// @return 0 for success / others for failure. -/// -GE_FUNC_VISIBILITY extern Status_t SetAttrStringList(OpAttr_t attr, const char *name, const char **value, int num); - -#ifdef __cplusplus -} -#endif - -#endif // INC_FRAMEWORK_GENERATOR_GENERATOR_API_H_ diff --git a/inc/graphengine/inc/framework/memory/memory_api.h b/inc/graphengine/inc/framework/memory/memory_api.h deleted file mode 100644 index a316fd590..000000000 --- a/inc/graphengine/inc/framework/memory/memory_api.h +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_MEMORY_MEMORY_API_H_ -#define INC_FRAMEWORK_MEMORY_MEMORY_API_H_ - -#include -#include - -#include "ge/ge_api_error_codes.h" -#include "graph//types.h" -#include "runtime/mem.h" - -namespace ge { -enum MemStorageType { - HBM = 0, - RDMA_HBM, - HOST_DDR, -}; - -struct HostVarInfo { - uint64_t base_addr; - uint64_t var_size; -}; - -struct TensorInfo { - std::string var_name; - std::vector dims; - DataType data_type; -}; - -/// -/// \param size [in] rdma pool memory size to be allocated. -/// \param mem_type [in] memory type for rdma pool. -/// \return Status result of function -GE_FUNC_VISIBILITY Status InitRdmaPool(size_t size, rtMemType_t mem_type = RT_MEMORY_HBM); - -/// -/// \param var_info [in] host variable addr infos. -/// \param mem_type [in] memory type for rdma pool. -/// \return Status result of function -GE_FUNC_VISIBILITY Status RdmaRemoteRegister(const std::vector &var_info, - rtMemType_t mem_type = RT_MEMORY_HBM); - -/// -/// \param tensor_info [in] description for tensor stored shared memory. -/// \param dev_addr [out] malloced shared memory addr. -/// \param memory_size [out] malloced shared memory size. -/// \return Status result of function -GE_FUNC_VISIBILITY Status MallocSharedMemory(const TensorInfo &tensor_info, uint64_t &dev_addr, uint64_t &memory_size); - -/// -/// \param var_name [in] var_name name of host variable. 
-/// \param base_addr [out] base_addr vase addr of host variable. -/// \param var_size [out] var_size memory_size of host variable. -/// \return Status result of function -GE_FUNC_VISIBILITY Status GetVarBaseAddrAndSize(const std::string &var_name, uint64_t &base_addr, uint64_t &var_size); -} // namespace ge -#endif // INC_FRAMEWORK_MEMORY_MEMORY_API_H_ diff --git a/inc/graphengine/inc/framework/memory/memory_assigner.h b/inc/graphengine/inc/framework/memory/memory_assigner.h deleted file mode 100644 index 173cc64e2..000000000 --- a/inc/graphengine/inc/framework/memory/memory_assigner.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_MEMORY_MEMORY_ASSIGNER_H_ -#define INC_FRAMEWORK_MEMORY_MEMORY_ASSIGNER_H_ - -#include - -#include "common/ge_inner_error_codes.h" -#include "graph/node.h" - -namespace ge { -const int64_t MEM_ALIGN_SIZE = 512; -class GE_FUNC_VISIBILITY MemoryAssigner { - public: - explicit MemoryAssigner(ge::ComputeGraphPtr compute_graph) : compute_graph_(std::move(compute_graph)) {} - virtual ~MemoryAssigner() = default; - - MemoryAssigner(const MemoryAssigner &) = delete; - - MemoryAssigner &operator=(const MemoryAssigner &) = delete; - - Status AssignMemory(bool is_loop_graph, map &mem_offset, size_t &zero_copy_mem_size); - - private: - ge::ComputeGraphPtr compute_graph_; -}; -} // namespace ge -#endif // INC_FRAMEWORK_MEMORY_MEMORY_ASSIGNER_H_ diff --git a/inc/graphengine/inc/framework/omg/ge_init.h b/inc/graphengine/inc/framework/omg/ge_init.h deleted file mode 100644 index 42fd89798..000000000 --- a/inc/graphengine/inc/framework/omg/ge_init.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_FRAMEWORK_OMG_GE_INIT_H_ -#define INC_FRAMEWORK_OMG_GE_INIT_H_ -#include -#include -#include "common/ge_inner_error_codes.h" - -namespace ge { -class GE_FUNC_VISIBILITY GEInit { - public: - // GE Environment Initialize, return Status: SUCCESS,FAILED - static Status Initialize(const std::map &options); - - static std::string GetPath(); - - // GE Environment Finalize, return Status: SUCCESS,FAILED - static Status Finalize(); -}; -} // namespace ge - -#endif // INC_FRAMEWORK_OMG_GE_INIT_H_ diff --git a/inc/graphengine/inc/framework/omg/model_tool.h b/inc/graphengine/inc/framework/omg/model_tool.h deleted file mode 100644 index 8c425823d..000000000 --- a/inc/graphengine/inc/framework/omg/model_tool.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_OMG_MODEL_TOOL_H_ -#define INC_FRAMEWORK_OMG_MODEL_TOOL_H_ - -#include -#include - -#include "framework/common/debug/ge_log.h" -#include "proto/ge_ir.pb.h" - -namespace ge { -class GE_FUNC_VISIBILITY ModelTool { - public: - static Status GetModelInfoFromOm(const char *model_file, ge::proto::ModelDef &model_def, uint32_t &modeldef_size); - - static Status GetModelInfoFromPbtxt(const char *model_file, ge::proto::ModelDef &model_def); -}; -} // namespace ge - -#endif // INC_FRAMEWORK_OMG_MODEL_TOOL_H_ diff --git a/inc/graphengine/inc/framework/omg/omg.h b/inc/graphengine/inc/framework/omg/omg.h deleted file mode 100644 index a0cdb4498..000000000 --- a/inc/graphengine/inc/framework/omg/omg.h +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
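// [Editorial sketch, not part of the deleted header] Bracketing GE usage with the GEInit
// helpers above. The std::map<std::string, std::string> option type is an assumption
// (template arguments were lost in this listing); the option map is left empty here.
inline ge::Status InitializeGeEnvironment() {
  const std::map<std::string, std::string> options;  // e.g. SoC version, log level, ...
  return ge::GEInit::Initialize(options);
}

inline ge::Status FinalizeGeEnvironment() { return ge::GEInit::Finalize(); }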
- */ - -#ifndef INC_FRAMEWORK_OMG_OMG_H_ -#define INC_FRAMEWORK_OMG_OMG_H_ - -#include -#include -#include -#include -#include "framework/omg/omg_inner_types.h" -#include "framework/omg/parser/parser_inner_ctx.h" -#include "proto/ge_ir.pb.h" -#include "proto/om.pb.h" - -#include "graph/compute_graph.h" -#include "graph/graph.h" -#include "graph/model.h" -#include "runtime/kernel.h" - -using domi::Status; -using std::pair; -using std::string; -using std::unordered_map; -using std::vector; - -namespace ge { -/** - * @ingroup domi_omg - * @brief init omg context - * @return Status result code - */ -GE_FUNC_VISIBILITY Status InitDomiOmgContext(const string &input_shape, const string &input_format, - const string &net_format, bool is_dynamic_input); - -/** - * @ingroup domi_omg - * @brief generate graph based on the input model file and weight file - * @param [out] graph graph - * @param [in] model_file path of model file - * @param [in] weights_file path of weight file - * @param [in] type type of the input model - * @param [in] op_conf op mapping configuration - * @param [in] target type of platform. If a tiny model is generated, set target to tiny - * @param [in] run_mode run mode - * @param [in] enable_l2dynamic enable l2dynamic - * @param [in] is_dynamic_input dynamic input, true or false - * @param [in] atc_params multiple atc params - * @return Status result code - */ -GE_FUNC_VISIBILITY Status ParseGraph(ge::Graph &graph, const std::map &atc_params, - const char *model_file, const char *weights_file, domi::FrameworkType type, - const char *op_conf = nullptr, const char *target = nullptr, - RunMode run_mode = GEN_OM_MODEL, bool is_dynamic_input = false); - -/** - * @ingroup domi_omg - * @brief generates a simplified JSON file based on the key value of the offline model file in protobuf format - * @param [in] model_file path of offline model file - * @param [out] json_file path of json file - * @param [in] key encrypted key - * @return Status result code - */ -GE_FUNC_VISIBILITY Status ConvertOm(const char *model_file, const char *json_file, bool is_covert_to_json); - -GE_FUNC_VISIBILITY Status ConvertPbtxtToJson(const char *model_file, const char *json_file); -/** - * @ingroup domi_omg - * @brief convert the model file in protobuf format into a JSON file.
- * @param [in] framework type of model - * @param [in] om model_file path of offline model file - * @param [out] json_file path of json file - * @param [key] encrypted key - * @return Status result code - */ -GE_FUNC_VISIBILITY Status ConvertFwkModelToJson(domi::FrameworkType framework, const char *model_file, - const char *json_file); - -GE_FUNC_VISIBILITY void GetGroupName(ge::proto::ModelDef &model); - -GE_FUNC_VISIBILITY void FindParserSo(const string &path, vector &fileList, string &caffe_parser_path); - -GE_FUNC_VISIBILITY Status DumpInfershapeJson(const ge::Graph &graph, const char *json_file); - -GE_FUNC_VISIBILITY Status SetOutputNodeInfo(ge::Graph &graph, const std::string &output_type, - const std::string &output_format); - -GE_FUNC_VISIBILITY Status GetOutputLeaf(ge::NodePtr node, - std::vector> &output_nodes_info); - -GE_FUNC_VISIBILITY void GetOutputNodesNameAndIndex(std::vector> &output_nodes_info, - std::vector &output_nodes_name); - -GE_FUNC_VISIBILITY void UpdateOmgCtxWithParserCtx(); - -GE_FUNC_VISIBILITY void UpdateParserCtxWithOmgCtx(); - -GE_FUNC_VISIBILITY void PrintModelInfo(ge::proto::ModelDef *model_def, uint32_t modeldef_size); -} // namespace ge - -namespace domi { -/** - * @ingroup domi_omg - * @brief get omg context - * @return reference of OmgContext - */ -GE_FUNC_VISIBILITY ge::OmgContext &GetContext(); -} // namespace domi - -#endif // INC_FRAMEWORK_OMG_OMG_H_ diff --git a/inc/graphengine/inc/framework/omg/omg_inner_types.h b/inc/graphengine/inc/framework/omg/omg_inner_types.h deleted file mode 100644 index 0b799bf2c..000000000 --- a/inc/graphengine/inc/framework/omg/omg_inner_types.h +++ /dev/null @@ -1,149 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_FRAMEWORK_OMG_OMG_INNER_TYPES_H_ -#define INC_FRAMEWORK_OMG_OMG_INNER_TYPES_H_ - -#include -#include -#include -#include -#include -#include -#include -#include "framework/common/fmk_error_codes.h" -#include "register/register_fmk_types.h" -#include "graph/node.h" - -using domi::DOMI_TENSOR_ND; -using domi::DOMI_TENSOR_RESERVED; -using domi::domiTensorFormat_t; -using domi::FRAMEWORK_RESERVED; -using domi::FrameworkType; -using std::map; -using std::string; -using std::unordered_map; -using std::vector; - -namespace ge { -/** - * @ingroup domi_omg - * @brief run mode - */ -enum RunMode { - GEN_OM_MODEL = 0, // generate offline model file - MODEL_TO_JSON = 1, // convert to JSON file - ONLY_PRE_CHECK = 3, // only for pre-check - PBTXT_TO_JSON = 5, // pbtxt to json - DISPLAY_OM_INFO = 6 // display model info -}; - -/// -/// @ingroup domi_omg -/// @brief high-precision mode -/// -enum HighPrecisionMode { - // the FP16 high-precision function is disabled in common mode - HIGH_PRECISION_DEFAULT = 0, - - // high-precision mode, enabling FP16 high-precision mode (Convolution/FullConnect/AvgPooling are involved) - HIGH_PRECISION_FP16 = 1 -}; - -/// -/// @ingroup domi_omg -/// @brief description buffer data -/// -struct OMGBufferData { - void *data; - uint32_t length; -}; - -struct OmgContext { - OmgContext() { format = DOMI_TENSOR_ND; } - domiTensorFormat_t format; - - // format of the input specified by the command line - std::unordered_map input_nodes_format_map; - std::vector output_formats; - - // user-designated input dims - std::vector>> user_input_dims; - // global input dims - std::map> input_dims; - - // resolve the mapping between operators with the same name and corresponding network. format e.g. - // Detectionoutput:SsdDetectiontOutput - std::map op_conf_map; - // save the output node of the network. key = operator name, value = index, index indicates the output index of the - // operator - std::map> out_nodes_map; - // user-designated out nodes (this is used for determining the orders) - std::vector> user_out_nodes; - // default out nodes (this is used for determining the orders) - std::vector> default_out_nodes; - // save the output node of the network, value = topName, - // topName indicates the output name of the operator. - std::vector user_out_nodes_top_vec; - // net out nodes (either user_out_nodes or leaf nodes) - std::vector net_out_nodes; - // net out nodes top names(only caffe has top) - std::vector out_top_names; - // net data nodes top names(only caffe has top) - std::vector data_top_names; - // preferential format used by the entire network - domiTensorFormat_t net_format = DOMI_TENSOR_RESERVED; - domi::FrameworkType type = domi::FRAMEWORK_RESERVED; - RunMode run_mode = ONLY_PRE_CHECK; - bool train_flag = false; - - std::string output_type; - - // Whether to use dynamic batch size or dynamic image size - bool is_dynamic_input = false; - std::string dynamic_batch_size; - std::string dynamic_image_size; - std::string dynamic_dims; - std::string dynamic_node_type; - std::vector> user_real_input_dims; - std::vector cur_dynamic_dims; - bool need_multi_batch = false; - std::vector data_nodes; - std::vector getnext_nosink_nodes; - bool fuzz_compile_flag = false; - std::string atc_cmdline; - bool user_attr_index_valid = false; -}; -} // namespace ge - -namespace domi { -/** - * @ingroup domi_omg - * @brief get OMG context - * @return OmgContext context - */ -GE_FUNC_VISIBILITY ge::OmgContext &GetContext(); - -struct TEBinInfo { - // It is obsolete.
It will be automatically obtained from the binfilename field of the JSON file later. - // To be compatible with use cases written by previous users, fields are not deleted.(2018.11.21) - std::string bin_file_path; - std::string json_file_path; - std::string ddk_version; -}; -} // namespace domi - -#endif // INC_FRAMEWORK_OMG_OMG_INNER_TYPES_H_ diff --git a/inc/graphengine/inc/framework/omg/omg_types.h b/inc/graphengine/inc/framework/omg/omg_types.h deleted file mode 100644 index 771a53a4c..000000000 --- a/inc/graphengine/inc/framework/omg/omg_types.h +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_OMG_OMG_TYPES_H_ -#define INC_FRAMEWORK_OMG_OMG_TYPES_H_ - -#include "register/register_fmk_types.h" - -#endif // INC_FRAMEWORK_OMG_OMG_TYPES_H_ diff --git a/inc/graphengine/inc/framework/omg/parser/model_parser.h b/inc/graphengine/inc/framework/omg/parser/model_parser.h deleted file mode 100644 index 4902339d5..000000000 --- a/inc/graphengine/inc/framework/omg/parser/model_parser.h +++ /dev/null @@ -1,183 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_FRAMEWORK_OMG_PARSER_MODEL_PARSER_H_ -#define INC_FRAMEWORK_OMG_PARSER_MODEL_PARSER_H_ - -#include -#include "framework/omg/parser/parser_types.h" -#include "framework/omg/omg_inner_types.h" -#include "graph/attr_value.h" -#include "graph/compute_graph.h" -#include "graph/ge_tensor.h" -#include "graph/graph.h" -#include "graph/op_desc.h" -#include "graph/operator.h" -#include "graph/range_vistor.h" -#include "graph/utils/attr_utils.h" -#include "graph/utils/graph_utils.h" -#include "graph/utils/graph_utils_ex.h" -#include "graph/utils/op_desc_utils.h" -#include "graph/utils/tensor_utils.h" - -using Status = domi::Status; - -namespace domi { -using GetGraphCallback = std::function( - const google::protobuf::Message *root_proto, const std::string &graph)>; - -using GetGraphCallbackV2 = std::function; - -class GE_FUNC_VISIBILITY ModelParser { - public: - ModelParser() {} - - virtual ~ModelParser() {} - - /** - * @ingroup domi_omg - * @brief Analyze network model data - * @param [in] file Network model file path - * @param [in|out] graph Save the network information after analysis - * @return SUCCESS - * @return Others failed - */ - virtual Status Parse(const char *file, ge::Graph &graph) = 0; - - /** - * @ingroup domi_omg - * @brief Parse relevant data from memory and save it to graph - * @param [in] input Model file memory data - * @param [in] input Model file memory size - * @param [in|out] graph A graph for saving the model information after analysis - * @return SUCCESS - * @return FAILED - * @author - */ - virtual Status ParseFromMemory(const char *data, uint32_t size, ge::ComputeGraphPtr &graph) = 0; - - /** - * @ingroup domi_omg - * @brief Parse relevant data from memory and save it to graph - * @param [in] input Model file memory data - * @param [in] input Model file memory size - * @param [in|out] graph A graph for saving the model information after analysis - * @return SUCCESS - * @return FAILED - * @author - */ - virtual Status ParseFromMemory(const char *data, uint32_t size, ge::Graph &graph) = 0; - - /** - * @ingroup domi_omg - * @brief Analyze network model data - * @param [in] proto network model - * @param [in|out] graph Save the network information after analysis - * @return SUCCESS - * @return Others failed - */ - virtual Status ParseProto(const google::protobuf::Message *proto, ge::ComputeGraphPtr &graph) = 0; - - /** - * @ingroup domi_omg - * @brief Analyze callback model data in subgraph - * @param [in] proto network model - * @param [in] callback callback of subgraph - * @param [in|out] graph Save the network information after analysis - * @return SUCCESS - * @return Others failed - */ - virtual Status ParseProtoWithSubgraph(const google::protobuf::Message *proto, GetGraphCallback callback, - ge::ComputeGraphPtr &graph) = 0; - /** - * @ingroup domi_omg - * @brief Convert model files to JSON format - * @param [in] model_file Model file path to be converted - * @param [out] json_file Converted JSON file path - * @return SUCCESS - * @return Others failed - */ - virtual Status ToJson(const char *model_file, const char *json_file) { return domi::SUCCESS; } - - /* - * @ingroup domi_omg - * @brief Convert network data type - * @param [in] type Data type to be converted - * @return ge::DataType - */ - virtual ge::DataType ConvertToGeDataType(const uint32_t type) = 0; - - virtual Status ParseAllGraph(const google::protobuf::Message *root_proto, ge::ComputeGraphPtr &root_graph) = 0; - - /**` - * @ingroup domi_omg - * @brief Analyze network model data - * @param 
[in] proto serialized network model - * @param [in|out] graph Save the network information after analysis - * @return SUCCESS - * @return Others failed - */ - virtual Status ParseProto(const std::string &serialized_proto, ge::ComputeGraphPtr &graph) { return UNSUPPORTED; } - - /** - * @ingroup domi_omg - * @brief Analyze callback model data in subgraph - * @param [in] proto serialized network model - * @param [in] callback callback of subgraph - * @param [in|out] graph Save the network information after analysis - * @return SUCCESS - * @return Others failed - */ - virtual Status ParseProtoWithSubgraph(const std::string &serialized_proto, GetGraphCallbackV2 callback, - ge::ComputeGraphPtr &graph) { - return UNSUPPORTED; - } - - /** - * @ingroup domi_omg - * @brief Analyze callback model data in subgraph - * @param [in] partitioned_serialized partitioned serialized network model - * @param [in] const_value_map const value map, key: constant node name value: serialized constant output tensor - * @param [in] callback callback of subgraph - * @param [in|out] graph Save the network information after analysis - * @return SUCCESS - * @return Others failed - */ - virtual Status ParseProtoWithSubgraph(const std::vector &partitioned_serialized, - const std::map &const_value_map, - GetGraphCallbackV2 callback, - ge::ComputeGraphPtr &graph) { - return UNSUPPORTED; - } - - /** - * @ingroup domi_omg - * @brief Analyze network model data - * @param [in] partitioned_serialized partitioned serialized network model - * @param [in] const_value_map const value map, key: constant node name value: serialized constant output tensor - * @param [in|out] graph Save the network information after analysis - * @return SUCCESS - * @return Others failed - */ - virtual Status ParseProto(const std::vector &partitioned_serialized, - const std::map &const_value_map, - ge::ComputeGraphPtr &graph) { - return UNSUPPORTED; - } -}; -} // namespace domi - -#endif // INC_FRAMEWORK_OMG_PARSER_MODEL_PARSER_H_ diff --git a/inc/graphengine/inc/framework/omg/parser/op_parser.h b/inc/graphengine/inc/framework/omg/parser/op_parser.h deleted file mode 100644 index 70bec218c..000000000 --- a/inc/graphengine/inc/framework/omg/parser/op_parser.h +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_FRAMEWORK_OMG_PARSER_OP_PARSER_H_ -#define INC_FRAMEWORK_OMG_PARSER_OP_PARSER_H_ - -#include -#include "framework/omg/parser/parser_types.h" -#include "omg/omg_inner_types.h" -#include "proto/om.pb.h" -#include "graph/ge_tensor.h" -#include "graph/op_desc.h" -#include "graph/utils/op_desc_utils.h" - -using google::protobuf::Message; -using Status = domi::Status; - -namespace ge { -/** - * @ingroup domi_omg - * @brief Used to analyze operator information - * - */ -class GE_FUNC_VISIBILITY OpParser { - public: - /** - * @ingroup domi_omg - * @brief Destructor - */ - virtual ~OpParser() {} - - /** - * @ingroup domi_omg - * @brief Analyze operator parameters - * @param [in] op_src Parameter data to be resolved - * @param [out] op_desc Parsed parameter data - * @return SUCCESS - * @return FAILED - */ - virtual Status ParseParams(const Message *op_src, ge::OpDescPtr &op_desc) = 0; - - /** - * @ingroup domi_omg - * @brief Analyze operator parameters - * @param [in] op_src Parameter data to be resolved - * @param [out] op_dest Operator parameter data - * @return SUCCESS - * @return FAILED - */ - virtual Status ParseParams(const Message *op_src, ge::Operator &op_dest) = 0; - - /** - * @ingroup domi_omg - * @brief Analyze operator weight information - * @param [in] op_src Weight data to be resolved - * @param [out] node Weight data after analysis - * @return SUCCESS - * @return FAILED - */ - virtual Status ParseWeights(const Message *op_src, ge::NodePtr &node) = 0; - - /** - * @ingroup domi_omg - * @brief Get the format information according to the parameters in the operator - * @param [in] op_src Parameter data to be resolved - * @param [out] format Output the parsed format - * @return SUCCESS - * @return FAILED - */ - virtual Status GetFormat(const Message *op_src, domi::domiTensorFormat_t &format) { - (void)op_src; - // Indicates that the op does not provide a value for format - format = domi::DOMI_TENSOR_RESERVED; - return domi::SUCCESS; - } -}; -} // namespace ge - -#endif // INC_FRAMEWORK_OMG_PARSER_OP_PARSER_H_ diff --git a/inc/graphengine/inc/framework/omg/parser/parser_api.h b/inc/graphengine/inc/framework/omg/parser/parser_api.h deleted file mode 100644 index 6c2236656..000000000 --- a/inc/graphengine/inc/framework/omg/parser/parser_api.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifndef INC_FRAMEWORK_OMG_PARSER_PARSER_API_H_ -#define INC_FRAMEWORK_OMG_PARSER_PARSER_API_H_ - -#include -#include -#include -#include "ge/ge_api_error_codes.h" - -namespace ge { -// Initialize parser -GE_FUNC_VISIBILITY Status ParserInitialize(const std::map& options); -// Finalize parser, release all resources -GE_FUNC_VISIBILITY Status ParserFinalize(); -} // namespace ge -#endif // INC_FRAMEWORK_OMG_PARSER_PARSER_API_H_ diff --git a/inc/graphengine/inc/framework/omg/parser/parser_factory.h b/inc/graphengine/inc/framework/omg/parser/parser_factory.h deleted file mode 100644 index 9d6590c01..000000000 --- a/inc/graphengine/inc/framework/omg/parser/parser_factory.h +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_OMG_PARSER_PARSER_FACTORY_H_ -#define INC_FRAMEWORK_OMG_PARSER_PARSER_FACTORY_H_ - -#include -#include -#include -#include -#include "framework/omg/omg_inner_types.h" -#include "framework/omg/parser/parser_types.h" - -using Status = domi::Status; - -namespace domi { -class WeightsParser; -class ModelParser; - -typedef std::shared_ptr (*MODEL_PARSER_CREATOR_FUN)(void); - -// Create modelparser for different frameworks -class GE_FUNC_VISIBILITY ModelParserFactory { - public: - static ModelParserFactory *Instance(); - - /** - * @ingroup domi_omg - * @brief Create a modelparser based on the type entered - * @param [in] type Framework type - * @return Created modelparser - */ - std::shared_ptr CreateModelParser(const domi::FrameworkType type); - - /** - * @ingroup domi_omg - * @brief Register create function - * @param [in] type Framework type - * @param [in] fun ModelParser's create function - */ - void RegisterCreator(const domi::FrameworkType type, MODEL_PARSER_CREATOR_FUN fun); - - protected: - ModelParserFactory() {} - ~ModelParserFactory(); - - private: - std::map creator_map_; -}; // end class ModelParserFactory - -class GE_FUNC_VISIBILITY ModelParserRegisterar { - public: - ModelParserRegisterar(const domi::FrameworkType type, MODEL_PARSER_CREATOR_FUN fun) { - ModelParserFactory::Instance()->RegisterCreator(type, fun); - } - ~ModelParserRegisterar() {} -}; - -// Registration macros for model parsers -#define REGISTER_MODEL_PARSER_CREATOR(type, clazz) \ - std::shared_ptr Creator_##type##_Model_Parser() { \ - std::shared_ptr ptr = nullptr; \ - try { \ - ptr = make_shared(); \ - } catch (...) 
{ \ - ptr = nullptr; \ - } \ - return std::shared_ptr(ptr); \ - } \ - ModelParserRegisterar g_##type##_Model_Parser_Creator(type, Creator_##type##_Model_Parser) - -typedef std::shared_ptr (*WEIGHTS_PARSER_CREATOR_FUN)(void); - -// Create weightsparser for different frameworks -class GE_FUNC_VISIBILITY WeightsParserFactory { - public: - static WeightsParserFactory *Instance(); - - /** - * @ingroup domi_omg - * @brief Create weightsparser based on the type entered - * @param [in] type Framework type - * @return Created weightsparser - */ - std::shared_ptr CreateWeightsParser(const domi::FrameworkType type); - - /** - * @ingroup domi_omg - * @brief Register create function - * @param [in] type Framework type - * @param [in] fun WeightsParser's create function - */ - void RegisterCreator(const domi::FrameworkType type, WEIGHTS_PARSER_CREATOR_FUN fun); - - protected: - WeightsParserFactory() {} - ~WeightsParserFactory(); - - private: - std::map creator_map_; -}; // end class WeightsParserFactory - -class GE_FUNC_VISIBILITY WeightsParserRegisterar { - public: - WeightsParserRegisterar(const domi::FrameworkType type, WEIGHTS_PARSER_CREATOR_FUN fun) { - WeightsParserFactory::Instance()->RegisterCreator(type, fun); - } - ~WeightsParserRegisterar() {} -}; - -// Register macro of weight resolver -#define REGISTER_WEIGHTS_PARSER_CREATOR(type, clazz) \ - std::shared_ptr Creator_##type##_Weights_Parser() { \ - std::shared_ptr ptr = nullptr; \ - try { \ - ptr = make_shared(); \ - } catch (...) { \ - ptr = nullptr; \ - } \ - return std::shared_ptr(ptr); \ - } \ - WeightsParserRegisterar g_##type##_Weights_Parser_Creator(type, Creator_##type##_Weights_Parser) -}; // namespace domi - -#endif // INC_FRAMEWORK_OMG_PARSER_PARSER_FACTORY_H_ diff --git a/inc/graphengine/inc/framework/omg/parser/parser_inner_ctx.h b/inc/graphengine/inc/framework/omg/parser/parser_inner_ctx.h deleted file mode 100644 index b23da53f2..000000000 --- a/inc/graphengine/inc/framework/omg/parser/parser_inner_ctx.h +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_OMG_PARSER_PARSER_INNER_CONTEXT_H_ -#define INC_FRAMEWORK_OMG_PARSER_PARSER_INNER_CONTEXT_H_ - -#include -#include -#include -#include -#include -#include -#include "external/register/register_fmk_types.h" -#include "external/register/register_types.h" -#include "framework/omg/omg_inner_types.h" - -namespace ge { -struct ParserContext { - // format of the input specified by the command line - std::unordered_map input_nodes_format_map; - std::vector output_formats; - // user-designate input dims - std::vector>> user_input_dims; - std::map> input_dims; - // resolve the mapping between operators with the same name and corresponding network. format e.g. 
- // Detectionoutput:SsdDetectiontOutput - std::map op_conf_map; - // user-designated out nodes (this is used for determining the orders) - std::vector> user_out_nodes; - // default out nodes (this is used for determining the orders) - std::vector> default_out_nodes; - // save the output node of the network. key = operator name, value = index, index indicates the output index of the - // operator - std::map> out_nodes_map; - // save the output node of the network, value = topName, - // topName indicates the output name of the operator. - std::vector user_out_nodes_top_vec; - // net out nodes (either user_out_nodes or leaf nodes) - std::vector net_out_nodes; - // net data nodes top names(only caffe has top) - std::vector data_top_names; - // net out nodes top names(only caffe has top) - std::vector out_top_names; - // Whether to use dynamic batch size or dynamic image size - bool is_dynamic_input = false; - bool train_flag = false; - domi::domiTensorFormat_t format = domi::DOMI_TENSOR_ND; - domi::FrameworkType type = domi::FRAMEWORK_RESERVED; - RunMode run_mode = GEN_OM_MODEL; - // save caffe custom proto path, used by the caffe parser - std::string custom_proto_path; - // save caffe proto path, used by the caffe parser - std::string caffe_proto_path; - // name of the pass that needs to take effect - std::string enable_scope_fusion_passes; -}; - -GE_FUNC_VISIBILITY ParserContext &GetParserContext(); -} // namespace ge - -#endif // INC_FRAMEWORK_OMG_PARSER_PARSER_INNER_CONTEXT_H_ diff --git a/inc/graphengine/inc/framework/omg/parser/parser_types.h b/inc/graphengine/inc/framework/omg/parser/parser_types.h deleted file mode 100644 index f3b7f00a8..000000000 --- a/inc/graphengine/inc/framework/omg/parser/parser_types.h +++ /dev/null @@ -1,510 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifndef PARSER_COMMON_TYPES_H_ -#define PARSER_COMMON_TYPES_H_ - -#include -#include - -#include "register/register_types.h" - -#if !defined(__ANDROID__) && !defined(ANDROID) -#ifndef DOMI_DYNAMIC_CAST -#define DOMI_DYNAMIC_CAST static_cast -#endif -#ifndef DOMI_DYNAMIC_POINTER_CAST -#define DOMI_DYNAMIC_POINTER_CAST std::static_pointer_cast -#endif -#else -#ifndef DOMI_DYNAMIC_CAST -#define DOMI_DYNAMIC_CAST static_cast -#endif -#ifndef DOMI_DYNAMIC_POINTER_CAST -#define DOMI_DYNAMIC_POINTER_CAST std::static_pointer_cast -#endif -#endif - -namespace ge { -namespace parser { -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DATA; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *AIPPDATA; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONVOLUTION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CORRELATION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CORRELATIONV2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DECONVOLUTION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *POOLING; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ELTWISE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RELU; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RELU6; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SIGMOID; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ABSVAL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TANH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PRELU; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BATCHNORM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FUSIONBATCHNORM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SCALE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FULL_CONNECTION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SOFTMAX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PLUS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACTIVATION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FLATTEN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ADD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SUB; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MUL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MATMUL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RSQRT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BIASADD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESHAPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFORMAT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPCONVOLUTION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DROPOUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DROPOUTGENMASK; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DROPOUTDOMASK; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCAT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ROIPOOLING; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PROPOSAL; -FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *FSRDETECTIONOUTPUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DETECTIONPOSTPROCESS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LRN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSDATA; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PERMUTE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDNORMALIZE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDPRIORBOX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NETOUTPUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDDETECTIONOUTPUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFINEDETDETECTIONOUTPUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CHANNELAXPY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PSROIPOOLING; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *POWER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *POW; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ROIALIGN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PYTHON; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FREESPACEEXTRACT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPATIALTF; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SHAPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SHAPEN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ARGMAX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GATHERND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GATHER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REALDIV; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PACK; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SLICE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SLICED; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FLOORDIV; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQUEEZE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *UNSQUEEZE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STRIDEDSLICE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RANGE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RPNPROPOSALS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DECODEBBOX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PADV2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MIRRORPAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TILE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SIZE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CLIPBOXES; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTRCNNPREDICTIONS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPLIT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPLITV; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EXPANDDIMS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const 
char *EMPTY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MEAN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GREATER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SWITCH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SWITCHN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MERGE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SYMBOLICGRADIENT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REMOTECALL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *_IF; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STATELESSIF; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *IF; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CASE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *_WHILE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *WHILE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STATELESSWHILE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PARTITIONEDCALL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STATEFULPARTITIONEDCALL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FAKEPARAM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSPOSE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSPOSED; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CAST; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REGION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *YOLO; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *YOLODETECTIONOUTPUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FILL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REVERSE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *UNPACK; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *YOLO2REORG; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCESUM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SUM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONSTANT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESIZEBILINEAR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESIZEBILINEARGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MAXIMUM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FRAMEWORKOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ARG; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FUSEDBATCHNORMGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LSTM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HIGHWAY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RNN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATTENTIONDECODER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOGICAL_NOT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOGICAL_AND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOGICAL_OR; -FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *EQUAL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NOTEQUAL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *INTERP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SHUFFLECHANNEL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *AIPP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MULTISHAPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RECIPROCAL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SELU; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ELU; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACOSH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASINH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MINIMUM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CLIP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *L2NORMALIZE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CROPANDRESIZE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *UNUSEDCONST; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPARSETODENSE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NONMAXSUPPRESSION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TOPKV2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *INVERTPERMUTATION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MULTINOMIAL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REVERSESEQUENCE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEPROD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEMAX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEMIN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EXTRACTIMAGEPATCHES; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQRT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEALL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESIZENEARESTNEIGHBOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPACETOBATCHND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BATCHTOSPACEND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSERT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GREATEREQUAL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FLOOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RANDOMUNIFORM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BATCHMATMUL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPACETODEPTH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHTOSPACE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RINT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATAN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATAN2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATANH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACOS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASIN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY 
extern const char *NEG; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOG; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TAN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ROUND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *UPSAMPLE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FLOORMOD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LESS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LESSEQUAL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ONEHOT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFSWITCH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFMERGE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ENTER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFENTER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOOPCOND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NEXTITERATION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFNEXTITERATION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EXIT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFEXIT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONTROLTRIGGER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ZEROSLIKE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EXP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *WHERE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FAKEQUANTWITHMINMAXVARS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SOFTPLUS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SOFTSIGN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *COSH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SINH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQUAREDDIFFERENCE; -// for retinanet scope fusion -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REQUIREDSPACETOBATCHPADDINGS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDPOSTPROCESSOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETBOXES; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINAMULTIANCHORS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETCLIPPEDBOXES; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETFILTEREDDETECTIONS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETPOSTPROCESSOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETANCHORS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNMAP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNMAP1; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNSECONDSTAGEPOSTPROCESSOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNROIINTERPOOLING; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNFIRSTSTAGEPOSTPROCESSOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNGRIDANCHORGENERATOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern 
const char *ROIINTERPOOLING; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNCLIPTOWINDOW; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EMBEDLOOKUP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HASHLOOKUP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LSH_PROJ; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SVDF; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDANCHORGENERATOR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *IDENTITY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *IDENTITYN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PLACEHOLDERWITHDEFAULT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SELECT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GETSPAN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STOPGRADIENT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PREVENTGRADIENT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GUARANTEECONST; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BROADCASTGRADIENTARGS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BROADCASTARGS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONFUSIONMATRIX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RANK; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PLACEHOLDER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *END; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BASICLSTMCELL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GETNEXT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *INITDATA; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFIDENTITY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BITCAST; - -/***************Ann special operator*************************/ -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_MEAN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_CONVOLUTION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_DEPCONVOLUTION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_FULLCONNECTION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_NETOUTPUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_DATA; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_RESHAPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_ADD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_MUL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_SUB; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_DIV; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_DEQUANTIZE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_QUANTIZE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_PAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_RESIZE_BILINEAR; - -/***************************************************/ -/******************Training operator*************************/ -FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *GATHERV2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONVGRADFILTER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONV2D; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONV2DBACKPROPINPUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FUSEDBATCHNORM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BIASADDGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACTIVATIONGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MAXPOOLWITHARGMAX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MAXPOOLGRADWITHARGMAX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPARSESOFTMAXCROSSENTROPYWITHLOGITS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SNAPSHOT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VAR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MEANGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSLATE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ADDN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *L2LOSS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MULTIPLY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HUBERLOSSGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HUBERLOSS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NEGATIVE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDCAST; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPARSESOFTMAXCROSSENTROPY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPARSESOFTMAXCROSSENTROPYGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDSQUEEZEFUSION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCATFOUR2FIVE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCATFIVE2FOUR; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDREALDIVTILEMUL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDSUMMULREALDIVMEAN; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARIABLEV2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARHANDLEOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TEMPORARYVARIABLE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DESTROYTEMPORARYVARIABLE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARIABLE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNVARIABLEOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNADD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNADDVARIABLEOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNSUB; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNSUBVARIABLEOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYMOMENTUM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESOURCEAPPLYMOMENTUM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SGD; -FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *NOOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *READVARIABLEOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PARALLELCONCATSTART; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONSTANTOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISECONV2DBACKPROPFILTER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISECONV2DBACKPORPINPUT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISECONV2DFORWARDNATIVE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DROPOUTGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYRMSPROPMIXEDPRECISION; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYRMSPROP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RELU6GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *AVGPOOLGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCATV2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCATOFFSET; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LAYERNORMGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LAYERNORM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LARS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DYNAMICSTITCH; - -/***************************************************/ -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQUARE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMBROADCAST; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMALLGATHER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMALLREDUCE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMREDUCESCATTER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMSEND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMRECEIVE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMREMOTEREAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMREMOTEREFREAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMREMOTEWRITE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMREMOTESCATTERWRITE; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARASSIGN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARISINITIALIZEDOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LogTimeStamp; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ISVARIABLEINITIALIZED; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STREAMSWITCH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STREAMSWITCHN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STREAMACTIVE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MEMCPYASYNC; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MEMCPYADDRASYNC; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STREAMMERGE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ENDGRAPH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SEND; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY 
extern const char *RECV; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ENDOFSEQUENCE; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELSET; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELGOTO; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELGOTOEX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELSWITCH; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELSWITCHBYINDEX; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATOMICADDRCLEAN; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ABS_GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACCUMULATE_N_V2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACOS_GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACOSH_GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANY; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPROXIMATE_EQUAL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASIN_GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASINH_GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATAN_GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BROADCAST_TO; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ELU_GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ADD_V2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DATAFORMATDIMMAP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DATAFORMATVECPERMUTE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BESSELI0E; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BESSELI1E; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADADELTA; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADAGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADAGRADDA; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADAM; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADAMAX; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADDSIGN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYCENTEREDRMSPROP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYFTRL; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYFTRLV2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYGRADIENTDESCENT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYPOWERSIGN; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYPROXIMALADAGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYPROXIMALGRADIENTDESCENT; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEQUANTIZE; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FOCAL_LOSS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FOCAL_LOSS_GRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SMOOTHL1_LOSS; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SMOOTHL1_LOSS_grad; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEMEAN; 
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCAT_V2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ONEHOT_V2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SLICE_V2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TILE_V2; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SUM_V2; -// Common type when the operator has the same name -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DETECTIONOUTPUT; -// Custom operator -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CUSTOMOP; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CUSTOMOP_NCHW; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CUSTOMOP_NHWC; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CUSTOMOP_NC1HWC0; - -// Depthwise 4d_2_6d,6d_2_4d -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISEWEIGHT4D26D; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISEWEIGHT6D24D; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQRTGRAD; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SIGMOIDGRAD; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSSHAPE; - -// Horovod operator -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HVDCALLBACKALLREDUCE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HVDCALLBACKALLGATHER; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HVDCALLBACKBROADCAST; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HVDWAIT; - -/// -/// @brief Magic number of model file -/// -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_FILE_MAGIC_NUM; // magic number - -/// -/// @brief Model head length -/// -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_FILE_HEAD_LEN; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_VERSION; ///< Model version 1.0/// - -// alpha default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const float ALPHA_DEFAULT_VALUE; - -// beta default value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const float BETA_DEFAULT_VALUE; - -/// -/// @ingroup domi_omg -/// @brief INPUT node type -/// -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string INPUT_TYPE; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMMY_DATA; - -// dim default size value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY static const int32_t DIM_DEFAULT_SIZE = 4; - -// for fusion op plugin -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_FUSIONOP_ORIGINAL_TYPE; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_INPUT_TENSOR_DESC; -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_TENSOR_DESC; - -// DATA node type -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DATA_TYPE; - -// framework Operator Type -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string FRAMEWORK_OP_TYPE; - -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_NET_OUTPUT; - -#pragma pack() // Cancels single-byte alignment -} // namespace parser -} // namespace ge - -#endif // PARSER_COMMON_TYPES_H_ diff --git 
a/inc/graphengine/inc/framework/omg/parser/weights_parser.h b/inc/graphengine/inc/framework/omg/parser/weights_parser.h deleted file mode 100644 index e4436044d..000000000 --- a/inc/graphengine/inc/framework/omg/parser/weights_parser.h +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_FRAMEWORK_OMG_PARSER_WEIGHTS_PARSER_H_ -#define INC_FRAMEWORK_OMG_PARSER_WEIGHTS_PARSER_H_ - -#include "graph/graph.h" -#include "graph/attr_value.h" -#include "graph/compute_graph.h" -#include "graph/ge_tensor.h" -#include "graph/op_desc.h" -#include "graph/operator.h" -#include "graph/range_vistor.h" -#include "graph/utils/attr_utils.h" -#include "graph/utils/op_desc_utils.h" -#include "graph/utils/tensor_utils.h" - -namespace domi { -/** - * @ingroup domi_omg - * @brief Weight information resolver - * - */ -class GE_FUNC_VISIBILITY WeightsParser { - public: - /** - * @ingroup domi_omg - * @brief Constructor - */ - WeightsParser() {} - - /** - * @ingroup domi_omg - * @brief Deconstructor - */ - virtual ~WeightsParser() {} - - /** - * @ingroup domi_omg - * @brief Analyze weight data - * @param [in] file Path of weight file after training - * @param [in|out] graph Graph for saving weight information after analysis - * @return SUCCESS - * @return Others failed - */ - virtual Status Parse(const char *file, ge::Graph &graph) = 0; - - /** - * @ingroup domi_omg - * @brief Parse relevant data from memory and save it to graph - * @param [in] input Model file memory data - * @param [in|out] graph A graph for saving the model information after analysis - * @return SUCCESS - * @return FAILED - * @author - */ - virtual Status ParseFromMemory(const char *input, uint32_t lengt, ge::ComputeGraphPtr &graph) = 0; -}; -} // namespace domi - -#endif // INC_FRAMEWORK_OMG_PARSER_WEIGHTS_PARSER_H_ diff --git a/inc/graphengine/inc/framework/omg/version.h b/inc/graphengine/inc/framework/omg/version.h deleted file mode 100644 index 4facba0dd..000000000 --- a/inc/graphengine/inc/framework/omg/version.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
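WeightsParser above is a pure-virtual interface, so a backend only has to supply the two parse entry points. A minimal sketch of a subclass follows; the include path is inferred from the header guard, and SUCCESS/FAILED are assumed to be the domi status codes used throughout the framework:

#include "framework/omg/parser/weights_parser.h"

namespace domi {
// Hypothetical parser for models that carry no separate weight file:
// it validates its inputs and leaves the graph untouched.
class EmptyWeightsParser : public WeightsParser {
 public:
  Status Parse(const char *file, ge::Graph &graph) override {
    return (file != nullptr) ? SUCCESS : FAILED;
  }

  Status ParseFromMemory(const char *input, uint32_t length,
                         ge::ComputeGraphPtr &graph) override {
    return (input != nullptr && graph != nullptr) ? SUCCESS : FAILED;
  }
};
}  // namespace domi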
- */ - -#ifndef INC_FRAMEWORK_OMG_VERSION_H_ -#define INC_FRAMEWORK_OMG_VERSION_H_ - -#include -#include -#include -#include - -#include "common/debug/log.h" -#include "common/string_util.h" -#include "framework/common/debug/ge_log.h" - -namespace ge { -class GE_FUNC_VISIBILITY PlatformVersionManager { - public: - PlatformVersionManager() = delete; - ~PlatformVersionManager() = delete; - static Status GetPlatformVersion(std::string &ver) { - ver = "1.11.z"; - std::vector version_splits = StringUtils::Split(ver, '.'); - GE_IF_BOOL_EXEC(version_splits.size() < 3, GELOGW("Read platform version error!"); return FAILED;); - - GELOGI("Read current platform version: %s.", ver.c_str()); - return SUCCESS; - } -}; // class PlatformManager -} // namespace ge - -#endif // INC_FRAMEWORK_OMG_VERSION_H_ diff --git a/inc/hccl/base.h b/inc/hccl/base.h deleted file mode 100644 index 8e27d6b80..000000000 --- a/inc/hccl/base.h +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @file base.h - * @brief HCOM data type definition - * - */ - -#ifndef HCCL_BASE_H_ -#define HCCL_BASE_H_ - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -typedef signed char s8; -typedef signed short s16; -typedef signed int s32; -typedef signed long long s64; -typedef unsigned char u8; -typedef unsigned short u16; -typedef unsigned int u32; -typedef unsigned long long u64; - -/** - * @brief Horovod Reduction opperation - */ -typedef enum { - HOROVOD_REDUCE_AVERAGE = 0, /**< average */ - HOROVOD_REDUCE_SUM = 1, /**< sum */ - HOROVOD_REDUCE_ADASUM = 2, /**< adasum */ - HOROVOD_REDUCE_MIN = 3, /**< min */ - HOROVOD_REDUCE_MAX = 4, /**< max */ - HOROVOD_REDUCE_PROD = 5, /**< proo */ - HOROVOD_REDUCE_RESERVED /**< reserved */ -} HorovodReduceOp; - -const u32 HCCL_MAX_SEGMENT_NUM = 32; // The max number of gradient segments. - -/** - * @brief the feature of the model - */ -struct model_feature { - const char *model_name; /**< The model name */ - u32 gradient_num; /**< The number of gradients */ - float *gradient_size; /**< The size of each gradient */ - float *gradient_time; /**< The BP compution time of each gradient */ -}; - -/** - * @brief Memory Register Address Struct for Remote Access - */ -struct MemRegisterAddr { - u64 addr; - u64 length; -}; - -const u32 HCCL_MAX_MEM_REGISTER_NUM = 8; // The max number of memory register address. - -enum GradSplitForceMode { - FORCE_NONE, /**< no force */ - FORCE_SIZE, /**< force split gradient by size */ - FORCE_RESERVED /**< reserved */ -}; - -enum OriginalGraphShapeType { - KNOWN_SHAPE, - UNKNOWN_SHAPE, - SHAPE_RESERVED /**< reserved */ -}; - -/** -* @brief stream handle. -*/ -typedef void *rtStream_t; - -/** -* @brief model handle. 
-*/ -typedef void *rtModel_t; - -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // HCCL_BASE_H_ diff --git a/inc/hccl/hcom.h b/inc/hccl/hcom.h deleted file mode 100644 index 4658c81e9..000000000 --- a/inc/hccl/hcom.h +++ /dev/null @@ -1,287 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @file hcom.h - * @brief HCOM API - */ - -#ifndef HCOM_H_ -#define HCOM_H_ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -/** - * @brief Initialize HCOM. - * - * @param rank_table A string identifying the rank table file path, include file name. - * @param identify A string identifying the identify for the rank. - * @return HcclResult - * @see hcom_destroy() - */ -extern HcclResult hcom_init(const char *rank_table, const char *identify); - -/** - * @brief Destroy HCOM - * - * @return HcclResult - * @see hcom_init() - */ -extern HcclResult hcom_destroy(void); - -/** - * @brief Bind the model. - * - * @param model A pointer identifying the model information. - * @param stream A pointer identifying the stream information. - * @return HcclResult - * @see hcom_unbind_model() - */ -extern HcclResult hcom_bind_model(rtModel_t model, rtStream_t stream); - -/** - * @brief Unbind the model. - * - * @param model An pointer identifying the model information. - * @return HcclResult - * @see hcom_unbind_model() - */ -extern HcclResult hcom_unbind_model(rtModel_t model); - -/** - * @brief All-gather operator. - * - * @param tag A string identifying the tag of the operator. - * @param inputPtr A pointer identifying the input data address of the operator. - * @param outputPtr A pointer identifying the output data address of the operator. - * @param inputCount An integer(u64) identifying the number of the input data. - * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param group A string identifying the group name of ranks participating in the operator. - * @param stream A pointer identifying the stream information. - * @return HcclResult - */ -extern HcclResult hcom_all_gather(const char *tag, void *inputPtr, void *outputPtr, u64 inputCount, - HcclDataType dataType, const char *group, rtStream_t stream); - -/** - * @brief All-reduce operator. - * - * @param tag A string identifying the tag of the operator. - * @param inputPtr A pointer identifying the input data address of the operator. - * @param outputPtr A pointer identifying the output data address of the operator. - * @param count An integer(u64) identifying the number of the output data. - * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod. - * @param group A string identifying the group name of ranks participating in the operator. - * @param stream A pointer identifying the stream information. 
- * @return HcclResult - */ -extern HcclResult hcom_all_reduce(const char *tag, void *inputPtr, void *outputPtr, u64 count, - HcclDataType dataType, HcclReduceOp op, const char *group, rtStream_t stream); - -extern HcclResult hcom_reduce(const char *tag, void *inputPtr, void *outputPtr, u64 count,HcclDataType dataType,HcclReduceOp op, u32 root, const char *group, rtStream_t stream); - -/** - * @brief Broadcast operator. - * - * @param tag A string identifying the tag of the operator. - * @param ptr A pointer identifying the data address of the operator. - * @param count An integer(u64) identifying the number of the data. - * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param root An integer(u32) identifying the the root rank in the operator. - * @param group A string identifying the group name of ranks participating in the operator. - * @param stream A pointer identifying the stream information. - * @return HcclResult - */ -extern HcclResult hcom_broadcast(const char *tag, void *ptr, u64 count, HcclDataType dataType, u32 root, - const char *group, rtStream_t stream); - -/** - * @brief Reduce-scatter operator. - * - * @param tag A string identifying the tag of the operator. - * @param inputPtr A pointer identifying the input data address of the operator. - * @param outputPtr A pointer identifying the output data address of the operator. - * @param count An integer(u64) identifying the number of the data. - * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod. - * @param group A string identifying the group name of ranks participating in the operator. - * @param stream A pointer identifying the stream information. - * @return HcclResult - */ -extern HcclResult hcom_reduce_scatter(const char *tag, void *inputPtr, void *outputPtr, u64 count, - HcclDataType dataType, HcclReduceOp op, const char *group, rtStream_t stream); - -/** - * @brief Get the rank number in the group. - * - * @param group A string identifying the group name. - * @param rankSize A pointer identifying the rank number. - * @return HcclResult - */ -HcclResult hcom_get_rank_size(const char *group, u32 *rankSize); - -/** - * @brief Get the rank number of this rank's server within the group. - * - * @param group A string identifying the group name. - * @param localRankSize A pointer identifying the rank number. - * @return HcclResult - */ -HcclResult hcom_get_local_rank_size(const char *group, u32 *localRankSize); - -/** - * @brief Get the rank id of this rank. - * - * @param group A string identifying the group name. - * @param rankId A pointer identifying the rank id. - * @return HcclResult - */ -HcclResult hcom_get_rank_id(const char *group, u32 *rankId); - -/** - * @brief Get the local rank id of this rank's server within the group. - * - * @param group A string identifying the group name. - * @param localRankId A pointer identifying the local rank id. - * @return HcclResult - */ -HcclResult hcom_get_local_rank_id(const char *group, u32 *localRankId); - -/** - * @brief Get the world rank id according to the group rank id. - * - * @param group A string identifying the group name. - * @param groupRank An integer(u32) identifying the group rank id. - * @param worldRank A pointer identifying the world rank id. 
- * @return HcclResult - */ -HcclResult hcom_get_world_rank_from_group_rank(const char *group, u32 groupRank, u32 *worldRank); - -/** - * @brief Get the group rank id according to the world rank id. - * - * @param worldRank An integer(u32) identifying the world rank id. - * @param group A string identifying the group name. - * @param groupRank A pointer identifying the group rank id. - * @return HcclResult - */ -HcclResult hcom_get_group_rank_from_world_rank(u32 worldRank, const char *group, u32 *groupRank); - -/** - * @brief Create group. - * - * @param group A string identifying the group name. - * @param rankNum An integer(u32) identifying the number of ranks in the group. - * @param rankIds A list identifying the ranks in the group. - * @return HcclResult - */ -HcclResult hcom_create_group(const char *group, u32 rankNum, u32 *rankIds); - -/** - * @brief Destroy group - * - * @param group A string identifying the group name. - * @return HcclResult - */ -HcclResult hcom_destroy_group(const char *group); - -/** - * @brief Send operator. - * - * @param tag A string identifying the tag of the operator. - * @param inputPtr A pointer identifying the input data address of the operator. - * @param count An integer(u64) identifying the number of the data. - * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param destRank An integer identifying the destination rank. - * @param srTag An integer identifying the send/recv message tag. - * The message will be send by the receive operator with the same "sr_tag". - * @param group A string identifying the group name of ranks participating in the operator. - * @param stream A pointer identifying the stream information. - * @return HcclResult - */ -HcclResult hcom_send(const char *tag, void *inputPtr, u64 count, HcclDataType dataType, - u32 destRank, u32 srTag, const char *group, rtStream_t stream); - -/** - * @brief Receive operator. - * - * @param tag A string identifying the tag of the operator. - * @param outputPtr A pointer identifying the output data address of the operator. - * @param count An integer(u64) identifying the number of the data. - * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param srcRank An integer identifying the source rank. - * @param srTag An integer identifying the send/recv message tag. - * The message will be send by the send operator with the same "sr_tag". - * @param group A string identifying the group name of ranks participating in the operator. - * @param stream A pointer identifying the stream information. - * @return HcclResult - */ -HcclResult hcom_receive(const char *tag, void *outputPtr, u64 count, HcclDataType dataType, - u32 srcRank, u32 srTag, const char *group, rtStream_t stream); - -/** - * @brief Get the gradient split strategy with in the group. - * - * @param group A string identifying the group name. - * @param feature A pointer identifying the feature of the model. - * @param maxSegmentNum An integer(u32) identifying the max segments of gradients. - * @param segmentNum A pointer identifying the segments number of gradients. - * @param segmentIdx A list identifying the index of end gradient in each segment. 
- * @return HcclResult - */ -HcclResult hcom_get_split_strategy(const char *group, const struct model_feature *feature, u32 maxSegmentNum, - u32 *segmentNum, u32 *segmentIdx, GradSplitForceMode force = FORCE_NONE, - OriginalGraphShapeType shapeType = KNOWN_SHAPE); - -/** - * @brief Set the gradient split strategy with in the group, according to gradient index. - * - * @param group A string identifying the group name. - * @param segmentNum An integer(u32) identifying the segments number of gradients. - * @param IdxList A list identifying the index of end gradient in each segment. - * @return HcclResult - */ -extern HcclResult hcom_set_split_strategy_by_index(const char *group, u32 segmentNum, const u32 *IdxList); - -/** - * @brief Set the gradient split strategy with in the group, according to gradient data size. - * - * @param group A string identifying the group name. - * @param segmentNum An integer(u32) identifying the segments number of gradients. - * @param sizeList A list identifying the percent of each segment. - * @return HcclResult - */ -extern HcclResult hcom_set_split_strategy_by_size(const char *group, u32 segmentNum, const float *sizeList); - -/** - * @brief Register memories and init resources for remote access. - * - * @param addrList memory addresses for remote access. - * @param count number of remote memory addresses. - * @return HcclResult - */ -extern HcclResult hcom_remote_access_mem_register(const MemRegisterAddr* addrList, u32 count); - -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // HCOM_H_ diff --git a/inc/metadef/inc/common/blocking_queue.h b/inc/metadef/inc/common/blocking_queue.h deleted file mode 100644 index eeb0b1a49..000000000 --- a/inc/metadef/inc/common/blocking_queue.h +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
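The hcom entry points above follow a simple lifecycle: hcom_init, optional group or model setup, collective calls issued on a stream, then hcom_destroy. A minimal all-reduce sketch; the rank-table path, tag and group name are placeholders, the include paths mirror the inc/hccl layout, and HcclResult, HcclDataType and HcclReduceOp (with HCCL_SUCCESS, HCCL_DATA_TYPE_FP32, HCCL_REDUCE_SUM) are assumed to come from hccl_types.h:

#include "hccl/base.h"
#include "hccl/hcom.h"

// Sum-reduce count float32 elements across every rank in the group.
HcclResult RunAllReduce(void *dev_in, void *dev_out, u64 count, rtStream_t stream) {
  HcclResult ret = hcom_init("/path/to/rank_table.json", "0");  // placeholder rank table and rank id
  if (ret != HCCL_SUCCESS) {
    return ret;
  }
  ret = hcom_all_reduce("allreduce_demo", dev_in, dev_out, count,
                        HCCL_DATA_TYPE_FP32, HCCL_REDUCE_SUM,
                        "hccl_world_group", stream);  // assumed default group name
  if (ret != HCCL_SUCCESS) {
    (void)hcom_destroy();
    return ret;
  }
  return hcom_destroy();
}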
-*/ - -#ifndef INC_COMMON_BLOCKING_QUEUE_H_ -#define INC_COMMON_BLOCKING_QUEUE_H_ - -#include -#include -#include -#include - -static const int kDefaultMaxQueueSize = 2048; - -template -class BlockingQueue { - public: - explicit BlockingQueue(uint32_t max_size = kDefaultMaxQueueSize) : max_size_(max_size), is_stoped_(false) {} - - ~BlockingQueue() {} - - bool Pop(T &item) { - std::unique_lock lock(mutex_); - - while (queue_.empty() && !is_stoped_) { - empty_cond_.wait(lock); - } - - if (is_stoped_) { - return false; - } - - item = std::move(queue_.front()); - queue_.pop_front(); - - full_cond_.notify_one(); - - return true; - } - - bool Push(const T &item, bool is_wait = true) { - std::unique_lock lock(mutex_); - - while (queue_.size() >= max_size_ && !is_stoped_) { - if (!is_wait) { - return false; - } - full_cond_.wait(lock); - } - - if (is_stoped_) { - return false; - } - - queue_.push_back(item); - - empty_cond_.notify_one(); - - return true; - } - - bool Push(T &&item, bool is_wait = true) { - std::unique_lock lock(mutex_); - - while (queue_.size() >= max_size_ && !is_stoped_) { - if (!is_wait) { - return false; - } - full_cond_.wait(lock); - } - - if (is_stoped_) { - return false; - } - - queue_.emplace_back(std::move(item)); - - empty_cond_.notify_one(); - - return true; - } - - void Stop() { - { - std::unique_lock lock(mutex_); - is_stoped_ = true; - } - - full_cond_.notify_all(); - empty_cond_.notify_all(); - } - - void Restart() { - std::unique_lock lock(mutex_); - is_stoped_ = false; - } - - // if the queue is stoped ,need call this function to release the unprocessed items - std::list GetRemainItems() { - std::unique_lock lock(mutex_); - - if (!is_stoped_) { - return std::list(); - } - - return queue_; - } - - bool IsFull() { - std::unique_lock lock(mutex_); - return queue_.size() >= max_size_; - } - - void Clear() { - std::unique_lock lock(mutex_); - queue_.clear(); - } - - void SetMaxSize(uint32_t size) { - std::unique_lock lock(mutex_); - if (size == 0) { - max_size_ = kDefaultMaxQueueSize; - return; - } - max_size_ = size; - } - - uint32_t Size() { - std::unique_lock lock(mutex_); - return queue_.size(); - } - - private: - std::list queue_; - std::mutex mutex_; - std::condition_variable empty_cond_; - std::condition_variable full_cond_; - uint32_t max_size_; - - bool is_stoped_; -}; - -#endif // INC_COMMON_BLOCKING_QUEUE_H_ diff --git a/inc/metadef/inc/common/dynamic_aipp.h b/inc/metadef/inc/common/dynamic_aipp.h deleted file mode 100644 index e563fd9e7..000000000 --- a/inc/metadef/inc/common/dynamic_aipp.h +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef INC_COMMON_DYNAMIC_AIPP_H_ -#define INC_COMMON_DYNAMIC_AIPP_H_ - -#include - -/** -* @ingroup dnn -* @brief struct define of dynamic aipp batch parameter. 
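BlockingQueue above is a bounded, mutex-guarded FIFO: Push blocks while the queue is full, Pop blocks while it is empty, and Stop wakes both sides so waiting threads can exit (left-over items can then be drained with GetRemainItems). A small producer/consumer sketch; the element type and capacity are arbitrary and the include path is inferred from the header guard:

#include <thread>
#include "common/blocking_queue.h"

void ProducerConsumerDemo() {
  BlockingQueue<int> queue(8);  // hold at most 8 items at a time

  std::thread producer([&queue]() {
    for (int i = 0; i < 100; ++i) {
      (void)queue.Push(i);      // blocks when the queue is full
    }
    queue.Stop();               // wake the consumer so it can stop waiting
  });

  std::thread consumer([&queue]() {
    int item = 0;
    while (queue.Pop(item)) {   // returns false once Stop() has been called
      // ... process item ...
    }
  });

  producer.join();
  consumer.join();
}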
-*/ -typedef struct tagAippDynamicBatchPara { - int8_t cropSwitch; // crop switch - int8_t scfSwitch; // resize switch - int8_t paddingSwitch; // 0: unable padding - // 1: padding config value,sfr_filling_hblank_ch0 ~ sfr_filling_hblank_ch2 - // 2: padding source picture data, single row/collumn copy - // 3: padding source picture data, block copy - // 4: padding source picture data, mirror copy - int8_t rotateSwitch; // rotate switch,0: non-ratate, - // 1: ratate 90° clockwise,2: ratate 180° clockwise,3: ratate 270° clockwise - int8_t reserve[4]; - int32_t cropStartPosW; // the start horizontal position of cropping - int32_t cropStartPosH; // the start vertical position of cropping - int32_t cropSizeW; // crop width - int32_t cropSizeH; // crop height - - int32_t scfInputSizeW; // input width of scf - int32_t scfInputSizeH; // input height of scf - int32_t scfOutputSizeW; // output width of scf - int32_t scfOutputSizeH; // output height of scf - - int32_t paddingSizeTop; // top padding size - int32_t paddingSizeBottom; // bottom padding size - int32_t paddingSizeLeft; // left padding size - int32_t paddingSizeRight; // right padding size - - int16_t dtcPixelMeanChn0; // mean value of channel 0 - int16_t dtcPixelMeanChn1; // mean value of channel 1 - int16_t dtcPixelMeanChn2; // mean value of channel 2 - int16_t dtcPixelMeanChn3; // mean value of channel 3 - - uint16_t dtcPixelMinChn0; // min value of channel 0 - uint16_t dtcPixelMinChn1; // min value of channel 1 - uint16_t dtcPixelMinChn2; // min value of channel 2 - uint16_t dtcPixelMinChn3; // min value of channel 3 - uint16_t dtcPixelVarReciChn0; // sfr_dtc_pixel_variance_reci_ch0 - uint16_t dtcPixelVarReciChn1; // sfr_dtc_pixel_variance_reci_ch1 - uint16_t dtcPixelVarReciChn2; // sfr_dtc_pixel_variance_reci_ch2 - uint16_t dtcPixelVarReciChn3; // sfr_dtc_pixel_variance_reci_ch3 - - int8_t reserve1[16]; // 32B assign, for ub copy -} kAippDynamicBatchPara; - -/** -* @ingroup dnn -* @brief struct define of dynamic aipp parameter. 
lite:64+96*batchNum byte ; tiny:64+64*batchNum byte -*/ -typedef struct tagAippDynamicPara { - uint8_t inputFormat; // input format:YUV420SP_U8/XRGB8888_U8/RGB888_U8 - int8_t cscSwitch; // csc switch - int8_t rbuvSwapSwitch; // rb/ub swap switch - int8_t axSwapSwitch; // RGBA->ARGB, YUVA->AYUV swap switch - int8_t batchNum; // batch parameter number - int8_t reserve1[3]; - int32_t srcImageSizeW; // source image width - int32_t srcImageSizeH; // source image height - int16_t cscMatrixR0C0; // csc_matrix_r0_c0 - int16_t cscMatrixR0C1; // csc_matrix_r0_c1 - int16_t cscMatrixR0C2; // csc_matrix_r0_c2 - int16_t cscMatrixR1C0; // csc_matrix_r1_c0 - int16_t cscMatrixR1C1; // csc_matrix_r1_c1 - int16_t cscMatrixR1C2; // csc_matrix_r1_c2 - int16_t cscMatrixR2C0; // csc_matrix_r2_c0 - int16_t cscMatrixR2C1; // csc_matrix_r2_c1 - int16_t cscMatrixR2C2; // csc_matrix_r2_c2 - int16_t reserve2[3]; - uint8_t cscOutputBiasR0; // output Bias for RGB to YUV, element of row 0, unsigned number - uint8_t cscOutputBiasR1; // output Bias for RGB to YUV, element of row 1, unsigned number - uint8_t cscOutputBiasR2; // output Bias for RGB to YUV, element of row 2, unsigned number - uint8_t cscInputBiasR0; // input Bias for YUV to RGB, element of row 0, unsigned number - uint8_t cscInputBiasR1; // input Bias for YUV to RGB, element of row 1, unsigned number - uint8_t cscInputBiasR2; // input Bias for YUV to RGB, element of row 2, unsigned number - uint8_t reserve3[2]; - int8_t reserve4[16]; // 32B assign, for ub copy - - kAippDynamicBatchPara aippBatchPara; // allow transfer several batch para. -} kAippDynamicPara; - -#endif // INC_COMMON_DYNAMIC_AIPP_H_ diff --git a/inc/metadef/inc/common/fe_executor/ffts_plus_qos_update.h b/inc/metadef/inc/common/fe_executor/ffts_plus_qos_update.h deleted file mode 100644 index fe3095ff1..000000000 --- a/inc/metadef/inc/common/fe_executor/ffts_plus_qos_update.h +++ /dev/null @@ -1,30 +0,0 @@ -/** -* Copyright 2022-2023 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -#ifndef FFTS_PLUS_QOS_UPDATE_H_ -#define FFTS_PLUS_QOS_UPDATE_H_ - -#include "runtime/rt_ffts_plus_define.h" -#include "graph/utils/node_utils.h" -namespace fe { - -bool UpdateAicAivCtxQos(rtFftsPlusAicAivCtx_t *ctx, int label, int device_id); -bool UpdateMixAicAivCtxQos(rtFftsPlusMixAicAivCtx_t *ctx, int label, int device_id); -bool UpdateDataCtxQos(rtFftsPlusDataCtx_t *ctx, int device_id); - -} - -#endif \ No newline at end of file diff --git a/inc/metadef/inc/common/npu_error_define.h b/inc/metadef/inc/common/npu_error_define.h deleted file mode 100644 index aba70f990..000000000 --- a/inc/metadef/inc/common/npu_error_define.h +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
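At execution time a caller fills one tagAippDynamicPara header followed by batchNum tagAippDynamicBatchPara entries and passes the buffer as the dynamic-AIPP input. A sketch for a single batch with cropping enabled; the image sizes are arbitrary, the numeric inputFormat code for RGB888_U8 is an assumption, and the include path follows the header guard:

#include <cstring>
#include "common/dynamic_aipp.h"

kAippDynamicPara BuildSingleBatchAipp() {
  kAippDynamicPara aipp;
  std::memset(&aipp, 0, sizeof(aipp));  // all switches off, all values zero

  aipp.inputFormat = 3;        // assumed numeric code for RGB888_U8 input
  aipp.batchNum = 1;           // one batch entry follows the header
  aipp.srcImageSizeW = 416;
  aipp.srcImageSizeH = 416;

  aipp.aippBatchPara.cropSwitch = 1;  // crop a 224x224 window from the top-left corner
  aipp.aippBatchPara.cropStartPosW = 0;
  aipp.aippBatchPara.cropStartPosH = 0;
  aipp.aippBatchPara.cropSizeW = 224;
  aipp.aippBatchPara.cropSizeH = 224;
  return aipp;
}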
- * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef INC_COMMON_NPU_ERROR_DEFINE_H_ -#define INC_COMMON_NPU_ERROR_DEFINE_H_ - -typedef enum tagHiAiNpuLocal { - HIAI_HOST = 1, - HIAI_DEVICE = 2, -} HiAiNpuLocal; - -typedef enum tagHiAiNpuCodeType { - ERROR_CODE = 1, - EXCEPTION_CODE = 2, -} HiAiNpuCodeType; - -typedef enum tagHiAiNpuErrLevel { - NONE_LEVEL = 0, - SUGGESTION_LEVEL = 1, - NORMAL_LEVEL = 2, - SERIOUS_LEVEL = 3, - CRITICAL_ERROR = 4, -} HiAiNpuErrLevel; - -typedef enum tagHiAiNpuModuleId { - HIAI_DRIVER = 1, - HIAI_CTRLCPU = 2, - HIAI_TS = 3, - HIAI_RUNTIME = 4, - HIAI_AICPU = 5, - HIAI_CCE = 6, - HIAI_TVM = 7, - HIAI_FRAMEWORK = 8, - HiAI_ENGINE = 9, - HIAI_DVPP = 10, - HIAI_AIPP = 11, - HIAI_LOWPOWER = 12, - HIAI_MDC = 13, - HIAI_COMPILE = 14, - HIAI_TOOLCHIAN = 15, - HIAI_ALG = 16, - HIAI_PROFILING = 17, - HIAI_HCCL = 18, - HIAI_SIMULATION = 19, - HIAI_BIOS = 20, - HIAI_SEC = 21, - HIAI_TINY = 22, - HIAI_DP = 23, -} HiAiNpuModuleId; - -/* bit 31-bit30 to be hiai local */ -#define HIAI_NPULOCAL_MASK 0xC0000000 -#define SHIFT_LOCAL_MASK 30 -#define HIAI_NPULOCAL_VAL_MASK 0x3 -/* bit 29 -bit28 to be hiai aicpu code type */ -#define HIAI_CODE_TYPE_MASK 0x30000000 -#define SHIFT_CODE_MASK 28 -#define HIAI_CODE_TYPE_VAL_MASK 0x3 -/* bit 27 -bit25 to be hiai error level */ -#define HIAI_ERROR_LEVEL_MASK 0x0E000000 -#define SHIFT_ERROR_LVL_MASK 25 -#define HIAI_ERROR_LEVEL_VAL_MASK 0x7 -/* bit 24 -bit17 to be hiai mod */ -#define HIAI_MODE_ID_MASK 0x01FE0000 -#define SHIFT_MODE_MASK 17 -#define HIAI_MODE_ID_VAL_MASK 0xFF - -#define HIAI_NPU_LOC_BIT(a) \ - (HIAI_NPULOCAL_MASK & ((unsigned int)((HiAiNpuLocal)(a)) & HIAI_NPULOCAL_VAL_MASK) << SHIFT_LOCAL_MASK) -#define HIAI_NPU_CODE_TYPE_BIT(a) \ - (HIAI_CODE_TYPE_MASK & ((unsigned int)((HiAiNpuCodeType)(a)) & HIAI_CODE_TYPE_VAL_MASK) << SHIFT_CODE_MASK) -#define HIAI_NPU_ERR_LEV_BIT(a) \ - (HIAI_ERROR_LEVEL_MASK & ((unsigned int)((HiAiNpuErrLevel)(a)) & HIAI_ERROR_LEVEL_VAL_MASK) << SHIFT_ERROR_LVL_MASK) -#define HIAI_NPU_MOD_ID_BIT(a) \ - (HIAI_MODE_ID_MASK & ((unsigned int)((HiAiNpuModuleId)(a)) & HIAI_MODE_ID_VAL_MASK) << SHIFT_MODE_MASK) - -#define HIAI_NPU_ERR_CODE_HEAD(npuLocal, codeType, errLevel, moduleId) \ - (HIAI_NPU_LOC_BIT(npuLocal) + HIAI_NPU_CODE_TYPE_BIT(codeType) + HIAI_NPU_ERR_LEV_BIT(errLevel) + \ - HIAI_NPU_MOD_ID_BIT(moduleId)) - -#endif // INC_COMMON_NPU_ERROR_DEFINE_H_ diff --git a/inc/metadef/inc/common/opskernel/ge_task_info.h b/inc/metadef/inc/common/opskernel/ge_task_info.h deleted file mode 100644 index 785e409f3..000000000 --- a/inc/metadef/inc/common/opskernel/ge_task_info.h +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
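The masks and shifts above pack four fields into the top half of a 32-bit error code: bits 31-30 hold the host/device location, 29-28 the code type, 27-25 the severity and 24-17 the module id, leaving the low 17 bits for a module-specific number. For example, a host-side runtime error of serious level assembles like this (include path assumed from the header guard):

#include <cstdio>
#include "common/npu_error_define.h"

int main() {
  // 0x40000000 (HIAI_HOST) + 0x10000000 (ERROR_CODE) + 0x06000000 (SERIOUS_LEVEL)
  // + 0x00080000 (HIAI_RUNTIME) == 0x56080000
  unsigned int head = HIAI_NPU_ERR_CODE_HEAD(HIAI_HOST, ERROR_CODE, SERIOUS_LEVEL, HIAI_RUNTIME);
  unsigned int code = head | 0x0001U;  // hypothetical module-specific error number
  std::printf("error code: 0x%08X\n", code);
  return 0;
}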
- * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef INC_COMMON_OPSKERNEL_GE_TASK_INFO_H_ -#define INC_COMMON_OPSKERNEL_GE_TASK_INFO_H_ - -#include -#include -#include -#include "runtime/rt.h" - -using std::string; -namespace ge { -// when need to eliminate GETaskKernelHcclInfo, so not need DAVINCI_TRAIN/DAVINCI_CLOUD -struct GETaskKernelHcclInfo { - std::string input_name; - std::string hccl_type; - void *inputDataAddr; - void *outputDataAddr; - void *workSpaceAddr; - int32_t count; - int32_t dataType; - int32_t opType; - int64_t rootId; - uint64_t workSpaceMemSize; - std::vector dims; - std::vector hcclStreamList; -}; - -struct GETaskInfo { - uint32_t id; - uint16_t type; - uint32_t streamID; - void *stream; // rtKernelLaunch input argument - void *event; - void *privateDef; - uint32_t privateDefLen; - void *opsKernelStorePtr; - - std::vector kernelHcclInfo; -}; - -struct HcomOpertion { - std::string hcclType; - void *inputPtr; - void *outputPtr; - uint64_t count; - int32_t dataType; - int32_t opType; - int32_t root; -}; - -struct HcomRemoteAccessAddrInfo -{ - uint32_t remotetRankID; - uint64_t remoteAddr; // host embedding table address - uint64_t localAddr; // device HBM address - uint64_t length; // memory Length in Bytes -}; - - -} // namespace ge -#endif // INC_COMMON_OPSKERNEL_GE_TASK_INFO_H_ diff --git a/inc/metadef/inc/common/opskernel/ops_kernel_builder.h b/inc/metadef/inc/common/opskernel/ops_kernel_builder.h deleted file mode 100644 index fb25cbe55..000000000 --- a/inc/metadef/inc/common/opskernel/ops_kernel_builder.h +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
-*/ - -#ifndef INC_COMMON_OPSKERNELUTILS_OPS_KERNEL_INFO_UTILS_H_ -#define INC_COMMON_OPSKERNELUTILS_OPS_KERNEL_INFO_UTILS_H_ - -#include "external/ge/ge_api_error_codes.h" -#include "cce/aicpu_engine_struct.h" -#include "common/opskernel/ops_kernel_info_types.h" -#include "graph/node.h" -#include "proto/task.pb.h" - -namespace ge { -class OpsKernelBuilder { - public: - enum Mode { - kNormal, - kFfts, - kFftsPlus - }; - OpsKernelBuilder() = default; - virtual ~OpsKernelBuilder() = default; - - // initialize OpsKernelBuilder - virtual Status Initialize(const std::map &options) = 0; - - // finalize OpsKernelBuilder - virtual Status Finalize() = 0; - - // memory allocation requirement - virtual Status CalcOpRunningParam(Node &node) = 0; - - // generate task for op - virtual Status GenerateTask(const Node &node, RunContext &context, - std::vector &tasks) = 0; - - // generate task for op with different mode - virtual Status GenerateTask(const Node &node, RunContext &context, std::vector &tasks, - OpsKernelBuilder::Mode) { - return SUCCESS; - } - - // only call aicpu interface to generate task struct - virtual Status GenSingleOpRunTask(const NodePtr &node, STR_FWK_OP_KERNEL &task, std::string &task_info) { - return FAILED; - } - - // only call aicpu interface to generate task struct - virtual Status GenMemCopyTask(uint64_t count, STR_FWK_OP_KERNEL &task, std::string &task_info) { - return FAILED; - } -}; -} // namespace ge -#endif // INC_COMMON_OPSKERNELUTILS_OPS_KERNEL_INFO_UTILS_H_ diff --git a/inc/metadef/inc/common/opskernel/ops_kernel_info_store.h b/inc/metadef/inc/common/opskernel/ops_kernel_info_store.h deleted file mode 100644 index ec414aa6b..000000000 --- a/inc/metadef/inc/common/opskernel/ops_kernel_info_store.h +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
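An engine plugs into task generation by deriving from OpsKernelBuilder above and implementing its four pure-virtual hooks. A skeletal builder that reserves no extra memory and emits no tasks; the template arguments stripped from the declarations are assumed to be std::map<std::string, std::string> and std::vector<domi::TaskDef>, and the include path follows the header guard:

#include "common/opskernel/ops_kernel_builder.h"

namespace ge {
// Hypothetical builder for an engine whose ops need no device tasks.
class NoopKernelBuilder : public OpsKernelBuilder {
 public:
  Status Initialize(const std::map<std::string, std::string> &options) override { return SUCCESS; }
  Status Finalize() override { return SUCCESS; }

  // Keep whatever sizes are already set on the node's OpDesc.
  Status CalcOpRunningParam(Node &node) override { return SUCCESS; }

  // Nothing to launch: leave the task list empty.
  Status GenerateTask(const Node &node, RunContext &context,
                      std::vector<domi::TaskDef> &tasks) override {
    tasks.clear();
    return SUCCESS;
  }
};
}  // namespace ge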
- */ - -#ifndef INC_COMMON_OPSKERNEL_OPS_KERNEL_INFO_STORE_H_ -#define INC_COMMON_OPSKERNEL_OPS_KERNEL_INFO_STORE_H_ - -#include -#include -#include -#include -#include "./ge_task_info.h" -#include "./ops_kernel_info_types.h" -#include "cce/aicpu_engine_struct.h" -#include "cce/fwk_adpt_struct.h" -#include "common/ge_inner_error_codes.h" -#include "graph/node.h" -#include "proto/task.pb.h" -using std::map; -using std::string; -using std::to_string; -using std::vector; - -namespace ge { -class OpDesc; - -class OpsKernelInfoStore { - public: - OpsKernelInfoStore() {} - - virtual ~OpsKernelInfoStore() {} - - // initialize opsKernelInfoStore - virtual Status Initialize(const std::map &options) = 0; - - // close opsKernelInfoStore - virtual Status Finalize() = 0; /*lint -e148*/ - - virtual Status CreateSession(const std::map &session_options) { return SUCCESS; } - - virtual Status DestroySession(const std::map &session_options) { return SUCCESS; } - - // get all opsKernelInfo - virtual void GetAllOpsKernelInfo(std::map &infos) const = 0; - - // whether the opsKernelInfoStore is supported based on the operator attribute - virtual bool CheckSupported(const OpDescPtr &opDescPtr, std::string &un_supported_reason) const = 0; - - virtual bool CheckAccuracySupported(const OpDescPtr &opDescPtr, std::string &un_supported_reason, - bool realQuery = false) const { - return CheckSupported(opDescPtr, un_supported_reason); - } - // opsFlag opsFlag[0] indicates constant folding is supported or not - virtual void opsFlagCheck(const ge::Node &node, std::string &opsFlag) {}; - - // only call fe engine interface to compile single op - virtual Status CompileOp(std::vector &node_vec) { return SUCCESS; } - virtual Status CompileOpRun(std::vector &node_vec) { return SUCCESS; } - // load task for op - virtual Status LoadTask(GETaskInfo &task) { return SUCCESS; } - - virtual bool CheckSupported(const ge::NodePtr &node, std::string &un_supported_reason) const { - if (node == nullptr) { - return false; - } - return CheckSupported(node->GetOpDesc(), un_supported_reason); - } - - virtual bool CheckAccuracySupported(const ge::NodePtr &node, std::string &un_supported_reason, - bool realQuery = false) const { - if (node == nullptr) { - return false; - } - return CheckAccuracySupported(node->GetOpDesc(), un_supported_reason, realQuery); - } - // Set cut support info - virtual Status SetCutSupportedInfo(const ge::NodePtr &node) { return SUCCESS; } -}; -} // namespace ge -#endif // INC_COMMON_OPSKERNEL_OPS_KERNEL_INFO_STORE_H_ diff --git a/inc/metadef/inc/common/opskernel/ops_kernel_info_types.h b/inc/metadef/inc/common/opskernel/ops_kernel_info_types.h deleted file mode 100644 index d86f09ecf..000000000 --- a/inc/metadef/inc/common/opskernel/ops_kernel_info_types.h +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
-*/ - -#ifndef INC_COMMON_OPSKERNEL_OPS_KERNEL_INFO_TYPES_H_ -#define INC_COMMON_OPSKERNEL_OPS_KERNEL_INFO_TYPES_H_ - -#include -#include -#include -#include "graph/buffer.h" -#include "runtime/rt_model.h" - -using std::string; - -namespace ge { -/*lint -e148*/ -struct RunContext { - rtModel_t model; - rtStream_t stream; - uint64_t sessionId; - uint64_t dataMemSize; - uint8_t *dataMemBase; - std::map mem_type_data_mem_size; - std::map mem_type_data_mem_base; - uint64_t weightMemSize; - uint8_t *weightMemBase; - ge::Buffer weightsBuffer; - std::vector graphStreamList; // all streams of graph, order by ge stream id(0,1,...) - std::vector graphEventList; // all events of graph, order by ge event id(0,1,...) - std::vector graphLabelList; // all labels of graph, order by ge label id(0,1,...) -}; - -/*lint +e148*/ -struct Task { - uint32_t id; - uint16_t type; - void *stream; - void *event; -}; - -struct OpInfo { - std::string engine; // which engin - /*lint -e148*/ - std::string opKernelLib; // which opsKernelStore - int computeCost; // compute cost - bool flagPartial; // whether to support is related to shape - bool flagAsync; // Whether to support asynchronous - bool isAtomic; // whether to support atomic addr clean - std::string opFileName; // op file name - std::string opFuncName; // op function name -}; -} // namespace ge - -#endif // INC_COMMON_OPSKERNEL_OPS_KERNEL_INFO_TYPES_H_ diff --git a/inc/metadef/inc/common/optimizer/graph_optimizer.h b/inc/metadef/inc/common/optimizer/graph_optimizer.h deleted file mode 100644 index 0fb1ff864..000000000 --- a/inc/metadef/inc/common/optimizer/graph_optimizer.h +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
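OpsKernelInfoStore and the OpInfo record above are how an engine advertises what it can execute: GetAllOpsKernelInfo publishes one OpInfo per op type and CheckSupported answers per-node queries. A sketch of a store that only claims Relu; the map value type and the OpDesc accessor used here are assumptions, and the engine and kernel-library names are placeholders:

#include "common/opskernel/ops_kernel_info_store.h"

namespace ge {
// Hypothetical store that registers a single op type.
class ReluOnlyInfoStore : public OpsKernelInfoStore {
 public:
  Status Initialize(const std::map<std::string, std::string> &options) override { return SUCCESS; }
  Status Finalize() override { return SUCCESS; }

  void GetAllOpsKernelInfo(std::map<std::string, OpInfo> &infos) const override {
    OpInfo info;
    info.engine = "DemoEngine";          // placeholder engine name
    info.opKernelLib = "DemoKernelLib";  // placeholder kernel library name
    info.computeCost = 0;
    info.flagPartial = false;
    info.flagAsync = false;
    info.isAtomic = false;
    infos["Relu"] = info;
  }

  bool CheckSupported(const OpDescPtr &op_desc, std::string &reason) const override {
    if (op_desc == nullptr || op_desc->GetType() != "Relu") {
      reason = "only Relu is handled by this store";
      return false;
    }
    return true;
  }
};
}  // namespace ge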
-*/ - -#ifndef INC_COMMON_OPTIMIZER_GRAPH_OPTIMIZER_H_ -#define INC_COMMON_OPTIMIZER_GRAPH_OPTIMIZER_H_ - -#include -#include -#include "./graph_optimizer_types.h" -#include "common/ge_inner_error_codes.h" -#include "common/opskernel/ops_kernel_info_types.h" -#include "graph/compute_graph.h" - -using std::map; -using std::string; - -/*lint -e148*/ -namespace ge { -class GraphOptimizer { - public: - virtual ~GraphOptimizer() {} - - // initialize graphOptimizer - virtual Status Initialize(const std::map &options) = 0; - - // close graphOptimizer - virtual Status Finalize() = 0; - - // optimize original graph for FE quant optimize - virtual Status OptimizeGraphPrepare(ComputeGraph& graph) { - return SUCCESS; - } - - // optimize graph before build for RTS - virtual Status OptimizeGraphBeforeBuild(ComputeGraph& graph) { - return SUCCESS; - } - - // optimize original graph, using in graph preparation stage - virtual Status OptimizeOriginalGraph(ComputeGraph &graph) = 0; - - // optimize original graph, using for conversion operator insert in graph preparation stage - virtual Status OptimizeOriginalGraphJudgeInsert(ComputeGraph &graph) { - return SUCCESS; - } - - // optimize fused graph - virtual Status OptimizeFusedGraph(ComputeGraph &graph) = 0; - - // optimize whole graph, using after graph merged stage - virtual Status OptimizeWholeGraph(ComputeGraph &graph) = 0; - - // get attribute of graph optimizer - virtual Status GetAttributes(GraphOptimizerAttribute &attrs) const = 0; - - // optimize streamed Graph - virtual Status OptimizeStreamGraph(ComputeGraph &graph, const RunContext &context) { return SUCCESS; } - - // op compile - virtual Status OptimizeFusedGraphAfterGraphSlice(ComputeGraph &graph) { return SUCCESS; } - - // optimize whole graph, using after stage1 - virtual Status OptimizeAfterStage1(ComputeGraph &graph) { return SUCCESS; } -}; -} // namespace ge -/*lint +e148*/ -#endif // INC_COMMON_OPTIMIZER_GRAPH_OPTIMIZER_H_ diff --git a/inc/metadef/inc/common/optimizer/graph_optimizer_types.h b/inc/metadef/inc/common/optimizer/graph_optimizer_types.h deleted file mode 100644 index f48394950..000000000 --- a/inc/metadef/inc/common/optimizer/graph_optimizer_types.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
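GraphOptimizer above is the per-engine optimization hook; only Initialize, Finalize, the three pure-virtual Optimize*Graph passes and GetAttributes must be provided, since the remaining stages default to SUCCESS. A pass-through sketch; the engine name is a placeholder and the option-map template arguments are assumed to be std::map<std::string, std::string>:

#include "common/optimizer/graph_optimizer.h"

namespace ge {
// Hypothetical optimizer that registers an engine but rewrites nothing.
class PassThroughOptimizer : public GraphOptimizer {
 public:
  Status Initialize(const std::map<std::string, std::string> &options) override { return SUCCESS; }
  Status Finalize() override { return SUCCESS; }

  Status OptimizeOriginalGraph(ComputeGraph &graph) override { return SUCCESS; }
  Status OptimizeFusedGraph(ComputeGraph &graph) override { return SUCCESS; }
  Status OptimizeWholeGraph(ComputeGraph &graph) override { return SUCCESS; }

  Status GetAttributes(GraphOptimizerAttribute &attrs) const override {
    attrs.engineName = "DemoEngine";  // placeholder engine name
    attrs.scope = OPTIMIZER_SCOPE::ENGINE;
    return SUCCESS;
  }
};
}  // namespace ge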
- */ - -#ifndef INC_COMMON_OPTIMIZER_GRAPH_OPTIMIZER_TYPES_H_ -#define INC_COMMON_OPTIMIZER_GRAPH_OPTIMIZER_TYPES_H_ - -#include -#include - -namespace ge { -enum OPTIMIZER_SCOPE { - UNIT = 0, - ENGINE, -}; - -struct GraphOptimizerAttribute { - std::string engineName; - OPTIMIZER_SCOPE scope; -}; -} // namespace ge - -#endif // INC_COMMON_OPTIMIZER_GRAPH_OPTIMIZER_TYPES_H_ diff --git a/inc/metadef/inc/common/proto/dump_task.proto b/inc/metadef/inc/common/proto/dump_task.proto deleted file mode 100644 index ee1c6f47f..000000000 --- a/inc/metadef/inc/common/proto/dump_task.proto +++ /dev/null @@ -1,113 +0,0 @@ -syntax = "proto3"; -package toolkit.dumpdata; - -enum OutputDataType { - DT_UNDEFINED = 0; - DT_FLOAT = 1; - DT_FLOAT16 = 2; - DT_INT8 = 3; - DT_UINT8 = 4; - DT_INT16 = 5; - DT_UINT16 = 6; - DT_INT32 = 7; - DT_INT64 = 8; - DT_UINT32 = 9; - DT_UINT64 = 10; - DT_BOOL = 11; - DT_DOUBLE = 12; - DT_STRING = 13; - DT_DUAL_SUB_INT8 = 14; - DT_DUAL_SUB_UINT8 = 15; - DT_COMPLEX64 = 16; - DT_COMPLEX128 = 17; - DT_QINT8 = 18; - DT_QINT16 = 19; - DT_QINT32 = 20; - DT_QUINT8 = 21; - DT_QUINT16 = 22; - DT_RESOURCE = 23; - DT_STRING_REF = 24; - DT_DUAL = 25; - DT_VARIANT = 26; -} - -enum OutputFormat { - FORMAT_NCHW = 0; - FORMAT_NHWC = 1; - FORMAT_ND = 2; - FORMAT_NC1HWC0 = 3; - FORMAT_FRACTAL_Z = 4; - FORMAT_NC1C0HWPAD = 5; - FORMAT_NHWC1C0 = 6; - FORMAT_FSR_NCHW = 7; - FORMAT_FRACTAL_DECONV = 8; - FORMAT_C1HWNC0 = 9; - FORMAT_FRACTAL_DECONV_TRANSPOSE = 10; - FORMAT_FRACTAL_DECONV_SP_STRIDE_TRANS = 11; - FORMAT_NC1HWC0_C04 = 12; - FORMAT_FRACTAL_Z_C04 = 13; - FORMAT_CHWN = 14; - FORMAT_FRACTAL_DECONV_SP_STRIDE8_TRANS = 15; - FORMAT_HWCN = 16; - FORMAT_NC1KHKWHWC0 = 17; - FORMAT_BN_WEIGHT = 18; - FORMAT_FILTER_HWCK = 19; - FORMAT_HASHTABLE_LOOKUP_LOOKUPS=20; - FORMAT_HASHTABLE_LOOKUP_KEYS = 21; - FORMAT_HASHTABLE_LOOKUP_VALUE = 22; - FORMAT_HASHTABLE_LOOKUP_OUTPUT = 23; - FORMAT_HASHTABLE_LOOKUP_HITS=24; - FORMAT_C1HWNCoC0 = 25; - FORMAT_MD = 26; - FORMAT_NDHWC = 27; - FORMAT_FRACTAL_ZZ = 28; - FORMAT_FRACTAL_NZ = 29; - FORMAT_RESERVED = 30; -} - -message OriginalOp { - string name = 1; - uint32 output_index = 2; - OutputDataType data_type = 3; - OutputFormat format = 4; -} - -message Shape { - repeated uint64 dim = 1; -} - -message OpOutput { - OutputDataType data_type = 1; - OutputFormat format = 2; - Shape shape = 3; - OriginalOp original_op = 4; // the original op corresponding to the output - bytes data = 5; - uint64 size = 6; -} - -message OpInput { - OutputDataType data_type = 1; - OutputFormat format = 2; - Shape shape = 3; - bytes data = 4; - uint64 size = 5; -} - -enum BufferType { - L1 = 0; -} - -message OpBuffer { - BufferType buffer_type = 1; - bytes data = 2; - uint64 size = 3; -} - -message DumpData{ - string version = 1; - uint64 dump_time = 2; - repeated OpOutput output = 3; - repeated OpInput input = 4; - repeated OpBuffer buffer = 5; - string op_name = 6; -} diff --git a/inc/metadef/inc/common/proto/fusion_model.proto b/inc/metadef/inc/common/proto/fusion_model.proto deleted file mode 100644 index c92c5581a..000000000 --- a/inc/metadef/inc/common/proto/fusion_model.proto +++ /dev/null @@ -1,21 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -import "om.proto"; - -package domi; - -message FusionModelDef { - string version = 1; - repeated OpDef fusion_op = 2; -} \ No newline at end of file diff --git a/inc/metadef/inc/common/proto/fwk_adapter.proto b/inc/metadef/inc/common/proto/fwk_adapter.proto deleted file mode 100644 index 9335c9263..000000000 --- a/inc/metadef/inc/common/proto/fwk_adapter.proto +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package aicpu.FWKAdapter; -option cc_enable_arenas = true; - - -// Defines an struct for input and output. -message TensorDataInfo { - - // value DataType - uint32 dtype = 1; - - // shape dim - repeated int64 dim = 2; - - // data point addr - int64 data_addr = 3; -} - -message KernelRunParam { - // input - repeated TensorDataInfo input = 1; - // output - repeated TensorDataInfo output = 2; -} - diff --git a/inc/metadef/inc/common/proto/ge_ir.proto b/inc/metadef/inc/common/proto/ge_ir.proto deleted file mode 100644 index 12989a548..000000000 --- a/inc/metadef/inc/common/proto/ge_ir.proto +++ /dev/null @@ -1,191 +0,0 @@ -syntax = "proto3"; - -package ge.proto; - -enum DataType -{ - DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set. 
- DT_FLOAT = 1; // float type - DT_FLOAT16 = 2; // fp16 type - DT_INT8 = 3; // int8 type - DT_UINT8 = 4; // uint8 type - DT_INT16 = 5; // int16 type - DT_UINT16 = 6; // uint16 type - DT_INT32 = 7; // - DT_INT64 = 8; // int64 type - DT_UINT32 = 9; // unsigned int32 - DT_UINT64 = 10; // unsigned int64 - DT_BOOL = 11; // bool type - DT_DOUBLE = 12; // double type - DT_STRING = 13; // string type - DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */ - DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */ - DT_COMPLEX64 = 16; // complex64 type - DT_COMPLEX128 = 17; // complex128 type - DT_QINT8 = 18; // qint8 type - DT_QINT16 = 19; // qint16 type - DT_QINT32 = 20; // qint32 type - DT_QUINT8 = 21; // quint8 type - DT_QUINT16 = 22; // quint16 type - DT_RESOURCE = 23; // resource type - DT_STRING_REF = 24; // string_ref type - DT_DUAL = 25; /**< dual output type */ - DT_VARIANT = 26; // variant type -} - -message AttrDef -{ - message ListValue - { - enum ListValueType{ - VT_LIST_NONE = 0; - VT_LIST_STRING = 1; - VT_LIST_INT = 2; - VT_LIST_FLOAT = 3; - VT_LIST_BOOL = 4; - VT_LIST_BYTES = 5; - VT_LIST_TENSOR_DESC = 6; - VT_LIST_TENSOR = 7; - VT_LIST_GRAPH = 8; - VT_LIST_NAMED_ATTRS = 9; - VT_LIST_DATA_TYPE = 10; - } - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3; // "list(int)" - repeated float f = 4; // "list(float)" - repeated bool b = 5; // "list(bool)" - repeated bytes bt = 7; - repeated TensorDescriptor td = 8; - repeated TensorDef t = 9; - repeated GraphDef g = 10; - repeated NamedAttrs na = 11; - repeated int64 dt = 12; // list ge::DataType - - ListValueType val_type = 20; - } - - message ListListInt{ - message ListInt{ - repeated int64 list_i = 1; // list int - } - repeated ListInt list_list_i = 1; // list list int - } - - oneof value - { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; // Used to support attr nesting - TensorDescriptor td = 11; // GeTensorDesc type - TensorDef t = 12; // GeTensor type - GraphDef g = 13; // Graph type - ListListInt list_list_int = 14; // List List Int type - int64 dt = 15; // ge::DataType - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. -message NamedAttrs -{ - string name = 1; - map attr = 2; -} - -// Shape / dimension description, using row-major order -message ShapeDef -{ - repeated int64 dim = 1; // Size of each dimension -} - -// Multidimensional data description -message TensorDescriptor -{ - string name = 1; // Optional parameter, tensor name - - DataType dtype = 2; // tensor datatype - ShapeDef shape = 3; // Shape / dimension - string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND" - - bool has_out_attr = 9; - int64 size = 10; - int64 weight_size = 11; - bool reuse_input = 12; - bool output_tensor = 13; - string device_type = 14; - bool input_tensor =15; - int64 real_dim_cnt = 16; - int64 reuse_input_index = 17; - int64 data_offset = 18; - int64 cmps_size = 19; - string cmps_tab = 20; - int64 cmps_tab_offset = 21; - - map attr = 5; // Set of extra parameter fields -} - -// GeTensor definition -message TensorDef -{ - TensorDescriptor desc = 1; // Tensor description - bytes data = 2; // Tensor data -} - - -// Operator description -message OpDef -{ - string name = 1; // name - string type = 2; // type - - repeated string input = 5; // input original op name + outgoing index. 
op_name:index
-
-    map<string, AttrDef> attr = 10;  // Set of operator parameter fields
-
-    bool has_out_attr = 20;
-    int64 id = 21;
-    int64 stream_id =22;
-    repeated string input_name = 23;
-    repeated string src_name = 24;
-    repeated int64 src_index = 25;
-    repeated string dst_name = 26;
-    repeated int64 dst_index = 27;
-    repeated int64 input_i = 28;
-    repeated int64 output_i = 29;
-    repeated int64 workspace = 30;
-    repeated int64 workspace_bytes = 31;
-    repeated bool is_input_const = 32;
-    repeated TensorDescriptor input_desc = 33;
-    repeated TensorDescriptor output_desc = 34;
-    repeated string subgraph_name = 35;
-}
-
-// Graph definition
-message GraphDef
-{
-    string name = 1;   // name
-
-    repeated string input = 4;   // Graph input
-    repeated string output = 5;  // Graph output
-
-    repeated OpDef op = 6;     // List of operators
-
-    map<string, AttrDef> attr = 11;  // Extended field
-}
-
-// model definition
-message ModelDef
-{
-    string name = 1;   // name
-    uint32 version = 2;    // IR Proto version
-    string custom_version = 3;   // User model version number, passed in by user
-
-    repeated GraphDef graph = 7;   // Graph definition; graph[0] represents the main graph of the model
-
-    map<string, AttrDef> attr = 11;  // Extended field
-}
-
diff --git a/inc/metadef/inc/common/proto/insert_op.proto b/inc/metadef/inc/common/proto/insert_op.proto
deleted file mode 100644
index bf918b20a..000000000
--- a/inc/metadef/inc/common/proto/insert_op.proto
+++ /dev/null
@@ -1,139 +0,0 @@
-syntax = "proto3";
-
-package domi;
-
-message InsertNewOps {
-    repeated AippOpParams aipp_op = 1;
-    repeated MultiShapeOpParams multi_shape_op = 2;
-}
-
-message AippOpParams {
-    enum InputFormat {
-        UNDEFINED = 0;
-        YUV420SP_U8 = 1;
-        XRGB8888_U8 = 2;
-        RGB888_U8 = 3;
-        YUV400_U8 = 4;
-        NC1HWC0DI_FP16 = 5;
-        NC1HWC0DI_S8 = 6;
-        ARGB8888_U8 = 7;
-        YUYV_U8 = 8;
-        YUV422SP_U8 = 9;
-        AYUV444_U8 = 10;
-        RAW10 = 11;
-        RAW12 = 12;
-        RAW16 = 13;
-        RAW24 = 14;
-        RGB16 = 15;
-        RGB20 = 16;
-        RGB24 = 17;
-        RGB8_IR = 18;
-        RGB16_IR = 19;
-        RGB24_IR = 20;
-    }
-
-    enum AippMode {
-        undefined = 0;
-        static = 1;
-        dynamic = 2;
-    }
-
-    // AIPP mode: distinguishes static AIPP from dynamic AIPP
-    AippMode aipp_mode = 1;
-
-    // related_input_rank is required and of integer type; its range is >= 0 and <= the number of Data
-    // operators, and it defaults to 0. It identifies which model input is processed by AIPP: for a model
-    // with two inputs where AIPP should be applied to the second input, set related_input_rank to 1.
-    uint32 related_input_rank = 2;
-
-    // related_input_name is optional and the top name of data node which inserts aipp
-    string related_input_name = 6;
-
-    // input_edge_idx is optional and of integer type; its range is >= 0.
-    // It lets different output edges of the Data operator receive different AIPP processing. When it is
-    // not set, AIPP is applied by default to every output edge of the model input selected by
-    // related_input_rank. The configured values must not exceed the number of output edges of the Data operator.
-    repeated uint32 input_edge_idx = 3;
-
-    // [Begin] dynamic AIPP parameters, ignored when static AIPP is configured
-    uint32 max_src_image_size = 4;
-
-    // Whether rotation is supported. Disabled by default; enabling it costs extra memory and performance.
-    bool support_rotation = 5;
-
-    // [End] dynamic AIPP parameters
-
-
-    // [Begin] static AIPP parameters, ignored when dynamic AIPP is configured
-    InputFormat input_format = 51;
-    bool csc_switch = 52;
-    float cpadding_value = 53;
-    bool rbuv_swap_switch = 54;
-    bool ax_swap_switch = 55;
-    bool single_line_mode = 56;
-
-    int32 src_image_size_w = 57;
-    int32 src_image_size_h = 58;
-
-    bool crop = 59;
-    int32 load_start_pos_w = 60;
-    int32 load_start_pos_h = 61;
-    int32 crop_size_w = 62;
-    int32 crop_size_h = 63;
-
-    bool resize = 64;
-    int32 resize_output_w = 65;
-    int32 resize_output_h = 66;
-
-    bool padding = 67;
-    int32 left_padding_size = 68;
-    int32 right_padding_size = 69;
-    int32 top_padding_size = 70;
-    int32 bottom_padding_size = 71;
-
-    int32 mean_chn_0 = 10;
-    int32 mean_chn_1 = 11;
-    int32 mean_chn_2 = 12;
-    int32 mean_chn_3 = 19;
-    float min_chn_0 = 13;
-    float min_chn_1 = 14;
-    float min_chn_2 = 15;
-    float min_chn_3 = 20;
-    repeated float var_reci_chn_0 = 16;
-    repeated float var_reci_chn_1 = 17;
-    repeated float var_reci_chn_2 = 18;
-    repeated float var_reci_chn_3 = 21;
-
-    repeated int32 matrix_r0c0 = 30;
-    repeated int32 matrix_r0c1 = 31;
-    repeated int32 matrix_r0c2 = 32;
-    repeated int32 matrix_r1c0 = 33;
-    repeated int32 matrix_r1c1 = 34;
-    repeated int32 matrix_r1c2 = 35;
-    repeated int32 matrix_r2c0 = 36;
-    repeated int32 matrix_r2c1 = 37;
-    repeated int32 matrix_r2c2 = 38;
-    repeated int32 output_bias_0 = 39;
-    repeated int32 output_bias_1 = 40;
-    repeated int32 output_bias_2 = 41;
-    repeated int32 input_bias_0 = 42;
-    repeated int32 input_bias_1 = 43;
-    repeated int32 input_bias_2 = 44;
-
-    // [End] static AIPP parameters
-
-    // The n number that is used for raw/rgbir data into f16 transformation.
-    // The transformation equation is x/(2^n). If set to 0, no transform is performed.
-    uint32 raw_rgbir_to_f16_n = 45;
-}
-
-message MultiShapeOpParams {
-    enum MultiShapeMode {
-        batch = 0;              // dynamic batch
-        resolution = 1;         // dynamic resolution, reserved for extension
-    }
-
-    MultiShapeMode mode = 1;            // operator mode
-    uint32 related_input_rank = 2;      // which model input the new operator is inserted on
-
-
-    repeated uint32 batch_list = 11;    // batch_list values; the number of entries must be between 2 and 8
-}
diff --git a/inc/metadef/inc/common/proto/om.proto b/inc/metadef/inc/common/proto/om.proto
deleted file mode 100644
index e15e5f808..000000000
--- a/inc/metadef/inc/common/proto/om.proto
+++ /dev/null
@@ -1,396 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0. You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Apache License for more details at
- * http://www.apache.org/licenses/LICENSE-2.0
- */
-syntax = "proto3";
-
-package domi;
-
-enum TargetType
-{
-    MINI = 0;
-    TINY = 1;
-    LITE = 2;
-}
-
-// offline model
-message ModelDef {
-    string name = 1;
-    uint32 version = 2;
-
-    uint64 memory_size = 10;
-    uint32 stream_num = 11;
-    uint32 event_num = 12;
-    uint64 weight_size = 13;
-    uint32 label_num = 15;
-    repeated OpDef op = 20;
-    TargetType target_type = 23;
-
-    map<string, AttrDef> attr = 30;
-};
-
-// operator define
-message OpDef {
-    string name = 1;
-    string type = 2;
-
-    uint32 id = 3;
-    uint32 stream_id = 4;
-
-    repeated string input_name = 5;
-
-    repeated string src_name = 8;
-    repeated int32 src_index = 9;
-    repeated int64 input = 10;
-    repeated int64 output = 11;
-    repeated TensorDescriptor input_desc = 12;
-    repeated TensorDescriptor output_desc = 13;
-    repeated WeightDef weights = 14;
-    repeated string dst_name = 15;
-    repeated int32 dst_index = 16;
-
-    repeated int64 workspace = 20;
-    repeated uint32 workspace_bytes = 21;
-
-    repeated string weight_name = 22;
-    repeated bool is_input_const = 23;
-
-    map<string, AttrDef> attr = 30;
-
-    QuantizeFactorParams quantize_factor = 31;
-
-    oneof op_params {
-        // start at 100 here
-        SendOpParams sender_param = 100;
-        RecvOpParams receiver_param = 200;
-        ConvolutionOpParams convolution_param = 300;
-        PoolingOpParams pooling_param = 400;
-        EltwiseOpParams eltwise_param = 500;
-        BatchNormOpParams batchnorm_param = 600;
-        ScaleOpParams scale_param = 700;
-        FullConnectionOpParams full_connection_param = 800;
-        SoftmaxOpParams softmax_param = 900;
-        ActivationOpParams activation_param = 1000;
-        ReshapeOpParams reshape_param = 1100;
-    }
-};
-
-message SendOpParams {
-    uint32 event_id = 1;
-};
-
-message RecvOpParams {
-    uint32 event_id = 1;
-};
-
-enum
QuantizeScaleType -{ - VECTOR_SCALE = 0; - SCALAR_SCALE = 1; -} - -enum QuantizeScaleMode -{ - NORMAL_MODE = 0; - SQRT_MODE = 1; -} - -enum QuantizeAlgorithm -{ - NON_OFFSET_ALGO = 0; - HALF_OFFSET_ALGO = 1; - ALL_OFFSET_ALGO = 2; -} -message QuantizeFactor -{ - QuantizeScaleMode scale_mode = 1; - bytes scale_value = 2; - int64 scale_offset = 3; - bytes offset_data_value = 4; - int64 offset_data_offset = 5; - bytes offset_weight_value = 6; - int64 offset_weight_offset = 7; - bytes offset_pad_value = 8; - int64 offset_pad_offset = 9; -}; - -message QuantizeCalcFactor -{ - bytes offsetw = 1; - int64 offsetw_offset = 2; - bytes offsetd = 3; - int64 offsetd_offset = 4; - bytes scalereq = 5; - int64 scaledreq_offset = 6; - bytes offsetdnext = 7; - int64 offsetdnext_offset = 8; -} - -message QuantizeFactorParams -{ - QuantizeAlgorithm quantize_algo = 1; - QuantizeScaleType scale_type = 2; - QuantizeFactor quantize_param = 3; - QuantizeFactor dequantize_param = 4; - QuantizeFactor requantize_param = 5; - QuantizeCalcFactor quantizecalc_param = 6; -}; - -message ConvolutionOpParams { - int32 mode = 1; - int32 algo = 2; - int32 pad_mode = 3; - uint32 group = 4; - uint32 num_output = 5; - - repeated uint32 pad = 10; - repeated uint32 stride = 11; - repeated uint32 dilation = 12; - repeated uint32 kernel = 13; - - float alpha = 20; - float beta = 21; - - WeightDef filter = 40; - WeightDef bias = 41; - - bool relu_flag = 62; - repeated uint32 adj = 70; - repeated uint32 target_shape = 71; - repeated uint32 before_pad = 72; -}; - -message PoolingOpParams { - int32 mode = 1; - int32 nan_opt = 2; - int32 pad_mode = 3; - bool global_pooling = 4; - - repeated uint32 window = 10; - repeated uint32 pad = 11; - repeated uint32 stride = 12; - bool ceil_mode = 13; - int32 data_mode = 14; - - float alpha = 20; - float beta = 21; - repeated uint32 before_pad = 22; -}; - -message EltwiseOpParams { - int32 mode = 1; - repeated float coeff = 2; - float alpha = 3; - float beta = 4; - repeated WeightDef weight = 5; - bool relu_flag = 6; -}; - -message ActivationOpParams { - int32 mode = 1; - float coef = 2; - float alpha = 3; - float beta = 4; -}; - -message BatchNormOpParams { - int32 mode = 1; - - float alpha = 2; - float beta = 3; - double epsilon = 4;//optinal,[default = 1e-5] - bool use_global_stats = 5; //optinal,by default true,testing mode - float moving_average_fraction = 6; //optinal,[default = .999]; - - WeightDef estimated_mean = 7; - WeightDef estimated_variance = 8; - - WeightDef scale = 9; - WeightDef bias = 10; -}; - -message ScaleOpParams { - WeightDef scale = 1; - WeightDef bias = 2; -}; - -message ReshapeOpParams { - float alpha = 1; - float beta = 2; - ShapeDef shape = 3; - int32 axis = 4; - int32 num_axes = 5; - int32 format = 6; -}; - -message SoftmaxOpParams { - int32 algo = 1; - int32 mode = 2; - float alpha = 3; - float beta = 4; -}; - -message FullConnectionOpParams { - WeightDef filter = 1; - WeightDef bias = 2; - uint32 num_output = 3; - bool relu_flag = 12; -}; - -message FlattenOpParams { - float alpha = 1; - float beta = 2; - int32 start_axis = 3; - int32 end_axis = 4; -} - -message AddLimitedOpParams { - float alpha = 1; - float beta = 2; - int32 axis = 3; - bool broadcast = 4; - - repeated WeightDef weight = 10; -}; - -message MulLimitedOpParams { - float alpha = 1; - float beta = 2; - int32 axis = 3; - bool broadcast = 4; - - repeated WeightDef weight = 10; -}; - -message AddOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message MulOpParams 
{ - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message SubOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message BiasAddOpParams { - float alpha = 1; - float beta = 2; - - WeightDef bias = 10; -}; - -message MatMulOpParams { - float alpha = 1; - float beta = 2; - bool transposeX = 3; - bool transposeW = 4; - - WeightDef filter = 10; - WeightDef bias = 12; -}; - -message RsqrtOpParams { - float alpha = 1; - float beta = 2; -}; - - -message WeightDef { - int32 format = 1; - int32 data_type = 2; - ShapeDef shape = 3; - bytes data = 4; - int64 data_offset = 5; - uint32 cmps_size = 6; - bytes cmps_tab = 7; - int64 cmps_tab_offset = 10; - CompressInfo cmps_info = 8; - AllOffsetQuantizeInfo alloffset_quantize_info = 11; -} - -message ShapeDef { - repeated int64 dim = 1; -} - -enum DeviceType { - NPU = 0; // In default, we will use NPU. - CPU = 1; // CPU -} - -message AllOffsetQuantizeInfo { - float scale = 1; - int32 offset = 2; -} - -message TensorDescriptor { - int32 format = 1; - int32 data_type = 2; - repeated int64 dim = 3; - uint32 size = 4; - bool reuse_input = 5; - bool output_tensor = 7; - DeviceType device_type = 8; - bool input_tensor = 9; - uint32 real_dim_cnt = 10; - uint32 reuse_input_index = 11; - AllOffsetQuantizeInfo alloffset_quantize_info = 12; -} - -message CompressInfo { - int32 blockRow = 1; // block row - int32 blockCol = 2; // block col - int32 fractalK = 3; // fractal K - int32 fractalN = 4; // fractal N - int32 lastFractalK = 5; // K of last fractal - int32 lastFractalN = 6; // N of last fractal - int32 cubeSize = 7; // cube's length - int32 loadDir = 8; // data load directtiono 0:col load 1:row load -} - -message AttrDef { - message ListValue { - repeated string s = 2; // "list(string)" - repeated int64 i = 3 [packed = true]; // "list(int)" - repeated float f = 4 [packed = true]; // "list(float)" - repeated bool b = 5 [packed = true]; // "list(bool)" - repeated uint32 u = 6 [packed = true]; // "list(uint)" - repeated bytes bt = 7; - } - - oneof value { - string s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - uint32 u = 6; // "uint32" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. 
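The om.proto definitions above describe an offline model as a domi.ModelDef that owns a list of OpDef entries, each carrying tensor descriptors, weights and a string-to-AttrDef attr map. A minimal sketch of how such a model could be assembled through the C++ classes that protoc generates for this file; the om.pb.h header name follows standard protobuf code generation and is an assumption, not something defined in this patch:

    #include <string>
    #include "om.pb.h"  // assumed name of the header protoc generates from om.proto

    int main() {
      domi::ModelDef model;
      model.set_name("offline_model_demo");
      model.set_version(1);
      model.set_target_type(domi::MINI);

      // One convolution-like operator with a single output tensor descriptor.
      domi::OpDef* op = model.add_op();
      op->set_name("conv1");
      op->set_type("Convolution");
      op->set_id(0);
      op->set_stream_id(0);

      domi::TensorDescriptor* out_desc = op->add_output_desc();
      out_desc->set_format(0);
      out_desc->set_data_type(0);
      out_desc->add_dim(1);
      out_desc->add_dim(16);
      out_desc->add_dim(224);
      out_desc->add_dim(224);

      // Extra operator attributes go into the attr map as AttrDef values.
      domi::AttrDef pad_attr;
      pad_attr.set_i(1);
      (*op->mutable_attr())["pad"] = pad_attr;

      std::string serialized;
      model.SerializeToString(&serialized);
      return serialized.empty() ? 1 : 0;
    }

The same set_/add_/mutable_ accessor pattern applies to every other message in this directory, since they are all plain protobuf definitions.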
-message NamedAttrs { - string name = 1; - map attr = 2; -} - diff --git a/inc/metadef/inc/common/proto/op_mapping_info.proto b/inc/metadef/inc/common/proto/op_mapping_info.proto deleted file mode 100644 index 7fb6f84b1..000000000 --- a/inc/metadef/inc/common/proto/op_mapping_info.proto +++ /dev/null @@ -1,75 +0,0 @@ -syntax = "proto3"; -package aicpu.dump; - -message Shape { - repeated uint64 dim = 1; -} - -message Output { - int32 data_type = 1; - int32 format = 2; - Shape shape = 3; - uint64 address = 4; - string original_name = 5; - int32 original_output_index = 6; - int32 original_output_data_type = 7; - int32 original_output_format = 8; - uint64 size = 9; - Shape origin_shape = 10; -} - -message Input { - int32 data_type =1; - int32 format = 2; - Shape shape = 3; - uint64 address = 4; - uint64 size = 5; - Shape origin_shape = 6; -} - -enum BufferType { - L1 = 0; -} - -message OpBuffer { - BufferType buffer_type = 1; - uint64 address = 2; - uint64 size = 3; -} - -message Op { - string op_name = 1; - string op_type = 2; -} - -message Task { - uint32 task_id = 1; - uint32 stream_id = 2; - Op op = 3; - repeated Output output = 4; - bool end_graph = 5; - repeated Input input = 6; - repeated OpBuffer buffer = 7; -} - -message OpMappingInfo { - string dump_path = 1; - oneof model_name_param { - string model_name = 2; - } - oneof model_id_param { - uint32 model_id = 3; - } - oneof step_id { - uint64 step_id_addr = 4; - } - oneof iterations_per_loop { - uint64 iterations_per_loop_addr = 5; - } - oneof loop_cond { - uint64 loop_cond_addr = 6; - } - uint32 flag = 7; // 0x01 load, 0x00 unload - repeated Task task = 8; - string dump_step = 9; -} \ No newline at end of file diff --git a/inc/metadef/inc/common/proto/proto_inner/ge_onnx.proto b/inc/metadef/inc/common/proto/proto_inner/ge_onnx.proto deleted file mode 100644 index 4cd77f3ae..000000000 --- a/inc/metadef/inc/common/proto/proto_inner/ge_onnx.proto +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright (c) ONNX Project Contributors. -// Licensed under the MIT license. - -syntax = "proto3"; - -package ge.onnx; - -// Overview -// -// ONNX is an open specification that is comprised of the following components: -// -// 1) A definition of an extensible computation graph model. -// 2) Definitions of standard data types. -// 3) Definitions of built-in operators. -// -// This document describes the syntax of models and their computation graphs, -// as well as the standard data types. Together, they are referred to as the ONNX -// Intermediate Representation, or 'IR' for short. -// -// The normative semantic specification of the ONNX IR is found in docs/IR.md. -// Definitions of the built-in neural network operators may be found in docs/Operators.md. - -// Notes -// -// Release -// -// We are still in the very early stage of defining ONNX. The current -// version of ONNX is a starting point. While we are actively working -// towards a complete spec, we would like to get the community involved -// by sharing our working version of ONNX. -// -// Protobuf compatibility -// -// To simplify framework compatibility, ONNX is defined using the subset of protobuf -// that is compatible with both protobuf v2 and v3. This means that we do not use any -// protobuf features that are only available in one of the two versions. -// -// Here are the most notable contortions we have to carry out to work around -// these limitations: -// -// - No 'map' (added protobuf 3.0). 
We instead represent mappings as lists -// of key-value pairs, where order does not matter and duplicates -// are not allowed. - - -// Versioning -// -// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md -// -// To be compatible with both proto2 and proto3, we will use a version number -// that is not defined by the default value but an explicit enum number. -enum Version { - // proto3 requires the first enum value to be zero. - // We add this just to appease the compiler. - _START_VERSION = 0; - // The version field is always serialized and we will use it to store the - // version that the graph is generated from. This helps us set up version - // control. - // For the IR, we are using simple numbers starting with with 0x00000001, - // which was the version we published on Oct 10, 2017. - IR_VERSION_2017_10_10 = 0x0000000000000001; - - // IR_VERSION 2 published on Oct 30, 2017 - // - Added type discriminator to AttributeProto to support proto3 users - IR_VERSION_2017_10_30 = 0x0000000000000002; - - // IR VERSION 3 published on Nov 3, 2017 - // - For operator versioning: - // - Added new message OperatorSetIdProto - // - Added opset_import in ModelProto - // - For vendor extensions, added domain in NodeProto - IR_VERSION_2017_11_3 = 0x0000000000000003; - - // IR VERSION 4 published on Jan 22, 2019 - // - Relax constraint that initializers should be a subset of graph inputs - // - Add type BFLOAT16 - IR_VERSION_2019_1_22 = 0x0000000000000004; - - // IR VERSION 5 published on March 18, 2019 - // - Add message TensorAnnotation. - // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters. - IR_VERSION_2019_3_18 = 0x0000000000000005; - - // IR VERSION 6 published on Sep 19, 2019 - // - Add support for sparse tensor constants stored in model. - // - Add message SparseTensorProto - // - Add sparse initializers - IR_VERSION = 0x0000000000000006; -} - -// Attributes -// -// A named attribute containing either singular float, integer, string, graph, -// and tensor values, or repeated float, integer, string, graph, and tensor values. -// An AttributeProto MUST contain the name field, and *only one* of the -// following content fields, effectively enforcing a C/C++ union equivalent. -message AttributeProto { - - // Note: this enum is structurally identical to the OpSchema::AttrType - // enum defined in schema.h. If you rev one, you likely need to rev the other. - enum AttributeType { - UNDEFINED = 0; - FLOAT = 1; - INT = 2; - STRING = 3; - TENSOR = 4; - GRAPH = 5; - SPARSE_TENSOR = 11; - - FLOATS = 6; - INTS = 7; - STRINGS = 8; - TENSORS = 9; - GRAPHS = 10; - SPARSE_TENSORS = 12; - } - - // The name field MUST be present for this version of the IR. - string name = 1; // namespace Attribute - - // if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function. - // In this case, this AttributeProto does not contain data, and it's a reference of attribute - // in parent scope. - // NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph. - string ref_attr_name = 21; - - // A human-readable documentation for this attribute. Markdown is allowed. - string doc_string = 13; - - // The type field MUST be present for this version of the IR. - // For 0.0.1 versions of the IR, this field was not defined, and - // implementations needed to use has_field hueristics to determine - // which value field was in use. 
For IR_VERSION 0.0.2 or later, this - // field MUST be set and match the f|i|s|t|... field in use. This - // change was made to accomodate proto3 implementations. - AttributeType type = 20; // discriminator that indicates which field below is in use - - // Exactly ONE of the following fields must be present for this version of the IR - float f = 2; // float - int64 i = 3; // int - bytes s = 4; // UTF-8 string - TensorProto t = 5; // tensor value - GraphProto g = 6; // graph - SparseTensorProto sparse_tensor = 22; // sparse tensor value - // Do not use field below, it's deprecated. - // optional ValueProto v = 12; // value - subsumes everything but graph - - repeated float floats = 7; // list of floats - repeated int64 ints = 8; // list of ints - repeated bytes strings = 9; // list of UTF-8 strings - repeated TensorProto tensors = 10; // list of tensors - repeated GraphProto graphs = 11; // list of graph - repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors -} - -// Defines information on value, including the name, the type, and -// the shape of the value. -message ValueInfoProto { - // This field MUST be present in this version of the IR. - string name = 1; // namespace Value - // This field MUST be present in this version of the IR for - // inputs and outputs of the top-level graph. - TypeProto type = 2; - // A human-readable documentation for this value. Markdown is allowed. - string doc_string = 3; -} - -// Nodes -// -// Computation graphs are made up of a DAG of nodes, which represent what is -// commonly called a "layer" or "pipeline stage" in machine learning frameworks. -// -// For example, it can be a node of type "Conv" that takes in an image, a filter -// tensor and a bias tensor, and produces the convolved output. -message NodeProto { - repeated string input = 1; // namespace Value - repeated string output = 2; // namespace Value - - // An optional identifier for this node in a graph. - // This field MAY be absent in ths version of the IR. - string name = 3; // namespace Node - - // The symbolic identifier of the Operator to execute. - string op_type = 4; // namespace Operator - // The domain of the OperatorSet that specifies the operator named by op_type. - string domain = 7; // namespace Domain - - // Additional named attributes. - repeated AttributeProto attribute = 5; - - // A human-readable documentation for this node. Markdown is allowed. - string doc_string = 6; -} - -// Models -// -// ModelProto is a top-level file/container format for bundling a ML model and -// associating its computation graph with metadata. -// -// The semantics of the model are described by the associated GraphProto. -message ModelProto { - // The version of the IR this model targets. See Version enum above. - // This field MUST be present. - int64 ir_version = 1; - - // The OperatorSets this model relies on. - // All ModelProtos MUST have at least one entry that - // specifies which version of the ONNX OperatorSet is - // being imported. - // - // All nodes in the ModelProto's graph will bind against the operator - // with the same-domain/same-op_type operator with the HIGHEST version - // in the referenced operator sets. - repeated OperatorSetIdProto opset_import = 8; - - // The name of the framework or tool used to generate this model. - // This field SHOULD be present to indicate which implementation/tool/framework - // emitted the model. - string producer_name = 2; - - // The version of the framework or tool used to generate this model. 
- // This field SHOULD be present to indicate which implementation/tool/framework - // emitted the model. - string producer_version = 3; - - // Domain name of the model. - // We use reverse domain names as name space indicators. For example: - // `com.facebook.fair` or `com.microsoft.cognitiveservices` - // - // Together with `model_version` and GraphProto.name, this forms the unique identity of - // the graph. - string domain = 4; - - // The version of the graph encoded. See Version enum below. - int64 model_version = 5; - - // A human-readable documentation for this model. Markdown is allowed. - string doc_string = 6; - - // The parameterized graph that is evaluated to execute the model. - GraphProto graph = 7; - - // Named metadata values; keys should be distinct. - repeated StringStringEntryProto metadata_props = 14; -}; - -// StringStringEntryProto follows the pattern for cross-proto-version maps. -// See https://developers.google.com/protocol-buffers/docs/proto3#maps -message StringStringEntryProto { - string key = 1; - string value= 2; -}; - -message TensorAnnotation { - string tensor_name = 1; - // pairs to annotate tensor specified by above. - // The keys used in the mapping below must be pre-defined in ONNX spec. - // For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as - // quantization parameter keys. - repeated StringStringEntryProto quant_parameter_tensor_names = 2; -} - - - -// Graphs -// -// A graph defines the computational logic of a model and is comprised of a parameterized -// list of nodes that form a directed acyclic graph based on their inputs and outputs. -// This is the equivalent of the "network" or "graph" in many deep learning -// frameworks. -message GraphProto { - // The nodes in the graph, sorted topologically. - repeated NodeProto node = 1; - - // The name of the graph. - string name = 2; // namespace Graph - - // A list of named tensor values, used to specify constant inputs of the graph. - // Each TensorProto entry must have a distinct name (within the list) that - // MAY also appear in the input list. - repeated TensorProto initializer = 5; - - // Initializers (see above) stored in sparse format. - repeated SparseTensorProto sparse_initializer = 15; - - // A human-readable documentation for this graph. Markdown is allowed. - string doc_string = 10; - - // The inputs and outputs of the graph. - repeated ValueInfoProto input = 11; - repeated ValueInfoProto output = 12; - - // Information for the values in the graph. The ValueInfoProto.name's - // must be distinct. It is optional for a value to appear in value_info list. - repeated ValueInfoProto value_info = 13; - - // This field carries information to indicate the mapping among a tensor and its - // quantization parameter tensors. For example: - // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated, - // which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model. - repeated TensorAnnotation quantization_annotation = 14; - - // DO NOT USE the following fields, they were deprecated from earlier versions. - // repeated string input = 3; - // repeated string output = 4; - // optional int64 ir_version = 6; - // optional int64 producer_version = 7; - // optional string producer_tag = 8; - // optional string domain = 9; -} - -// Tensors -// -// A serialized tensor value. -message TensorProto { - enum DataType { - UNDEFINED = 0; - // Basic types. 
- FLOAT = 1; // float - UINT8 = 2; // uint8_t - INT8 = 3; // int8_t - UINT16 = 4; // uint16_t - INT16 = 5; // int16_t - INT32 = 6; // int32_t - INT64 = 7; // int64_t - STRING = 8; // string - BOOL = 9; // bool - - // IEEE754 half-precision floating-point format (16 bits wide). - // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits. - FLOAT16 = 10; - - DOUBLE = 11; - UINT32 = 12; - UINT64 = 13; - COMPLEX64 = 14; // complex with float32 real and imaginary components - COMPLEX128 = 15; // complex with float64 real and imaginary components - - // Non-IEEE floating-point format based on IEEE754 single-precision - // floating-point number truncated to 16 bits. - // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits. - BFLOAT16 = 16; - - // Future extensions go here. - } - - // The shape of the tensor. - repeated int64 dims = 1; - - // The data type of the tensor. - // This field MUST have a valid TensorProto.DataType value - int32 data_type = 2; - - // For very large tensors, we may want to store them in chunks, in which - // case the following fields will specify the segment that is stored in - // the current TensorProto. - message Segment { - int64 begin = 1; - int64 end = 2; - } - Segment segment = 3; - - // Tensor content must be organized in row-major order. - // - // Depending on the data_type field, exactly one of the fields below with - // name ending in _data is used to store the elements of the tensor. - - // For float and complex64 values - // Complex64 tensors are encoded as a single array of floats, - // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the - // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] - // is encoded as [1.0, 2.0 ,3.0 ,4.0] - // When this field is present, the data_type field MUST be FLOAT or COMPLEX64. - repeated float float_data = 4 [packed = true]; - - // For int32, uint8, int8, uint16, int16, bool, and float16 values - // float16 values must be bit-wise converted to an uint16_t prior - // to writing to the buffer. - // When this field is present, the data_type field MUST be - // INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16 - repeated int32 int32_data = 5 [packed = true]; - - // For strings. - // Each element of string_data is a UTF-8 encoded Unicode - // string. No trailing null, no leading BOM. The protobuf "string" - // scalar type is not used to match ML community conventions. - // When this field is present, the data_type field MUST be STRING - repeated bytes string_data = 6; - - // For int64. - // When this field is present, the data_type field MUST be INT64 - repeated int64 int64_data = 7 [packed = true]; - - // Optionally, a name for the tensor. - string name = 8; // namespace Value - - // A human-readable documentation for this tensor. Markdown is allowed. - string doc_string = 12; - - // Serializations can either use one of the fields above, or use this - // raw bytes field. The only exception is the string case, where one is - // required to store the content in the repeated bytes string_data field. - // - // When this raw_data field is used to store tensor value, elements MUST - // be stored in as fixed-width, little-endian order. - // Floating-point data types MUST be stored in IEEE 754 format. - // Complex64 elements must be written as two consecutive FLOAT values, real component first. - // Complex128 elements must be written as two consecutive DOUBLE values, real component first. 
- // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false). - // - // Note: the advantage of specific field rather than the raw_data field is - // that in some cases (e.g. int data), protobuf does a better packing via - // variable length storage, and may lead to smaller binary footprint. - // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED - bytes raw_data = 9; - - // Data can be stored inside the protobuf file using type-specific fields or raw_data. - // Alternatively, raw bytes data can be stored in an external file, using the external_data field. - // external_data stores key-value pairs describing data location. Recognized keys are: - // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX - // protobuf model was stored - // - "offset" (optional) - position of byte at which stored data begins. Integer stored as string. - // Offset values SHOULD be multiples 4096 (page size) to enable mmap support. - // - "length" (optional) - number of bytes containing data. Integer stored as string. - // - "checksum" (optional) - SHA1 digest of file specified in under 'location' key. - repeated StringStringEntryProto external_data = 13; - - // Location of the data for this tensor. MUST be one of: - // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field. - // - EXTERNAL - data stored in an external location as described by external_data field. - enum DataLocation { - DEFAULT = 0; - EXTERNAL = 1; - } - - // If value not set, data is stored in raw_data (if set) otherwise in type-specified field. - DataLocation data_location = 14; - - // For double - // Complex128 tensors are encoded as a single array of doubles, - // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the - // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] - // is encoded as [1.0, 2.0 ,3.0 ,4.0] - // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 - repeated double double_data = 10 [packed = true]; - - // For uint64 and uint32 values - // When this field is present, the data_type field MUST be - // UINT32 or UINT64 - repeated uint64 uint64_data = 11 [packed = true]; -} - -// A serialized sparse-tensor value -message SparseTensorProto { - // The sequence of non-default values are encoded as a tensor of shape [NNZ]. - // The default-value is zero for numeric tensors, and empty-string for string tensors. - TensorProto values = 1; - - // The indices of the non-default values, which may be stored in one of two formats. - // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value - // corresponding to the j-th index of the i-th value (in the values tensor). - // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value - // must be the linearized-index of the i-th value (in the values tensor). - // The linearized-index can be converted into an index tuple (k_1,...,k_rank) - // using the shape provided below. - // The indices must appear in ascending order without duplication. - // In the first format, the ordering is lexicographic-ordering: - // e.g., index-value [1,4] must appear before [2,1] - TensorProto indices = 2; - - // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank] - repeated int64 dims = 3; -} - -// Defines a tensor shape. 
A dimension can be either an integer value -// or a symbolic variable. A symbolic variable represents an unknown -// dimension. -message TensorShapeProto { - message Dimension { - oneof value { - int64 dim_value = 1; - string dim_param = 2; // namespace Shape - }; - // Standard denotation can optionally be used to denote tensor - // dimensions with standard semantic descriptions to ensure - // that operations are applied to the correct axis of a tensor. - // Refer to https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition - // for pre-defined dimension denotations. - string denotation = 3; - }; - repeated Dimension dim = 1; -} - -// Types -// -// The standard ONNX data types. -message TypeProto { - - message Tensor { - // This field MUST NOT have the value of UNDEFINED - // This field MUST have a valid TensorProto.DataType value - // This field MUST be present for this version of the IR. - int32 elem_type = 1; - TensorShapeProto shape = 2; - } - - // repeated T - message Sequence { - // The type and optional shape of each element of the sequence. - // This field MUST be present for this version of the IR. - TypeProto elem_type = 1; - }; - - // map - message Map { - // This field MUST have a valid TensorProto.DataType value - // This field MUST be present for this version of the IR. - // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING - int32 key_type = 1; - // This field MUST be present for this version of the IR. - TypeProto value_type = 2; - }; - - oneof value { - // The type of a tensor. - Tensor tensor_type = 1; - - // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values - // as input and output to graphs and nodes. These types are needed to naturally - // support classical ML operators. DNN operators SHOULD restrict their input - // and output types to tensors. - - // The type of a sequence. - Sequence sequence_type = 4; - - // The type of a map. - Map map_type = 5; - - } - - // An optional denotation can be used to denote the whole - // type with a standard semantic description as to what is - // stored inside. Refer to https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition - // for pre-defined type denotations. - string denotation = 6; -} - -// Operator Sets -// -// OperatorSets are uniquely identified by a (domain, opset_version) pair. -message OperatorSetIdProto { - // The domain of the operator set being identified. - // The empty string ("") or absence of this field implies the operator - // set that is defined as part of the ONNX specification. - // This field MUST be present in this version of the IR when referring to any other operator set. - string domain = 1; - - // The version of the operator set being identified. - // This field MUST be present in this version of the IR. - int64 version = 2; -} diff --git a/inc/metadef/inc/common/proto/task.proto b/inc/metadef/inc/common/proto/task.proto deleted file mode 100644 index 0da5631ea..000000000 --- a/inc/metadef/inc/common/proto/task.proto +++ /dev/null @@ -1,179 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. 
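The ge.onnx definitions above mirror the ONNX IR: a ModelProto wraps a GraphProto, whose NodeProto entries reference values by name and carry AttributeProto attributes, while ValueInfoProto and TypeProto describe typed graph inputs and outputs. A small sketch of building a one-node model through the generated C++ API; the ge_onnx.pb.h header name is assumed from standard protoc code generation:

    #include "ge_onnx.pb.h"  // assumed header generated from ge_onnx.proto

    int main() {
      ge::onnx::ModelProto model;
      model.set_ir_version(ge::onnx::IR_VERSION);
      model.set_producer_name("example-exporter");

      // Every model must import at least one operator set; the empty domain
      // denotes the standard ONNX operator set.
      ge::onnx::OperatorSetIdProto* opset = model.add_opset_import();
      opset->set_domain("");
      opset->set_version(11);

      ge::onnx::GraphProto* graph = model.mutable_graph();
      graph->set_name("single_relu");

      ge::onnx::NodeProto* node = graph->add_node();
      node->set_op_type("Relu");
      node->add_input("x");
      node->add_output("y");

      // Graph inputs and outputs are typed ValueInfoProto entries.
      ge::onnx::ValueInfoProto* input = graph->add_input();
      input->set_name("x");
      ge::onnx::TypeProto_Tensor* tensor_type = input->mutable_type()->mutable_tensor_type();
      tensor_type->set_elem_type(ge::onnx::TensorProto::FLOAT);
      tensor_type->mutable_shape()->add_dim()->set_dim_value(4);

      ge::onnx::ValueInfoProto* output = graph->add_output();
      output->set_name("y");

      return model.IsInitialized() ? 0 : 1;
    }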
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -message ModelTaskDef { - string version = 1; - - map attr = 9; // Extended field - repeated TaskDef task = 10; - - uint64 memory_size = 11; - uint32 stream_num = 12; - uint32 event_num = 13; - uint64 weight_size = 14; - - repeated bytes op = 15; // input/output opdef in bytes - - uint64 base_addr = 16; // base addr - uint64 weight_addr = 17; // weight addr - uint32 batch_num = 18; -} - - -message TaskDef { - uint32 id = 1; - uint32 type = 2; - - uint32 stream_id = 10; - uint32 event_id = 11; - - KernelDef kernel = 20; - KernelExDef kernel_ex = 21; - KernelHcclDef kernel_hccl = 25; - EventExDef event_ex = 26; - LogTimeStampDef log_timestamp = 28; - - uint32 label_id = 30; - - MemcpyAsyncDef memcpy_async = 31; - StreamSwitchDef stream_switch = 32; - StreamActiveDef stream_active = 33; - bytes private_def = 34; - uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future - StreamSwitchNDef stream_switch_n = 36; - - LabelSetDef label_set = 37; - LabelGotoExDef label_goto_ex = 38; - LabelSwitchByIndexDef label_switch_by_index = 39; - KernelDefWithHandle kernel_with_handle = 40; -} - -message KernelDef { - KernelContext context = 1; - - string stub_func = 10; - uint32 block_dim = 11; - uint32 args_size = 12; - bytes args = 13; - bytes sm_desc = 14; - bytes flowtable = 15; - string so_name = 16; - string kernel_name = 17; - bytes kernel_ext_info = 18; - uint32 kernel_ext_info_size = 19; -} - -message KernelDefWithHandle { - KernelContext context = 1; - - uint64 handle = 10; - string dev_func = 11; - uint32 block_dim = 12; - uint32 args_size = 13; - bytes args = 14; - bytes sm_desc = 15; - string original_kernel_key = 16; - string node_info = 17; -} - -message KernelContext { - uint32 kernel_type = 1; - uint32 op_id = 2; // OP type in CCE - uint32 kernel_func_id = 3; - uint32 op_index = 4; // TE/Custom operator - bool is_flowtable = 5; // Identify whether args is a flowtable structure - bytes args_offset = 6; // args offset information - uint32 args_count = 7; // args count - repeated uint32 origin_op_index = 8; -} - - -message KernelExDef { - uint32 flags = 1; - - uint32 op_index = 4; - uint32 args_size = 12; - bytes args = 13; - bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput - uint32 task_info_size = 15; - bytes kernel_ext_info = 16; - uint32 kernel_ext_info_size = 17; -} - - -message KernelHcclDef { - uint32 op_index = 8; - string hccl_type = 9; -} - - -message EventExDef { - uint32 op_index = 1; - uint32 event_type = 2; -} - -message LogTimeStampDef { - uint64 logid = 1; - bool notify = 2; - uint32 flat = 3; -} - -message MemcpyAsyncDef { - uint64 dst = 1; - uint64 dst_max = 2; - uint64 src = 3; - uint64 count = 4; - uint32 kind = 5; - uint32 op_index = 6; -} - -message StreamSwitchDef { - uint32 op_index = 1; - uint32 true_stream_id = 2; - int64 value = 3; - uint64 value_ptr = 4; - uint32 data_type = 5; -} - -message StreamActiveDef { - uint32 op_index = 1; - uint32 active_stream_id = 2; -} - -message StreamSwitchNDef { - uint32 op_index = 1; - uint32 size = 2; - repeated int64 target_value = 3; - repeated uint32 true_stream_id = 4; - uint32 element_size = 5; - uint32 data_type = 6; -} - -message LabelSetDef { - uint32 
op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelGotoExDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelSwitchByIndexDef { - uint32 op_index = 1; - uint32 label_max = 2; -} diff --git a/inc/metadef/inc/common/util/ai_core/aicore_manager/aicore_util_manager.h b/inc/metadef/inc/common/util/ai_core/aicore_manager/aicore_util_manager.h deleted file mode 100644 index df0728e12..000000000 --- a/inc/metadef/inc/common/util/ai_core/aicore_manager/aicore_util_manager.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef AICORE_UTIL_MANAGER_H_ -#define AICORE_UTIL_MANAGER_H_ - -#include -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" - -namespace fe { -class AICoreUtilManager { - public: - static AICoreUtilManager &Instance(); - /* - * to initialize the aicore configuration - * param[in] the options of init - * param[in] engine Name - * param[in] socVersion soc version from ge - * return Status(SUCCESS/FAILED) - */ - Status Initialize(const std::map &options, std::string &soc_version); - - /* - * to release the source of fusion manager - * return Status(SUCCESS/FAILED) - */ - Status Finalize(); - - private: - AICoreUtilManager(); - ~AICoreUtilManager(); - bool is_init_; -}; -} // namespace fe -#endif // AICORE_UTIL_MANAGER_H \ No newline at end of file diff --git a/inc/metadef/inc/common/util/ai_core/common/aicore_util_attr_define.h b/inc/metadef/inc/common/util/ai_core/common/aicore_util_attr_define.h deleted file mode 100644 index 237d9d0e3..000000000 --- a/inc/metadef/inc/common/util/ai_core/common/aicore_util_attr_define.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_ATTR_DEFINE_H_ -#define INC_COMMON_UTILS_AI_CORE_COMMON_ATTR_DEFINE_H_ - -#include - -namespace fe { -static const std::string SCOPE_ID_ATTR = "fusion_scope"; - -static const std::string FE_IMPLY_TYPE = "_fe_imply_type"; - -static const std::string PARENT_OP_TYPE = "parentOpType"; - -static const std::string ATTR_NAME_TASK_L2_FUSION_INFO_EXTEND_PTR = "task_l2_fusion_info_extend_content"; - -static const std::string ATTR_DATA_DUMP_REF = "_datadump_ref"; - -static const std::string ATTR_NAME_L2_FUSION_EXTEND_PTR = "l2_fusion_extend_content"; - -static const std::string L1_OPTIMIZED = "l1_optimized"; - -static const std::string L2_OPTIMIZED = "l2_optimized"; - -static const std::string ATTR_NAME_UNKNOWN_SHAPE = "_unknown_shape"; - -static const std::string ATTR_NAME_IS_UNKNOWN_GRAPH = "_fe_is_unknown_graph"; - -static const std::string ATTR_NAME_IS_UNKNOWN_SHAPE_OP = "_fe_is_unknown_shape_op"; - -static const std::string ATTR_NAME_TVM_CACHE_READ_MODE = "tvm_cache_read_mode"; - -static const std::string ATTR_NAME_TBE_KERNEL_SIZE = "_tbeKernelSize"; -} // namespace fe -#endif diff --git a/inc/metadef/inc/common/util/ai_core/common/aicore_util_constants.h b/inc/metadef/inc/common/util/ai_core/common/aicore_util_constants.h deleted file mode 100644 index 1deced908..000000000 --- a/inc/metadef/inc/common/util/ai_core/common/aicore_util_constants.h +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_CONSTANTS_H_ -#define INC_COMMON_UTILS_AI_CORE_COMMON_CONSTANTS_H_ - -#include - -namespace fe { -static const std::string CORE_TYPE = "_coretype"; -/* engine name of AI core and vector core */ -static const std::string AI_CORE_NAME = "AIcoreEngine"; -static const std::string VECTOR_CORE_NAME = "VectorEngine"; - -static const int64_t IS_UNKNOWN_SHAPE_VALUE = 1; - -static const int64_t SHAPE_UNKNOWN_DIM = -1; - -static const int64_t SHAPE_UNKNOWN_DIM_NUM = -2; - -static const std::string SOC_VERSION_ASCEND310 = "Ascend310"; -static const std::string SOC_VERSION_ASCEND610 = "Ascend610"; -static const std::string SOC_VERSION_ASCEND615 = "Ascend615"; -static const std::string SOC_VERSION_ASCEND710 = "Ascend710"; -static const std::string SOC_VERSION_ASCEND710P = "Ascend710Pro"; -static const std::string SOC_VERSION_ASCEND910A = "Ascend910A"; -static const std::string SOC_VERSION_ASCEND910B = "Ascend910B"; -static const std::string SOC_VERSION_ASCEND910PROA = "Ascend910ProA"; -static const std::string SOC_VERSION_ASCEND910PROB = "Ascend910ProB"; -static const std::string SOC_VERSION_ASCEND910PREMIUMA = "Ascend910PremiumA"; -static const std::string SOC_VERSION_HI3796CV300ES = "Hi3796CV300ES"; -static const std::string SOC_VERSION_HI3796CV300CS = "Hi3796CV300CS"; - -static const std::vector SOC_VERSION_CLOUD_LIST = { - SOC_VERSION_ASCEND910A, SOC_VERSION_ASCEND910B, SOC_VERSION_ASCEND910PROA, - SOC_VERSION_ASCEND910PROB, SOC_VERSION_ASCEND910PREMIUMA -}; - -static const std::vector SOC_VERSION_DC_LIST = {SOC_VERSION_ASCEND610, SOC_VERSION_ASCEND615, - SOC_VERSION_ASCEND710, SOC_VERSION_ASCEND710P}; -} // namespace fe -#endif diff --git a/inc/metadef/inc/common/util/ai_core/common/aicore_util_types.h b/inc/metadef/inc/common/util/ai_core/common/aicore_util_types.h deleted file mode 100644 index eeebb653d..000000000 --- a/inc/metadef/inc/common/util/ai_core/common/aicore_util_types.h +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_TYPES_H_ -#define INC_COMMON_UTILS_AI_CORE_COMMON_TYPES_H_ - -#include "graph/anchor.h" -#include "graph/types.h" -#include "runtime/kernel.h" -#include -#include -#include - -namespace fe { -struct FusionOpSrc { - uint32_t src_op_id; - ge::AnchorPtr src_anchor; - int32_t fusion_src_index; - int32_t fusion_dst_index; -}; - -struct FusionOpDst { - uint32_t dst_op_id; - ge::AnchorPtr dst_anchor; -}; - -struct FusionDataFlow { - std::pair edge; - std::pair node_dataindex_pair; -}; - -typedef struct tag_l2_fusion_data { - uint32_t l2Index; - uint64_t l2Addr; - uint64_t l2PageNum; -} L2FusionData_t; -typedef std::map L2FusionDataMap_t; - -typedef struct tag_fe_sm_desc { - rtL2Ctrl_t l2ctrl; - std::string node_name[8]; - uint8_t output_index[8]; -} fe_sm_desc_t; - -typedef struct TagTaskL2FusionInfo { - std::string node_name; - fe_sm_desc_t l2_info; - L2FusionDataMap_t input; - L2FusionDataMap_t output; - uint32_t is_used; -} TaskL2FusionInfo_t; - -using L2FusionInfoPtr = std::shared_ptr; - -typedef struct ToOpStruct { - int64_t op_l1_space = 0; - std::vector op_l1_fusion_type; - int64_t op_l1_workspace_flag = 0; // for workspace flag - int64_t op_l1_workspace_size = 0; - std::vector> valid_input_shape; - std::vector> valid_output_shape; - std::vector> - slice_input_offset; // conv & pooling & ReadSelect - std::vector> slice_output_offset; // WriteSelect - std::vector total_shape; - uint32_t split_index = 0; - ToOpStruct() { - // set invalid value for essential variable - op_l1_space = -1; - op_l1_workspace_size = -1; - } -} ToOpStruct_t; - -enum SlicePattern { - ELEMENT_WISE = 0, - ELEMENT_WISE_BROADCAST, - BROADCAST, - SLIDING_WINDOW, - SLIDING_WINDOW_DECONV, - CUBE_MATMUL, - SLICE_PATTERN_REDUCE, - SLICE_PATTERN_RESIZE, - SLICE_PATTERN_SCATTER, - SLICE_PATTERN_SEGMENT, - PATTERN_RESERVED -}; - -enum OpImplType { - EN_IMPL_CUSTOM_CONSTANT_CCE = 0, // custom constant op - EN_IMPL_CUSTOM_TIK, // custom tik op - EN_IMPL_CUSTOM_TBE, // custom tbe op - EN_IMPL_HW_CONSTANT_CCE, // Huawei built-in constant op - EN_IMPL_HW_GENERAL_CCE, // Huawei built-in cce op - EN_IMPL_HW_TIK, // Huawei built-in tik op - EN_IMPL_HW_TBE, // Huawei built-in tbe op - EN_IMPL_RL, // RL op - EN_IMPL_PLUGIN_TBE, // Huawei built-in tbe plugin op - EN_IMPL_VECTOR_CORE_HW_TBE, // Huawei built-in tbe op - EN_IMPL_VECTOR_CORE_CUSTOM_TBE, // custom tbe op - EN_IMPL_NON_PERSISTENT_CUSTOM_TBE, // custom tbe op - EN_RESERVED // reserved value -}; - -// Dont change the order, only add new mode in the end -enum L2Mode { EN_L2_CLOSE = 0, EN_L2_BUFFER_OPTIMIZE, EN_L2_CACHE_NORMAL, EN_L2_CACHE_RC }; -enum BufferFusionMode { EN_OPTIMIZE_DISABLE = 0, EN_L2_BUFFER, EN_L2_FUSION}; - -static const std::map DATATYPE_SIZE_MAP{ - {ge::DT_FLOAT, sizeof(float)}, - {ge::DT_FLOAT16, sizeof(int16_t)}, - {ge::DT_INT8, sizeof(int8_t)}, - {ge::DT_INT32, sizeof(int32_t)}, - {ge::DT_UINT8, sizeof(uint8_t)}, - {ge::DT_UINT32, sizeof(uint32_t)}, - {ge::DT_INT16, sizeof(int16_t)}, - {ge::DT_UINT16, sizeof(uint16_t)}, - {ge::DT_INT64, sizeof(int64_t)}, - {ge::DT_UINT64, sizeof(uint64_t)}, - {ge::DT_DOUBLE, sizeof(double)}, - {ge::DT_BOOL, sizeof(bool)}, - {ge::DT_DUAL, sizeof(float) + sizeof(int8_t)}, - {ge::DT_DUAL_SUB_UINT8, sizeof(int8_t)}, - {ge::DT_DUAL_SUB_INT8, sizeof(int8_t)} -}; - -enum OpReduceType { - REDUCE_MEAN = 0, - REDUCE_ADD, - REDUCE_MAX, - REDUCE_MIN, -}; - -} -#endif diff --git a/inc/metadef/inc/common/util/ai_core/common/graph_comm.h 
b/inc/metadef/inc/common/util/ai_core/common/graph_comm.h deleted file mode 100644 index abde4437a..000000000 --- a/inc/metadef/inc/common/util/ai_core/common/graph_comm.h +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_GRAPH_COMMON_H_ -#define INC_COMMON_UTILS_AI_CORE_COMMON_GRAPH_COMMON_H_ - -#include "graph/compute_graph.h" -#include "common/aicore_util_types.h" -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" - -#include -#include -#include -#include - -namespace fe { - -using k_scope_node_map_t = std::map>; -using k_scope_node_pair_t = std::pair>; - -class GraphCommImpl; -using GraphCommImplPtr = std::unique_ptr; - -class GraphComm { -public: - GraphComm(const string &engine_name); - virtual ~GraphComm(); - GraphComm(const GraphComm &in) = delete; - GraphComm &operator=(const GraphComm &in) = delete; - - Status GetscopeNodeMap(ge::ComputeGraph &graph, k_scope_node_map_t &fusion_map); - - Status CopyFusionOpNodes(vector &fus_input_edge_list, - vector &fus_output_edge_list, - vector &fus_nodelist, - ge::OpDescPtr fusion_op_desc, - ge::ComputeGraphPtr fusion_graph); - - Status CopyFusionOpEdges(ge::OpDescPtr fusion_op_desc, - ge::ComputeGraph &orig_graph, - ge::ComputeGraphPtr fusion_graph); - - Status GetNodeDataFlowMap( - const ge::NodePtr &fus_node, - std::map> - &fusion_op_anchors_map, - ge::kFusionDataFlowVec_t &fus_dataflow_list, const int &map_type); - - Status GetFusionNodeEdgeList(std::vector &fus_nodelist, - std::vector &fus_input_edge_list, - std::vector &fus_output_edge_list); - void ClearFusionSrc(); - - void ClearFusionDst(); - - void - AddFusionOutputSrc(const uint32_t &src_op_id, const ge::AnchorPtr &src_anchor, - const int32_t &fusion_src_index, - std::pair &node_dataindex_pair); - - void AddFusionInputSrc(const uint32_t &src_op_id, - const ge::AnchorPtr &src_anchor, - const int32_t &fusion_dst_index, - std::pair &node_dataindex_pair); - - void SaveFusionDst(const uint32_t &dst_op_id, ge::AnchorPtr dst_anchor); - - bool IsFusionDstExist(const uint32_t &dst_op_id, - const ge::AnchorPtr &dst_anchor); - - bool GetFusionSrc(const uint32_t &src_op_id, const ge::AnchorPtr &src_anchor, - int32_t &fusion_src_index, int32_t &fusion_dst_index); - - Status - GetFusionNodeCtrlEdgeList(vector &fus_nodelist, - vector &fus_input_ctrl_edge_list, - vector &fus_output_ctrl_edge_list); - - Status MergeFusionNodeEdgeList(ge::NodePtr &fus_node, - vector &fus_nodelist, - vector &fus_input_edge_list, - vector &fus_output_edge_list); - - Status MergeFusionNodeCtrlEdgeList(ge::NodePtr &fus_node, - vector &fus_nodelist, - vector &fus_input_edge_list, - vector &fus_output_edge_list); - - string GetEngineName(); - -private: - Status - MergeFusionNodeInputEdgeList(ge::NodePtr fus_node, - std::vector &fus_nodelist, - std::vector &fus_input_edge_list); - Status - MergeFusionNodeOutputEdgeList(ge::NodePtr fus_node, - std::vector 
&fus_nodelist, - std::vector &fus_output_edge_list); - - string engine_name_; - - std::vector exist_fusion_src_list_; - std::vector exist_fusion_dst_list_; - - // std::vector> - ge::kFusionDataFlowVec_t fusion_input_dataflow_list_; - - // std::vector> - ge::kFusionDataFlowVec_t fusion_output_dataflow_list_; - - GraphCommImplPtr graph_comm_impl_ptr_; -}; -} // namespace fe -#endif diff --git a/inc/metadef/inc/common/util/ai_core/common/json_util.h b/inc/metadef/inc/common/util/ai_core/common/json_util.h deleted file mode 100644 index 5bca3b985..000000000 --- a/inc/metadef/inc/common/util/ai_core/common/json_util.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef PROJECT_JSON_UTIL_H -#define PROJECT_JSON_UTIL_H - -#include "graph/compute_graph.h" - -#include "common/aicore_util_types.h" -#include "fusion_engine/graph_tuner/graph_tuner_errorcode.h" - -const std::string L1_FUSION_EXTEND_CONTENT = "_l1_fusion_extend_content"; -const std::string L2_FUSION_EXTEND_CONTENT = "l2_fusion_extend_content"; -const std::string TASK_L2_FUSION_INFO_EXTEND_CONTENT = "task_l2_fusion_info_extend_content"; -const std::string L1_FUSION_TO_OP_STRUCT = "_l1fusion_ToOpStruct"; -const std::string L2_FUSION_TO_OP_STRUCT = "_l2fusion_ToOpStruct"; -const std::string TASK_L2_FUSION_INFO = "_task_L2FusionInfo"; - -namespace tune { -using ToOpStructPtr = std::shared_ptr; -using L2FusionInfoPtr = std::shared_ptr; - -Status GetL1InfoFromJson(ge::OpDescPtr opDescPtr); - -Status GetL2InfoFromJson(ge::OpDescPtr opDescPtr); - -Status GetTaskL2FusionInfoFromJson(ge::OpDescPtr opDescPtr); - -Status ReadGraphInfoFromJson(ge::ComputeGraph &graph); - -Status WriteGraphInfoToJson(ge::ComputeGraph &graph); - -void GetL2ToOpStructFromJson(ge::OpDescPtr &opDescPtr, ToOpStructPtr &l2InfoPtr); - -void GetL1ToOpStructFromJson(ge::OpDescPtr &opDescPtr, ToOpStructPtr &l1InfoPtr); - -L2FusionInfoPtr GetL2FusionInfoFromJson(ge::OpDescPtr &opDescPtr); - -void SetL2FusionInfoToNode(ge::OpDescPtr &opDescPtr, L2FusionInfoPtr &l2FusionInfoPtr); -} // namespace tune -#endif //PROJECT_JSON_UTIL_H diff --git a/inc/metadef/inc/common/util/ai_core/common/l2_stream_info.h b/inc/metadef/inc/common/util/ai_core/common/l2_stream_info.h deleted file mode 100644 index 8a64eb8e3..000000000 --- a/inc/metadef/inc/common/util/ai_core/common/l2_stream_info.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef L2_STREAM_INFO_H_
-#define L2_STREAM_INFO_H_
-
-#include
-#include
-#include
-#include "register/graph_optimizer/graph_optimize_register_error_codes.h"
-#include "runtime/base.h"
-#include "cce/l2fusion_struct.hpp"
-
-namespace fe {
-class StreamL2Info {
- public:
-  StreamL2Info(const StreamL2Info &) = delete;
-  StreamL2Info &operator=(const StreamL2Info &) = delete;
-  static StreamL2Info& Instance();
-  Status GetStreamL2Info(rtStream_t stream_id, string node_name, fusion::TaskL2Info_t *&l2_data);
-  Status SetStreamL2Info(const rtStream_t &stream_id, fusion::TaskL2InfoFEMap_t &l2_alloc_res);
-
- private:
-  StreamL2Info();
-  ~StreamL2Info();
-  mutable std::mutex stream_l2_mutex_;
-  std::map stream_l2_map_;
-};
-} // namespace fe
-
-#endif // L2_STREAM_INFO_H_
\ No newline at end of file
diff --git a/inc/metadef/inc/common/util/ai_core/common/scope_allocator.h b/inc/metadef/inc/common/util/ai_core/common/scope_allocator.h
deleted file mode 100644
index e81282b38..000000000
--- a/inc/metadef/inc/common/util/ai_core/common/scope_allocator.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_SCOPE_ALLOCATOR_H_
-#define INC_COMMON_UTILS_AI_CORE_COMMON_SCOPE_ALLOCATOR_H_
-
-#include "graph/op_desc.h"
-
-namespace fe {
-class ScopeAllocator {
- public:
-  ScopeAllocator();
-  virtual ~ScopeAllocator();
-  ScopeAllocator(const ScopeAllocator& in) = delete;
-  ScopeAllocator& operator = (const ScopeAllocator& in) = delete;
-
- public:
-  void Init();
-  int64_t GetCurrentScopeId();
-  int64_t AllocateScopeId(void);
-  bool HasScopeAttr(ge::ConstOpDescPtr opdef);
-  bool GetScopeAttr(ge::ConstOpDescPtr opdef, int64_t &scope_id);
-  bool SetScopeAttr(ge::OpDescPtr opdef, int64_t scope_id);
-  bool ResetScopeId(int64_t scope_id);
- private:
-  int64_t scope_id;
-};
-} // namespace fe
-#endif
diff --git a/inc/metadef/inc/common/util/ai_core/param_calculate/tensorsize_calculator.h b/inc/metadef/inc/common/util/ai_core/param_calculate/tensorsize_calculator.h
deleted file mode 100644
index 9131b1ba2..000000000
--- a/inc/metadef/inc/common/util/ai_core/param_calculate/tensorsize_calculator.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
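As a usage sketch for the ScopeAllocator interface above (TagFusionScope is a hypothetical helper; the attribute key written by SetScopeAttr is an implementation detail not visible in this header):

    #include "common/util/ai_core/common/scope_allocator.h"

    // Give two ops that should be fused together the same scope id, using only
    // the public methods declared above.
    void TagFusionScope(fe::ScopeAllocator &allocator,
                        const ge::OpDescPtr &op_a, const ge::OpDescPtr &op_b) {
      const int64_t scope_id = allocator.AllocateScopeId();
      (void)allocator.SetScopeAttr(op_a, scope_id);
      (void)allocator.SetScopeAttr(op_b, scope_id);
    }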
- */ - -#ifndef TENSORSIZE_CALCULATOR_H -#define TENSORSIZE_CALCULATOR_H - -#include "graph_optimizer/graph_optimize_register_error_codes.h" - -#include -#include -#include "graph/compute_graph.h" -#include "graph/op_desc.h" - -namespace fe { -class TensorSizeCalculator { - public: - /** - * Calculate the tensor size of input and output of each opdesc - * @param op_desc opdesc object - * @param op_impl_type op impl type - * @return status SUCCESS or FAILED - */ - static Status CalculateOpTensorSize(ge::OpDesc &op_desc); - - private: - static Status CalcInputOpTensorSize(ge::OpDesc &op_desc, - int32_t &output_real_calc_flag); - - static Status CalcOutputOpTensorSize(ge::OpDesc &op_desc, - int32_t &output_real_calc_flag); -}; -} // namespace fe - -#endif // TENSORSIZE_CALCULATOR_H diff --git a/inc/metadef/inc/common/util/compress/compress.h b/inc/metadef/inc/common/util/compress/compress.h deleted file mode 100644 index b702324eb..000000000 --- a/inc/metadef/inc/common/util/compress/compress.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef COMPRESS_H -#define COMPRESS_H - -#include - -enum CmpStatus { - RET_SUCCESS = 0, - RET_ERROR = -1 -}; - -struct CompressConfig { - size_t inputSize; // length of data to compress - size_t engineNum; // how many decompress engines - size_t maxRatio; // how much size of a basic compression block, only 64 supported now (8x: 64 4x: 32) - size_t channel; // channels of L2 or DDR. For load balance - size_t fractalSize; // size of compressing block - bool isTight; // whether compose compressed data tightly - size_t init_offset; -}; - -CmpStatus CompressWeights(char* input, - const CompressConfig& compressConfig, - char* indexs, - char* output, - size_t& compressedLength); - - -#endif // COMPRESS_H diff --git a/inc/metadef/inc/common/util/compress/compress_weight.h b/inc/metadef/inc/common/util/compress/compress_weight.h deleted file mode 100644 index 36521a3a2..000000000 --- a/inc/metadef/inc/common/util/compress/compress_weight.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
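A minimal sketch of driving the CompressWeights entry point declared in compress.h above; the buffer sizes and most CompressConfig values are illustrative assumptions, except maxRatio = 64, which the header comment states is the only supported value:

    #include <vector>
    #include "common/util/compress/compress.h"

    // Compress a weight blob with a single decompress engine. Buffer sizing here
    // is illustrative only.
    CmpStatus CompressBlob(std::vector<char> &weights) {
      CompressConfig cfg{};
      cfg.inputSize   = weights.size();
      cfg.engineNum   = 1;
      cfg.maxRatio    = 64;    // only 64 supported, per the header comment
      cfg.channel     = 1;
      cfg.fractalSize = 512;   // assumed compression block size
      cfg.isTight     = true;
      cfg.init_offset = 0;

      std::vector<char> index(1024);             // assumed index buffer size
      std::vector<char> output(weights.size());  // worst-case output buffer
      size_t compressed_len = 0;
      return CompressWeights(weights.data(), cfg, index.data(), output.data(),
                             compressed_len);
    }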
- */ - -#ifndef COMPRESS_WEIGHT_H -#define COMPRESS_WEIGHT_H - -#include "compress.h" - -const int SHAPE_SIZE_WEIGHT = 4; - -struct CompressOpConfig { - int64_t wShape[SHAPE_SIZE_WEIGHT]; - size_t compressTilingK; - size_t compressTilingN; - struct CompressConfig compressConfig; -}; - -extern "C" CmpStatus CompressWeightsConv2D(const char *const input, - char *const zipBuffer, - char *const infoBuffer, - CompressOpConfig *const param); -#endif // COMPRESS_WEIGHT_H diff --git a/inc/metadef/inc/common/util/error_manager/error_manager.h b/inc/metadef/inc/common/util/error_manager/error_manager.h deleted file mode 100644 index 79546df9a..000000000 --- a/inc/metadef/inc/common/util/error_manager/error_manager.h +++ /dev/null @@ -1,252 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ERROR_MANAGER_H_ -#define ERROR_MANAGER_H_ - -#include -#include -#include -#include -#include -#include - -namespace error_message { -#ifdef __GNUC__ -int32_t FormatErrorMessage(char *str_dst, size_t dst_max, const char *format, ...) __attribute__((format(printf, 3, 4))); -#define TRIM_PATH(x) (((x).find_last_of('/') != std::string::npos) ? (x).substr((x).find_last_of('/') + 1u) : (x)) -#else -int32_t FormatErrorMessage(char *str_dst, size_t dst_max, const char *format, ...); -#define TRIM_PATH(x) (((x).find_last_of('\\') != std::string::npos) ? (x).substr((x).find_last_of('\\') + 1u) : (x)) -#endif -} - -constexpr size_t const LIMIT_PER_MESSAGE = 512U; - -/// -/// @brief Report error message -/// @param [in] key: vector parameter key -/// @param [in] value: vector parameter value -/// -#define REPORT_INPUT_ERROR(error_code, key, value) \ - ErrorManager::GetInstance().ATCReportErrMessage(error_code, key, value) - -/// -/// @brief Report error message -/// @param [in] key: vector parameter key -/// @param [in] value: vector parameter value -/// -#define REPORT_ENV_ERROR(error_code, key, value) \ - ErrorManager::GetInstance().ATCReportErrMessage(error_code, key, value) - -#define REPORT_INNER_ERROR(error_code, fmt, ...) 
\ -do { \ - std::vector error_string(LIMIT_PER_MESSAGE, '\0'); \ - if (error_message::FormatErrorMessage(error_string.data(), error_string.size(), fmt, ##__VA_ARGS__) > 0) { \ - if (error_message::FormatErrorMessage(error_string.data(), error_string.size(), "%s[FUNC:%s][FILE:%s][LINE:%d]", \ - error_string.data(), &__FUNCTION__[0], TRIM_PATH(std::string(__FILE__)).c_str(), __LINE__) > 0) { \ - (void)ErrorManager::GetInstance().ReportInterErrMessage(error_code, std::string(error_string.data())); \ - } \ - } \ -} while (false) - -#define REPORT_CALL_ERROR REPORT_INNER_ERROR - -namespace error_message { - // first stage - constexpr char const *kInitialize = "INIT"; - constexpr char const *kModelCompile = "COMP"; - constexpr char const *kModelLoad = "LOAD"; - constexpr char const *kModelExecute = "EXEC"; - constexpr char const *kFinalize = "FINAL"; - - // SecondStage - // INITIALIZE - constexpr char const *kParser = "PARSER"; - constexpr char const *kOpsProtoInit = "OPS_PRO"; - constexpr char const *kSystemInit = "SYS"; - constexpr char const *kEngineInit = "ENGINE"; - constexpr char const *kOpsKernelInit = "OPS_KER"; - constexpr char const *kOpsKernelBuilderInit = "OPS_KER_BLD"; - // MODEL_COMPILE - constexpr char const *kPrepareOptimize = "PRE_OPT"; - constexpr char const *kOriginOptimize = "ORI_OPT"; - constexpr char const *kSubGraphOptimize = "SUB_OPT"; - constexpr char const *kMergeGraphOptimize = "MERGE_OPT"; - constexpr char const *kPreBuild = "PRE_BLD"; - constexpr char const *kStreamAlloc = "STM_ALLOC"; - constexpr char const *kMemoryAlloc = "MEM_ALLOC"; - constexpr char const *kTaskGenerate = "TASK_GEN"; - // COMMON - constexpr char const *kOther = "DEFAULT"; - - struct Context { - uint64_t work_stream_id; - std::string first_stage; - std::string second_stage; - std::string log_header; - }; -} - -class ErrorManager { - public: - /// - /// @brief Obtain ErrorManager instance - /// @return ErrorManager instance - /// - static ErrorManager &GetInstance(); - - /// - /// @brief init - /// @return int 0(success) -1(fail) - /// - int32_t Init(); - - /// - /// @brief init - /// @param [in] path: current so path - /// @return int 0(success) -1(fail) - /// - int32_t Init(std::string path); - - int32_t ReportInterErrMessage(std::string error_code, const std::string &error_msg); - - /// - /// @brief Report error message - /// @param [in] error_code: error code - /// @param [in] args_map: parameter map - /// @return int 0(success) -1(fail) - /// - int32_t ReportErrMessage(std::string error_code, const std::map &args_map); - - /// - /// @brief output error message - /// @param [in] handle: print handle - /// @return int 0(success) -1(fail) - /// - int32_t OutputErrMessage(int32_t handle); - - /// - /// @brief output message - /// @param [in] handle: print handle - /// @return int 0(success) -1(fail) - /// - int32_t OutputMessage(int32_t handle); - - std::string GetErrorMessage(); - - std::string GetWarningMessage(); - - /// - /// @brief Report error message - /// @param [in] key: vector parameter key - /// @param [in] value: vector parameter value - /// - void ATCReportErrMessage(std::string error_code, const std::vector &key = {}, - const std::vector &value = {}); - - /// - /// @brief report graph compile failed message such as error code and op_name in mstune case - /// @param [in] graph_name: root graph name - /// @param [in] msg: failed message map, key is error code, value is op_name - /// @return int 0(success) -1(fail) - /// - int32_t ReportMstuneCompileFailedMsg(const std::string 
&root_graph_name, - const std::map &msg); - - /// - /// @brief get graph compile failed message in mstune case - /// @param [in] graph_name: graph name - /// @param [out] msg_map: failed message map, key is error code, value is op_name list - /// @return int 0(success) -1(fail) - /// - int32_t GetMstuneCompileFailedMsg(const std::string &graph_name, - std::map> &msg_map); - - // @brief generate work_stream_id by current pid and tid, clear error_message stored by same work_stream_id - // used in external api entrance, all sync api can use - void GenWorkStreamIdDefault(); - - // @brief generate work_stream_id by args sessionid and graphid, clear error_message stored by same work_stream_id - // used in external api entrance - void GenWorkStreamIdBySessionGraph(uint64_t session_id, uint64_t graph_id); - - const std::string &GetLogHeader(); - - error_message::Context &GetErrorManagerContext(); - - void SetErrorContext(error_message::Context error_context); - - void SetStage(const std::string &first_stage, const std::string &second_stage); - - private: - struct ErrorInfoConfig { - std::string error_id; - std::string error_message; - std::vector arg_list; - }; - - struct ErrorItem { - std::string error_id; - std::string error_message; - - bool operator==(const ErrorItem &rhs) const { - return (error_id == rhs.error_id) && (error_message == rhs.error_message); - } - }; - - ErrorManager() {} - ~ErrorManager() {} - - ErrorManager(const ErrorManager &) = delete; - ErrorManager(ErrorManager &&) = delete; - ErrorManager &operator=(const ErrorManager &) = delete; - ErrorManager &operator=(ErrorManager &&) = delete; - - int32_t ParseJsonFile(std::string path); - - int32_t ReadJsonFile(const std::string &file_path, void *handle); - - void ClassifyCompileFailedMsg(const std::map &msg, - std::map> &classfied_msg); - - bool IsInnerErrorCode(const std::string &error_code); - - inline bool IsValidErrorCode(const std::string &error_code) { - const uint32_t kErrorCodeValidLength = 6; - return error_code.size() == kErrorCodeValidLength; - } - - std::vector &GetErrorMsgContainerByWorkId(uint64_t work_id); - std::vector &GetWarningMsgContainerByWorkId(uint64_t work_id); - - void ClearErrorMsgContainerByWorkId(uint64_t work_stream_id); - void ClearWarningMsgContainerByWorkId(uint64_t work_stream_id); - - bool is_init_ = false; - std::mutex mutex_; - std::map error_map_; - std::map>> compile_failed_msg_map_; - - std::map> error_message_per_work_id_; - std::map> warning_messages_per_work_id_; - - thread_local static error_message::Context error_context_; -}; - -#endif // ERROR_MANAGER_H_ diff --git a/inc/metadef/inc/common/util/platform_info.h b/inc/metadef/inc/common/util/platform_info.h deleted file mode 100644 index 81358649c..000000000 --- a/inc/metadef/inc/common/util/platform_info.h +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
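To show how the ErrorManager interface above fits together, a short sketch of the reporting flow; the error code string "E19999" and the message text are illustrative, while the stage constants and the REPORT_INNER_ERROR macro come from the header itself:

    #include <string>
    #include "common/util/error_manager/error_manager.h"

    // Bind subsequent messages to the current thread's work stream, mark the
    // compile stage, report one inner error, then read back the accumulated text.
    void ReportCompileError() {
      ErrorManager::GetInstance().GenWorkStreamIdDefault();
      ErrorManager::GetInstance().SetStage(error_message::kModelCompile,
                                           error_message::kPreBuild);
      REPORT_INNER_ERROR("E19999", "unexpected node count: %d", 0);
      const std::string accumulated = ErrorManager::GetInstance().GetErrorMessage();
      (void)accumulated;
    }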
- */ - -#ifndef PLATFORM_INFO_H -#define PLATFORM_INFO_H - -#include -#include -#include "platform_info_def.h" -#include "platform_infos_def.h" - -namespace fe { -class PlatformInfoManager { - public: - PlatformInfoManager(const PlatformInfoManager &) = delete; - PlatformInfoManager &operator=(const PlatformInfoManager &) = delete; - - static PlatformInfoManager &Instance(); - uint32_t InitializePlatformInfo(); - uint32_t Finalize(); - - uint32_t GetPlatformInfo(const std::string SoCVersion, - PlatformInfo &platform_info, - OptionalInfo &opti_compilation_info); - - uint32_t GetPlatformInfoWithOutSocVersion(PlatformInfo &platform_info, - OptionalInfo &opti_compilation_info); - - void SetOptionalCompilationInfo(OptionalInfo &opti_compilation_info); - - uint32_t GetPlatformInfos(const std::string SoCVersion, - PlatFormInfos &platform_info, - OptionalInfos &opti_compilation_info); - - uint32_t GetPlatformInfoWithOutSocVersion(PlatFormInfos &platform_info, - OptionalInfos &opti_compilation_info); - - void SetOptionalCompilationInfo(OptionalInfos &opti_compilation_info); - - private: - PlatformInfoManager(); - ~PlatformInfoManager(); - - uint32_t LoadIniFile(std::string ini_file_real_path); - - void Trim(std::string &str); - - uint32_t LoadConfigFile(std::string real_path); - - std::string RealPath(const std::string &path); - - std::string GetSoFilePath(); - - void ParseVersion(std::map &version_map, - std::string &soc_version, - PlatformInfo &platform_info_temp); - - void ParseSocInfo(std::map &soc_info_map, - PlatformInfo &platform_info_temp); - - void ParseCubeOfAICoreSpec(std::map &ai_core_spec_map, - PlatformInfo &platform_info_temp); - - void ParseBufferOfAICoreSpec(std::map &ai_core_spec_map, - PlatformInfo &platform_info_temp); - - void ParseUBOfAICoreSpec(std::map &ai_core_spec_map, - PlatformInfo &platform_info_temp); - - void ParseUnzipOfAICoreSpec(std::map &ai_core_spec_map, - PlatformInfo &platform_info_temp); - - void ParseAICoreSpec(std::map &ai_core_spec_map, - PlatformInfo &platform_info_temp); - - void ParseBufferOfAICoreMemoryRates(std::map &ai_core_memory_rates_map, - PlatformInfo &platform_info_temp); - - void ParseAICoreMemoryRates(std::map &ai_core_memory_rates_map, - PlatformInfo &platform_info_temp); - - void ParseUBOfAICoreMemoryRates(std::map &ai_core_memory_rates_map, - PlatformInfo &platform_info_temp); - - void ParseAICoreintrinsicDtypeMap(std::map &ai_coreintrinsic_dtype_map, - PlatformInfo &platform_info_temp); - - void ParseVectorCoreSpec(std::map &vector_core_spec_map, - PlatformInfo &platform_info_temp); - - void ParseVectorCoreMemoryRates(std::map &vector_core_memory_rates_map, - PlatformInfo &platform_info_temp); - - void ParseCPUCache(std::map &CPUCacheMap, - PlatformInfo &platform_info_temp); - - void ParseVectorCoreintrinsicDtypeMap(std::map &vector_coreintrinsic_dtype_map, - PlatformInfo &platform_info_temp); - - uint32_t ParsePlatformInfoFromStrToStruct(std::map> &content_info_map, - std::string &soc_version, - PlatformInfo &platform_info_temp); - - void ParseAICoreintrinsicDtypeMap(std::map &ai_coreintrinsic_dtype_map, - PlatFormInfos &platform_info_temp); - - void ParseVectorCoreintrinsicDtypeMap(std::map &vector_coreintrinsic_dtype_map, - PlatFormInfos &platform_info_temp); - - void ParsePlatformRes(const std::string &label, - std::map &platform_res_map, - PlatFormInfos &platform_info_temp); - - uint32_t ParsePlatformInfo(std::map> &content_info_map, - std::string &soc_version, - PlatFormInfos &platform_info_temp); - - uint32_t 
AssemblePlatformInfoVector(std::map> &content_info_map); - - private: - bool init_flag_; - std::map platform_info_map_; - - OptionalInfo opti_compilation_info_; - - std::map platform_infos_map_; - - OptionalInfos opti_compilation_infos_; -}; -} // namespace fe -#endif diff --git a/inc/metadef/inc/common/util/platform_info_def.h b/inc/metadef/inc/common/util/platform_info_def.h deleted file mode 100644 index 245908e1f..000000000 --- a/inc/metadef/inc/common/util/platform_info_def.h +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef PLATFORM_INFO_DEF_H -#define PLATFORM_INFO_DEF_H - -#include -#include -#include - -using std::map; -using std::vector; -using std::string; - -namespace fe { -enum MemoryType { DDR = 0, HBM }; - -enum L2Type { Cache = 0, Buff }; - -typedef struct tag_str_info { - std::string aic_version; - std::string ccec_aic_version; - std::string ccec_aiv_version; - std::string is_support_ai_cpu_compiler; -} StrInfo; - -typedef struct tag_so_c_info { - uint32_t ai_core_cnt; - uint32_t vector_core_cnt; - uint32_t ai_cpu_cnt; - MemoryType memory_type; - uint64_t memory_size; - L2Type l2_type; - uint64_t l2_size; - uint32_t l2PageNum; -} SoCInfo; - -typedef struct tag_ai_core_spec { - double cube_freq; - uint64_t cube_m_size; - uint64_t cube_n_size; - uint64_t cube_k_size; - uint64_t vec_calc_size; - uint64_t l0_a_size; - uint64_t l0_b_size; - uint64_t l0_c_size; - uint64_t l1_size; - uint64_t smask_buffer; - uint64_t ub_size; - uint64_t ubblock_size; - uint64_t ubbank_size; - uint64_t ubbank_num; - uint64_t ubburst_in_one_block; - uint64_t ubbank_group_num; - uint32_t unzip_engines; - uint32_t unzip_max_ratios; - uint32_t unzip_channels; - uint8_t unzip_is_tight; - uint8_t cube_vector_split; -} AiCoreSpec; - -typedef struct tag_ai_core_memory_rates { - double ddr_rate; - double ddr_read_rate; - double ddr_write_rate; - double l2_rate; - double l2_read_rate; - double l2_write_rate; - double l1_to_l0_a_rate; - double l1_to_l0_b_rate; - double l1_to_ub_rate; - double l0_c_to_ub_rate; - double ub_to_l2_rate; - double ub_to_ddr_rate; - double ub_to_l1_rate; -} AiCoreMemoryRates; - -typedef struct tag_vector_core_spec { - double vec_freq; - uint64_t vec_calc_size; - uint64_t smask_buffer; - uint64_t ub_size; - uint64_t ubblock_size; - uint64_t ubbank_size; - uint64_t ubbank_num; - uint64_t ubburst_in_one_block; - uint64_t ubbank_group_num; - uint64_t vector_reg_size; - uint64_t predicate_reg_size; - uint64_t address_reg_size; - uint64_t alignment_reg_size; -} VectorCoreSpec; - -typedef struct tag_vector_core_memory_rates { - double ddr_rate; - double ddr_read_rate; - double ddr_write_rate; - double l2_rate; - double l2_read_rate; - double l2_write_rate; - double ub_to_l2_rate; - double ub_to_ddr_rate; -} VectorCoreMemoryRates; - -typedef struct tag_cpu_cache { - uint32_t AICPUSyncBySW; - uint32_t TSCPUSyncBySW; -} CPUCache; - -typedef struct tag_platform_info { - StrInfo 
str_info; - SoCInfo soc_info; - AiCoreSpec ai_core_spec; - AiCoreMemoryRates ai_core_memory_rates; - std::map> ai_core_intrinsic_dtype_map; - VectorCoreSpec vector_core_spec; - VectorCoreMemoryRates vector_core_memory_rates; - CPUCache cpucache; - std::map> vector_core_intrinsic_dtype_map; -} PlatformInfo; - -typedef struct tag_optional_info { - std::string soc_version; - std::string core_type; - uint32_t ai_core_num; - std::string l1_fusion_flag; -} OptionalInfo; -} // namespace fe -#endif diff --git a/inc/metadef/inc/common/util/platform_infos_def.h b/inc/metadef/inc/common/util/platform_infos_def.h deleted file mode 100644 index b0d5e5315..000000000 --- a/inc/metadef/inc/common/util/platform_infos_def.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef PLATFORM_INFOS_DEF_H -#define PLATFORM_INFOS_DEF_H - -#include -#include -#include -#include -#include "platform_info_def.h" - -namespace fe { -class PlatFormInfosImpl; -using PlatFormInfosImplPtr = std::shared_ptr; -class PlatFormInfos { - public: - bool Init(); - std::map> GetAICoreIntrinsicDtype(); - std::map> GetVectorCoreIntrinsicDtype(); - bool GetPlatformRes(const std::string &label, const std::string &key, std::string &val); - - void SetAICoreIntrinsicDtype(std::map> &intrinsic_dtypes); - void SetVectorCoreIntrinsicDtype(std::map> &intrinsic_dtypes); - void SetPlatformRes(const std::string &label, std::map &res); - - private: - PlatFormInfosImplPtr platform_infos_impl_{nullptr}; -}; - -class OptionalInfosImpl; -using OptionalInfosImplPtr = std::shared_ptr; -class OptionalInfos { - public: - bool Init(); - std::string GetSocVersion(); - std::string GetCoreType(); - uint32_t GetAICoreNum(); - std::string GetL1FusionFlag(); - - void SetSocVersion(std::string soc_version); - void SetCoreType(std::string core_type); - void SetAICoreNum(uint32_t ai_core_num); - void SetL1FusionFlag(std::string l1_fusion_flag); - private: - OptionalInfosImplPtr optional_infos_impl_{nullptr}; -}; - -} -#endif diff --git a/inc/metadef/inc/external/graph/ascend_string.h b/inc/metadef/inc/external/graph/ascend_string.h deleted file mode 100644 index 4b86711d0..000000000 --- a/inc/metadef/inc/external/graph/ascend_string.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
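A sketch of the lookup pattern the PlatformInfoManager above exposes; "Ascend310" is just an illustrative SoC version string, and 0 is treated as success as the uint32_t return codes suggest:

    #include <cstdint>
    #include "common/util/platform_info.h"

    // Query the AI Core count for one SoC version via the singleton manager.
    bool QueryAiCoreCount(uint32_t &ai_core_cnt) {
      fe::PlatformInfo info;
      fe::OptionalInfo optional;
      auto &mgr = fe::PlatformInfoManager::Instance();
      if (mgr.InitializePlatformInfo() != 0U) {
        return false;
      }
      if (mgr.GetPlatformInfo("Ascend310", info, optional) != 0U) {
        return false;
      }
      ai_core_cnt = info.soc_info.ai_core_cnt;
      return true;
    }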
- */ - -#ifndef INC_EXTERNAL_GRAPH_ASCEND_STRING_H_ -#define INC_EXTERNAL_GRAPH_ASCEND_STRING_H_ - -#include -#include -#include - -namespace ge { -class AscendString { - public: - AscendString() = default; - - ~AscendString() = default; - - AscendString(const char* name); - - const char* GetString() const; - - size_t Hash() const; - - bool operator<(const AscendString& d) const; - - bool operator>(const AscendString& d) const; - - bool operator<=(const AscendString& d) const; - - bool operator>=(const AscendString& d) const; - - bool operator==(const AscendString& d) const; - - bool operator!=(const AscendString& d) const; - - private: - std::shared_ptr name_; -}; -} // namespace ge - -namespace std { -template <> -struct hash { - size_t operator()(const ge::AscendString &name) const { - return name.Hash(); - } -}; -} -#endif // INC_EXTERNAL_GRAPH_ASCEND_STRING_H_ diff --git a/inc/metadef/inc/external/graph/attr_value.h b/inc/metadef/inc/external/graph/attr_value.h deleted file mode 100644 index 35c0c997c..000000000 --- a/inc/metadef/inc/external/graph/attr_value.h +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GRAPH_ATTR_VALUE_H_ -#define INC_EXTERNAL_GRAPH_ATTR_VALUE_H_ - -#include -#include -#include -#include - -#include "./ge_error_codes.h" -#include "ascend_string.h" - -using std::make_shared; -using std::map; -using std::pair; -using std::string; -using std::to_string; -using std::unique_ptr; -using std::vector; - -namespace ge { -class AttrValueImpl; -/*lint -e148*/ -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY AttrValue { - public: - using INT = int64_t; - using FLOAT = float; - using STR = std::string; - - AttrValue(); - ~AttrValue() = default; - - // GetValue, not list type - template - graphStatus GetValue(DT &val) const { - T valGet; - auto status = GetValue(valGet); - if (status != GRAPH_SUCCESS) { - return status; - } - val = DT(valGet); - return GRAPH_SUCCESS; - } - - template - static T CreateFrom(DT &&val) { - return val; - } - - graphStatus GetValue(AscendString &val); - - std::shared_ptr impl; - - private: -#define VALUE_SET_GET_DEC(DT) graphStatus GetValue(DT &val) const; - VALUE_SET_GET_DEC(AttrValue::STR) - VALUE_SET_GET_DEC(AttrValue::INT) - VALUE_SET_GET_DEC(AttrValue::FLOAT) -#undef VALUE_SET_GET_DEC -}; -/*lint +e148*/ -} // namespace ge -#endif // INC_EXTERNAL_GRAPH_ATTR_VALUE_H_ diff --git a/inc/metadef/inc/external/graph/ge_error_codes.h b/inc/metadef/inc/external/graph/ge_error_codes.h deleted file mode 100644 index 8e1cd3036..000000000 --- a/inc/metadef/inc/external/graph/ge_error_codes.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
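The std::hash specialization at the end of ascend_string.h is what makes AscendString usable as a key in unordered containers; a small sketch of that intent (SeenBefore is a hypothetical helper):

    #include <unordered_set>
    #include "external/graph/ascend_string.h"

    // Collect op names in a hash set keyed by AscendString; hashing goes through
    // AscendString::Hash() via the std::hash specialization above.
    bool SeenBefore(std::unordered_set<ge::AscendString> &seen, const char *name) {
      return !seen.insert(ge::AscendString(name)).second;
    }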
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GRAPH_GE_ERROR_CODES_H_ -#define INC_EXTERNAL_GRAPH_GE_ERROR_CODES_H_ - -namespace ge { -#if(defined(HOST_VISIBILITY)) && (defined(__GNUC__)) -#define GE_FUNC_HOST_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_HOST_VISIBILITY -#endif -#if(defined(DEV_VISIBILITY)) && (defined(__GNUC__)) -#define GE_FUNC_DEV_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_DEV_VISIBILITY -#endif -#ifdef __GNUC__ -#define ATTRIBUTED_DEPRECATED(replacement) __attribute__((deprecated("Please use " #replacement " instead."))) -#else -#define ATTRIBUTED_DEPRECATED(replacement) __declspec(deprecated("Please use " #replacement " instead.")) -#endif - -using graphStatus = uint32_t; -const graphStatus GRAPH_FAILED = 0xFFFFFFFF; -const graphStatus GRAPH_SUCCESS = 0; -const graphStatus GRAPH_NOT_CHANGED = 1343242304; -const graphStatus GRAPH_PARAM_INVALID = 50331649; -const graphStatus GRAPH_NODE_WITHOUT_CONST_INPUT = 50331648; -const graphStatus GRAPH_NODE_NEED_REPASS = 50331647; -} // namespace ge - -#endif // INC_EXTERNAL_GRAPH_GE_ERROR_CODES_H_ diff --git a/inc/metadef/inc/external/graph/gnode.h b/inc/metadef/inc/external/graph/gnode.h deleted file mode 100644 index 90f030a73..000000000 --- a/inc/metadef/inc/external/graph/gnode.h +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
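Since the codes above are plain uint32_t values, callers normally just compare against GRAPH_SUCCESS; a trivial helper to that effect (the name IsGraphOk is illustrative):

    #include "external/graph/ge_error_codes.h"

    // True only for GRAPH_SUCCESS; other codes (GRAPH_FAILED, GRAPH_PARAM_INVALID,
    // GRAPH_NOT_CHANGED, ...) are reported as return values, not exceptions.
    inline bool IsGraphOk(const ge::graphStatus status) {
      return status == ge::GRAPH_SUCCESS;
    }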
- */ - -#ifndef INC_EXTERNAL_GRAPH_NODE_H_ -#define INC_EXTERNAL_GRAPH_NODE_H_ - -#include -#include - -#include "./ge_error_codes.h" -#include "./types.h" -#include "./tensor.h" -#include "./ascend_string.h" - -namespace ge { -class AttrValue; -class GNode; -class OpDesc; -class Graph; -class ComputeGraph; -using GNodePtr = std::shared_ptr; -using GraphPtr = std::shared_ptr; -using OpBytes = std::vector; -using OpDescPtr = std::shared_ptr; -using ComputeGraphPtr = std::shared_ptr; - -class NodeImpl; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GNode { - public: - GNode(); - - ~GNode() = default; - - graphStatus GetType(AscendString &type) const; - - graphStatus GetName(AscendString &name) const; - - std::pair GetInDataNodesAndPortIndexs(const int32_t index) const; - - std::vector GetInControlNodes() const; - - std::vector> GetOutDataNodesAndPortIndexs(const int32_t index) const; - - std::vector GetOutControlNodes() const; - - graphStatus GetInputConstData(const int32_t index, Tensor &data) const; - - graphStatus GetInputIndexByName(const AscendString &name, int32_t &index); - - graphStatus GetOutputIndexByName(const AscendString &name, int32_t &index); - - size_t GetInputsSize() const; - - size_t GetOutputsSize() const; - - graphStatus GetInputDesc(const int32_t index, TensorDesc &tensor_desc) const; - - graphStatus UpdateInputDesc(const int32_t index, const TensorDesc &tensor_desc); - - graphStatus GetOutputDesc(const int32_t index, TensorDesc &tensor_desc) const; - - graphStatus UpdateOutputDesc(const int32_t index, const TensorDesc &tensor_desc); - - graphStatus GetAttr(const AscendString &name, int64_t &attr_value) const; - graphStatus GetAttr(const AscendString &name, int32_t &attr_value) const; - graphStatus GetAttr(const AscendString &name, uint32_t &attr_value) const; - graphStatus GetAttr(const AscendString &name, float &attr_value) const; - graphStatus GetAttr(const AscendString &name, AscendString &attr_value) const; - graphStatus GetAttr(const AscendString &name, bool &attr_value) const; - graphStatus GetAttr(const AscendString &name, Tensor &attr_value) const; - graphStatus GetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus GetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus GetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus GetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus GetAttr(const AscendString &name, std::vector &attr_values) const; - graphStatus GetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus GetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus GetAttr(const AscendString &name, OpBytes &attr_value) const; - graphStatus GetAttr(const AscendString &name, std::vector> &attr_value) const; - graphStatus GetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus GetAttr(const AscendString &name, ge::DataType &attr_value) const; - graphStatus GetAttr(const AscendString &name, AttrValue &attr_value) const; - - graphStatus SetAttr(const AscendString &name, int64_t &attr_value) const; - graphStatus SetAttr(const AscendString &name, int32_t &attr_value) const; - graphStatus SetAttr(const AscendString &name, uint32_t &attr_value) const; - graphStatus SetAttr(const AscendString &name, float &attr_value) const; - graphStatus SetAttr(const AscendString &name, AscendString &attr_value) const; - graphStatus SetAttr(const AscendString &name, bool &attr_value) const; - graphStatus 
SetAttr(const AscendString &name, Tensor &attr_value) const; - graphStatus SetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus SetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus SetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus SetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus SetAttr(const AscendString &name, std::vector &attr_values) const; - graphStatus SetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus SetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus SetAttr(const AscendString &name, OpBytes &attr_value) const; - graphStatus SetAttr(const AscendString &name, std::vector> &attr_value) const; - graphStatus SetAttr(const AscendString &name, std::vector &attr_value) const; - graphStatus SetAttr(const AscendString &name, ge::DataType &attr_value) const; - graphStatus SetAttr(const AscendString &name, AttrValue &attr_value) const; - - bool HasAttr(const AscendString &name); - - graphStatus GetSubgraph(uint32_t index, GraphPtr &graph) const; - - graphStatus GetALLSubgraphs(std::vector &graph_list) const; - - private: - std::shared_ptr impl_; - friend class NodeAdapter; -}; -} // namespace ge - -#endif // INC_EXTERNAL_GRAPH_NODE_H_ diff --git a/inc/metadef/inc/external/graph/graph.h b/inc/metadef/inc/external/graph/graph.h deleted file mode 100644 index 35a4b61fb..000000000 --- a/inc/metadef/inc/external/graph/graph.h +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
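A sketch of reading node metadata through the GNode wrapper declared above; the attribute name "data_format" is purely illustrative:

    #include "external/graph/gnode.h"

    // Fetch the node type and one string attribute, propagating the first failure.
    ge::graphStatus InspectNode(const ge::GNode &node, ge::AscendString &format) {
      ge::AscendString type;
      const ge::graphStatus ret = node.GetType(type);
      if (ret != ge::GRAPH_SUCCESS) {
        return ret;
      }
      return node.GetAttr(ge::AscendString("data_format"), format);
    }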
- */ - -#ifndef INC_EXTERNAL_GRAPH_GRAPH_H_ -#define INC_EXTERNAL_GRAPH_GRAPH_H_ - -#include -#include -#include -#include - -#include "./operator.h" -#include "./gnode.h" - -namespace ge { -class Graph; -class GraphImpl; - -using GraphImplPtr = std::shared_ptr; -using GraphPtr = std::shared_ptr; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Graph { - friend class GraphUtils; - - public: - ATTRIBUTED_DEPRECATED(Graph(const char *)) - explicit Graph(const std::string &name); - - explicit Graph(const char *name); - - Graph() = default; - - ~Graph() = default; - - Graph &SetInputs(const std::vector &inputs); - - Graph &SetOutputs(const std::vector &outputs); - - Graph &SetOutputs(const std::vector>> &output_indexs); - - ATTRIBUTED_DEPRECATED(Graph &SetOutputs(const std::vector> &outputs); - - Graph &SetOutputs(const std::vector> &outputs); - - Graph &SetTargets(const std::vector &targets); - - bool IsValid() const; - - graphStatus AddOp(const ge::Operator &op); - - ATTRIBUTED_DEPRECATED(graphStatus FindOpByName(const char *, ge::Operator &)) - graphStatus FindOpByName(const std::string &name, ge::Operator &op) const; - - graphStatus FindOpByName(const char *name, ge::Operator &op) const; - - ATTRIBUTED_DEPRECATED(graphStatus FindOpByType(const char *, std::vector &)) - graphStatus FindOpByType(const std::string &type, std::vector &ops) const; - - graphStatus FindOpByType(const char *type, std::vector &ops) const; - - ATTRIBUTED_DEPRECATED(graphStatus GetAllOpName(std::vector &) const) - graphStatus GetAllOpName(std::vector &op_name) const; - - graphStatus GetAllOpName(std::vector &names) const; - - ATTRIBUTED_DEPRECATED(graphStatus SaveToFile(const char *file_name) const) - graphStatus SaveToFile(const std::string &file_name) const; - - graphStatus SaveToFile(const char *file_name) const; - - ATTRIBUTED_DEPRECATED(graphStatus LoadFromFile(const char *)) - graphStatus LoadFromFile(const std::string &file_name); - - graphStatus LoadFromFile(const char *file_name); - - ATTRIBUTED_DEPRECATED(graphStatus GetName(AscendString &) const) - const std::string &GetName() const; - - graphStatus GetName(AscendString &name) const; - - /// - /// Set is need train iteration. - /// If set true, it means this graph need to be run iteration some - /// times(according variant "npu_runconfig/iterations_per_loop"). 
- /// @param need_iteration need_iteration:whether to set iteration or not - /// - void SetNeedIteration(bool need_iteration); - - std::vector GetAllNodes() const; - - std::vector GetDirectNode () const; - - graphStatus RemoveNode(GNode &node); - - graphStatus RemoveNode(GNode &node, bool contain_subgraph); - - graphStatus RemoveEdge(GNode &src_node, const int32_t src_port_index, GNode &dst_node, const int32_t dst_port_index); - - GNode AddNodeByOp(const Operator &op); - - graphStatus AddDataEdge(GNode &src_node, const int32_t src_port_index, - GNode &dst_node, const int32_t dst_port_index); - - graphStatus AddControlEdge(GNode &src_node, GNode &dst_node); - - graphStatus CopyFrom(const Graph &src_graph); - - static GraphPtr ConstructFromInputs(const std::vector &inputs, const AscendString &name); - - private: - - GraphImplPtr impl_{nullptr}; -}; -} // namespace ge - -#endif // INC_EXTERNAL_GRAPH_GRAPH_H_ diff --git a/inc/metadef/inc/external/graph/inference_context.h b/inc/metadef/inc/external/graph/inference_context.h deleted file mode 100644 index b8704dd42..000000000 --- a/inc/metadef/inc/external/graph/inference_context.h +++ /dev/null @@ -1,136 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
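To illustrate the construction flow the Graph API above implies, a minimal sketch; data and output_op are hypothetical operators built elsewhere, and the element type of the extraction-stripped vector parameters is assumed to be ge::Operator:

    #include <vector>
    #include "external/graph/graph.h"
    #include "external/graph/operator.h"

    // Build a graph from pre-constructed operators and mark its inputs/outputs.
    ge::Graph BuildExampleGraph(const ge::Operator &data, const ge::Operator &output_op) {
      ge::Graph graph("example_graph");
      (void)graph.SetInputs(std::vector<ge::Operator>{data});
      (void)graph.SetOutputs(std::vector<ge::Operator>{output_op});
      return graph;
    }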
- */ - -#ifndef INC_EXTERNAL_GRAPH_INFERENCE_CONTEXT_H_ -#define INC_EXTERNAL_GRAPH_INFERENCE_CONTEXT_H_ - -#include -#include -#include -#include - -#include "./tensor.h" -#include "./types.h" -#include "ascend_string.h" -#include "resource_context.h" - -namespace ge { -class InferenceContext; -using InferenceContextPtr = std::shared_ptr; - -class ShapeAndTypeImpl; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY ShapeAndType { - public: - ShapeAndType(); - ~ShapeAndType() = default; - - ShapeAndType(const Shape &shape, DataType dataType); - - void SetShape(const Shape &shape); - - void SetType(DataType dataType); - - Shape GetShape() const; - - DataType GetDataType() const; - - private: - std::shared_ptr shape_and_type_impl_; -}; - -struct InnerInferenceContext; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InferenceContext { - public: - ~InferenceContext() = default; - InferenceContext(const InferenceContext &context) = delete; - InferenceContext(const InferenceContext &&context) = delete; - InferenceContext &operator=(const InferenceContext &context) = delete; - InferenceContext &operator=(const InferenceContext &&context) = delete; - - void SetInputHandleShapesAndTypes(std::vector> &&shapes_and_types); - const std::vector> &GetInputHandleShapesAndTypes() const; - const std::vector> &GetOutputHandleShapesAndTypes() const; - void SetOutputHandleShapesAndTypes(const std::vector> &shapes_and_types); - void SetOutputHandleShapesAndTypes(std::vector> &&shapes_and_types); - - ATTRIBUTED_DEPRECATED(void SetMarks(const std::vector &)) - void SetMarks(const std::vector &marks); - void SetMarks(const std::vector &marks); - - - ATTRIBUTED_DEPRECATED(void GetMarks(std::vector &) const) - const std::vector &GetMarks() const; - void GetMarks(std::vector &marks) const; - - static std::unique_ptr Create(void *resource_context_mgr = nullptr); - /** - * Get corresponding resource_context by key - * For resource op infershape, invoked by op infer_func. - * @param key - * @return corresponding resource context. Check not null before use it. - */ - ResourceContext *GetResourceContext(const ge::AscendString &key); - - /** - * Set corresponding resource_context by key. For node which will write to resource. - * For resource op infershape, invoked by write_op infer_func. - * @param key - * @param resource_context pointer. - * @return status - */ - graphStatus SetResourceContext(const ge::AscendString &key, ResourceContext *resource_context); - /** - * Register resource key relied on. For node which will read from resource. - * For resource op infershape, invoked by read_op infer_func. - * @param key - * @return status - */ - graphStatus RegisterReliedOnResourceKey(const ge::AscendString &key); - - /** - * During infershape of write op, if resource shape changed, use this to tell. - * For resource op infershape, invoked by write_op infer_func. - * @param key - * @return status - */ - graphStatus AddChangedResourceKey(const ge::AscendString &key); - - /** - * After read_op infershaped, can get resource_keys relied on. - * For resource op infershape, invoked by ge infershape framework. - * @param keys - * @return status - */ - const std::set& GetReliedOnResourceKeys() const; - - /** - * After infershape of write op, ge can get resource_key which shape changed. - * For resource op infershape, invoked by ge infershape framework. - * @return keys - */ - const std::set& GetChangedResourceKeys() const; - /** - * After handle changed resource shape, should clear changed_keys in context. 
- * For resource op infershape, invoked by ge infershape framework. - */ - void ClearChangedResourceKeys(); - - private: - explicit InferenceContext(std::unique_ptr &inner_context); - std::shared_ptr inner_inference_context_; -}; -} // namespace ge -#endif // INC_EXTERNAL_GRAPH_INFERENCE_CONTEXT_H_ diff --git a/inc/metadef/inc/external/graph/operator.h b/inc/metadef/inc/external/graph/operator.h deleted file mode 100644 index 5ac8dd73f..000000000 --- a/inc/metadef/inc/external/graph/operator.h +++ /dev/null @@ -1,459 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GRAPH_OPERATOR_H_ -#define INC_EXTERNAL_GRAPH_OPERATOR_H_ - -#include -#include -#include -#include -#include - -#include "./ge_error_codes.h" -#include "./inference_context.h" -#include "./tensor.h" - -#ifndef USER_GE_LOGI -#define USER_GE_LOGI(...) -#endif // USER_GE_LOGI - -#ifndef USER_GE_LOGW -#define USER_GE_LOGW(...) -#endif // USER_GE_LOGW - -#ifndef USER_GE_LOGE -#define USER_GE_LOGE(...) -#endif // USER_GE_LOGE - -#define DYNAMIC_OUTPUT_TD_NUM(name) ("__dynamic_output_" + name + "_cnt") -#define DYNAMIC_INPUT_TD_NUM(name) ("__dynamic_input_" + name + "_cnt") - -namespace ge { -class Operator; -class OperatorImpl; -class NodeUtils; -class NamedAttrs; -class Graph; -class AttrValue; -class Node; - -using SubgraphBuilder = std::function; -using OperatorImplPtr = std::shared_ptr; -using OperatorPtr = std::shared_ptr; - -class OpIO; -using OutHandler = std::shared_ptr; -using InHandler = std::shared_ptr; - -using std::function; -using std::shared_ptr; -using std::string; - -/*lint -e148*/ -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator { - public: - friend class OperatorImpl; - friend class GraphBuilderImpl; - friend class NodeUtils; - friend class OpDescUtils; - friend class GraphUtils; - - using OpInt = int64_t; - using OpFloat = float; - using OpString = string; - using OpAscendString = AscendString; - using OpBool = bool; - using OpTensor = Tensor; - using OpType = ge::DataType; - using OpNamedAttrs = ge::NamedAttrs; - using OpListInt = std::vector; - using OpListFloat = std::vector; - using OpListString = std::vector; - using OpListAcendString = std::vector; - using OpListBool = std::vector; - using OpListTensor = std::vector; - using OpBytes = std::vector; - using OpListListInt = std::vector>; - using OpListType = std::vector; - using OpListNamedAttrs = std::vector; - - Operator() {} - ATTRIBUTED_DEPRECATED(Operator(const char *)) - explicit Operator(const std::string &type); - - explicit Operator(const char *type); - - ATTRIBUTED_DEPRECATED(Operator(const char *, const char *)) - Operator(const std::string &name, const std::string &type); - - Operator(const AscendString &name, const AscendString &type); - - Operator(const char *name, const char *type); - - virtual ~Operator() = default; - - bool IsEmpty() const; - - ATTRIBUTED_DEPRECATED(graphStatus GetName(AscendString &) const) - std::string 
GetName() const; - - graphStatus GetName(AscendString &name) const; - - ATTRIBUTED_DEPRECATED(graphStatus GetOpType(AscendString &) const) - std::string GetOpType() const; - - graphStatus GetOpType(AscendString &type) const; - - // Only has one output index = 0 - ATTRIBUTED_DEPRECATED(Operator &SetInput(const char *, const Operator &)) - Operator &SetInput(const std::string &dst_name, const Operator &src_oprt); - - Operator &SetInput(const char *dst_name, const Operator &src_oprt); - - ATTRIBUTED_DEPRECATED(Operator &SetInput(const char *, const Operator &, const char *)) - Operator &SetInput(const std::string &dst_name, const Operator &src_oprt, const std::string &name); - - Operator &SetInput(const char *dst_name, const Operator &src_oprt, const char *name); - - ATTRIBUTED_DEPRECATED(Operator &SetInput(const char *, const Operator &, uint32_t)) - Operator &SetInput(const std::string &dst_name, const Operator &src_oprt, uint32_t index); - - Operator &SetInput(const char *dst_name, const Operator &src_oprt, uint32_t index); - - Operator &SetInput(uint32_t dst_index, const Operator &src_oprt, uint32_t src_index); - - Operator &AddControlInput(const Operator &src_oprt); - - ATTRIBUTED_DEPRECATED(graphStatus GetInputConstData(const char *, Tensor &) const) - graphStatus GetInputConstData(const std::string &dst_name, Tensor &data) const; - - graphStatus GetInputConstData(const char *dst_name, Tensor &data) const; - - ATTRIBUTED_DEPRECATED(TensorDesc GetInputDescByName(const char *) const) - TensorDesc GetInputDesc(const std::string &name) const; - - TensorDesc GetInputDescByName(const char *name) const; - - TensorDesc GetInputDesc(uint32_t index) const; - - ATTRIBUTED_DEPRECATED(int GetDynamicOutputNum(const char *) const) - int GetDynamicOutputNum(const std::string &name) const; - - int GetDynamicOutputNum(const char *name) const; - - ATTRIBUTED_DEPRECATED(int GetDynamicInputNum(const char *)) - int GetDynamicInputNum(const std::string &name) const; - - int GetDynamicInputNum(const char *name) const; - - ATTRIBUTED_DEPRECATED(graphStatus TryGetInputDesc(const char *, TensorDesc &) const) - graphStatus TryGetInputDesc(const std::string &name, TensorDesc &tensor_desc) const; - - graphStatus TryGetInputDesc(const char *name, TensorDesc &tensor_desc) const; - - ATTRIBUTED_DEPRECATED(graphStatus UpdateInputDesc(const char *, const TensorDesc &)) - graphStatus UpdateInputDesc(const std::string &name, const TensorDesc &tensor_desc); - - graphStatus UpdateInputDesc(const char *name, const TensorDesc &tensor_desc); - - ATTRIBUTED_DEPRECATED(TensorDesc GetOutputDescByName(const char *) const) - TensorDesc GetOutputDesc(const std::string &name) const; - - TensorDesc GetOutputDescByName(const char *name) const; - - TensorDesc GetOutputDesc(uint32_t index) const; - - ATTRIBUTED_DEPRECATED(graphStatus UpdateOutputDesc(const char *, const TensorDesc &tensor_desc)) - graphStatus UpdateOutputDesc(const std::string &name, const TensorDesc &tensor_desc); - - graphStatus UpdateOutputDesc(const char *name, const TensorDesc &tensor_desc); - - ATTRIBUTED_DEPRECATED(TensorDesc GetDynamicInputDesc(const char *, uint32_t) const) - TensorDesc GetDynamicInputDesc(const std::string &name, uint32_t index) const; - - TensorDesc GetDynamicInputDesc(const char *name, uint32_t index) const; - - ATTRIBUTED_DEPRECATED(graphStatus UpdateDynamicInputDesc(const char *, uint32_t, const TensorDesc &)) - graphStatus UpdateDynamicInputDesc(const std::string &name, uint32_t index, const TensorDesc &tensor_desc); - - graphStatus 
UpdateDynamicInputDesc(const char *name, uint32_t index, const TensorDesc &tensor_desc); - - ATTRIBUTED_DEPRECATED(TensorDesc GetDynamicOutputDesc(const char *, uint32_t) const) - TensorDesc GetDynamicOutputDesc(const std::string &name, uint32_t index) const; - - TensorDesc GetDynamicOutputDesc(const char *name, uint32_t index) const; - - ATTRIBUTED_DEPRECATED(graphStatus UpdateDynamicOutputDesc(const char *, uint32_t, const TensorDesc &)) - graphStatus UpdateDynamicOutputDesc(const std::string &name, uint32_t index, const TensorDesc &tensor_desc); - - graphStatus UpdateDynamicOutputDesc(const char *name, uint32_t index, const TensorDesc &tensor_desc); - - graphStatus InferShapeAndType(); // lint !e148 - - void SetInferenceContext(const InferenceContextPtr &inference_context); - InferenceContextPtr GetInferenceContext() const; - - graphStatus VerifyAllAttr(bool disable_common_verifier = false); // lint !e148 - - size_t GetInputsSize() const; - - size_t GetOutputsSize() const; - - ATTRIBUTED_DEPRECATED(graphStatus GetAllAttrNamesAndTypes(std::map &) const) - const std::map GetAllAttrNamesAndTypes() const; - - graphStatus GetAllAttrNamesAndTypes(std::map &attr_name_types) const; - - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, int64_t)) - Operator &SetAttr(const std::string &name, int64_t attr_value); - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, int32_t)) - Operator &SetAttr(const std::string &name, int32_t attr_value); - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, uint32_t)) - Operator &SetAttr(const std::string &name, uint32_t attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, int64_t &) const) - graphStatus GetAttr(const std::string &name, int64_t &attr_value) const; - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, int32_t &) const) - graphStatus GetAttr(const std::string &name, int32_t &attr_value) const; - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, uint32_t &) const) - graphStatus GetAttr(const std::string &name, uint32_t &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector &)) - Operator &SetAttr(const std::string &name, const std::vector &attr_value); - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector &)) - Operator &SetAttr(const std::string &name, const std::vector &attr_value); - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector &)) - Operator &SetAttr(const std::string &name, const std::vector &attr_value); - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, std::initializer_list &&)) - Operator &SetAttr(const std::string &name, std::initializer_list &&attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *name, std::vector &) const) - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *name, std::vector &) const) - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const std::string &, std::vector &) const) - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, float attr_value)) - Operator &SetAttr(const std::string &name, float attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, float &) const) - graphStatus GetAttr(const std::string &name, float &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector &)) - Operator 
&SetAttr(const std::string &name, const std::vector &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, std::vector &) const) - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, AttrValue &&)) - Operator &SetAttr(const std::string &name, AttrValue &&attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, AttrValue &) const) - graphStatus GetAttr(const std::string &name, AttrValue &attr_value) const; - Operator &SetAttr(const std::string &name, const std::string &attr_value); - graphStatus GetAttr(const std::string &name, std::string &attr_value) const; - Operator &SetAttr(const std::string &name, const std::vector &attr_value); - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, bool)) - Operator &SetAttr(const std::string &name, bool attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, bool &) const) - graphStatus GetAttr(const std::string &name, bool &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector &)) - Operator &SetAttr(const std::string &name, const std::vector &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, std::vector &) const) - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const Tensor &)) - Operator &SetAttr(const std::string &name, const Tensor &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, Tensor &) const) - graphStatus GetAttr(const std::string &name, Tensor &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector &)) - Operator &SetAttr(const std::string &name, const std::vector &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, std::vector &) const) - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - - // Bytes type - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const OpBytes &)) - Operator &SetAttr(const std::string &name, const OpBytes &attr_value); - // Bytes type - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, OpBytes &) const) - graphStatus GetAttr(const std::string &name, OpBytes &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector> &)) - Operator &SetAttr(const std::string &name, const std::vector> &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, std::vector> &) const) - graphStatus GetAttr(const std::string &name, std::vector> &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector &)) - Operator &SetAttr(const std::string &name, const std::vector &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, std::vector &) const) - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const ge::DataType &)) - Operator &SetAttr(const std::string &name, const ge::DataType &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, ge::DataType &) const) - graphStatus GetAttr(const std::string &name, ge::DataType &attr_value) const; - - // func type - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const ge::NamedAttrs &)) - Operator &SetAttr(const std::string &name, const ge::NamedAttrs &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, 
ge::NamedAttrs &) const) - graphStatus GetAttr(const std::string &name, ge::NamedAttrs &attr_value) const; - ATTRIBUTED_DEPRECATED(Operator &SetAttr(const char *, const std::vector &)) - Operator &SetAttr(const std::string &name, const std::vector &attr_value); - ATTRIBUTED_DEPRECATED(graphStatus GetAttr(const char *, std::vector &) const) - graphStatus GetAttr(const std::string &name, std::vector &attr_value) const; - - Operator &SetAttr(const char *name, int64_t attr_value); - Operator &SetAttr(const char *name, int32_t attr_value); - Operator &SetAttr(const char *name, uint32_t attr_value); - graphStatus GetAttr(const char *name, int64_t &attr_value) const; - graphStatus GetAttr(const char *name, int32_t &attr_value) const; - graphStatus GetAttr(const char *name, uint32_t &attr_value) const; - Operator &SetAttr(const char *name, const std::vector &attr_value); - Operator &SetAttr(const char *name, const std::vector &attr_value); - Operator &SetAttr(const char *name, const std::vector &attr_value); - Operator &SetAttr(const char *name, std::initializer_list &&attr_value); - graphStatus GetAttr(const char *name, std::vector &attr_value) const; - graphStatus GetAttr(const char *name, std::vector &attr_value) const; - graphStatus GetAttr(const char *name, std::vector &attr_value) const; - - Operator &SetAttr(const char *name, float attr_value); - graphStatus GetAttr(const char *name, float &attr_value) const; - Operator &SetAttr(const char *name, const std::vector &attr_value); - graphStatus GetAttr(const char *name, std::vector &attr_value) const; - Operator &SetAttr(const char *name, AttrValue &&attr_value); - graphStatus GetAttr(const char *name, AttrValue &attr_value) const; - - Operator &SetAttr(const char *name, const char *attr_value); - Operator &SetAttr(const char *name, const AscendString &attr_value); - graphStatus GetAttr(const char *name, AscendString &attr_value) const; - Operator &SetAttr(const char *name, const std::vector &attr_values); - graphStatus GetAttr(const char *name, std::vector &attr_values) const; - - Operator &SetAttr(const char *name, bool attr_value); - graphStatus GetAttr(const char *name, bool &attr_value) const; - Operator &SetAttr(const char *name, const std::vector &attr_value); - graphStatus GetAttr(const char *name, std::vector &attr_value) const; - - Operator &SetAttr(const char *name, const Tensor &attr_value); - graphStatus GetAttr(const char *name, Tensor &attr_value) const; - Operator &SetAttr(const char *name, const std::vector &attr_value); - graphStatus GetAttr(const char *name, std::vector &attr_value) const; - - // Bytes type - Operator &SetAttr(const char *name, const OpBytes &attr_value); - // Bytes type - graphStatus GetAttr(const char *name, OpBytes &attr_value) const; - - Operator &SetAttr(const char *name, const std::vector> &attr_value); - graphStatus GetAttr(const char *name, std::vector> &attr_value) const; - - Operator &SetAttr(const char *name, const std::vector &attr_value); - graphStatus GetAttr(const char *name, std::vector &attr_value) const; - - Operator &SetAttr(const char *name, const ge::DataType &attr_value); - graphStatus GetAttr(const char *name, ge::DataType &attr_value) const; - - // func type - Operator &SetAttr(const char *name, const ge::NamedAttrs &attr_value); - graphStatus GetAttr(const char *name, ge::NamedAttrs &attr_value) const; - Operator &SetAttr(const char *name, const std::vector &attr_value); - graphStatus GetAttr(const char *name, std::vector &attr_value) const; - - void BreakConnect() const; - - size_t 
GetSubgraphNamesCount() const; - ATTRIBUTED_DEPRECATED(graphStatus GetSubgraphNames(std::vector &) const) - std::vector GetSubgraphNames() const; - graphStatus GetSubgraphNames(std::vector &names) const; - ATTRIBUTED_DEPRECATED(SubgraphBuilder GetSubgraphBuilder(const char *) const) - SubgraphBuilder GetSubgraphBuilder(const std::string &name) const; - SubgraphBuilder GetSubgraphBuilder(const char *name) const; - ATTRIBUTED_DEPRECATED(Graph GetSubgraph(const char *) const) - Graph GetSubgraph(const std::string &name) const; - Graph GetSubgraph(const char *name) const; - ATTRIBUTED_DEPRECATED(SubgraphBuilder GetDynamicSubgraphBuilder(const char *, uint32_t) const) - SubgraphBuilder GetDynamicSubgraphBuilder(const std::string &name, uint32_t index) const; - SubgraphBuilder GetDynamicSubgraphBuilder(const char *name, uint32_t index) const; - ATTRIBUTED_DEPRECATED(Graph GetDynamicSubgraph(const char *, uint32_t) const) - Graph GetDynamicSubgraph(const std::string &name, uint32_t index) const; - Graph GetDynamicSubgraph(const char *name, uint32_t index) const; - - protected: - void AttrRegister(const std::string &name, float attr_value); - void AttrRegister(const std::string &name, const std::vector &attr_value); - void AttrRegister(const std::string &name, int64_t attr_value); - void AttrRegister(const std::string &name, const std::vector &attr_value); - void AttrRegister(const std::string &name, const std::string &attr_value); - void AttrRegister(const std::string &name, const std::vector &attr_value); - void AttrRegister(const std::string &name, bool attr_value); - void AttrRegister(const std::string &name, const std::vector &attr_value); - void AttrRegister(const std::string &name, const Tensor &attr_value); - void AttrRegister(const std::string &name, const std::vector &attr_value); - void AttrRegister(const std::string &name, const OpBytes &attr_value); - void AttrRegister(const std::string &name, const std::vector> &attr_value); - void AttrRegister(const std::string &name, const std::vector &attr_value); - void AttrRegister(const std::string &name, const ge::DataType &attr_value); - void AttrRegister(const std::string &name, const ge::NamedAttrs &attr_value); - void AttrRegister(const std::string &name, const std::vector &attr_value); - void AttrRegister(const std::string &name, const AscendString &attr_value); - void AttrRegister(const std::string &name, const std::vector &attr_value); - - explicit Operator(OperatorImplPtr &&op_impl); - - void InputRegister(const std::string &name); - - void OptionalInputRegister(const std::string &name); - - void InferFuncRegister(const std::function &func); - - void VerifierFuncRegister(const std::function &func); - - void InferFormatFuncRegister(const std::function &func); - - void OutputRegister(const std::string &name); - - void DynamicInputRegister(const std::string &name, const unsigned int num, bool is_push_back = true); - - void DynamicInputRegisterByIndex(const std::string &name, const unsigned int num, size_t index); - - void DynamicOutputRegister(const std::string &name, const unsigned int num, bool is_push_back = true); - - void RequiredAttrRegister(const std::string &name); - - graphStatus VerifyAll(); - - // Only has one output index = 0 - Operator &SetInput(const std::string &dst_name, uint32_t dst_index, - const Operator &src_oprt); - - Operator &SetInput(const std::string &dst_name, uint32_t dst_index, const Operator &src_oprt, - const std::string &name); - - void SubgraphRegister(const std::string &ir_name, bool dynamic); - void 
SubgraphCountRegister(const std::string &ir_name, uint32_t count); - void SetSubgraphBuilder(const std::string &ir_name, uint32_t index, const SubgraphBuilder &builder); - Graph GetSubgraphImpl(const std::string &name) const; - - private: - Operator &SetInput(const std::string &dst_name, const OutHandler &out_handler); - - OutHandler GetOutput(const std::string &name) const; - - OutHandler GetOutput(uint32_t index) const; - - OperatorImplPtr GetOperatorImplPtr() const; - - OperatorImplPtr operator_impl_{nullptr}; - - graphStatus GetInputConstDataOut(const std::string &dst_name, Tensor &data) const; - - std::shared_ptr GetNode() const; -}; -/*lint +e148*/ -} // namespace ge - -#endif // INC_EXTERNAL_GRAPH_OPERATOR_H_ diff --git a/inc/metadef/inc/external/graph/operator_factory.h b/inc/metadef/inc/external/graph/operator_factory.h deleted file mode 100644 index a75cf25e4..000000000 --- a/inc/metadef/inc/external/graph/operator_factory.h +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GRAPH_OPERATOR_FACTORY_H_ -#define INC_EXTERNAL_GRAPH_OPERATOR_FACTORY_H_ - -#include -#include -#include -#include - -#include "./operator.h" -#include "./ge_error_codes.h" - -namespace ge { -using OpCreator = std::function; -using OpCreatorV2 = std::function; -using InferShapeFunc = std::function; -using InferFormatFunc = std::function; -using InferValueRangeFunc = std::function; -using VerifyFunc = std::function; - -enum WHEN_CALL { - INPUT_IS_DYNAMIC = 0, - INPUT_HAS_VALUE_RANGE = 1 -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY OperatorFactory { - public: - ATTRIBUTED_DEPRECATED(static Operator CreateOperator(const char *, const char *)) - static Operator CreateOperator(const std::string &operator_name, const std::string &operator_type); - - static Operator CreateOperator(const char *operator_name, const char *operator_type); - - ATTRIBUTED_DEPRECATED(graphStatus GetOpsTypeList(std::vector &)) - static graphStatus GetOpsTypeList(std::vector &all_ops); - - static graphStatus GetOpsTypeList(std::vector &all_ops); - - ATTRIBUTED_DEPRECATED(bool IsExistOp(const char *)) - static bool IsExistOp(const std::string &operator_type); - - static bool IsExistOp(const char *operator_type); -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY OperatorCreatorRegister { - public: - ATTRIBUTED_DEPRECATED(OperatorCreatorRegister(const char *, OpCreatorV2 const &)) - OperatorCreatorRegister(const std::string &operator_type, OpCreator const &op_creator); - OperatorCreatorRegister(const char *operator_type, OpCreatorV2 const &op_creator); - ~OperatorCreatorRegister() = default; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InferShapeFuncRegister { - public: - ATTRIBUTED_DEPRECATED(InferShapeFuncRegister(const char *, const InferShapeFunc &)) - InferShapeFuncRegister(const std::string &operator_type, const InferShapeFunc &infer_shape_func); - InferShapeFuncRegister(const 
char *operator_type, const InferShapeFunc &infer_shape_func); - ~InferShapeFuncRegister() = default; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InferFormatFuncRegister { - public: - ATTRIBUTED_DEPRECATED(InferFormatFuncRegister(const char *, const InferFormatFunc &)) - InferFormatFuncRegister(const std::string &operator_type, const InferFormatFunc &infer_format_func); - InferFormatFuncRegister(const char *operator_type, const InferFormatFunc &infer_format_func); - ~InferFormatFuncRegister() = default; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InferValueRangeFuncRegister { - public: - InferValueRangeFuncRegister(const char *operator_type, WHEN_CALL when_call, - const InferValueRangeFunc &infer_value_range_func); - InferValueRangeFuncRegister(const char *operator_type); - ~InferValueRangeFuncRegister() = default; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY VerifyFuncRegister { - public: - ATTRIBUTED_DEPRECATED(VerifyFuncRegister(const char *, const VerifyFunc &)) - VerifyFuncRegister(const std::string &operator_type, const VerifyFunc &verify_func); - VerifyFuncRegister(const char *operator_type, const VerifyFunc &verify_func); - ~VerifyFuncRegister() = default; -}; -} // namespace ge - -#endif // INC_EXTERNAL_GRAPH_OPERATOR_FACTORY_H_ diff --git a/inc/metadef/inc/external/graph/operator_reg.h b/inc/metadef/inc/external/graph/operator_reg.h deleted file mode 100644 index 9887f8dca..000000000 --- a/inc/metadef/inc/external/graph/operator_reg.h +++ /dev/null @@ -1,561 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GRAPH_OPERATOR_REG_H_ -#define INC_EXTERNAL_GRAPH_OPERATOR_REG_H_ - -#include -#include -#include -#include - -#include "graph/operator.h" -#include "graph/operator_factory.h" -#include "graph/tensor.h" -#include "graph/types.h" -#include "graph/graph.h" - -namespace ge { -using std::function; -using std::string; -using std::vector; - -#define ATTR_String(x, ...) \ - graphStatus get_attr_##x(AscendString &ret) const { \ - string ret_str = __VA_ARGS__; \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - ret = AscendString(ret_str.c_str()); \ - } \ - return GRAPH_SUCCESS; \ - } \ - _THIS_TYPE &set_attr_##x(const char *v) { \ - Operator::SetAttr(#x, v); \ - return *this; \ - } \ - _THIS_TYPE &set_attr_##x(const function &v) { return *this; } - -#define ATTR_ListString(x, ...) \ - graphStatus get_attr_##x(vector &ret) const { \ - vector ret_strs = __VA_ARGS__; \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - for (auto &ret_str : ret_strs) { \ - ret.emplace_back(ret_str.c_str()); \ - } \ - } \ - return GRAPH_SUCCESS; \ - } \ - _THIS_TYPE &set_attr_##x(const vector &v) { \ - Operator::SetAttr(#x, v); \ - return *this; \ - } \ - _THIS_TYPE &set_attr_##x(const function()> &v) { \ - return *this; } - -#define ATTR_AscendString(x, ...) 
\ - graphStatus get_attr_##x(AscendString &ret) const { \ - AscendString ret_str = __VA_ARGS__; \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - ret = AscendString(ret_str.c_str()); \ - } \ - return GRAPH_SUCCESS; \ - } - -#define ATTR_ListAscendString(x, ...) \ - graphStatus get_attr_##x(vector &ret) const { \ - vector ret_strs = __VA_ARGS__; \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - for (auto &ret_str : ret_strs) { \ - if (ret_str.GetString() != nullptr) { \ - ret.emplace_back(ret_str.GetString()); \ - } \ - } \ - } \ - return GRAPH_SUCCESS; \ - } - -#define ATTR_Int(x, ...) -#define ATTR_Float(x, ...) -#define ATTR_Bool(x, ...) -#define ATTR_Tensor(x, ...) -#define ATTR_Type(x, ...) -#define ATTR_NamedAttrs(x, ...) -#define ATTR_ListInt(x, ...) -#define ATTR_ListFloat(x, ...) -#define ATTR_ListBool(x, ...) -#define ATTR_ListTensor(x, ...) -#define ATTR_Bytes(x, ...) -#define ATTR_ListListInt(x, ...) -#define ATTR_ListType(x, ...) -#define ATTR_ListNamedAttrs(x, ...) - -#define REQUIRED_ATTR_String(x) \ - graphStatus get_attr_##x(AscendString &ret) const { \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - return GRAPH_FAILED; \ - } \ - return GRAPH_SUCCESS; \ - } \ - _THIS_TYPE &set_attr_##x(const char *v) { \ - Operator::SetAttr(#x, v); \ - return *this; \ - } \ - _THIS_TYPE &set_attr_##x(const function &v) { return *this; } - -#define REQUIRED_ATTR_ListString(x) \ - graphStatus get_attr_##x(vector &ret) const { \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - return GRAPH_FAILED; \ - } \ - return GRAPH_SUCCESS; \ - } \ - _THIS_TYPE &set_attr_##x(const vector &v) { \ - Operator::SetAttr(#x, v); \ - return *this; \ - } \ - _THIS_TYPE &set_attr_##x(const function()> &v) { \ - return *this; } - -#define REQUIRED_ATTR_AscendString(x) \ - graphStatus get_attr_##x(AscendString &ret) const { \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - return GRAPH_FAILED \ - } \ - return GRAPH_SUCCESS; \ - } - -#define REQUIRED_ATTR_ListAscendString(x) \ - graphStatus get_attr_##x(vector &ret) const { \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - return GRAPH_FAILED; \ - } \ - return GRAPH_SUCCESS; \ - } - -#define REQUIRED_ATTR_Int(x) -#define REQUIRED_ATTR_Float(x) -#define REQUIRED_ATTR_Bool(x) -#define REQUIRED_ATTR_Tensor(x) -#define REQUIRED_ATTR_Type(x) -#define REQUIRED_ATTR_NamedAttrs(x) -#define REQUIRED_ATTR_ListInt(x) -#define REQUIRED_ATTR_ListFloat(x) -#define REQUIRED_ATTR_ListBool(x) -#define REQUIRED_ATTR_ListTensor(x) -#define REQUIRED_ATTR_Bytes(x) -#define REQUIRED_ATTR_ListListInt(x) -#define REQUIRED_ATTR_ListType(x) -#define REQUIRED_ATTR_ListNamedAttrs(x) - -class OpReg { - public: - OpReg &N() { return *this; } - - OpReg &ATTR() { return *this; } - - OpReg &REQUIRED_ATTR() { return *this; } - - OpReg &INPUT() { return *this; } - - OpReg &OPTIONAL_INPUT() { return *this; } - - OpReg &OUTPUT() { return *this; } - - OpReg &GRAPH() { return *this; } - - OpReg &DYNAMIC_GRAPH() { return *this; } - - OpReg &INFER_SHAPE_AND_TYPE() { return *this; } -}; - -#define REG_OP(x) \ - namespace op { \ - class x : public Operator { \ - typedef x _THIS_TYPE; \ - \ - public: \ - ATTRIBUTED_DEPRECATED(x(const char *)) \ - explicit x(const string &name) : Operator(name.c_str(), #x) { __##x(); } \ - explicit x(const char *name) : Operator(name, #x) { __##x(); } \ - explicit x(const AscendString &name) : Operator(name, #x) { \ - __##x(); } \ - x() : Operator(#x) { __##x(); } \ - \ - private: \ - void __##x() { \ - OpReg() - -#define ATTR(x, 
Type, ...) \ - N(); \ - __attr_##x(); \ - } \ - \ - public: \ - ATTRIBUTED_DEPRECATED(static const void name_attr_##x(AscendString &)) \ - static const string name_attr_##x() { return #x; } \ - static const void name_attr_##x(AscendString &attr) { \ - attr = AscendString(#x); \ - } \ - ATTR_##Type(x, __VA_ARGS__) \ - Op##Type get_attr_##x() const { \ - Op##Type ret = __VA_ARGS__; \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - return ret; \ - } \ - return ret; \ - } \ - _THIS_TYPE &set_attr_##x(const Op##Type &v) { \ - Operator::SetAttr(#x, v); \ - return *this; \ - } \ - _THIS_TYPE &set_attr_##x(const function &v) { return *this; } \ - \ - private: \ - void __attr_##x() { \ - Operator::AttrRegister(#x, Op##Type(__VA_ARGS__)); \ - string attr_name(#x); \ - (void)OpReg() - -#define REQUIRED_ATTR(x, Type) \ - N(); \ - __required_attr_##x(); \ - } \ - \ - public: \ - ATTRIBUTED_DEPRECATED(static const void name_attr_##x(AscendString &)) \ - static const string name_attr_##x() { return #x; } \ - static const void name_attr_##x(AscendString &attr_name) { \ - attr_name = AscendString(#x); \ - } \ - REQUIRED_ATTR_##Type(x) \ - Op##Type get_attr_##x() const { \ - Op##Type ret; \ - if (Operator::GetAttr(#x, ret) == GRAPH_FAILED) { \ - return ret; \ - } \ - return ret; \ - } \ - _THIS_TYPE &set_attr_##x(const Op##Type &v) { \ - Operator::SetAttr(#x, v); \ - return *this; \ - } \ - _THIS_TYPE &set_attr_##x(const function &v) { return *this; } \ - \ - private: \ - void __required_attr_##x() { \ - Operator::RequiredAttrRegister(#x); \ - string attr_name(#x); \ - (void)OpReg() - -#define INPUT(x, t) \ - N(); \ - __input_##x(); \ - } \ - \ - public: \ - ATTRIBUTED_DEPRECATED(static const void name_in_##x(AscendString &)) \ - static const string name_in_##x() { return #x; } \ - static const void name_in_##x(AscendString &name) { \ - name = AscendString(#x); \ - } \ - ATTRIBUTED_DEPRECATED(_THIS_TYPE &set_input_##x##_by_name(Operator &, const char *)) \ - _THIS_TYPE &set_input_##x(Operator &v, const string &srcName) { \ - Operator::SetInput(#x, v, srcName.c_str()); \ - return *this; \ - } \ - _THIS_TYPE &set_input_##x##_by_name(Operator &v, const char *srcName) { \ - Operator::SetInput(#x, v, srcName); \ - return *this; \ - } \ - _THIS_TYPE &set_input_##x(Operator &v, uint32_t index) { \ - Operator::SetInput(#x, v, index); \ - return *this; \ - } \ - _THIS_TYPE &set_input_##x(Operator &v) { \ - Operator::SetInput(#x, v); \ - return *this; \ - } \ - TensorDesc get_input_desc_##x() const { return Operator::GetInputDescByName(#x); } \ - graphStatus update_input_desc_##x(const TensorDesc &tensorDesc) { \ - return Operator::UpdateInputDesc(#x, tensorDesc); \ - } \ - \ - private: \ - void __input_##x() { \ - Operator::InputRegister(#x); \ - (void)OpReg() - -#define OPTIONAL_INPUT(x, t) \ - N(); \ - __optional_input_##x(); \ - } \ - \ - public: \ - ATTRIBUTED_DEPRECATED(static const void name_in_##x(AscendString &)) \ - static const string name_in_##x() { return #x; } \ - static const void name_in_##x(AscendString &name) { \ - name = AscendString(#x); \ - } \ - _THIS_TYPE &set_input_##x(Operator &v) { \ - Operator::SetInput(#x, v); \ - return *this; \ - } \ - ATTRIBUTED_DEPRECATED(_THIS_TYPE &set_input_##x##_by_name(Operator &, const char *)) \ - _THIS_TYPE &set_input_##x(Operator &v, const string &srcName) { \ - Operator::SetInput(#x, v, srcName.c_str()); \ - return *this; \ - } \ - _THIS_TYPE &set_input_##x##_by_name(Operator &v, const char *srcName) { \ - Operator::SetInput(#x, v, srcName); \ - 
return *this; \ - } \ - _THIS_TYPE &set_input_##x(Operator &v, uint32_t index) { \ - Operator::SetInput(#x, v, index); \ - return *this; \ - } \ - TensorDesc get_input_desc_##x() const { return Operator::GetInputDescByName(#x); } \ - graphStatus update_input_desc_##x(const TensorDesc &tensorDesc) { \ - return Operator::UpdateInputDesc(#x, tensorDesc); \ - } \ - \ - private: \ - void __optional_input_##x() { \ - Operator::OptionalInputRegister(#x); \ - (void)OpReg() - -#define OUTPUT(x, t) \ - N(); \ - __out_##x(); \ - } \ - \ - public: \ - ATTRIBUTED_DEPRECATED(static const void name_out_##x(AscendString &)) \ - static const string name_out_##x() { return #x; } \ - static const void name_out_##x(AscendString &name) { \ - name = AscendString(#x); \ - } \ - TensorDesc get_output_desc_##x() const { return Operator::GetOutputDescByName(#x); } \ - graphStatus update_output_desc_##x(const TensorDesc &tensorDesc) { \ - return Operator::UpdateOutputDesc(#x, tensorDesc); \ - } \ - \ - private: \ - void __out_##x() { \ - Operator::OutputRegister(#x); \ - (void)OpReg() - -#define DYNAMIC_INPUT(x, t) \ - N(); \ - __dy_input_##x(); \ - } \ - \ - public: \ - _THIS_TYPE &create_dynamic_input_##x(uint32_t num, bool isPushBack = true) { \ - Operator::DynamicInputRegister(#x, num, isPushBack); \ - return *this; \ - } \ - _THIS_TYPE &create_dynamic_input_byindex_##x(uint32_t num, size_t index) { \ - Operator::DynamicInputRegisterByIndex(#x, num, index); \ - return *this; \ - } \ - TensorDesc get_dynamic_input_desc_##x(uint32_t index) const { \ - return Operator::GetDynamicInputDesc(#x, index); \ - } \ - graphStatus update_dynamic_input_desc_##x(uint32_t index, const TensorDesc &tensorDesc) { \ - return Operator::UpdateDynamicInputDesc(#x, index, tensorDesc); \ - } \ - _THIS_TYPE &set_dynamic_input_##x(uint32_t dstIndex, Operator &v) { \ - Operator::SetInput(#x, dstIndex, v); \ - return *this; \ - } \ - ATTRIBUTED_DEPRECATED(_THIS_TYPE &set_dynamic_input_##x(uint32_t, Operator &, const char *))\ - _THIS_TYPE &set_dynamic_input_##x(uint32_t dstIndex, Operator &v, const string &srcName) { \ - Operator::SetInput(#x, dstIndex, v, srcName.c_str()); \ - return *this; \ - } \ - _THIS_TYPE &set_dynamic_input_##x(uint32_t dstIndex, Operator &v, const char *srcName) { \ - Operator::SetInput(#x, dstIndex, v, srcName); \ - return *this; \ - } \ - \ - private: \ - void __dy_input_##x() { \ - Operator::DynamicInputRegister(#x, 0, true); \ - (void)OpReg() - -#define DYNAMIC_OUTPUT(x, t) \ - N(); \ - __dy_output_##x(); \ - } \ - \ - public: \ - _THIS_TYPE &create_dynamic_output_##x(uint32_t num, bool isPushBack = true) { \ - Operator::DynamicOutputRegister(#x, num, isPushBack); \ - return *this; \ - } \ - TensorDesc get_dynamic_output_desc_##x(uint32_t index) const { \ - return Operator::GetDynamicOutputDesc(#x, index); \ - } \ - graphStatus update_dynamic_output_desc_##x(uint32_t index, const TensorDesc &tensorDesc) { \ - return Operator::UpdateDynamicOutputDesc(#x, index, tensorDesc); \ - } \ - \ - private: \ - void __dy_output_##x() { \ - Operator::DynamicOutputRegister(#x, 0, true); \ - (void)OpReg() - -#define GRAPH(x) \ - N(); \ - __graph_##x(); \ - } \ - \ - public: \ - ATTRIBUTED_DEPRECATED(static const void name_graph_##x(AscendString &)) \ - static const string name_graph_##x() { return #x; } \ - static const void name_graph_##x(AscendString &name) { \ - name = AscendString(#x); \ - } \ - SubgraphBuilder get_subgraph_builder_##x() const { \ - return Operator::GetSubgraphBuilder(#x); \ - } \ - _THIS_TYPE 
&set_subgraph_builder_##x(const SubgraphBuilder &v) { \ - Operator::SetSubgraphBuilder(#x, 0, v); \ - return *this; \ - } \ - Graph get_subgraph_##x() const { \ - return Operator::GetSubgraph(#x); \ - } \ - \ - private: \ - void __graph_##x() { \ - Operator::SubgraphRegister(#x, false); \ - Operator::SubgraphCountRegister(#x, 1); \ - (void)OpReg() - -#define DYNAMIC_GRAPH(x) \ - N(); \ - __graph_##x(); \ - } \ - \ - public: \ - ATTRIBUTED_DEPRECATED(static const void name_graph_##x(AscendString &)) \ - static const string name_graph_##x() { return #x; } \ - static const void name_graph_##x(AscendString &name) { \ - name = AscendString(#x); \ - } \ - _THIS_TYPE &create_dynamic_subgraph_##x(uint32_t num) { \ - Operator::SubgraphCountRegister(#x, num); \ - return *this; \ - } \ - SubgraphBuilder get_dynamic_subgraph_builder_##x(uint32_t index) const { \ - return Operator::GetDynamicSubgraphBuilder(#x, index); \ - } \ - Graph get_dynamic_subgraph_##x(uint32_t index) const { \ - return Operator::GetDynamicSubgraph(#x, index); \ - } \ - _THIS_TYPE &set_dynamic_subgraph_builder_##x(uint32_t index,const SubgraphBuilder &v) { \ - Operator::SetSubgraphBuilder(#x, index, v); \ - return *this; \ - } \ - \ - private: \ - void __graph_##x() { \ - Operator::SubgraphRegister(#x, true); \ - (void)OpReg() - - -#define PASTE(g_register, y) g_register##y -#define __OP_END_IMPL__(x, y) \ - N(); \ - } \ - static_assert( \ - std::is_same::value, \ - "The class name entered into the OP_END_FACTORY_REG needs to be the same as the operator name you define."); \ - } \ - ; \ - static const OperatorCreatorRegister PASTE(g_register, y)(#x, [](const AscendString &name) { return x(name); }); \ - } -#define OP_END_FACTORY_REG(x) __OP_END_IMPL__(x, __COUNTER__) - -// Specialized shape inferencer macro - -#define IMPLEMT_INFERFUNC(op_name, func_name) \ - GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY static graphStatus func_name(op::op_name &op) - -#define IMPLEMT_COMMON_INFERFUNC(func_name) \ - GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY static graphStatus func_name(Operator &op) - -#define IMPLEMT_INFERFORMAT_FUNC(op_name, func_name) \ - GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY static graphStatus func_name(op::op_name &op) - -// Specialized verifier macro - -#define IMPLEMT_VERIFIER(op_name, func_name) \ - GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY static graphStatus func_name(op::op_name op) - -#define INFER_VERIFY_FUNC(op_name, x) [&](Operator &v) { return x((op::op_name &)v); } - -#define COMMON_INFER_VERIFY_FUNC(x) [&](Operator &v) { return x(v); } - -#define INFER_FORMAT_FUNC(op_name, x) [&](Operator &v) { return x((op::op_name &)v); } - -#define __INFER_FUNC_REG_IMPL__(op_name, x, n) static const InferShapeFuncRegister PASTE(if_register, n)(#op_name, x) - -#define __VERIFY_FUNC_REG_IMPL__(op_name, x, n) static const VerifyFuncRegister PASTE(vf_register, n)(#op_name, x) -// Infer format func register -#define __INFER_FORMAT_FUNC_REG_IMPL__(op_name, x, n) \ - static const InferFormatFuncRegister PASTE(ff_register, n)(#op_name, x) - -// Shape inferencer & verifier register macro - -#define INFER_FUNC_REG(op_name, x) __INFER_FUNC_REG_IMPL__(op_name, INFER_VERIFY_FUNC(op_name, x), __COUNTER__) - -#define COMMON_INFER_FUNC_REG(op_name, x) __INFER_FUNC_REG_IMPL__(op_name, COMMON_INFER_VERIFY_FUNC(x), __COUNTER__) - -#define VERIFY_FUNC_REG(op_name, x) __VERIFY_FUNC_REG_IMPL__(op_name, INFER_VERIFY_FUNC(op_name, x), __COUNTER__) - -// Infer format func reg -#define INFER_FORMAT_FUNC_REG(op_name, x) \ - 
__INFER_FORMAT_FUNC_REG_IMPL__(op_name, INFER_FORMAT_FUNC(op_name, x), __COUNTER__) - -// Common shape inferencer - -#define ELMTWISE_INFER_SHAPEANDTYPE(in_name, out_name) \ - [](Operator op)->graphStatus { \ - auto x_shape = op.GetInputDescByName(in_name).GetShape().GetDims(); \ - auto x_type = op.GetInputDescByName(in_name).GetDataType(); \ - TensorDesc op_output_desc = op.GetOutputDescByName(out_name); \ - op_output_desc.SetShape(ge::Shape(x_shape)); \ - op_output_desc.SetOriginShape(ge::Shape(x_shape)); \ - op_output_desc.SetDataType(x_type); \ - return op.UpdateOutputDesc(out_name, op_output_desc); \ - } - -graphStatus BroadCastInfer(const function()> &get_in1_shape, - const function()> &get_in2_shape, - const function &y_shape)> &set_out_shape); - -#define BROADCAST_INFER(in1_name, in2_name, out_name) \ - [](Operator op) -> graphStatus { \ - return BroadCastInfer([&]() { return op.GetInputDescByName(in1_name).GetShape().GetDims(); }, \ - [&]() { return op.GetInputDescByName(in2_name).GetShape().GetDims(); }, \ - [&](const vector &y_shape) { \ - TensorDesc op_output_desc = op.GetOutputDescByName(out_name); \ - op_output_desc.SetShape(ge::Shape(y_shape)); \ - (void)op.UpdateOutputDesc(out_name, op_output_desc);}); \ - } -} // namespace ge -#endif // INC_EXTERNAL_GRAPH_OPERATOR_REG_H_ diff --git a/inc/metadef/inc/external/graph/resource_context.h b/inc/metadef/inc/external/graph/resource_context.h deleted file mode 100644 index c9bf4c9ba..000000000 --- a/inc/metadef/inc/external/graph/resource_context.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GRAPH_RESOURCE_CONTEXT_H_ -#define INC_EXTERNAL_GRAPH_RESOURCE_CONTEXT_H_ - -namespace ge { -// For resource op infershape, indicate content stored in resources, shape/dtype etc. -// Op can inherit from this struct and extend more content -struct ResourceContext { - virtual ~ResourceContext() {} -}; // struct ResourceContext -} // namespace ge -#endif // INC_EXTERNAL_GRAPH_RESOURCE_CONTEXT_H_ diff --git a/inc/metadef/inc/external/graph/tensor.h b/inc/metadef/inc/external/graph/tensor.h deleted file mode 100644 index 0036a26c4..000000000 --- a/inc/metadef/inc/external/graph/tensor.h +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
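The operator_reg.h macros above (REG_OP, INPUT, OUTPUT, ATTR, OP_END_FACTORY_REG) expand into a typed op class plus a static factory registration, and IMPLEMT_COMMON_INFERFUNC / COMMON_INFER_FUNC_REG attach a shape inferencer to it. The following is a minimal sketch only: the op type MyAddOne, its port names and the Float attribute are hypothetical, and the element types stripped from this diff are assumed to match the usual TensorType helpers.

#include "graph/operator_reg.h"

namespace ge {
// Hypothetical op: one input, one output, one optional float attribute.
REG_OP(MyAddOne)
    .INPUT(x, TensorType::NumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(alpha, Float, 1.0)
    .OP_END_FACTORY_REG(MyAddOne)

// Element-wise style inferencer: the output mirrors the input shape and dtype
// (the ELMTWISE_INFER_SHAPEANDTYPE("x", "y") helper above does the same thing).
IMPLEMT_COMMON_INFERFUNC(MyAddOneInferShape) {
  TensorDesc y_desc = op.GetOutputDescByName("y");
  y_desc.SetShape(op.GetInputDescByName("x").GetShape());
  y_desc.SetDataType(op.GetInputDescByName("x").GetDataType());
  return op.UpdateOutputDesc("y", y_desc);
}
COMMON_INFER_FUNC_REG(MyAddOne, MyAddOneInferShape);
}  // namespace ge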
- */ - -#ifndef INC_EXTERNAL_GRAPH_TENSOR_H_ -#define INC_EXTERNAL_GRAPH_TENSOR_H_ - -#include -#include -#include -#include -#include - -#include "./ge_error_codes.h" -#include "./types.h" -#include "ascend_string.h" - -namespace ge { -class ShapeImpl; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Shape { - public: - Shape(); - ~Shape() = default; - explicit Shape(const std::vector &dims); - - size_t GetDimNum() const; - // If the idx is invalid, return 0 - int64_t GetDim(size_t idx) const; - graphStatus SetDim(size_t idx, int64_t value); - std::vector GetDims() const; - int64_t GetShapeSize() const; - - private: - std::shared_ptr impl_; -}; - -class TensorDescImpl; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TensorDesc { - public: - TensorDesc(); - ~TensorDesc() = default; - explicit TensorDesc(Shape shape, Format format = FORMAT_ND, DataType dt = DT_FLOAT); - // Copy - TensorDesc(const TensorDesc &desc); - // Move - TensorDesc(TensorDesc &&desc); - // Copy - TensorDesc &operator=(const TensorDesc &desc); - // Move - TensorDesc &operator=(TensorDesc &&desc); - - void Update(const Shape &shape, Format format = FORMAT_ND, DataType dt = DT_FLOAT); - Shape GetShape() const; - void SetShape(const Shape &shape); - // set shape with -2, it stand for unknown shape - graphStatus SetUnknownDimNumShape(); - // for unknown shape - graphStatus SetShapeRange(const std::vector> &range); - graphStatus GetShapeRange(std::vector> &range) const; - - Format GetFormat() const; - void SetFormat(Format format); - - Shape GetOriginShape() const; - void SetOriginShape(const Shape &originShape); - - Format GetOriginFormat() const; - void SetOriginFormat(Format originFormat); - - DataType GetDataType() const; - void SetDataType(DataType dt); - - ATTRIBUTED_DEPRECATED(graphStatus GetName(AscendString &)) - std::string GetName() const; - graphStatus GetName(AscendString &name); - - ATTRIBUTED_DEPRECATED(void SetName(const char *)) - void SetName(const std::string &name); - void SetName(const char *name); - - // Attr acess - void SetSize(int64_t size); - int64_t GetSize() const; - - int64_t GetRealDimCnt() const; - void SetRealDimCnt(const int64_t realDimCnt); - - void SetPlacement(Placement placement); - Placement GetPlacement() const; - - void SetConstData(std::unique_ptr const_data_buffer, const size_t &const_data_len); - bool GetConstData(uint8_t **const_data_buffer, size_t &const_data_len) const; - - private: - std::shared_ptr impl; - friend class TensorAdapter; -}; - -class TensorImpl; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Tensor { - public: - using DeleteFunc = std::function; - Tensor(); - ~Tensor() = default; - explicit Tensor(const TensorDesc &tensorDesc); - Tensor(const TensorDesc &tensorDesc, const std::vector &data); - Tensor(const TensorDesc &tensorDesc, const uint8_t *data, size_t size); - Tensor(TensorDesc &&tensorDesc, std::vector &&data); - - TensorDesc GetTensorDesc() const; - graphStatus SetTensorDesc(const TensorDesc &tensorDesc); - - const uint8_t *GetData() const; - uint8_t *GetData(); - size_t GetSize() const; - std::unique_ptr ResetData(); - - graphStatus SetData(std::vector &&data); - graphStatus SetData(const std::vector &data); - graphStatus SetData(const uint8_t *data, size_t size); - ATTRIBUTED_DEPRECATED(graphStatus SetData(const char *data)) - graphStatus SetData(const std::string &data); - graphStatus SetData(const char *data); - ATTRIBUTED_DEPRECATED(graphStatus SetData(const std::vector &)) - graphStatus SetData(const std::vector &data); - 
graphStatus SetData(const std::vector &datas); - graphStatus SetData(uint8_t *data, size_t size, const Tensor::DeleteFunc &deleter_func); - graphStatus IsValid(); - - Tensor Clone() const; - - private: - std::shared_ptr impl; - friend class TensorAdapter; -}; -} // namespace ge - -#endif // INC_EXTERNAL_GRAPH_TENSOR_H_ diff --git a/inc/metadef/inc/external/graph/types.h b/inc/metadef/inc/external/graph/types.h deleted file mode 100644 index 96a5c3c04..000000000 --- a/inc/metadef/inc/external/graph/types.h +++ /dev/null @@ -1,319 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_GRAPH_TYPES_H_ -#define INC_EXTERNAL_GRAPH_TYPES_H_ - -#include -#include -#include - -namespace ge { -static const int64_t UNKNOWN_DIM = -1; -static const int64_t UNKNOWN_DIM_NUM = -2; -static const std::vector UNKNOWN_SHAPE = {-1}; -static const std::vector UNKNOWN_RANK = {-2}; -static const std::vector DUMMY_SHAPE = {-3}; -// When data type unit is bit, this offset need to be added. -static const int kDataTypeSizeBitOffset = 1000; -static const int kBitNumOfOneByte = 8; - -#if(defined(HOST_VISIBILITY)) && (defined(__GNUC__)) -#define GE_FUNC_HOST_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_HOST_VISIBILITY -#endif -#if(defined(DEV_VISIBILITY)) && (defined(__GNUC__)) -#define GE_FUNC_DEV_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_DEV_VISIBILITY -#endif - -enum DataType { - DT_FLOAT = 0, // float type - DT_FLOAT16 = 1, // fp16 type - DT_INT8 = 2, // int8 type - DT_INT16 = 6, // int16 type - DT_UINT16 = 7, // uint16 type - DT_UINT8 = 4, // uint8 type - DT_INT32 = 3, // - DT_INT64 = 9, // int64 type - DT_UINT32 = 8, // unsigned int32 - DT_UINT64 = 10, // unsigned int64 - DT_BOOL = 12, // bool type - DT_DOUBLE = 11, // double type - DT_STRING = 13, // string type - DT_DUAL_SUB_INT8 = 14, // dual output int8 type - DT_DUAL_SUB_UINT8 = 15, // dual output uint8 type - DT_COMPLEX64 = 16, // complex64 type - DT_COMPLEX128 = 17, // complex128 type - DT_QINT8 = 18, // qint8 type - DT_QINT16 = 19, // qint16 type - DT_QINT32 = 20, // qint32 type - DT_QUINT8 = 21, // quint8 type - DT_QUINT16 = 22, // quint16 type - DT_RESOURCE = 23, // resource type - DT_STRING_REF = 24, // string ref type - DT_DUAL = 25, // dual output type - DT_VARIANT = 26, // dt_variant type - DT_BF16 = 27, // bf16 type - DT_UNDEFINED = 28, // Used to indicate a DataType field has not been set. 
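The Shape/TensorDesc/Tensor interfaces above are what callers use to describe and carry constant data when building a graph. A minimal sketch, assuming a 2x3 DT_FLOAT tensor in FORMAT_ND and assuming the Shape dimension type (stripped in this diff) is int64_t; the name "weight" is arbitrary.

#include <cstdint>
#include <vector>
#include "graph/tensor.h"
#include "graph/types.h"

ge::Tensor MakeConstTensor() {
  ge::Shape shape({2, 3});
  ge::TensorDesc desc(shape, ge::FORMAT_ND, ge::DT_FLOAT);
  desc.SetName("weight");  // optional, useful for debugging

  std::vector<float> values(6, 1.0f);  // 2 * 3 elements
  // The (data, size) overload copies raw bytes into the tensor.
  return ge::Tensor(desc,
                    reinterpret_cast<const uint8_t *>(values.data()),
                    values.size() * sizeof(float));
}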
- DT_INT4 = 29, // int4 type - DT_UINT1 = 30, // uint1 type - DT_INT2 = 31, // int2 type - DT_UINT2 = 32, // uint2 type - DT_MAX // Mark the boundaries of data types -}; - -inline int GetSizeByDataType(DataType data_type) { - static int data_type_size[DT_MAX] = { - 4, // DT_FLOAT = 0, float type - 2, // DT_FLOAT16 = 1, fp16 type - 1, // DT_INT8 = 2, int8 type - 4, // DT_INT32 = 3, - 1, // DT_UINT8 = 4, uint8 type - -1, - 2, // DT_INT16 = 6, int16 type - 2, // DT_UINT16 = 7, uint16 type - 4, // DT_UINT32 = 8, unsigned int32 - 8, // DT_INT64 = 9, int64 type - 8, // DT_UINT64 = 10, unsigned int64 - 8, // DT_DOUBLE = 11, double type - 1, // DT_BOOL = 12, bool type - -1, // DT_STRING = 13, string type - 1, // DT_DUAL_SUB_INT8 = 14, dual output int8 type - 1, // DT_DUAL_SUB_UINT8 = 15, dual output uint8 type - 8, // DT_COMPLEX64 = 16, complex64 type - 16, // DT_COMPLEX128 = 17, complex128 type - 1, // DT_QINT8 = 18, qint8 type - 2, // DT_QINT16 = 19, qint16 type - 4, // DT_QINT32 = 20, qint32 type - 1, // DT_QUINT8 = 21, quint8 type - 2, // DT_QUINT16 = 22, quint16 type - 8, // DT_RESOURCE = 23, resource type - -1, // DT_STRING_REF = 24, string ref type - 5, // DT_DUAL = 25, dual output type (float + int8) - 8, // DT_VARIANT variant type - 2, // DT_BF16 = 27, bf16 type - -1, // DT_UNDEFINED = 28 Used to indicate a DataType field has not been set. - kDataTypeSizeBitOffset + 4, // DT_INT4 = 29, int4 type - kDataTypeSizeBitOffset + 1, // DT_UINT1 = 30, uint1 type - kDataTypeSizeBitOffset + 2, // DT_INT2 = 31, int2 type - kDataTypeSizeBitOffset + 2, // DT_UINT2 = 32, uint2 type - // DT_MAX - }; - if (data_type >= DT_MAX) { - return -1; - } - return data_type_size[data_type]; -} - -/// @brief Calculates the length in bytes based on the DataType and the number of elements. -/// @param element_count -/// @param data_type -/// @return -int64_t GetSizeInBytes(int64_t element_count, DataType data_type); - -enum Format { - FORMAT_NCHW = 0, // NCHW - FORMAT_NHWC, // NHWC - FORMAT_ND, // Nd Tensor - FORMAT_NC1HWC0, // NC1HWC0 - FORMAT_FRACTAL_Z, // FRACTAL_Z - FORMAT_NC1C0HWPAD = 5, - FORMAT_NHWC1C0, - FORMAT_FSR_NCHW, - FORMAT_FRACTAL_DECONV, - FORMAT_C1HWNC0, - FORMAT_FRACTAL_DECONV_TRANSPOSE = 10, - FORMAT_FRACTAL_DECONV_SP_STRIDE_TRANS, - FORMAT_NC1HWC0_C04, // NC1HWC0, C0 is 4 - FORMAT_FRACTAL_Z_C04, // FRACZ, C0 is 4 - FORMAT_CHWN, - FORMAT_FRACTAL_DECONV_SP_STRIDE8_TRANS = 15, - FORMAT_HWCN, - FORMAT_NC1KHKWHWC0, // KH,KW kernel h& kernel w maxpooling max output format - FORMAT_BN_WEIGHT, - FORMAT_FILTER_HWCK, // filter input tensor format - FORMAT_HASHTABLE_LOOKUP_LOOKUPS = 20, - FORMAT_HASHTABLE_LOOKUP_KEYS, - FORMAT_HASHTABLE_LOOKUP_VALUE, - FORMAT_HASHTABLE_LOOKUP_OUTPUT, - FORMAT_HASHTABLE_LOOKUP_HITS, - FORMAT_C1HWNCoC0 = 25, - FORMAT_MD, - FORMAT_NDHWC, - FORMAT_FRACTAL_ZZ, - FORMAT_FRACTAL_NZ, - FORMAT_NCDHW = 30, - FORMAT_DHWCN, // 3D filter input tensor format - FORMAT_NDC1HWC0, - FORMAT_FRACTAL_Z_3D, - FORMAT_CN, - FORMAT_NC = 35, - FORMAT_DHWNC, - FORMAT_FRACTAL_Z_3D_TRANSPOSE, // 3D filter(transpose) input tensor format - FORMAT_FRACTAL_ZN_LSTM, - FORMAT_FRACTAL_Z_G, - FORMAT_RESERVED = 40, - FORMAT_ALL, - FORMAT_NULL, - FORMAT_ND_RNN_BIAS, - FORMAT_FRACTAL_ZN_RNN, - // Add new formats definition here - FORMAT_END, - // FORMAT_MAX defines the max value of Format. - // Any Format should not exceed the value of FORMAT_MAX. - // ** Attention ** : FORMAT_MAX stands for the SPEC of enum Format and almost SHOULD NOT be used in code. 
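GetSizeByDataType above encodes two kinds of results: a plain element size in bytes, or kDataTypeSizeBitOffset plus a bit width for the sub-byte types (DT_INT4, DT_UINT1, DT_INT2, DT_UINT2), with -1 for types that have no fixed size. The sketch below shows one way a caller might turn an element count into a byte count; the helper name and the round-up behaviour are assumptions here, and the header's own GetSizeInBytes() is the declared entry point for this.

#include <cstdint>
#include "graph/types.h"

// Hypothetical helper; GetSizeInBytes() in types.h is the declared way to do this.
int64_t ElementsToBytes(int64_t element_count, ge::DataType dt) {
  const int size = ge::GetSizeByDataType(dt);
  if (size < 0) {
    return -1;  // variable-length or undefined type, e.g. DT_STRING
  }
  if (size > ge::kDataTypeSizeBitOffset) {
    // Sub-byte type: decode the bit width and round up to whole bytes.
    const int bits = size - ge::kDataTypeSizeBitOffset;  // e.g. 4 for DT_INT4
    return (element_count * bits + ge::kBitNumOfOneByte - 1) / ge::kBitNumOfOneByte;
  }
  return element_count * size;
}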
- // If you want to judge the range of Format, you can use FORMAT_END. - FORMAT_MAX = 0xff -}; - -/// Get format from primary and sub-format, -/// in bits field: -/// ---------------------------------- -/// | 1 byte | 2 bytes | 1 byte | -/// |----------|------------|--------| -/// | reserved | sub-format | format | -/// ---------------------------------- -/// @param primary_format -/// @param sub_format -/// @return -inline int32_t GetFormatFromSub(int32_t primary_format, int32_t sub_format) { - return static_cast((static_cast(primary_format) & 0xff) | - ((static_cast(sub_format) & 0xffff) << 8)); -} - -inline int32_t GetPrimaryFormat(int32_t format) { - return static_cast(static_cast(format) & 0xff); -} - -inline int32_t GetSubFormat(int32_t format) { - return static_cast((static_cast(format) & 0xffff00) >> 8); -} - -inline bool HasSubFormat(int32_t format) { - return GetSubFormat(format) > 0; -} - -// for unknown shape op type -enum UnknowShapeOpType { - DEPEND_IN_SHAPE = 1, // op out shape get by input shape - DEPEND_CONST_VALUE = 2, // op out shape get by const op value - DEPEND_SHAPE_RANGE = 3, // op out shape get by range - DEPEND_COMPUTE = 4 // op out shape get by totally computing -}; - -struct TensorDescInfo { - Format format_ = FORMAT_RESERVED; // tbe op register support format - DataType dataType_ = DT_UNDEFINED; // tbe op register support datatype -}; - -enum DeviceType { - NPU = 0, - CPU = 1, -}; - -enum Placement { - kPlacementHost = 0, // host data addr - kPlacementDevice = 1, // device data addr -}; - -/// -/// @brief Get a format name from enum -/// @param format -/// @return -/// -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY -const char *GetFormatName(Format format); - -class TensorTypeImpl; -struct TensorType { - explicit TensorType(DataType dt); - - TensorType(const std::initializer_list &types); - - static TensorType ALL() { - return TensorType{DT_BOOL, DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, - DT_INT32, DT_INT64, DT_INT8, DT_QINT16, DT_QINT32, DT_QINT8, DT_QUINT16, - DT_QUINT8, DT_RESOURCE, DT_STRING, DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8, - DT_BF16}; - } - - static TensorType QuantifiedType() { return TensorType{DT_QINT16, DT_QINT32, DT_QINT8, DT_QUINT16, DT_QUINT8}; } - - static TensorType OrdinaryType() { - return TensorType{DT_BOOL, DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, - DT_INT32, DT_INT64, DT_INT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8, - DT_BF16}; - } - - static TensorType BasicType() { - return TensorType{DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, - DT_INT32, DT_INT64, DT_INT8, DT_QINT16, DT_QINT32, DT_QINT8, - DT_QUINT16, DT_QUINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8, - DT_BF16}; - } - - static TensorType NumberType() { - return TensorType{DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, DT_INT32, DT_INT64, - DT_INT8, DT_QINT32, DT_QINT8, DT_QUINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8, - DT_BF16}; - } - - static TensorType RealNumberType() { - return TensorType{DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, DT_INT32, DT_INT64, - DT_INT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8, DT_BF16}; - } - - static TensorType ComplexDataType() { return TensorType{DT_COMPLEX128, DT_COMPLEX64}; } - - static TensorType IntegerDataType() { - return TensorType{DT_INT16, DT_INT32, DT_INT64, DT_INT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8}; - } - - static TensorType SignedDataType() { return TensorType{DT_INT16, DT_INT32, DT_INT64, 
DT_INT8}; } - - static TensorType UnsignedDataType() { return TensorType{DT_UINT16, DT_UINT32, DT_UINT64, DT_UINT8}; } - - static TensorType FloatingDataType() { return TensorType{DT_DOUBLE, DT_FLOAT, DT_FLOAT16}; } - - static TensorType IndexNumberType() { return TensorType{DT_INT32, DT_INT64}; } - - static TensorType UnaryDataType() { - return TensorType{DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_BF16}; - } - - static TensorType FLOAT() { return TensorType{DT_FLOAT, DT_FLOAT16, DT_BF16}; } - - std::shared_ptr tensor_type_impl_; -}; -} // namespace ge - -namespace domi { -enum class ImplyType : unsigned int { - BUILDIN = 0, // Built in operator, normally executed by OME - TVM, // Compile to TVM bin file for execution - CUSTOM, // User defined calculation logic, executed by CPU - AI_CPU, // AICPU - CCE, // Cce - GELOCAL, // GE local, do node need execute by device - HCCL, // Hccl - INVALID = 0xFFFFFFFF, -}; -} // namespace domi - -#endif // INC_EXTERNAL_GRAPH_TYPES_H_ diff --git a/inc/metadef/inc/external/register/op_tiling_info.h b/inc/metadef/inc/external/register/op_tiling_info.h deleted file mode 100644 index cedbc2ce6..000000000 --- a/inc/metadef/inc/external/register/op_tiling_info.h +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Copyright 2019-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
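GetFormatFromSub, GetPrimaryFormat, GetSubFormat and HasSubFormat above implement the bit layout documented in their comment: the low byte holds the primary Format and the next two bytes hold an optional sub-format. A small round-trip sketch; the sub-format value 16 is arbitrary.

#include <cassert>
#include <cstdint>
#include "graph/types.h"

void FormatPackingExample() {
  // Pack FRACTAL_Z together with a sub-format (for example a group count).
  const int32_t packed = ge::GetFormatFromSub(ge::FORMAT_FRACTAL_Z, 16);

  assert(ge::GetPrimaryFormat(packed) == ge::FORMAT_FRACTAL_Z);
  assert(ge::GetSubFormat(packed) == 16);
  assert(ge::HasSubFormat(packed));

  // A plain format carries no sub-format.
  assert(!ge::HasSubFormat(ge::FORMAT_NCHW));
}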
- */ - -#ifndef INC_EXTERNAL_REGISTER_OP_TILING_INFO_H_ -#define INC_EXTERNAL_REGISTER_OP_TILING_INFO_H_ - -#include -#include -#include -#include -#include "external/graph/ascend_string.h" -#include "external/graph/tensor.h" - -namespace optiling { -using ByteBuffer = std::stringstream; - -enum TensorArgType { - TA_NONE, - TA_SINGLE, - TA_LIST, -}; - -class TeOpVarAttrArgsImpl; -class TeOpVarAttrArgs { - friend class VarAttrHelper; - -public: - TeOpVarAttrArgs() = default; - ~TeOpVarAttrArgs() = default; - const uint8_t *GetData(const std::string &name, const std::string &dtype, size_t &size) const; - -private: - std::shared_ptr impl_; -}; - -struct TeOpTensor { - std::vector shape; - std::vector ori_shape; - std::string format; - std::string ori_format; - std::string dtype; - std::string name; - std::map attrs; -}; - -struct TeOpTensorArg { - TensorArgType arg_type; - std::vector tensor; -}; - -struct OpRunInfo { - uint32_t block_dim; - std::vector workspaces; - ByteBuffer tiling_data; - bool clear_atomic; - uint64_t tiling_key; -}; - -using TeOpAttrArgs = std::vector; -using TeConstTensorData = std::tuple; - -struct TeOpParas { - std::vector inputs; - std::vector outputs; - std::map const_inputs; - TeOpAttrArgs attrs; - std::string op_type; - TeOpVarAttrArgs var_attrs; -}; - -struct OpCompileInfo { - std::string str; - std::string key; -}; - -namespace utils { -class OpRunInfoImpl; -class OpRunInfo { -public: - OpRunInfo(); - ~OpRunInfo() = default; - - OpRunInfo(const uint32_t &block_dim, const bool &clear_atomic, const uint64_t &tiling_key); - // Copy - OpRunInfo(const OpRunInfo &runinfo); - // Move - OpRunInfo(OpRunInfo &&runinfo); - // Copy - OpRunInfo &operator=(const OpRunInfo &runinfo); - // Move - OpRunInfo &operator=(OpRunInfo &&runinfo); - - void SetBlockDim(const uint32_t &block_dim); - uint32_t GetBlockDim() const; - - void AddWorkspace(const int64_t &workspace); - size_t GetWorkspaceNum() const; - ge::graphStatus GetWorkspace(const size_t &idx, int64_t &workspace) const; - void GetAllWorkspaces(std::vector &workspaces) const; - void SetWorkspaces(const std::vector &workspaces); - - template - void AddTilingData(const T &value) { - AddTilingData(reinterpret_cast(&value), sizeof(value)); - } - void AddTilingData(const char *value, size_t size); - ByteBuffer &GetAllTilingData(); - const ByteBuffer &GetAllTilingData() const; - void InternelSetTiling(const ByteBuffer &value); - void SetClearAtomic(bool clear_atomic); - bool GetClearAtomic() const; - - void SetTilingKey(const uint64_t &new_tiling_key); - uint64_t GetTilingKey() const; - -private: - std::shared_ptr impl_; -}; - -class OpCompileInfoImpl; -class OpCompileInfo { -public: - OpCompileInfo(); - ~OpCompileInfo() = default; - OpCompileInfo(const ge::AscendString &key, const ge::AscendString &value); - OpCompileInfo(const std::string &key, const std::string &value); - // Copy - OpCompileInfo(const OpCompileInfo &compileinfo); - // Move - OpCompileInfo(OpCompileInfo &&compileinfo); - // Copy - OpCompileInfo &operator=(const OpCompileInfo &compileinfo); - // Move - OpCompileInfo &operator=(OpCompileInfo &&compileinfo); - - void SetKey(const ge::AscendString &key); - const ge::AscendString &GetKey() const; - - void SetValue(const ge::AscendString &value); - const ge::AscendString &GetValue() const; - -private: - std::shared_ptr impl_; -}; -} -} // namespace optiling -#endif // INC_REGISTER_OP_TILING_REGISTRY_H_ diff --git a/inc/metadef/inc/external/register/op_tiling_registry.h 
b/inc/metadef/inc/external/register/op_tiling_registry.h deleted file mode 100644 index 67178b712..000000000 --- a/inc/metadef/inc/external/register/op_tiling_registry.h +++ /dev/null @@ -1,151 +0,0 @@ -/** - * Copyright 2019-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_REGISTER_OP_TILING_REGISTRY_H_ -#define INC_EXTERNAL_REGISTER_OP_TILING_REGISTRY_H_ - -#include -#include -#include -#include -#include -#include -#include "external/graph/operator.h" -#include "external/register/register_error_codes.h" -#include "external/register/register_types.h" -#include "external/register/op_tiling_info.h" - -#define REGISTER_OP_TILING(optype, opfunc) REGISTER_OP_TILING_UNIQ_HELPER(optype, opfunc, __COUNTER__) - -#define REGISTER_OP_TILING_UNIQ_HELPER(optype, opfunc, counter) REGISTER_OP_TILING_UNIQ(optype, opfunc, counter) - -#define REGISTER_OP_TILING_UNIQ(optype, opfunc, counter) \ - static optiling::OpTilingFuncRegistry g_##optype##TilingRegistryInterfV1##counter(#optype, opfunc) - -#define REGISTER_OP_TILING_V2(optype, opfunc) REGISTER_OP_TILING_UNIQ_HELPER_V2(optype, opfunc, __COUNTER__) - -#define REGISTER_OP_TILING_UNIQ_HELPER_V2(optype, opfunc, counter) REGISTER_OP_TILING_UNIQ_V2(optype, opfunc, counter) - -#define REGISTER_OP_TILING_UNIQ_V2(optype, opfunc, counter) \ - static optiling::OpTilingFuncRegistry g_##optype##TilingRegistryInterfV2##counter(#optype, opfunc) - -#define REGISTER_OP_TILING_V3(optype, tilingfunc, parsefunc) \ - REGISTER_OP_TILING_UNIQ_HELPER_V3(optype, tilingfunc, parsefunc, __COUNTER__) - -#define REGISTER_OP_TILING_UNIQ_HELPER_V3(optype, tilingfunc, parsefunc, counter) \ - REGISTER_OP_TILING_UNIQ_V3(optype, tilingfunc, parsefunc, counter) - -#define REGISTER_OP_TILING_UNIQ_V3(optype, tilingfunc, parsefunc, counter) \ - static optiling::OpTilingFuncRegistry g_##optype##TilingRegistryInterfV3##counter(#optype, tilingfunc, parsefunc) - -using Status = domi::Status; -namespace optiling { -template -ByteBuffer &ByteBufferPut(ByteBuffer &buf, const T &value) { - buf.write(reinterpret_cast(&value), sizeof(value)); - buf.flush(); - return buf; -} - -template -ByteBuffer &ByteBufferGet(ByteBuffer &buf, T &value) { - buf.read(reinterpret_cast(&value), sizeof(value)); - return buf; -} - -size_t ByteBufferGetAll(ByteBuffer &buf, char *dest, size_t dest_len); -ByteBuffer &ByteBufferPut(ByteBuffer &buf, const uint8_t *data, size_t dest_len); - -class CompileInfoCache { -public: - CompileInfoCache(const CompileInfoCache &) = delete; - CompileInfoCache &operator=(const CompileInfoCache &) = delete; - static CompileInfoCache& Instance(); - bool HasCompileInfo(const std::string &key); - void* GetCompileInfo(const std::string &key); - void SetCompileInfo(const std::string &key, void* value); - -private: - CompileInfoCache(); - ~CompileInfoCache(); - mutable std::mutex compile_info_mutex_; - std::unordered_map compile_info_map_; -}; - -using OpTilingFunc = std::function; -using OpTilingFuncPtr = std::shared_ptr; 
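ByteBufferPut and ByteBufferGet above stream the raw bytes of trivially copyable values into and out of the std::stringstream-backed ByteBuffer used for tiling data. A small round-trip sketch; the field names and values are arbitrary.

#include <cstdint>
#include "register/op_tiling_registry.h"

void TilingDataRoundTrip() {
  optiling::ByteBuffer buf;  // alias for std::stringstream
  const int32_t tile_len = 256;
  const int64_t loop_count = 8;
  optiling::ByteBufferPut(buf, tile_len);
  optiling::ByteBufferPut(buf, loop_count);

  int32_t read_tile_len = 0;
  int64_t read_loop_count = 0;
  // Values must be read back in the order they were written.
  optiling::ByteBufferGet(buf, read_tile_len);
  optiling::ByteBufferGet(buf, read_loop_count);
}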
-class FMK_FUNC_HOST_VISIBILITY OpTilingRegistryInterf { - public: - OpTilingRegistryInterf(std::string op_type, OpTilingFunc func); - ~OpTilingRegistryInterf() = default; - static std::unordered_map &RegisteredOpInterf(); -}; - -using OpRunInfoV2 = utils::OpRunInfo; -using OpCompileInfoV2 = utils::OpCompileInfo; -using OpTilingFuncV2 = std::function; -using OpTilingFuncV2Ptr = std::shared_ptr; -class FMK_FUNC_HOST_VISIBILITY OpTilingRegistryInterf_V2 { -public: - OpTilingRegistryInterf_V2(const std::string &op_type, OpTilingFuncV2 func); - ~OpTilingRegistryInterf_V2() = default; - static std::unordered_map &RegisteredOpInterf(); -}; -namespace utils { -} // namespace utils - -using OpTilingFuncV3 = std::function; -using OpParseFuncV3 = std::function; - -class OpTilingFuncInfo { -public: - explicit OpTilingFuncInfo(const std::string &op_type); - OpTilingFuncInfo() = default; - ~OpTilingFuncInfo() = default; - - bool IsFunctionV3(); - bool IsFunctionV2(); - bool IsFunctionV1(); - void SetOpTilingFunc(OpTilingFunc &tiling_func); - void SetOpTilingFuncV2(OpTilingFuncV2 &tiling_func); - void SetOpTilingFuncV3(OpTilingFuncV3 &tiling_func, OpParseFuncV3 &parse_func); - const OpTilingFunc& GetOpTilingFunc(); - const OpTilingFuncV2& GetOpTilingFuncV2(); - const OpTilingFuncV3& GetOpTilingFuncV3(); - const OpParseFuncV3& GetOpParseFuncV3(); - const std::string& GetOpType() const { - return op_type_; - } - -private: - std::string op_type_; - OpTilingFunc tiling_func_; - OpTilingFuncV2 tiling_func_v2_; - OpTilingFuncV3 tiling_func_v3_; - OpParseFuncV3 parse_func_v3_; -}; - -class FMK_FUNC_HOST_VISIBILITY OpTilingFuncRegistry { -public: - OpTilingFuncRegistry(const std::string &op_type, OpTilingFunc tiling_func); - OpTilingFuncRegistry(const std::string &op_type, OpTilingFuncV2 tiling_func); - OpTilingFuncRegistry(const std::string &op_type, OpTilingFuncV3 tiling_func, OpParseFuncV3 parse_func); - ~OpTilingFuncRegistry() = default; - static std::unordered_map &RegisteredOpFuncInfo(); -}; - -} // namespace optiling -#endif // INC_EXTERNAL_REGISTER_OP_TILING_REGISTRY_H_ diff --git a/inc/metadef/inc/external/register/register.h b/inc/metadef/inc/external/register/register.h deleted file mode 100644 index 535153321..000000000 --- a/inc/metadef/inc/external/register/register.h +++ /dev/null @@ -1,228 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
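REGISTER_OP_TILING_V2 above binds an op type to a tiling callback through a static OpTilingFuncRegistry instance. The sketch below assumes the OpTilingFuncV2 signature bool(const ge::Operator &, const OpCompileInfoV2 &, OpRunInfoV2 &), since the template arguments of the alias are not visible in this diff; the op type MyAddOne and all values are placeholders.

#include <cstdint>
#include "register/op_tiling_registry.h"

namespace optiling {
// Assumed OpTilingFuncV2 signature; see the alias declared above.
bool MyAddOneTiling(const ge::Operator &op, const OpCompileInfoV2 &compile_info,
                    OpRunInfoV2 &run_info) {
  (void)op;
  (void)compile_info;
  run_info.SetBlockDim(32U);                           // cores to launch (placeholder)
  run_info.SetTilingKey(1U);                           // selects a kernel branch
  run_info.AddTilingData(static_cast<int64_t>(1024));  // e.g. elements per core
  return true;
}

// Expands to a static OpTilingFuncRegistry for op type "MyAddOne".
REGISTER_OP_TILING_V2(MyAddOne, MyAddOneTiling);
}  // namespace optiling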
- */ - -#ifndef INC_EXTERNAL_REGISTER_REGISTER_H_ -#define INC_EXTERNAL_REGISTER_REGISTER_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "graph/operator.h" -#include "register/register_error_codes.h" -#include "register/register_fmk_types.h" -#include "register/register_types.h" - -using std::unique_ptr; -using std::map; -using std::make_shared; -using std::to_string; -using std::string; -using std::pair; -using std::vector; - -/*lint -e148*/ -namespace ge { -class Operator; -class TensorDesc; -class Tensor; -class TBEPluginManager; -} - -namespace google { -namespace protobuf { -class Message; -} -} - -namespace domi { -const int64_t kMaxNameLength = 1048576; // 1M - -enum DynamicType { - kInvalid = 0, - kInput = 1, - kOutput = 2 -}; -struct DynamicInputOutputInfo { - DynamicType type; // input/output - const char *port_name; - int64_t port_name_len; - const char *attr_name; - int64_t attr_name_len; - DynamicInputOutputInfo() - : type(kInvalid), port_name(nullptr), port_name_len(0), attr_name(nullptr), attr_name_len(0) {} - DynamicInputOutputInfo(DynamicType type, const char *port_name, int64_t port_name_len, const char *attr_name, - int64_t attr_name_len) - : type(type), - port_name(port_name), - port_name_len(port_name_len), - attr_name(attr_name), - attr_name_len(attr_name_len) {} -}; -Status AutoMappingByOpFn(const ge::Operator &op_src, ge::Operator &op); -Status AutoMappingByOpFnDynamic(const ge::Operator &op_src, ge::Operator &op, - const std::vector &dynamic_name_attr_value); -ATTRIBUTED_DEPRECATED(Status AutoMappingByOpFn(const ge::Operator &, ge::Operator &)) -Status AutoMappingFn(const google::protobuf::Message *op_src, ge::Operator &op); -ATTRIBUTED_DEPRECATED(Status AutoMappingByOpFnDynamic(const ge::Operator &, ge::Operator &, - const std::vector &)) -Status AutoMappingFnDynamic(const google::protobuf::Message *op_src, ge::Operator &op, - std::map> dynamic_name_attr_value, - int in_pos = -1, int out_pos = -1); -Status AutoMappingSubgraphIndex(const ge::Graph &graph, - const std::function &input, - const std::function &output); -Status AutoMappingSubgraphIndex(const ge::Graph &graph, - const std::function &input, - const std::function &output); -using google::protobuf::Message; -class OpRegistrationDataImpl; -class FrameworkRegistryImpl; - -using ParseParamFunc = std::function; -using ParseParamByOpFunc = std::function; -using FusionParseParamFunc = std::function, - ge::Operator &)>; -using FusionParseParamByOpFunc = std::function &, ge::Operator &)>; -using ParseSubgraphFunc = std::function; -using ParseOpToGraphFunc = std::function; -using ParseSubgraphFuncV2 = std::function; -using AutoMappingSubgraphIOIndexFunc = std::function &input, - const std::function &output)>; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY FrameworkRegistry { - public: - FrameworkRegistry(const FrameworkRegistry &) = delete; - FrameworkRegistry& operator = (const FrameworkRegistry &) = delete; - ~FrameworkRegistry(); - static FrameworkRegistry& Instance(); - void AddAutoMappingSubgraphIOIndexFunc(domi::FrameworkType framework, AutoMappingSubgraphIOIndexFunc fun); - AutoMappingSubgraphIOIndexFunc GetAutoMappingSubgraphIOIndexFunc(domi::FrameworkType framework); - private: - FrameworkRegistry(); - std::unique_ptr impl_; -}; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY AutoMappingSubgraphIOIndexFuncRegister { - public: - AutoMappingSubgraphIOIndexFuncRegister(domi::FrameworkType framework, AutoMappingSubgraphIOIndexFunc 
fun); - ~AutoMappingSubgraphIOIndexFuncRegister() {} -}; - -#define REGISTER_AUTOMAPPING_SUBGRAPH_IO_INDEX_FUNC(framework, fun) \ - static AutoMappingSubgraphIOIndexFuncRegister \ - auto_mapping_subgraph_fun_##framework(framework, fun); \ - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpRegistrationData { - public: - ATTRIBUTED_DEPRECATED(OpRegistrationData(const char *)) - OpRegistrationData(const std::string &om_optype); - - OpRegistrationData(const char *om_optype); - - ~OpRegistrationData(); - - OpRegistrationData &FrameworkType(const domi::FrameworkType &fmk_type); - - ATTRIBUTED_DEPRECATED(OpRegistrationData &OriginOpType(const std::vector &)) - OpRegistrationData &OriginOpType(const std::initializer_list &ori_optype_list); - - OpRegistrationData &OriginOpType(const std::vector &ori_op_type_list); - - ATTRIBUTED_DEPRECATED(OpRegistrationData &OriginOpType(const char *)) - OpRegistrationData &OriginOpType(const std::string &ori_optype); - - OpRegistrationData &OriginOpType(const char *ori_op_type); - - OpRegistrationData &ParseParamsFn(const ParseParamFunc &parseParamFn); - - OpRegistrationData &ParseParamsByOperatorFn(const ParseParamByOpFunc &parse_param_by_op_fn); - - OpRegistrationData &FusionParseParamsFn(const FusionParseParamFunc &fusionParseParamFn); - - OpRegistrationData &FusionParseParamsFn(const FusionParseParamByOpFunc &fusion_parse_param_fn); - - ATTRIBUTED_DEPRECATED(OpRegistrationData &ParseSubgraphPostFn(const ParseSubgraphFuncV2 &)) - OpRegistrationData &ParseSubgraphPostFn(const ParseSubgraphFunc &subgraph_post_fn); - - OpRegistrationData &ParseSubgraphPostFn(const ParseSubgraphFuncV2 &subgraph_post_fn); - - OpRegistrationData &ImplyType(const domi::ImplyType &imply_type); - - ATTRIBUTED_DEPRECATED(OpRegistrationData &DelInputWithCond(int, const char *, bool)) - OpRegistrationData &DelInputWithCond(int inputIdx, const std::string &attrName, bool attrValue); - - OpRegistrationData &DelInputWithCond(int input_idx, const char *attr_name, bool attr_value); - - ATTRIBUTED_DEPRECATED(OpRegistrationData &DelInputWithOriginalType(int, const char *)) - OpRegistrationData &DelInputWithOriginalType(int input_idx, const std::string &ori_type); - - OpRegistrationData &DelInputWithOriginalType(int input_idx, const char *ori_type); - - OpRegistrationData &InputReorderVector(const std::vector &input_order); - - OpRegistrationData &ParseOpToGraphFn(const ParseOpToGraphFunc &parse_op_to_graph_fn); - - domi::ImplyType GetImplyType () const; - ATTRIBUTED_DEPRECATED(Status GetOmOptype(ge::AscendString &) const) - std::string GetOmOptype () const; - Status GetOmOptype(ge::AscendString &om_op_type) const; - ATTRIBUTED_DEPRECATED(GetOriginOpTypeSet(std::set &) const) - std::set GetOriginOpTypeSet () const; - Status GetOriginOpTypeSet(std::set &ori_op_type) const; - domi::FrameworkType GetFrameworkType() const; - ParseParamFunc GetParseParamFn() const; - ParseParamByOpFunc GetParseParamByOperatorFn() const; - FusionParseParamFunc GetFusionParseParamFn() const; - FusionParseParamByOpFunc GetFusionParseParamByOpFn() const; - ParseSubgraphFunc GetParseSubgraphPostFn() const; - ParseOpToGraphFunc GetParseOpToGraphFn() const; - Status GetParseSubgraphPostFn(ParseSubgraphFuncV2 &func) const; - - private: - std::shared_ptr impl_; - friend class OpRegistry; - friend class OpRegistrationTbe; - friend class ge::TBEPluginManager; -}; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpReceiver { - public: - OpReceiver(OpRegistrationData ®_data); - ~OpReceiver() {} -}; - -#define 
REGISTER_CUSTOM_OP(name) REGISTER_CUSTOM_OP_UNIQ_HELPER(__COUNTER__, name) -#define REGISTER_CUSTOM_OP_UNIQ_HELPER(ctr, name) REGISTER_CUSTOM_OP_UNIQ(ctr, name) -#define REGISTER_CUSTOM_OP_UNIQ(ctr, name) \ - static OpReceiver register_op##ctr \ - __attribute__((unused)) = \ - OpRegistrationData(name) -} // namespace domi - -namespace ge { -using OpRegistrationData = domi::OpRegistrationData; -using OpReceiver = domi::OpReceiver; -} // namespace ge -/*lint +e148*/ -#endif // INC_EXTERNAL_REGISTER_REGISTER_H_ diff --git a/inc/metadef/inc/external/register/register_error_codes.h b/inc/metadef/inc/external/register/register_error_codes.h deleted file mode 100644 index a71bb72cd..000000000 --- a/inc/metadef/inc/external/register/register_error_codes.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_REGISTER_REGISTER_ERROR_CODES_H_ -#define INC_EXTERNAL_REGISTER_REGISTER_ERROR_CODES_H_ - -#define SYSID_FWK 3 // Subsystem ID -#define MODID_COMMON 0 // Common module ID - -#define DECLARE_ERRORNO(sysid, modid, name, value) \ - const domi::Status name = \ - ((0xFF & ((uint8_t)sysid)) << 24) | ((0xFF & ((uint8_t)modid)) << 16) | (0xFFFF & ((uint16_t)value)); - -#define DECLARE_ERRORNO_COMMON(name, value) DECLARE_ERRORNO(SYSID_FWK, MODID_COMMON, name, value) - -namespace domi { -using Status = uint32_t; - -// General error code -DECLARE_ERRORNO(0, 0, SUCCESS, 0); -DECLARE_ERRORNO(0xFF, 0xFF, FAILED, 0xFFFFFFFF); -DECLARE_ERRORNO_COMMON(PARAM_INVALID, 1); // 50331649 -DECLARE_ERRORNO(SYSID_FWK, 1, SCOPE_NOT_CHANGED, 201); -} // namespace domi - -#endif // INC_EXTERNAL_REGISTER_REGISTER_ERROR_CODES_H_ diff --git a/inc/metadef/inc/external/register/register_fmk_types.h b/inc/metadef/inc/external/register/register_fmk_types.h deleted file mode 100644 index 976160608..000000000 --- a/inc/metadef/inc/external/register/register_fmk_types.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_EXTERNAL_REGISTER_REGISTER_FMK_TYPES_H_ -#define INC_EXTERNAL_REGISTER_REGISTER_FMK_TYPES_H_ - -#include - -namespace domi { -/// -/// @ingroup domi_omg -/// @brief AI framework types -/// -enum FrameworkType { - CAFFE = 0, - MINDSPORE = 1, - TENSORFLOW = 3, - ANDROID_NN, - ONNX, - FRAMEWORK_RESERVED, -}; -} // namespace domi - -#endif // INC_EXTERNAL_REGISTER_REGISTER_FMK_TYPES_H_ diff --git a/inc/metadef/inc/external/register/register_pass.h b/inc/metadef/inc/external/register/register_pass.h deleted file mode 100644 index f58821fa1..000000000 --- a/inc/metadef/inc/external/register/register_pass.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_EXTERNAL_REGISTER_REGISTER_PASS_H_ -#define INC_EXTERNAL_REGISTER_REGISTER_PASS_H_ - -#include -#include -#include - -#include "graph/graph.h" -#include "ge/ge_api_error_codes.h" -#include "register/register_types.h" - -namespace ge { -class PassRegistrationDataImpl; -using CustomPassFunc = std::function; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY PassRegistrationData { - public: - PassRegistrationData() = default; - ~PassRegistrationData() = default; - - PassRegistrationData(std::string pass_name); - - PassRegistrationData &Priority(const int32_t &); - - PassRegistrationData &CustomPassFn(const CustomPassFunc &); - - std::string GetPassName() const; - int32_t GetPriority() const; - CustomPassFunc GetCustomPassFn() const; - - private: - std::shared_ptr impl_; -}; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY PassReceiver { - public: - PassReceiver(PassRegistrationData ®_data); - ~PassReceiver() = default; -}; - -#define REGISTER_CUSTOM_PASS(name) REGISTER_CUSTOM_PASS_UNIQ_HELPER(__COUNTER__, name) -#define REGISTER_CUSTOM_PASS_UNIQ_HELPER(ctr, name) REGISTER_CUSTOM_PASS_UNIQ(ctr, name) -#define REGISTER_CUSTOM_PASS_UNIQ(ctr, name) \ - static ::ge::PassReceiver register_pass##ctr \ - __attribute__((unused)) = \ - ::ge::PassRegistrationData(name) -} // namespace ge - -#endif // INC_EXTERNAL_REGISTER_REGISTER_PASS_H_ diff --git a/inc/metadef/inc/external/register/register_types.h b/inc/metadef/inc/external/register/register_types.h deleted file mode 100644 index 54382672b..000000000 --- a/inc/metadef/inc/external/register/register_types.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_EXTERNAL_REGISTER_REGISTER_TYPES_H_ -#define INC_EXTERNAL_REGISTER_REGISTER_TYPES_H_ - -namespace domi { -#if(defined(HOST_VISIBILITY)) && (defined(__GNUC__)) -#define FMK_FUNC_HOST_VISIBILITY __attribute__((visibility("default"))) -#else -#define FMK_FUNC_HOST_VISIBILITY -#endif -#if(defined(DEV_VISIBILITY)) && (defined(__GNUC__)) -#define FMK_FUNC_DEV_VISIBILITY __attribute__((visibility("default"))) -#else -#define FMK_FUNC_DEV_VISIBILITY -#endif -#ifdef __GNUC__ -#define ATTRIBUTED_DEPRECATED(replacement) __attribute__((deprecated("Please use " #replacement " instead."))) -#else -#define ATTRIBUTED_DEPRECATED(replacement) __declspec(deprecated("Please use " #replacement " instead.")) -#endif - -/// -/// @ingroup domi -/// @brief original tensor type -/// -typedef enum tagDomiTensorFormat { - DOMI_TENSOR_NCHW = 0, // < NCHW - DOMI_TENSOR_NHWC, // < NHWC - DOMI_TENSOR_ND, // < Nd Tensor - DOMI_TENSOR_NC1HWC0, // < NC1HWC0 - DOMI_TENSOR_FRACTAL_Z, // < FRACTAL_Z - DOMI_TENSOR_NC1C0HWPAD, - DOMI_TENSOR_NHWC1C0, - DOMI_TENSOR_FSR_NCHW, - DOMI_TENSOR_FRACTAL_DECONV, - DOMI_TENSOR_BN_WEIGHT, - DOMI_TENSOR_CHWN, // Android NN Depth CONV - DOMI_TENSOR_FILTER_HWCK, // filter input tensor format - DOMI_TENSOR_NDHWC, - DOMI_TENSOR_NCDHW, - DOMI_TENSOR_DHWCN, // 3D filter input tensor format - DOMI_TENSOR_DHWNC, - DOMI_TENSOR_RESERVED -} domiTensorFormat_t; -} // namespace domi - -#endif // INC_EXTERNAL_REGISTER_REGISTER_TYPES_H_ diff --git a/inc/metadef/inc/external/register/scope/scope_fusion_pass_register.h b/inc/metadef/inc/external/register/scope/scope_fusion_pass_register.h deleted file mode 100644 index 5602ff1d2..000000000 --- a/inc/metadef/inc/external/register/scope/scope_fusion_pass_register.h +++ /dev/null @@ -1,401 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef EXTERNAL_REGISTER_SCOPE_SCOPE_FUSION_PASS_REGISTER_H_ -#define EXTERNAL_REGISTER_SCOPE_SCOPE_FUSION_PASS_REGISTER_H_ - -#include -#include -#include -#include -#include -#include "ge/ge_api_error_codes.h" -#include "register/register_error_codes.h" -#include "register/register_types.h" -#include "graph/operator.h" - -#define CHECK_INNER_NODE_CONDITION(cond, fusion_rlt) \ - do { \ - if (!(cond)) { \ - if ((fusion_rlt) != nullptr) { \ - (fusion_rlt)->SetType(ge::kScopeInvalidType); \ - } \ - return; \ - } \ - } while (0) - -namespace domi { -class TensorFlowModelParser; -} // namespace domi -namespace ge { -const int32_t kFusionDisableIndex = 99999; -const char *const kScopeToMultiNodes = "ScopeToMultiNodes"; -const char *const kScopeInvalidType = "ScopeInvalidType"; -const char *const kInputFromFusionScope = "InputFromFusionScope"; -const char *const kOutputToFusionScope = "OutputToFusionScope"; -class ScopePattern; -using ScopeFusionPatterns = std::vector>; - -class ScopePassManager; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY Scope { - public: - Scope(); - ATTRIBUTED_DEPRECATED(Status Init(const char *, const char *, Scope *)) - Status Init(const std::string &name, const std::string &sub_type = "", Scope *father_scope = nullptr); - Status Init(const char *name, const char *sub_type, Scope *father_scope = nullptr); - ~Scope(); - ATTRIBUTED_DEPRECATED(Status Name(AscendString &) const) - const std::string &Name() const; - Status Name(AscendString &name) const; - ATTRIBUTED_DEPRECATED(Status SubType(AscendString &) const) - const std::string &SubType() const; - Status SubType(AscendString &sub_type) const; - ATTRIBUTED_DEPRECATED(Status AllNodesMap(std::unordered_map &) const) - const std::unordered_map &AllNodesMap() const; - Status AllNodesMap(std::unordered_map &node_map) const; - ATTRIBUTED_DEPRECATED(Scope *GetSubScope(const char *scope_name) const) - Scope *GetSubScope(const std::string &scope_name) const; - Scope *GetSubScope(const char *scope_name) const; - ATTRIBUTED_DEPRECATED(Status LastName(AscendString &) const) - const std::string LastName() const; - Status LastName(AscendString &name) const; - const std::vector &GetAllSubScopes() const; - const Scope *GetFatherScope() const; - - private: - class ScopeImpl; - std::unique_ptr impl_; - friend class ScopeBasePass; - friend class ScopeTree; - friend class NodeOpTypeFeature; - friend class NodeAttrFeature; - friend class ScopeFeature; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY FusionScopesResult { - public: - FusionScopesResult(); - Status Init(); - ~FusionScopesResult(); - ATTRIBUTED_DEPRECATED(void SetName(const char *)) - void SetName(const std::string &name); - void SetName(const char *name); - ATTRIBUTED_DEPRECATED(void SetType(const char *)) - void SetType(const std::string &type); - void SetType(const char *type); - ATTRIBUTED_DEPRECATED(void SetDescription(const char *)) - void SetDescription(const std::string &description); - void SetDescription(const char *description); - ATTRIBUTED_DEPRECATED(const Status Name(AscendString &) const) - const std::string &Name() const; - const Status Name(AscendString &name) const; - const std::vector &Nodes() const; - ATTRIBUTED_DEPRECATED(void InsertInputs(const char *, const std::vector &)) - void InsertInputs(const std::string &inner_op_name, const std::vector &index_map); - void InsertInputs(const char *inner_op_name, const std::vector &index_map); - ATTRIBUTED_DEPRECATED(void InsertOutputs(const char *, const std::vector &)) - void 
InsertOutputs(const std::string &inner_op_name, const std::vector &index_map); - void InsertOutputs(const char *inner_op_name, const std::vector &index_map); - - class InnerNodeInfo { - public: - ATTRIBUTED_DEPRECATED(InnerNodeInfo(const char *)) - explicit InnerNodeInfo(const std::string &fusion_node_name); - explicit InnerNodeInfo(const char *fusion_node_name); - ATTRIBUTED_DEPRECATED(InnerNodeInfo(const char *, const char *, const char *)) - InnerNodeInfo(const std::string &fusion_node_name, const std::string &name, const std::string &type); - InnerNodeInfo(const char *fusion_node_name, const char *name, const char *type); - InnerNodeInfo(InnerNodeInfo &&other) noexcept; - InnerNodeInfo &operator=(InnerNodeInfo &&other) noexcept; - InnerNodeInfo(const InnerNodeInfo &) = delete; - InnerNodeInfo &operator=(const InnerNodeInfo &) = delete; - ~InnerNodeInfo(); - ATTRIBUTED_DEPRECATED(InnerNodeInfo &SetName(const char *)) - InnerNodeInfo &SetName(const std::string &name); - InnerNodeInfo &SetName(const char *name); - ATTRIBUTED_DEPRECATED(InnerNodeInfo &SetType(const char *)) - InnerNodeInfo &SetType(const std::string &type); - InnerNodeInfo &SetType(const char *type); - ATTRIBUTED_DEPRECATED(InnerNodeInfo &InsertInput(const char *, int32_t)) - InnerNodeInfo &InsertInput(const std::string &input_node, int32_t peer_out_idx); - InnerNodeInfo &InsertInput(const char *input_node, int32_t peer_out_idx); - ATTRIBUTED_DEPRECATED(InnerNodeInfo &InsertOutput(const char *, int32_t)) - InnerNodeInfo &InsertOutput(const std::string &output_node, int32_t peer_in_idx); - InnerNodeInfo &InsertOutput(const char *output_node, int32_t peer_in_idx); - ge::graphStatus BuildInnerNode(); - ATTRIBUTED_DEPRECATED(ge::graphStatus SetInputFormat(const char *, const char *)) - ge::graphStatus SetInputFormat(const std::string &input_name, const std::string &format); - ge::graphStatus SetInputFormat(const char *input_name, const char *format); - ATTRIBUTED_DEPRECATED(ge::graphStatus SetOutputFormat(const char *, const char *)) - ge::graphStatus SetOutputFormat(const std::string &output_name, const std::string &format); - ge::graphStatus SetOutputFormat(const char *output_name, const char *format); - ATTRIBUTED_DEPRECATED(ge::graphStatus SetDynamicInputFormat(const char *, uint32_t index, const char *)) - ge::graphStatus SetDynamicInputFormat(const std::string &input_name, uint32_t index, const std::string &format); - ge::graphStatus SetDynamicInputFormat(const char *input_name, uint32_t index, const char *format); - ATTRIBUTED_DEPRECATED(ge::graphStatus SetDynamicOutputFormat(const char *, uint32_t, const char *)) - ge::graphStatus SetDynamicOutputFormat(const std::string &output_name, uint32_t index, const std::string &format); - ge::graphStatus SetDynamicOutputFormat(const char *output_name, uint32_t index, const char *format); - ge::Operator *MutableOperator(); - ATTRIBUTED_DEPRECATED(ge::graphStatus GetName(AscendString &) const) - std::string GetName() const; - ge::graphStatus GetName(AscendString &name) const; - ATTRIBUTED_DEPRECATED(ge::graphStatus GetType(AscendString &) const) - std::string GetType() const; - ge::graphStatus GetType(AscendString &type) const; - ATTRIBUTED_DEPRECATED(ge::graphStatus GetInputs(std::vector> &) const) - std::vector> GetInputs() const; - ge::graphStatus GetInputs(std::vector> &inputs) const; - ATTRIBUTED_DEPRECATED(ge::graphStatus GetOutputs(std::vector> &) const) - std::vector> GetOutputs() const; - ge::graphStatus GetOutputs(std::vector> &outputs) const; - private: - class 
InnerNodeInfoImpl; - std::unique_ptr impl_; - }; - ATTRIBUTED_DEPRECATED(InnerNodeInfo *AddInnerNode(const char *, const char *)) - InnerNodeInfo *AddInnerNode(const std::string &name, const std::string &type); - InnerNodeInfo *AddInnerNode(const char *name, const char *type); - InnerNodeInfo *MutableRecentInnerNode(); - InnerNodeInfo *MutableInnerNode(uint32_t index); - ge::graphStatus CheckInnerNodesInfo(); - - private: - class FusionScopesResultImpl; - std::unique_ptr impl_; - friend class ScopeGraph; - friend class ScopeBasePass; - friend class TensorFlowModelParser; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeTree { - public: - ScopeTree(); - Status Init(); - ScopeTree(const ScopeTree &scopetree) = delete; - ScopeTree &operator=(const ScopeTree &scopetree) = delete; - ~ScopeTree(); - - const std::vector &GetAllScopes() const; - - private: - class ScopeTreeImpl; - std::unique_ptr impl_; - friend class ScopeGraph; - friend class ScopeBasePass; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeGraph { - public: - ScopeGraph(); - Status Init(); - ScopeGraph(const ScopeGraph &scope_graph) = delete; - ScopeGraph &operator=(const ScopeGraph &scope_graph) = delete; - ~ScopeGraph(); - - const ScopeTree *GetScopeTree() const; - ATTRIBUTED_DEPRECATED(Status GetNodesMap(std::unordered_map &) const) - const std::unordered_map &GetNodesMap() const; - Status GetNodesMap(std::unordered_map &nodes_map) const; - - private: - class ScopeGraphImpl; - std::unique_ptr impl_; - friend class ScopePassManager; - friend class ScopeBasePass; - friend class TensorFlowModelParser; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeAttrValue { - public: - ScopeAttrValue(); - ScopeAttrValue(ScopeAttrValue const &attr_value); - ScopeAttrValue &operator=(ScopeAttrValue const &attr_value); - ~ScopeAttrValue(); - - void SetIntValue(int64_t value); - void SetFloatValue(float value); - ATTRIBUTED_DEPRECATED(void SetStringValue(const char *)) - void SetStringValue(std::string value); - void SetStringValue(const char *value); - void SetBoolValue(bool value); - - private: - class ScopeAttrValueImpl; - std::unique_ptr impl_; - friend class NodeAttrFeature; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeBaseFeature { - public: - virtual bool Match(const Scope *scope) = 0; - virtual ~ScopeBaseFeature(){}; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY NodeOpTypeFeature : ScopeBaseFeature { - public: - ATTRIBUTED_DEPRECATED(NodeOpTypeFeature(const char *, int, int)) - NodeOpTypeFeature(std::string nodeType, int num, int step = 0); - NodeOpTypeFeature(const char *node_type, int num, int step = 0); - NodeOpTypeFeature(NodeOpTypeFeature const &feature); - NodeOpTypeFeature &operator=(NodeOpTypeFeature const &feature); - ~NodeOpTypeFeature(); - bool Match(const Scope *scope) override; - - private: - class NodeOpTypeFeatureImpl; - std::unique_ptr impl_; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY NodeAttrFeature : ScopeBaseFeature { - public: - ATTRIBUTED_DEPRECATED(NodeAttrFeature(const char *, const char *, ge::DataType, ScopeAttrValue &)) - NodeAttrFeature(std::string nodeType, std::string attr_name, - ge::DataType datatype, ScopeAttrValue &attr_value); - NodeAttrFeature(const char *node_type, const char *attr_name, - ge::DataType datatype, ScopeAttrValue &attr_value); - NodeAttrFeature(NodeAttrFeature const &feature); - NodeAttrFeature &operator=(NodeAttrFeature const &feature); - ~NodeAttrFeature(); - bool Match(const Scope *scope) 
override; - - private: - class NodeAttrFeatureImpl; - std::unique_ptr impl_; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeFeature : ScopeBaseFeature { - public: - ATTRIBUTED_DEPRECATED(ScopeFeature(const char *, int32_t, const char *, const char *, int)) - ScopeFeature(std::string sub_type, int32_t num, std::string suffix = "", - std::string sub_scope_mask = "", int step = 0); - ScopeFeature(const char *sub_type, int32_t num, const char *suffix, - const char *sub_scope_mask, int step = 0); - ScopeFeature(ScopeFeature const &feature); - ScopeFeature &operator=(ScopeFeature const &feature); - ~ScopeFeature(); - bool Match(const Scope *scope) override; - - private: - class ScopeFeatureImpl; - std::unique_ptr impl_; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopePattern { - public: - ScopePattern(); - ~ScopePattern(); - ATTRIBUTED_DEPRECATED(ScopePattern &SetSubType(const char *)) - ScopePattern &SetSubType(const std::string &sub_type); - ScopePattern &SetSubType(const char *sub_type); - ScopePattern &AddNodeOpTypeFeature(NodeOpTypeFeature feature); - ScopePattern &AddNodeAttrFeature(NodeAttrFeature feature); - ScopePattern &AddScopeFeature(ScopeFeature feature); - - private: - class ScopePatternImpl; - std::unique_ptr impl_; - friend class ScopeBasePass; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopesResult { - public: - ScopesResult(); - ScopesResult(ScopesResult const &result); - ScopesResult &operator=(ScopesResult const &result); - ~ScopesResult(); - - void SetScopes(std::vector &scopes); - void SetNodes(std::vector &nodes); - - private: - class ScopesResultImpl; - std::unique_ptr impl_; - friend class ScopeBasePass; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeBasePass { - public: - ScopeBasePass(); - virtual ~ScopeBasePass(); - - protected: - // Subclasses implement respective fusion strategies and build the Patterns - virtual std::vector DefinePatterns() = 0; - // Define the name of the scope pass - virtual std::string PassName() = 0; - // Subclasses implement respective multi-scope or operator fusion methods across scopes - virtual Status LastMatchScopesAndOPs(std::shared_ptr &scope_graph, - std::vector &results) = 0; - // Subclasses implement their own results and set the input and output of the final fusion operator - virtual void GenerateFusionResult(const std::vector &scopes, FusionScopesResult *fusion_rlt) = 0; - - private: - class ScopeBasePassImpl; - std::unique_ptr impl_; - friend class ge::ScopePassManager; - friend class ScopeBasePassImpl; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeFusionPassRegistry { - public: - using CreateFn = ScopeBasePass *(*)(); - ~ScopeFusionPassRegistry(); - - static ScopeFusionPassRegistry& GetInstance(); - - ATTRIBUTED_DEPRECATED(void RegisterScopeFusionPass(const char *, CreateFn, bool)) - void RegisterScopeFusionPass(const std::string &pass_name, CreateFn create_fn, bool is_general); - - void RegisterScopeFusionPass(const char *pass_name, CreateFn create_fn, bool is_general); - - private: - ScopeFusionPassRegistry(); - class ScopeFusionPassRegistryImpl; - /*lint -e148*/ - std::unique_ptr impl_; - friend class TensorFlowModelParser; -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeUtil { - public: - ATTRIBUTED_DEPRECATED(static AscendString StringReplaceAll(const char *, const char *, const char *)) - static std::string StringReplaceAll(std::string str, const std::string &old_value, const std::string &new_value); - static 
AscendString StringReplaceAll(const char *str, const char *old_value, const char *new_value); - static void FreeScopePatterns(ScopeFusionPatterns &patterns); - static void FreeOneBatchPattern(std::vector &one_batch_pattern); -}; - -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeFusionPassRegistrar { - public: - ScopeFusionPassRegistrar(const char *pass_name, ScopeBasePass *(*create_fn)(), bool is_general); - ~ScopeFusionPassRegistrar() {} -}; - -#define REGISTER_SCOPE_FUSION_PASS(pass_name, scope_pass, is_general) \ - REGISTER_SCOPE_FUSION_PASS_UNIQ_HELPER(__COUNTER__, pass_name, scope_pass, is_general) - -#define REGISTER_SCOPE_FUSION_PASS_UNIQ_HELPER(ctr, pass_name, scope_pass, is_general) \ - REGISTER_SCOPE_FUSION_PASS_UNIQ(ctr, pass_name, scope_pass, is_general) - -#define REGISTER_SCOPE_FUSION_PASS_UNIQ(ctr, pass_name, scope_pass, is_general) \ - static ::ge::ScopeFusionPassRegistrar register_scope_fusion_pass##ctr __attribute__((unused)) = \ - ::ge::ScopeFusionPassRegistrar(pass_name, \ - []() -> ::ge::ScopeBasePass * { return new (std::nothrow) scope_pass(); }, \ - is_general) -} // namespace ge - -#endif // EXTERNAL_REGISTER_SCOPE_SCOPE_FUSION_PASS_REGISTER_H_ diff --git a/inc/metadef/inc/graph/aligned_ptr.h b/inc/metadef/inc/graph/aligned_ptr.h deleted file mode 100644 index 3924f3d85..000000000 --- a/inc/metadef/inc/graph/aligned_ptr.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef GE_ALIGNED_PTR_H_ -#define GE_ALIGNED_PTR_H_ - -#include -#include - -namespace ge { -class AlignedPtr { - public: - using Deleter = std::function; - using Allocator = std::function &base_addr)>; - explicit AlignedPtr(size_t buffer_size, size_t alignment = 16); - AlignedPtr() = default; - ~AlignedPtr() = default; - AlignedPtr(const AlignedPtr &) = delete; - AlignedPtr(AlignedPtr &&) = delete; - AlignedPtr &operator=(const AlignedPtr &) = delete; - AlignedPtr &operator=(AlignedPtr &&) = delete; - - const uint8_t *Get() const { return aligned_addr_; } - uint8_t *MutableGet() { return aligned_addr_; } - std::unique_ptr Reset(); - - static std::shared_ptr BuildFromAllocFunc(const AlignedPtr::Allocator &alloc_func, - const AlignedPtr::Deleter &delete_func); - static std::shared_ptr BuildFromData(uint8_t *data, - const AlignedPtr::Deleter &delete_func); /*lint !e148*/ - private: - std::unique_ptr base_ = nullptr; - uint8_t *aligned_addr_ = nullptr; -}; -} // namespace ge -#endif//GE_ALIGNED_PTR_H_ diff --git a/inc/metadef/inc/graph/anchor.h b/inc/metadef/inc/graph/anchor.h deleted file mode 100644 index 25a159864..000000000 --- a/inc/metadef/inc/graph/anchor.h +++ /dev/null @@ -1,284 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_ANCHOR_H_ -#define INC_GRAPH_ANCHOR_H_ - -#include "graph/compiler_options.h" - -#include -#include -#include -#include "graph/ge_error_codes.h" -#include "graph/range_vistor.h" -#include "graph/types.h" - -namespace ge { -enum AnchorStatus { - ANCHOR_SUSPEND = 0, // dat null - ANCHOR_CONST = 1, - ANCHOR_DATA = 2, // Effective - ANCHOR_RESERVED = 3 -}; -using std::string; -using std::vector; - -class Node; - -using NodePtr = std::shared_ptr; - -class Edge; - -using EdgePtr = std::shared_ptr; - -class Anchor; - -using AnchorPtr = std::shared_ptr; - -class DataAnchor; - -using DataAnchorPtr = std::shared_ptr; - -class InDataAnchor; - -using InDataAnchorPtr = std::shared_ptr; - -class OutDataAnchor; - -using OutDataAnchorPtr = std::shared_ptr; - -class ControlAnchor; - -using ControlAnchorPtr = std::shared_ptr; - -class InControlAnchor; - -using InControlAnchorPtr = std::shared_ptr; - -class OutControlAnchor; - -using OutControlAnchorPtr = std::shared_ptr; - -using ConstAnchor = const Anchor; - -class AnchorImpl; -using AnchorImplPtr = std::shared_ptr; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Anchor : public std::enable_shared_from_this { - friend class AnchorUtils; - - public: - using TYPE = const char *; - template - using Vistor = RangeVistor>; - - Anchor(const NodePtr& ownerNode, int idx); - - virtual ~Anchor(); - - protected: - // Whether the two anchor is equal - virtual bool Equal(AnchorPtr anchor) const = 0; - virtual bool IsTypeOf(TYPE type) const; - - public: - // Get all peer anchors connected to current anchor - Vistor GetPeerAnchors() const; - // Get peer anchor size - size_t GetPeerAnchorsSize() const; - // Get first peer anchor - AnchorPtr GetFirstPeerAnchor() const; - - // Get the anchor belong to which node - NodePtr GetOwnerNode() const; - - // Remove all links with the anchor - void UnlinkAll() noexcept; - - // Remove link with the given anchor - graphStatus Unlink(const AnchorPtr &peer); - - // Replace peer with new peers - graphStatus ReplacePeer(const AnchorPtr &oldPeer, const AnchorPtr &firstPeer, const AnchorPtr &secondPeer); - - // Judge if the anchor is linked with the given anchor - bool IsLinkedWith(const AnchorPtr &peer); - - // Get anchor index of the node - int GetIdx() const; - - // set anchor index of the node - void SetIdx(int index); - - protected: - AnchorImplPtr impl_; - template - static Anchor::TYPE TypeOf() { - static_assert(std::is_base_of::value, "T must be a Anchor!"); - return METADEF_FUNCTION_IDENTIFIER; - } - - public: - template - static std::shared_ptr DynamicAnchorCast(AnchorPtr anchorPtr) { - static_assert(std::is_base_of::value, "T must be a Anchor!"); - if (anchorPtr == nullptr || !anchorPtr->IsTypeOf()) { - return nullptr; - } - return std::static_pointer_cast(anchorPtr); - } - - template - bool IsTypeOf() { - return IsTypeOf(TypeOf()); - } -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY DataAnchor : public Anchor { - friend class AnchorUtils; - - public: - explicit DataAnchor(const NodePtr &ownerNode, int idx); - - virtual ~DataAnchor() = default; - - protected: - bool 
IsTypeOf(TYPE type) const override; - - private: - Format format_{FORMAT_ND}; - AnchorStatus status_{ANCHOR_SUSPEND}; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InDataAnchor : public DataAnchor { - friend class OutDataAnchor; - - friend class OutControlAnchor; - - public: - explicit InDataAnchor(const NodePtr &ownerNode, int idx); - - virtual ~InDataAnchor() = default; - - // Get source out data anchor - OutDataAnchorPtr GetPeerOutAnchor() const; - - // Build connection from OutDataAnchor to InDataAnchor - graphStatus LinkFrom(const OutDataAnchorPtr &src); - - protected: - bool Equal(AnchorPtr anchor) const override; - bool IsTypeOf(TYPE type) const override; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY OutDataAnchor : public DataAnchor { - friend class InDataAnchor; - - friend class AnchorUtils; - - public: - template - using Vistor = RangeVistor>; - - explicit OutDataAnchor(const NodePtr &ownerNode, int idx); - - virtual ~OutDataAnchor() = default; - // Get dst in data anchor(one or more) - Vistor GetPeerInDataAnchors() const; - uint32_t GetPeerInDataNodesSize() const; - - // Get dst in control anchor(one or more) - Vistor GetPeerInControlAnchors() const; - - // Build connection from OutDataAnchor to InDataAnchor - graphStatus LinkTo(const InDataAnchorPtr &dest); - - // Build connection from OutDataAnchor to InControlAnchor - graphStatus LinkTo(const InControlAnchorPtr &dest); - - protected: - bool Equal(AnchorPtr anchor) const override; - bool IsTypeOf(TYPE type) const override; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY ControlAnchor : public Anchor { - public: - explicit ControlAnchor(const NodePtr &ownerNode); - - explicit ControlAnchor(const NodePtr &ownerNode, int idx); - - virtual ~ControlAnchor() = default; - - protected: - bool IsTypeOf(TYPE type) const override; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InControlAnchor : public ControlAnchor { - friend class OutControlAnchor; - - friend class OutDataAnchor; - - public: - explicit InControlAnchor(const NodePtr &ownerNode); - - explicit InControlAnchor(const NodePtr &ownerNode, int idx); - - virtual ~InControlAnchor() = default; - - // Get source out control anchors - Vistor GetPeerOutControlAnchors() const; - bool IsPeerOutAnchorsEmpty() const; - - // Get source out data anchors - Vistor GetPeerOutDataAnchors() const; - - // Build connection from OutControlAnchor to InControlAnchor - graphStatus LinkFrom(const OutControlAnchorPtr &src); - - protected: - bool Equal(AnchorPtr anchor) const override; - bool IsTypeOf(TYPE type) const override; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY OutControlAnchor : public ControlAnchor { - friend class InControlAnchor; - - public: - template - using Vistor = RangeVistor>; - - explicit OutControlAnchor(const NodePtr &ownerNode); - - explicit OutControlAnchor(const NodePtr &ownerNode, int idx); - - virtual ~OutControlAnchor() = default; - - // Get dst in control anchor(one or more) - Vistor GetPeerInControlAnchors() const; - // Get dst data anchor in control anchor(one or more) - Vistor GetPeerInDataAnchors() const; - - // Build connection from OutControlAnchor to InControlAnchor - graphStatus LinkTo(const InControlAnchorPtr &dest); - // Build connection from OutDataAnchor to InDataAnchor - graphStatus LinkTo(const InDataAnchorPtr &dest); - - protected: - bool Equal(AnchorPtr anchor) const override; - bool IsTypeOf(TYPE type) const override; -}; -} // namespace ge -#endif // INC_GRAPH_ANCHOR_H_ diff --git 
a/inc/metadef/inc/graph/any_value.h b/inc/metadef/inc/graph/any_value.h deleted file mode 100644 index 9d68fecf5..000000000 --- a/inc/metadef/inc/graph/any_value.h +++ /dev/null @@ -1,341 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef EXECUTE_GRAPH_ANY_VALUE_H -#define EXECUTE_GRAPH_ANY_VALUE_H -#include -#include -#include - -#include "graph/types.h" -#include "type_utils.h" -#include "external/graph/ge_error_codes.h" -namespace ge { -class Buffer; -class GeTensor; -class GeTensorDesc; -class ComputeGraph; -class NamedAttrs; -using GeTensorPtr = std::shared_ptr; -using ComputeGraphPtr = std::shared_ptr; -class AnyValue { -public: - // 后续删除,新增代码请勿使用这堆using - using INT = int64_t; - using FLOAT = float; - using BOOL = bool; - using STR = std::string; - using TENSOR = GeTensorPtr; - using TENSOR_DESC = GeTensorDesc; - using GRAPH = ComputeGraphPtr; - using BYTES = Buffer; - using NAMED_ATTRS = ge::NamedAttrs; - using DATA_TYPE = ge::DataType; - - using LIST_INT = std::vector; - using LIST_FLOAT = std::vector; - using LIST_BOOL = std::vector; - using LIST_STR = std::vector; - using LIST_TENSOR = std::vector; - using LIST_TENSOR_DESC = std::vector; - using LIST_GRAPH = std::vector; - using LIST_BYTES = std::vector; - using LIST_NAMED_ATTRS = std::vector; - using LIST_DATA_TYPE = std::vector; - using LIST_LIST_INT = std::vector>; - using LIST_LIST_FLOAT = std::vector>; - using NamedAttrs = ge::NamedAttrs; - // public type definitions - // 这堆ValueType的预定义本质上是反向依赖,AnyValue不应该反向依赖ComputeGraph等数据结构 - // 后续整改掉 - enum ValueType { - VT_NONE = 0, - VT_STRING, - VT_FLOAT, - VT_BOOL, - VT_INT, - VT_TENSOR_DESC, - VT_TENSOR, - VT_BYTES, - VT_GRAPH, - VT_NAMED_ATTRS, - VT_LIST_LIST_INT, - VT_DATA_TYPE, - VT_LIST_LIST_FLOAT, - - VT_LIST_BASE = 1000, - VT_LIST_STRING = VT_LIST_BASE + VT_STRING, - VT_LIST_FLOAT = VT_LIST_BASE + VT_FLOAT, - VT_LIST_BOOL = VT_LIST_BASE + VT_BOOL, - VT_LIST_INT = VT_LIST_BASE + VT_INT, - VT_LIST_TENSOR_DESC = VT_LIST_BASE + VT_TENSOR_DESC, - VT_LIST_TENSOR = VT_LIST_BASE + VT_TENSOR, - VT_LIST_BYTES = VT_LIST_BASE + VT_BYTES, - VT_LIST_GRAPH = VT_LIST_BASE + VT_GRAPH, - VT_LIST_NAMED_ATTRS = VT_LIST_BASE + VT_NAMED_ATTRS, - VT_LIST_DATA_TYPE = VT_LIST_BASE + VT_DATA_TYPE, - }; - -public: - AnyValue() = default; - AnyValue(AnyValue &&other) noexcept; - AnyValue(const AnyValue &other) { - if (!other.IsEmpty()) { - other.operate_(kOpClone, &other, this); - } - } - AnyValue &operator=(AnyValue &&other) noexcept; - AnyValue &operator=(const AnyValue &other); - bool operator==(const AnyValue &other) const noexcept; - ~AnyValue() { - Clear(); - } - - template - static AnyValue CreateFrom(T &&value); - // 如果只有万能引用,那么Set(左值)这种调用方法会出错,因此有了这个函数 - template - static AnyValue CreateFrom(const T &value); - - template - graphStatus SetValue(T &&value); - - // 如果只有万能引用,那么Set(左值)这种调用方法会出错,因此有了这个函数 - template - graphStatus SetValue(const T &value); - - template - graphStatus SetValue(std::initializer_list val); - 
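// A small usage sketch for the setters above and the getters declared next, as
// it might appear in calling code rather than in this header. The stripped
// template parameter lists are assumed to be the usual single-type forms
// (CreateFrom<T>, SetValue<T>, Get<T>, GetValue<T>); DemoAnyValue is an
// illustrative name only.
inline void DemoAnyValue() {
  ge::AnyValue av = ge::AnyValue::CreateFrom(static_cast<int64_t>(42));
  // Re-setting with a different type clears the old holder and replaces the
  // stored value together with its type id.
  (void)av.SetValue(std::string("hello"));
  std::string out;
  if (av.GetValue(out) == ge::GRAPH_SUCCESS) {
    // out == "hello"; a mismatched Get<float>() would return nullptr because
    // SameType<float>() is false.
  }
  // Whether the value lives in the inline buffer or on the heap depends on the
  // size check in InnerSet; its exact threshold was carried by the stripped
  // aligned_storage parameters, so it is not reproduced here.
}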
- template - graphStatus GetValue(T &value) const; - template - const T *Get() const; - template - T *MutableGet(); - - template - bool SameType() const noexcept; - - void Swap(AnyValue &other) noexcept; - - void Clear() { - if (operate_ == nullptr) { - return; - } - operate_(kOpClear, nullptr, this); - } - - bool IsEmpty() const noexcept { - return operate_ == nullptr; - } - - ValueType GetValueType() const noexcept; - TypeId GetValueTypeId() const noexcept; - AnyValue Copy() const; - -private: - template - void InnerSet(T &&value); - const void *GetAddr() const; - -private: - enum OperateType { kOpClear, kOpGetAddr, kOpClone, kOpMove, kGetTypeId, kOperateTypeEnd }; - - template - struct InlineOperations { - static void Operate(OperateType ot, const AnyValue *av, void *out); - static void Construct(const T &value, AnyValue *av); - static void Construct(T &&value, AnyValue *av); - }; - - template - struct AllocateOperations { - static void Operate(OperateType ot, const AnyValue *av, void *out); - static void Construct(const T &value, AnyValue *av); - static void Construct(T &&value, AnyValue *av); - }; - -private: - using ValueHolder = union { - void *pointer; - std::aligned_storage::type inline_buf; - }; - ValueHolder holder_{}; - - void (*operate_)(OperateType ot, const AnyValue *av, void *out){nullptr}; -}; -using GeAttrValue = AnyValue; - -template -void AnyValue::AllocateOperations::Construct(const T &value, AnyValue *av) { - av->holder_.pointer = new (std::nothrow) T(value); - av->operate_ = AnyValue::AllocateOperations::Operate; -} -template -void AnyValue::AllocateOperations::Construct(T &&value, AnyValue *av) { - av->holder_.pointer = ::new (std::nothrow) T(std::forward(value)); - av->operate_ = AnyValue::AllocateOperations::Operate; -} -template -void AnyValue::AllocateOperations::Operate(AnyValue::OperateType ot, const AnyValue *av, void *out) { - switch (ot) { - case kOpClear: { - auto av_p = reinterpret_cast(out); - delete reinterpret_cast(av_p->holder_.pointer); - av_p->holder_.pointer = nullptr; - av_p->operate_ = nullptr; - break; - } - case kOpGetAddr: - *reinterpret_cast(out) = const_cast(av->holder_.pointer); - break; - case kOpClone: - reinterpret_cast(out)->holder_.pointer = - new (std::nothrow) T(*reinterpret_cast(av->holder_.pointer)); - reinterpret_cast(out)->operate_ = av->operate_; - break; - case kOpMove: { - auto av_p = reinterpret_cast(out); - av_p->holder_.pointer = av->holder_.pointer; - av_p->operate_ = av->operate_; - const_cast(av)->holder_.pointer = nullptr; - break; - } - case kGetTypeId: - *reinterpret_cast(out) = GetTypeId(); - break; - default: - break; - } -} -template -void AnyValue::InlineOperations::Construct(const T &value, AnyValue *av) { - ::new (&(av->holder_.inline_buf)) T(value); - av->operate_ = AnyValue::InlineOperations::Operate; -} -template -void AnyValue::InlineOperations::Construct(T &&value, AnyValue *av) { - Construct(value, av); -} -template -void AnyValue::InlineOperations::Operate(AnyValue::OperateType ot, const AnyValue *av, void *out) { - switch (ot) { - case kOpClear: { - auto av_p = reinterpret_cast(out); - reinterpret_cast(&av_p->holder_.inline_buf)->~T(); - av_p->operate_ = nullptr; - break; - } - case kOpGetAddr: - *reinterpret_cast(out) = const_cast(reinterpret_cast(&av->holder_.inline_buf)); - break; - case kOpClone: { - auto av_p = reinterpret_cast(out); - new (&av_p->holder_.inline_buf) T(*reinterpret_cast(&av->holder_.inline_buf)); - av_p->operate_ = av->operate_; - break; - } - case kOpMove: { - auto av_p = 
reinterpret_cast(out); - auto moved_t_p = const_cast(reinterpret_cast(&av->holder_.inline_buf)); - new (&av_p->holder_.inline_buf) T(std::move(*moved_t_p)); - av_p->operate_ = av->operate_; - break; - } - case kGetTypeId: - *reinterpret_cast(out) = GetTypeId(); - break; - default: - break; - } -} - -template -AnyValue AnyValue::CreateFrom(T &&value) { - AnyValue av; - av.InnerSet(std::forward(value)); - return av; -} -template -AnyValue AnyValue::CreateFrom(const T &value) { - AnyValue av; - av.InnerSet(value); - return av; -} -template -void AnyValue::InnerSet(T &&value) { - using PureT = typename std::remove_cv::type>::type; - using Inline = std::integral_constant; - using Operations = - typename std::conditional, AnyValue::AllocateOperations>::type; - - Operations::Construct(std::forward(value), this); -} -template -graphStatus AnyValue::SetValue(T &&value) { - Clear(); - InnerSet(std::forward(value)); - return GRAPH_SUCCESS; -} -template -graphStatus AnyValue::SetValue(const T &value) { - Clear(); - InnerSet(value); - return GRAPH_SUCCESS; -} - -template -graphStatus AnyValue::SetValue(std::initializer_list values) { - Clear(); - InnerSet(std::vector(std::move(values))); - return GRAPH_SUCCESS; -} -template -const T *AnyValue::Get() const { - if (!SameType()) { - return nullptr; - } - if (IsEmpty()) { - return nullptr; - } - return reinterpret_cast(GetAddr()); -} -template -graphStatus AnyValue::GetValue(T &value) const { - auto p = Get(); - if (p == nullptr) { - return GRAPH_FAILED; - } - value = *p; - return GRAPH_SUCCESS; -} -template -T *AnyValue::MutableGet() { - return const_cast(Get()); -} -template -bool AnyValue::SameType() const noexcept { - if (operate_ == nullptr) { - return false; - } - TypeId tid = kInvalidTypeId; - operate_(kGetTypeId, this, &tid); - return tid == GetTypeId(); -} -} // namespace ge - -#endif // EXECUTE_GRAPH_ANY_VALUE_H diff --git a/inc/metadef/inc/graph/ascend_limits.h b/inc/metadef/inc/graph/ascend_limits.h deleted file mode 100644 index ccc85cb59..000000000 --- a/inc/metadef/inc/graph/ascend_limits.h +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef METADEF_CXX_ASCEND_LIMITS_H -#define METADEF_CXX_ASCEND_LIMITS_H -namespace ge { -constexpr int kDefaultMaxAttrNum = 6; -constexpr int kDefaultMaxInputNum = 8; -constexpr int kDefaultMaxOutputNum = 8; -constexpr int kDefaultMaxRank = 8; -} -#endif // METADEF_CXX_ASCEND_LIMITS_H diff --git a/inc/metadef/inc/graph/attr_store.h b/inc/metadef/inc/graph/attr_store.h deleted file mode 100644 index 69c8e0fab..000000000 --- a/inc/metadef/inc/graph/attr_store.h +++ /dev/null @@ -1,202 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef EXECUTE_GRAPH_ATTR_STORE_H -#define EXECUTE_GRAPH_ATTR_STORE_H -#include -#include -#include -#include - -#include "any_value.h" - -namespace ge { -using AttrId = uint64_t; -using AttrSubId = uint32_t; -enum AttrType { - kAttrPredefinedInIr, // IR预定义的属性 - kAttrGeneral, // 通用属性 - kAttrTypeEnd -}; -constexpr inline uint32_t GetAttrType(AttrId id) { - return id >> 32; -} -constexpr inline uint32_t GetSubAttrId(AttrId id) { - return id & 0xffffffff; -} -constexpr inline AttrId GetAttrId(uint32_t type, uint32_t sub_id) { - return static_cast(type) << 32 | static_cast(sub_id); -} -constexpr AttrId kInvalidAttrId = GetAttrId(0xffffffff, 0); - -class AttrStore { -public: - static AttrStore Create(size_t pre_defined_attr_count); - - template - bool Set(AttrId attr_id, T &&value); - template - bool Set(AttrId attr_id, const T &value); - template - bool SetByName(const std::string &name, T &&value); - template - bool SetByName(const std::string &name, const T &value); - - template - const T *Get(AttrId attr_id) const; - template - T *MutableGet(AttrId attr_id); - template - const T *GetByName(const std::string &name) const; - template - T *MutableGetByName(const std::string &name); - - AttrId GetIdByName(const std::string &name) const noexcept; - void SetNameAndId(std::string name, AttrId id); - - bool Exists(AttrId attr_id) const noexcept; - bool Exists(const std::string &name) const noexcept; - - bool Delete(const std::string &name); - - void Swap(AttrStore &other); - bool SetAnyValueByName(const std::string &name, const AnyValue &value); - - // unordered版本更好,为了兼容老版本接口,仍然用set和map,不论用哪种数据结构,这都是非常低效的接口 - std::set GetAllAttrNames() const; - std::map GetAllAttrs() const; - - AnyValue *MutableAnyValue(const std::string &name) noexcept; - AnyValue *GetOrCreateAnyValue(const std::string &name); - const AnyValue *GetAnyValue(const std::string &name) const noexcept; - -private: - AnyValue *MutableAnyValue(AttrId attr_id) noexcept; - AnyValue *GetOrCreateAnyValue(AttrId attr_id); - const AnyValue *GetAnyValue(AttrId attr_id) const noexcept; - -private: - constexpr static int kDefaultMaxAttrCount = 8; - - class PreDefinedAttrStore { - public: - bool Exists(AttrSubId index) const noexcept; - bool Delete(AttrSubId index); - void Swap(PreDefinedAttrStore &other); - - AnyValue *GetOrCreateAnyValue(AttrSubId index); - AnyValue *MutableAnyValue(AttrSubId index) noexcept; - const AnyValue *GetAnyValue(AttrSubId index) const noexcept; - - void Resize(size_t s); - - private: - std::vector attrs_; - }; - - class CustomDefinedAttrStore { - public: - bool Exists(const std::string &name) const noexcept; - bool Delete(const std::string &name); - void Swap(CustomDefinedAttrStore &other); - - AnyValue *GetOrCreateAnyValue(const std::string &name); - AnyValue *MutableAnyValue(const std::string &name) noexcept; - const AnyValue *GetAnyValue(const std::string &name) const noexcept; - - void GetAllNames(std::set &names) const; - void GetAllAttrs(std::map &names_to_attr) const; - - private: - std::unordered_map attrs_; - }; - -private: - std::unordered_map names_to_id_; - // 
更好的办法是定义一个虚基类、派生出两个子类,然后保存两个子类的指针:`std::array, kAttrTypeEnd>` - // 然后根据不同的SubAttr类型,调用对应子类的函数。但是这么做会导致创建AttrStore时,总会带有两次子类实例堆申请的开销, - // 为了减少堆内存申请,直接将子类平铺在成员变量上。 - PreDefinedAttrStore pre_defined_attrs_; - CustomDefinedAttrStore general_attrs_; -}; - -#define SET_IMPL(key, value) \ - auto v = GetOrCreateAnyValue(key); \ - if (v == nullptr) { \ - return false; \ - } \ - v->SetValue(value); \ - return true; - -#define SET_IMPL_RVALUE(key, value) \ - auto v = GetOrCreateAnyValue(key); \ - if (v == nullptr) { \ - return false; \ - } \ - v->SetValue(std::forward(value)); \ - return true; - -template -bool AttrStore::Set(AttrId attr_id, const T &value) { - SET_IMPL(attr_id, value) -} -template -bool AttrStore::Set(AttrId attr_id, T &&value) { - SET_IMPL_RVALUE(attr_id, value) -} -template -bool AttrStore::SetByName(const std::string &name, T &&value) { - SET_IMPL_RVALUE(name, value) -} -template -bool AttrStore::SetByName(const std::string &name, const T &value) { - SET_IMPL(name, value) -} - -#define GET_IMPL(key) \ - auto v = GetAnyValue(key); \ - if (v == nullptr) { \ - return nullptr; \ - } \ - return v->Get(); - -template -const T *AttrStore::Get(AttrId attr_id) const { - GET_IMPL(attr_id) -} -template -const T *AttrStore::GetByName(const std::string &name) const { - GET_IMPL(name) -} - -#define MUTABLE_IMPL(key) \ - auto v = MutableAnyValue(key); \ - if (v == nullptr) { \ - return nullptr; \ - } \ - return v->MutableGet(); -template -T *AttrStore::MutableGet(AttrId attr_id) { - MUTABLE_IMPL(attr_id) -} -template -T *AttrStore::MutableGetByName(const std::string &name) { - MUTABLE_IMPL(name) -} - -} // namespace ge - -#endif // EXECUTE_GRAPH_ATTR_STORE_H diff --git a/inc/metadef/inc/graph/attr_value_serializable.h b/inc/metadef/inc/graph/attr_value_serializable.h deleted file mode 100644 index 1918d9c40..000000000 --- a/inc/metadef/inc/graph/attr_value_serializable.h +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_ATTR_VALUE_SERIALIZABLE_H_ -#define INC_GRAPH_ATTR_VALUE_SERIALIZABLE_H_ - -#include -#include -#include "graph/ge_attr_value.h" -#include "graph/compiler_options.h" - -#endif // INC_GRAPH_ATTR_VALUE_SERIALIZABLE_H_ diff --git a/inc/metadef/inc/graph/buffer.h b/inc/metadef/inc/graph/buffer.h deleted file mode 100644 index f9aaf2a14..000000000 --- a/inc/metadef/inc/graph/buffer.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_BUFFER_H_ -#define INC_GRAPH_BUFFER_H_ - -#include -#include -#include -#include -#include "detail/attributes_holder.h" -#include "graph/compiler_options.h" - -namespace ge { - -using std::shared_ptr; - -class BufferImpl; -using BufferImplPtr = std::shared_ptr; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Buffer { -public: - Buffer(); - Buffer(const Buffer &other); - - explicit Buffer(std::size_t bufferSize, std::uint8_t defualtVal = 0); - - ~Buffer(); - - Buffer &operator=(const Buffer &other); - static Buffer CopyFrom(const std::uint8_t *data, std::size_t bufferSize); - - const std::uint8_t *GetData() const; - std::uint8_t *GetData(); - std::size_t GetSize() const; - void ClearBuffer(); - - // For compatibility - const std::uint8_t *data() const; - std::uint8_t *data(); - std::size_t size() const; - void clear(); - uint8_t operator[](size_t index) const; - -private: - BufferImplPtr impl_; - - // Create from protobuf obj - Buffer(const ProtoMsgOwner &protoOnwer, proto::AttrDef *buffer); - Buffer(const ProtoMsgOwner &protoOnwer, std::string *buffer); - - friend class GeAttrValueImp; - friend class GeTensor; - friend class BufferUtils; -}; - -class BufferUtils { -public: - static Buffer CreateShareFrom(const Buffer &other); - static Buffer CreateCopyFrom(const Buffer &other); //lint !e148 - static Buffer CreateCopyFrom(const std::uint8_t *data, std::size_t buffer_size); //lint !e148 - static void ShareFrom(const Buffer &from, Buffer &to); - static void CopyFrom(const Buffer &from, Buffer &to); -}; -} // namespace ge -#endif // INC_GRAPH_BUFFER_H_ diff --git a/inc/metadef/inc/graph/common_error_codes.h b/inc/metadef/inc/graph/common_error_codes.h deleted file mode 100644 index cdf9086f6..000000000 --- a/inc/metadef/inc/graph/common_error_codes.h +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_COMMON_ERROR_CODES_H_ -#define INC_GRAPH_COMMON_ERROR_CODES_H_ - -#include "external/graph/ge_error_codes.h" - -namespace ge { -const graphStatus NO_DEPENDENCE_FUNC = 50331647; -const graphStatus NO_OVERLAP_DIM = 50331646; -const graphStatus NOT_SUPPORT_SLICE = 50331645; -} // namespace ge - -#endif // INC_GRAPH_COMMON_ERROR_CODES_H_ diff --git a/inc/metadef/inc/graph/compiler_options.h b/inc/metadef/inc/graph/compiler_options.h deleted file mode 100644 index f31ad75c3..000000000 --- a/inc/metadef/inc/graph/compiler_options.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_COMPILER_OPTIONS_H_ -#define INC_GRAPH_COMPILER_OPTIONS_H_ - -namespace ge { -#ifdef __GNUC__ -#define METADEF_ATTRIBUTE_UNUSED __attribute__((unused)) -#define METADEF_FUNCTION_IDENTIFIER __PRETTY_FUNCTION__ -#define METADEF_BUILTIN_PREFETCH(args_addr) __builtin_prefetch(args_addr) - -#ifdef HOST_VISIBILITY -#define GE_FUNC_HOST_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_HOST_VISIBILITY -#endif - -#ifdef DEV_VISIBILITY -#define GE_FUNC_DEV_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_DEV_VISIBILITY -#endif - -#else // WINDOWS -#define METADEF_ATTRIBUTE_UNUSED -#define METADEF_FUNCTION_IDENTIFIER __FUNCSIG__ -#define METADEF_BUILTIN_PREFETCH(args_addr) -#define GE_FUNC_HOST_VISIBILITY -#define GE_FUNC_DEV_VISIBILITY -#endif -} // namespace ge - -#endif // INC_GRAPH_COMPILER_OPTIONS_H_ \ No newline at end of file diff --git a/inc/metadef/inc/graph/compute_graph.h b/inc/metadef/inc/graph/compute_graph.h deleted file mode 100644 index 2105fa5a5..000000000 --- a/inc/metadef/inc/graph/compute_graph.h +++ /dev/null @@ -1,277 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_GRAPH_COMPUTE_GRAPH_H_ -#define INC_GRAPH_COMPUTE_GRAPH_H_ - -#include -#include -#include -#include -#include -#include -#include -#include "detail/attributes_holder.h" -#include "graph/ge_attr_value.h" -#include "graph/anchor.h" -#include "graph/node.h" -#include "graph/op_desc.h" -#include "graph/range_vistor.h" - -namespace ge { -using ConstComputeGraph = const ComputeGraph; - -class OperatorImpl; -using OperatorImplPtr = std::shared_ptr; - -class ComputeGraphImpl; -using ComputeGraphImplPtr = std::shared_ptr; - -using NodeFilter = std::function; -using GraphFilter = std::function; - -class ComputeGraph : public std::enable_shared_from_this, public AttrHolder { - friend class GraphUtils; - - public: - template - using Vistor = RangeVistor>; - - explicit ComputeGraph(const std::string &name); - ~ComputeGraph() override; - ComputeGraph(const ge::ComputeGraph&); - ComputeGraph(ge::ComputeGraph&&); - - std::string GetName() const; - void SetName(const std::string &name); - - using AttrHolder::DelAttr; - using AttrHolder::GetAttr; - using AttrHolder::HasAttr; - using AttrHolder::SetAttr; - - size_t GetAllNodesSize() const; - Vistor GetAllNodes() const; - // is_unknown_shape: false, same with GetAllNodes func - // is_unknown_shape: true, same with GetDirectNodes func - Vistor GetNodes(bool is_unknown_shape) const; - Vistor GetNodes(bool is_unknown_shape, const NodeFilter &node_filter, const GraphFilter &graph_filter) const; - size_t GetDirectNodesSize() const; - Vistor GetDirectNode() const; - Vistor GetInputNodes() const; - Vistor GetOutputNodes() const; - - NodePtr FindNode(const std::string &name) const; - NodePtr FindFirstNodeMatchType(const std::string &name) const; - /*lint -e504*/ - // AddNode with NodePtr - NodePtr AddNode(NodePtr node); - NodePtr AddNode(OpDescPtr op); - NodePtr AddNode(OpDescPtr op, int64_t id); // for unserialize - NodePtr AddNodeFront(NodePtr node); - NodePtr AddNodeFront(const OpDescPtr &op); - NodePtr AddInputNode(NodePtr node); - NodePtr AddOutputNode(NodePtr node); - NodePtr AddOutputNodeByIndex(NodePtr node, int32_t index); - - graphStatus RemoveNode(const NodePtr &node); - graphStatus RemoveInputNode(const NodePtr &node); - graphStatus RemoveOutputNode(const NodePtr &node); - graphStatus RemoveConstInput(const NodePtr &node); - - /// Add a subgraph to this graph. The subgraph must has a parent graph and parent node, - /// which means the member functions `SetParentGraph` and `SetParentNode` of the subgraph - /// must be called before add it to the root graph. and subgraph->GetParentNode()->GetOwnerGraph() - /// must equal to subgraph->GetOwnerGraph(). - /// The subgraphs can only be added to a *root graph*. A root graph is a graph without any parent graph. 
- /// The subgraph's name SHOULD(not must) be the same as the parameter `name` - graphStatus AddSubgraph(const std::string &name, const std::shared_ptr &subgraph); - graphStatus AddSubgraph(const std::shared_ptr &subgraph); - - void RemoveSubgraph(const std::string &name); - void RemoveSubgraph(const std::shared_ptr &subgraph); - - std::shared_ptr GetSubgraph(const std::string &name) const; - std::vector> GetAllSubgraphs() const; - void SetAllSubgraphs(const std::vector> &subgraphs); - - // obsolete - std::shared_ptr AddSubGraph(std::shared_ptr sub_graph); - // obsolete - graphStatus RemoveSubGraph(const std::shared_ptr &sub_graph); - - /// - /// @brief Update input-mapping - /// @param [in] input_mapping : index_of_cur_graph_node_input -> index_of_new_graph_node_input - /// @return graphStatus - /// - graphStatus UpdateInputMapping(const std::map &input_mapping); - - /// - /// @brief Update output-mapping - /// @param [in] output_mapping : index_of_cur_graph_node_output -> index_of_new_graph_node_output - /// @return graphStatus - /// - graphStatus UpdateOutputMapping(const std::map &output_mapping); - - void TopologicalSorting(std::function comp); - graphStatus TopologicalSorting(); - bool IsValid() const; - void InValid(); - void Dump() const; - - void Swap(ComputeGraph &graph); - - graphStatus IsolateNode(const NodePtr &node); - graphStatus Verify(); - graphStatus InferShape(); - graphStatus InferOriginFormat(); - graphStatus InferShapeInNeed(); - graphStatus InsertEventNodes(); - graphStatus InsertGraphEvents(); - bool operator==(const ComputeGraph &r_compute_graph) const; - ComputeGraph& operator=(ge::ComputeGraph compute_graph); - - /*lint +e504*/ - const std::map, std::vector> &GetShareParamLayer() const; - - void SetShareParamLayer(const std::map, std::vector> params_share_map); - - void SetInputsOrder(const std::vector &inputs_order); - - void SetGraphOutNodes(std::map> out_nodes_map); - - void AppendGraphOutNodes(std::map> out_nodes_map); - - shared_ptr GetParentGraph(); - void SetParentGraph(const shared_ptr &parent); - shared_ptr GetParentNode(); - void SetParentNode(const shared_ptr &parent); - - const std::map> &GetGraphOutNodes() const; - void SetOrigGraph(ComputeGraphPtr orig_graph); - - ComputeGraphPtr GetOrigGraph(void); - void SetOutputSize(uint32_t size); - uint32_t GetOutputSize() const; - void SetInputSize(uint32_t size); - uint32_t GetInputSize() const; - - // false: known shape true: unknow shape - bool GetGraphUnknownFlag() const; - void SetGraphUnknownFlag(bool flag); - - /// - /// Set is need train iteration. - /// If set true, it means this graph need to be run iteration some - /// times(according variant "npu_runconfig/iterations_per_loop"). - /// @param need_iteration is need iteration - /// - void SetNeedIteration(bool need_iteration); - - void SetUserDefOutput(const std::string &output_name); - - const std::string GetOutput(); - - /// - /// Get is need train iteration. 
- /// @return is need iteration - /// - bool GetNeedIteration() const; - - void SetGraphOpName(const std::map &op_name_map); - const std::map &GetGraphOpName() const; - - const std::map &GetAllNodesInfo() const; - - void SetAllNodesInfo(const std::map &nodes); - - void SetGraphOutNodesInfo(std::vector> &out_nodes_info); - void AppendGraphOutNodesInfo(std::vector> &out_nodes_info); - const std::vector> &GetGraphOutNodesInfo() const; - - void SetGraphTargetNodesInfo(const std::vector &target_nodes_info); - const std::vector &GetGraphTargetNodesInfo() const; - - void SetSessionID(uint64_t session_id); - uint64_t GetSessionID() const; - - void SetGraphID(uint32_t graph_id); - uint32_t GetGraphID() const; - - void SaveDataFormat(ge::Format data_format); - ge::Format GetDataFormat() const; - bool IsSummaryGraph() const; - void SetSummaryFlag(bool is_summary_graph); - - /// nodes like : (a) <--- (c) ---> (b) - /// node a and b have only one parent node c, and a is connected to c firstly - /// topo order of DFS is `c, b, a` with `dfs_reverse=false` as default - /// in same case, user could get `c, a, b` with `dfs_reverse=true` - graphStatus TopologicalSortingGraph(bool dfs_reverse = false); - /** - * Move Send Event nodes after it`s control node - * Move Recv Event nodes before it`s control node - */ - graphStatus ReorderEventNodes(); - - protected: - ProtoAttrMap &MutableAttrMap() override; - ConstProtoAttrMap &GetAttrMap() const override; - - private: - graphStatus DFSTopologicalSorting(std::vector &node_vec, std::map &map_in_edge_num, - std::vector &stack, bool reverse); - graphStatus BFSTopologicalSorting(std::vector &node_vec, std::map &map_in_edge_num, - std::deque &stack); - graphStatus CollectBreadthOutNode(const NodePtr &node, std::map &map_in_edge_num, - std::map &breadth_node_map); - - graphStatus SortNodes(std::vector &stack, std::map &mapInEdgeNum); - Vistor AllGraphNodes(std::vector &subgraphs) const; - Vistor GetAllNodes(const NodeFilter &node_filter, const GraphFilter &graph_filter) const; - size_t GetInEdgeSize(const NodePtr &node); - size_t GetOutEdgeSize(const NodePtr &node); - graphStatus RemoveExtraOutEdge(const NodePtr &node); - bool GraphMembersAreEqual(const ComputeGraph &r_graph) const; - bool GraphAttrsAreEqual(const ComputeGraph &r_graph) const; - bool VectorInputNodePtrIsEqual(const std::vector &r_node_ptr_vector, - const std::vector &l_node_ptr_vector) const; - - void SetNodesOwner(); - /** - * To improve preformace of list.size(), we should keep counter on nodes_.size() - * Use follow function to add/erase node from nodes_ - */ - void EraseFromNodeList(const std::list::iterator position); - - void InsertToNodeList(const std::list::iterator position, const NodePtr &node); - - void PushBackToNodeList(const NodePtr &node); - - void EmplaceBackToNodeList(const NodePtr &node); - - void ClearNodeList(); - - friend class ModelSerializeImp; - friend class GraphDebugImp; - friend class OnnxUtils; - friend class TuningUtils; - - ComputeGraphImplPtr impl_; -}; -} // namespace ge -#endif // INC_GRAPH_COMPUTE_GRAPH_H_ diff --git a/inc/metadef/inc/graph/debug/ge_attr_define.h b/inc/metadef/inc/graph/debug/ge_attr_define.h deleted file mode 100644 index bdea32fcf..000000000 --- a/inc/metadef/inc/graph/debug/ge_attr_define.h +++ /dev/null @@ -1,1303 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_DEBUG_GE_ATTR_DEFINE_H_ -#define INC_GRAPH_DEBUG_GE_ATTR_DEFINE_H_ - -/*lint -e618*/ -#include -#include "graph/types.h" -#include "graph/compiler_options.h" - -namespace ge { -// Public attribute -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_FILE_PATH; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FORCE_UNKNOWN_SHAPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IS_UNKNOWN_SHAPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DYNAMIC_SHAPE_PARTITIONED; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_UNKNOWN_SHAPE_TYPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NAME; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TYPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WORKSPACE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WEIGHT_NAME; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IS_QUANTIZE_FACTOR; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ALPHA; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BETA; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PADMODE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PADMODES; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MODE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FILTER; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BIAS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BIAS_TERM; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_HAS_BIAS_VALUE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PAD; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PADS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PAD_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PAD_MODE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SCALE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WINDOWS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_GLOBAL_POOLING; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CEIL_MODE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STRIDE_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_RELUMODE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STRIDE_SIZE; - 
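[Editorial illustration, not part of the removed header] These exported names are only attribute keys; GE code normally pairs them with the attribute helpers declared elsewhere in metadef (graph/utils/attr_utils.h, which is not part of this patch hunk) to store and read values on an OpDesc. A minimal sketch under that assumption; treating ATTR_NAME_AXIS as an int64 attribute is likewise an assumption made purely for illustration:

#include "graph/debug/ge_attr_define.h"
#include "graph/op_desc.h"
#include "graph/utils/attr_utils.h"  // assumed helper header, not shown in this patch

// Illustrative only: write and read back an attribute keyed by an exported constant.
void TagAxis(const ge::OpDescPtr &op_desc) {
  // ATTR_NAME_AXIS carrying an int64 value is an assumption for this sketch.
  (void)ge::AttrUtils::SetInt(op_desc, ge::ATTR_NAME_AXIS, 1);
  int64_t axis = 0;
  if (ge::AttrUtils::GetInt(op_desc, ge::ATTR_NAME_AXIS, axis)) {
    // axis now holds the stored value (1).
  }
}
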
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_RELU_FLAG; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ALGO; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FORMAT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STORAGE_FORMAT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ORIGIN_FORMAT_IS_SET; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STORAGE_SHAPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FILTER_FORMAT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_LRN_K; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_LRN_NORM_REGION; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_LRN_LOCAL_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_LRN_ALPHA; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_LRN_BETA; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BROADCAST; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TIDX; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TPADDINGS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_IMG_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_IMG_W; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NET_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NET_W; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TMULTIPLES; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MULTIPLES; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_T; - -extern GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY const std::string ATTR_NAME_N; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TSHAPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NAN_OPT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AIPP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string NEW_AIPP_CONV_OP; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AIPP_INPUTS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AIPP_OUTPUTS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INPUT_DIMS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DYNAMIC_AIPP_INPUT_DIMS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DATA_RELATED_AIPP_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DATA_AIPP_DATA_NAME_MAP; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_GRAPH_HAS_BEEN_ADDED; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SESSION_GRAPH_ID; -GE_FUNC_DEV_VISIBILITY 
GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PARENT_GRAPH_NAME; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MULTISHAPE_BATCHLIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MULTISHAPE_BATCHLIST_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_BATCH_NUM; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INPUT_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_FORMAT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FRAMEWORK_NODE_DEF; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FRAMEWORK_OP_DEF; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FRAMEWORK_FWK_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FRAMEWORK_FUNC_DEF; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INPUT_TENSOR_DESC; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_TENSOR_DESC; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INFERRED_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PRED_PERMUTE_DELETED; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IGNORE_PRED_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WEIGHTS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BROACAST_REAL_DIM_CNT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DIM_ALIGN; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STREAM_CYCLE_EVENT_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_RTSWITCH_RECV_EVENT_ID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AUTOMIC_ADD_START; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AUTOMIC_ADD_MEM_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WEIGHTS_DATA; - - - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SESSION_GRAPH_ID; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_BATCH_NUM; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AUTOMIC_ADD_START; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AUTOMIC_ADD_MEM_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STREAM_LABEL; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_RTS_LABEL_NODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STREAM_CYCLE_EVENT_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DYNAMIC_OUTPUT_DIMS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INPUT_ORIGIN_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ROOT_GRAPH_ID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string 
ATTR_NAME_ROOT_GRAPH_NAME; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NODE_CONNECT_INPUT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NODE_CONNECT_OUTPUT; - -// to be deleted -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_TO_BE_DELETED; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PERMUTE_RESHAPE_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PERMUTE_RESHAPE_FUSION_CONV_PROPOSAL; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PERMUTE_RESHAPE_FUSION_CONV_DECODEBBOX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PERMUTE_RESHAPE_FUSION_BOX_TYPE_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_MBOX_LOC_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_MBOX_CONF_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_MBOX_OCR_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_MBOX_FUSION_BOX_TYPE_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_RESHAPE_SLICE_CONCAT_FUSION; - - - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REFINEDET_MBOX_LOC_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REFINEDET_MBOX_CONF_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REFINEDET_MBOX_FUSION_BOX_TYPE_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REFINEDET_RESHAPE_SLICE_CONCAT_FUSION; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REFINEDET_PRIOR_BOX_ATTR_VARIANCE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REFINEDET_PRIOR_BOX_ATTR_VARIANCE_NUM; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIORBOX_CONCAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string NEED_INFER; - -// _Arg -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INDEX; -// _RetVal -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RETVAL_ATTR_NAME_INDEX; -// Data -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DATA_ATTR_NAME_DATA_TYPE; - -// Send -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SEND_ATTR_EVENT_ID; - -// Recv -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RECV_ATTR_EVENT_ID; - -// Convolution -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COEF; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STRIDE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STRIDES; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DILATION; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DILATIONS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_MODE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_ALGO; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_GROUP; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_PAD_MODE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY 
extern const std::string CONV_ATTR_NAME_PAD; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_STRIDE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_DILATION; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_NUM_OUTPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_KERNEL; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_FILTER; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_BIAS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_RELU_FLAG; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_ADJ; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_TARGET_SHAPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_BEFORE_PAD; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_ATTR_NAME_HAS_BIAS; - -// Pooling -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_NAN_OPT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_PAD_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_GLOBAL_POOLING; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_WINDOW; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_PAD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_STRIDE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_CEIL_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_DATA_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_BEFORE_PAD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOLING_ATTR_NAME_ALGO; - -// Eltwise -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ELTWISE_ATTR_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ELTWISE_ATTR_COEFF; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ELTWISE_ATTR_WEIGHT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ELTWISE_ATTR_RELU_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ELTWISE_ATTR_ALPHA; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ELTWISE_ATTR_BETA; - -// BatchNorm -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_EPSILON; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_USE_GLOBAL_STATS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_MOVING_AVERAGE_FRACTION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_ESTIMATED_MEAN; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_ESTIMATED_VARIANCE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_SCALE; -GE_FUNC_DEV_VISIBILITY 
GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_BIAS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_DATA_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_IS_TRAINING; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCHNORM_ATTR_IS_TRAINING_FUSION; - -// Huberloss -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HUBER_LOSS_ATTR_DELTA; - -// SSDRealDivTileMul -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_REAL_DIV_TILE_MUL_ATTR_TILE_PARA; - -// SSDSumMulRealDivMean -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_SUM_MUL_REALDIV_MEAN_ATTR_REDUCTION_INDICES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_SUM_MUL_REALDIV_MEAN_ATTR_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_SUM_MUL_REALDIV_MEAN_ATTR_MEAN_PARA; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_SUM_MUL_REALDIV_MEAN_ATTR_HAS_SUM; -/// ConcatFive2Four -/// ConcatFour2Five -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_CLASS_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_FEATURE_MAP_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TRANS_FOR_LOSS_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_BOX_TYPE_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_FEATURE_MAP_HIGH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_FEATURE_MAP_WIDTH; -// Scale -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SCALE_ATTR_SCALE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SCALE_ATTR_BIAS; - -// FullConnection -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FULL_CONNECTION_ATTR_FILTER; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FULL_CONNECTION_ATTR_BIAS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FULL_CONNECTION_ATTR_NUM_OUTPUT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FULL_CONNECTION_ATTR_RELU_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FULL_ATTR_NAME_ALGO; - -// SoftmaxOpParams -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SOFTMAX_ATTR_ALGO; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SOFTMAX_ATTR_MODE; - -// SparseSoftmaxCrossEntropy -extern GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY const std::string SPARSE_SOFTMAX_CROSS_ENTROPY_ATTR_MODE; -extern GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY const std::string SPARSE_SOFTMAX_CROSS_ENTROPY_IS_GRAD; -// Attr labelSmoothing -extern GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY const std::string SOFTMAX_CROSS_ENTROPY_LABELSMOOTHING; - -// ApplyMomentum -extern GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY const std::string APPLYMENTUM_ATTR_IS_GRAPH_FUSION; - -// Activation -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ACTIVATION_ATTR_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ACTIVATION_ATTR_COEF; - -// Concat -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONCAT_ATTR_NAME_AXIS; - -// Const -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const 
std::string CONST_ATTR_NAME_DATA_TRANSTYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONST_ATTR_NAME_OUTPUT_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONST_ATTR_NAME_OUTPUT_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONST_ATTR_NAME_INPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string L2_NORMALIZE_ATTR_EPS; - -// Roipooling -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIPOOLING_ATTR_NAME_POOLED_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIPOOLING_ATTR_NAME_POOLED_W; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIPOOLING_ATTR_NAME_SPATIAL_SCALE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIPOOLING_ATTR_NAME_RIO_POOLING_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIPOOLING_ATTR_NAME_POOLING_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIPOOLING_ATTR_NAME_SAMPLING_RATIO; - -// DetectionOutput -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_NUM_CLASSES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_OCR_NUM_CLASSES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_NMS_THRESHOLD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_TOP_K; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_CONFIDENCE_THRESHOLD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_IMG_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_IMG_W; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_BATCH_SIZE; -// Ssd DetectionOutput -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_ETA; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_SHARED_LOCATION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_BACKGROUND_LABEL_ID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_CODE_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_VARIANCE_ENCODED_IN_TARGET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_KEEP_TOP_K; - -// Refinedet DetectionOutput -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_OBJECTNESS_SCORE; - -// Yolo DetectionOutput -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_ClASSES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_BIASES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_RELATIVE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_OBJECTNESS_THRESHOLD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_CLASS_THRESHOLD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_POST_TOP_K; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string 
DETECTIONOUTPUT_ATTR_IOU_THRESHOLD_DECAY; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_COOR_SCALE_FACTOR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DETECTIONOUTPUT_ATTR_YOLO_VERSION; - -// DetectionPostprocess -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POSTPROCESS_ATTR_NAME_CLS_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POSTPROCESS_ATTR_NAME_CONF_THRESH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POSTPROCESS_ATTR_NAME_NMS_THRESH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POSTPROCESS_ATTR_POST_NMS_TOPN; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POSTPROCESS_ATTR_NAME_BBOX_REG_WEIGHT; - -// Spatialtransfrom -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SPTIALTF_ATTR_NAME_OUTPUT_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SPTIALTF_ATTR_NAME_OUTPUT_W; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SPTIALTF_ATTR_NAME_BORDER_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SPTIALTF_ATTR_NAME_AFFINE_TRANSFORM; - -// Proposal -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_FEAT_STRIDE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_BASE_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_MIN_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_RATIO; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_SCALE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_PRE_NMS_TOPN; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_POST_NMS_TOPN; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_NMS_THRESH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_NAME_TOP_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_IMG_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PROPOSAL_ATTR_IMG_W; -// Softmax -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SOFTMAX_ATTR_AXIS; - -// Permute -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PERMUTE_ATTR_ORDER; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PERMUTE_ATTR_PERM; - -// SSD Normalize -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSDNORMALIZE_ATTR_ACCROSS_SPATIAL; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSDNORMALIZE_ATTR_CHANNEL_SHARED; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSDNORMALIZE_ATTR_EPS; - -// Flatten -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FLATTEN_ATTR_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FLATTEN_ATTR_END_AXIS; - -// SsdPRIORBOX -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_FLIP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_CLIP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_IMG_H; 
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_IMG_W; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_STEP_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_STEP_W; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_MIN_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_MAX_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_MIN_SIZE_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_MAX_SIZE_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_ASPECT_RATIO; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_ASPECT_RATIO_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_VARIANCE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_PRIOR_BOX_ATTR_VARIANCE_NUM; - -// PRelu -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PRELU_ATTR_CHANNEL_SHARED; - -// Psroi pooling -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PSROIPOOLING_ATTR_SPATIAL_SCALE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PSROIPOOLING_ATTR_OUTPUT_DIM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PSROIPOOLING_ATTR_GROUP_SIZE; - -// Power -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POWER_ATTR_NAME_POWER; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POWER_ATTR_NAME_SCALE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POWER_ATTR_NAME_SHIFT; - -// Log -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LOG_ATTR_NAME_SCALE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LOG_ATTR_NAME_SHIFT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LOG_ATTR_NAME_BASE; -// Pack -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PACK_ATTR_NAME_NUM; - -// Dynamic stitch -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DYNAMIC_STITCH_ATTR_NAME_NUM; -// Unpack -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string UNPACK_ATTR_NAME_NUM; -// Gathernd -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string GATHERND_ATTR_NAME_TINDICES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string GATHERND_ATTR_NAME_TPARAMS; - -// Argmax -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ARGMAX_ATTR_NAME_TOPK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ARGMAX_ATTR_NAME_REDUCESIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ARGMAX_ATTR_NAME_REDUCESTRIDE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ARGMAX_ATTR_NAME_OUTMAX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ARGMAX_ATTR_NAME_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ARGMAX_ATTR_NAME_AXISTYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ARGMAX_ATTR_NAME_KEEPDIMS; - -// Upsample 
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string UPSAMPLE_ATTR_NAME_SCALE_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string UPSAMPLE_ATTR_NAME_SCALE_W; -// Relu -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NEGATIVE_SLOPE; - -// FreeSpaceExtract -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FREESPACEEXTRACT_ATTR_NAME_ORG_HEIGHT; - -// Split -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SPLIT_ATTR_NAME_SLICE_POINT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SPLIT_ATTR_NAME_SIZE_SPLIT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SPLIT_ATTR_NAME_NUM_SPLIT; - -// Tvm -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_MAGIC; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_BLOCKDIM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_METADATA; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_WORKSPACE_TYPE; - -// Ffts Tvm -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_THREAD_MAGIC; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_THREAD_BLOCKDIM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_THREAD_METADATA; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_THREAD_WORKSPACE_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TVM_ATTR_NAME_THREAD_N_BATCH_SPLIT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_THREAD_TBE_KERNEL_BUFFER; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_THREAD_TBE_KERNEL_NAME; - -// Squeeze -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SQUEEZE_ATTR_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SQUEEZE_ATTR_DIMS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SQUEEZE_OP_NAME; - -// Stride slice -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string STRIDE_SLICE_ATTR_BEGIN_MASK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string STRIDE_SLICE_ATTR_END_MASK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string STRIDE_SLICE_ATTR_ELLIPSIS_MASK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string STRIDE_SLICE_ATTR_NEW_AXIS_MASK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string STRIDE_SLICE_ATTR_SHRINK_AXIS_MASK; - -// Slice -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SLICE_ATTR_NAME_BEGINS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SLICE_ATTR_NAME_SIZES; - -// Roialign -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIALIGN_ATTR_SPATIAL_SCALE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIALIGN_ATTR_SAMPLING_RATIO; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIALIGN_ATTR_NAME_POOLED_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ROIALIGN_ATTR_NAME_POOLED_W; - -// Generate_rpn_proposal -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string GENERATE_RPN_PROPOSAL_ATTR_PRE_NMS_TOPK; -GE_FUNC_DEV_VISIBILITY 
GE_FUNC_HOST_VISIBILITY extern const std::string GENERATE_RPN_PROPOSAL_ATTR_POST_NMS_TOPK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string GENERATE_RPN_PROPOSAL_ATTR_RPN_MINI_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string - GENERATE_RPN_PROPOSAL_ATTR_RPN_PROPOSAL_NMS_THRESH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string - GENERATE_RPN_PROPOSAL_ATTR_RPN_PROPOSAL_FILTER_THRESH; -// Decode_bbox -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DECODE_BBOX_ATTR_DECODECLIP; - -// Cast -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CAST_ATTR_DSTT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CAST_ATTR_SRCT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CAST_ATTR_DST_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CAST_ATTR_TRUNCATE; - -// Fastrcnnn predications -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FASTRCNN_PREDICTIONS_ATTR_TOPK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FASTRCNN_PREDICTIONS_ATTR_SCORE_THRESHOLD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FASTRCNN_PREDICTIONS_ATTR_NMS_THRESHOLD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FASTRCNN_PREDICTIONS_ATTR_NUM_CLASSES; - -// REORG -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REORG_ATTR_STRIDE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REORG_ATTR_REVERSE; - -// MERGE -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MERGE_DEAD_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MERGE_PRENODE_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TO_BE_OUTPUT; - -// ENTER -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ENTER_ATTR_FRAME_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ENTER_ATTR_CONSTANT_FLAG; - -// Concatv2 -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONCAT_V2_ATTR_TIDX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONCAT_V2_ATTR_N; -// SUM -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SUM_ATTR_TIDX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SUM_ATTR_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SUM_ATTR_KEEP_DIMS; - -// ResizeBilinear -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_ALIGN_CORNERS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_HEIGHT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_WIDTH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_ZOOM_FACTOR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_SHRINK_FACTOR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_PAD_BEGIN; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_PAD_END; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_ALPHA; 
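[Editorial illustration, not part of the removed header] Most of these operator-specific attributes are optional, so readers typically probe for the key and fall back to a default when a node does not carry it. A small sketch under the same assumption that the ge::AttrUtils helpers are available; treating RESIZE_BILINEAR_ATTR_ALIGN_CORNERS as a bool attribute is an illustrative assumption:

#include "graph/debug/ge_attr_define.h"
#include "graph/op_desc.h"
#include "graph/utils/attr_utils.h"  // assumed helper header, not shown in this patch

// Illustrative only: read an optional flag, defaulting to false when it is absent.
bool AlignCorners(const ge::OpDescPtr &op_desc) {
  bool align_corners = false;
  if (ge::AttrUtils::HasAttr(op_desc, ge::RESIZE_BILINEAR_ATTR_ALIGN_CORNERS)) {
    (void)ge::AttrUtils::GetBool(op_desc, ge::RESIZE_BILINEAR_ATTR_ALIGN_CORNERS, align_corners);
  }
  return align_corners;
}
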
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESIZE_BILINEAR_ATTR_BETA; - -// RetinaNet -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RETINANET_FILTER_BACKGROUND_TRUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RETINANET_ANCHOR_FUSION; -// MatMul -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MATMUL_TRANSPOSE_X; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MATMUL_TRANSPOSE_W; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MATMUL_HAS_BIAS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MATMUL_ATTR_IS_TRAINING; - -// Flatten -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FLATTEN_START_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FLATTEN_END_AXIS; - -// Reshape -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESHAPE_ATTR_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESHAPE_ATTR_NUM_AXES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESHAPE_ATTR_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESHAPE_ATTR_SHAPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESHAPE_ATTR_ALPHA; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESHAPE_ATTR_BETA; - -// Frameoworkop -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string T_IN_DATATYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string T_OUT_DATATYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUT_N; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUT_C; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUT_H; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUT_W; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_PAD_DEPTH_CONV; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_PAD_CONV; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BEFORE_PAD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ANN_MEAN_KEEPDIMS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PAD_ATTR_PADDINGDS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PAD_ATTR_CONSTANT_VALUE; - -// ConvGradFilter -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_GRAD_FILTER_OUTPUT_SHAPE; -// ConvGradInput -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CONV_GRAD_INPUT_OUTPUT_SHAPE; - -// Rnn -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RNN_MODE_STATIC; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MUTI_RNN; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CELL_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string CNN_RNN; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_CELL; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string GRU_CELL; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RNN_HT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RNN_XT_HT; -GE_FUNC_DEV_VISIBILITY 
GE_FUNC_HOST_VISIBILITY extern const std::string RNN_BATCH_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_CELL_CLIP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_PROJ_CLIP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_ACTIVATE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_OUT_MAP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_OUT_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_STATE_OUT_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_TIME_MAJOR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSTM_IS_INPUT_PRE_PROCESS; - -// Upsample -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string UPSAMPLE_ATTR_NAME_SCALE; - -// PadV2 -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PADV2_ATTR_NAME_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PADV2_ATTR_NAME_PADS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PADV2_ATTR_NAME_T; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PADV2_ATTR_NAME_PAD_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PADV2_ATTR_NAME_CONST_VALUE; - -// MirrorPad -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MIRRORPAD_ATTR_NAME_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MIRRORPAD_ATTR_NAME_PADS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MIRRORPAD_ATTR_NAME_PAD_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MIRRORPAD_ATTR_NAME_CONST_VALUE; -// Filler -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FILLER_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FILLER_VALUE; - -// Shufflechannel -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SHUFFLE_CHANNEL_GROUP; - -// TopKV2 -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TOPKV2_ATTR_K; - -// Calibaration -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string STRIDE_H_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string STRIDE_W_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PAD_TOP_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PAD_BOTTOM_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PAD_RIGHT_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PAD_LEFT_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_ALGO_ATTR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SCALE_TYPE_ATTR; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IS_CONST; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_GROUP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DILATION_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_EPSILON; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_POOLING_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CLASS_NUM; -// Model 
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_TARGET_TYPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_STREAM_NUM; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_EVENT_NUM; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_HUGE_STREAM_LIST; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_LABEL_NUM; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_MEMORY_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_ZERO_COPY_MEMORY_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_OUT_NODES_NAME; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_WEIGHT_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_TASK_GEN_BASE_ADDR; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_TASK_GEN_WEIGHT_ADDR; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_SESSION_SCOPE_MEMORY_SIZE; - -// Public attribute -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IMPLY_TYPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BYTE_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_INFERENCE_ID; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_OPDEF; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IO_OP; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_SCOPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OPATTR; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SEQLEN_INDEX; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_X_INDEX; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CONT_INDEX; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_XSTATIC_INDEX; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TARGET_TYPE_MINI; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TARGET_TYPE_TINY; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string TARGET_TYPE_LITE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CONTINUOUS_INPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CONTINUOUS_INPUT_ALLOC; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CONTINUOUS_OUTPUT; - -// attr _input_mutable = true means node will modify its input in runtime -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MODIFY_INPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_REFERENCE; - -// Used for operators that do not generate task -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NOTASK; - -// Used for operators that output reuse input -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_REUSE_INPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const 
std::string ATTR_NAME_REUSE_INPUT_ON_DIM_INDEX; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NOPADDING_CONTINUOUS_INPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NOPADDING_CONTINUOUS_OUTPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ATOMIC_INDEX; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_TASK_GEN_VAR_ADDR; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CONTINUOUS_STREAM_LABEL; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_VAR_SIZE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_TASK_INDEX_OP_NAME; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_CORE_TYPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_ATC_VERSION; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_ATC_CMDLINE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_OPP_VERSION; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_SCALE_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_SCALE_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_SCALE_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_OFFSET_DATA_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_OFFSET_DATA_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_OFFSET_WEIGHT_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_OFFSET_WEIGHT_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_OFFSET_PAD_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string QUANTIZE_OFFSET_PAD_OFFSET; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_SCALE_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_SCALE_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_SCALE_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_OFFSET_DATA_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_OFFSET_DATA_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_OFFSET_WEIGHT_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_OFFSET_WEIGHT_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_OFFSET_PAD_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEQUANTIZE_OFFSET_PAD_OFFSET; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REQUANTIZE_SCALE_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REQUANTIZE_SCALE_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REQUANTIZE_SCALE_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REQUANTIZE_OFFSET_DATA_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REQUANTIZE_OFFSET_DATA_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern 
const std::string REQUANTIZE_OFFSET_WEIGHT_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REQUANTIZE_OFFSET_WEIGHT_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REQUANTIZE_OFFSET_PAD_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REQUANTIZE_OFFSET_PAD_OFFSET; - - - -// L2_normalize -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string L2_NORMALIZE_ATTR_AXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string L2_NORMALIZE_ATTR_EPS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOL_PARAMA_ATTR_WINDOW; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOL_PARAMA_ATTR_CEIL_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOL_PARAMA_ATTR_DATA_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOL_PARAMA_ATTR_GLOBAL_POOLING; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOL_PARAMA_ATTR_NAN_OP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string POOL_PARAMA_ATTR_PAD_MOD; -// HCOM -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_ROOT_RANK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_REDUCE_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_RANK_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_GROUP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_SR_TAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_SRC_RANK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_DEST_RANK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_SHAPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_DATA_TYPE; - -// Log time stamp -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LOG_TIME_STAMP_LOGID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LOG_TIME_STAMP_NOTIFY; -// SpaceToDepth/DepthToSpace -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BLOCK_SIZE; - -// SparseSoftmaxCrossEntropyWithLogits -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SPARSE_SOFT_MAX_ATTR_TLABLES; - -// MaxPoolGradWithArgmax -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MAX_POOL_GRAD_OUTPUT_SHAPE; - -// AvgPoolGrad -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string AVG_POOL_GRAD_OUTPUT_SHAPE; - -// Varible -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_FRACTALZ_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_4D_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_5D_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_DATA_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_VAR_IN_NAME; -GE_FUNC_DEV_VISIBILITY 
GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_VAR_IN_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_VAR_OUT_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_SHAPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HALF_VAR_NAME_END; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_CONTAINER; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_SHARED_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_DTYPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_SRC_VAR_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_VAR_IS_SAVE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_VAR_IS_RESTORE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_VAR_IS_BROADCAST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REF_VAR_SRC_VAR_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REF_VAR_PRE_PEER_OUT_INDEX; - -// Assign -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ASSIGN_VALIDATE_SHAPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ASSIGN_VAR_NAME; - -// Inplace support -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string INPLACE_SUPPORT_INPUT_INDEX; - -// ShapeN -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SHAPEN_ATTR_N; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SHAPEN_ATTR_IN_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SHAPEN_ATTR_OUT_TYPE; - -// Space2bacth batch2space -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCH_SPACE_ATTR_BLOCK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string BATCH_SPACE_ATTR_PADDING; -// Depth_to_space space_to_depth -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DEPTH_SPACE_ATTR_BLOCK_SIZE; -// FakeQuantWithMinMaxVars -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FakeQuantWithMinMaxVars_ATTR_MAX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FakeQuantWithMinMaxVars_ATTR_MIN; -// Mobilenet_ssd_conv_fusion -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_BOXPREDICTOR_BOXES_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_BOXPREDICTOR_SCORES_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SSD_BOXPREDICTOR_FUSION_BOX_TYPE_NUM; - -// Lsh project -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string LSH_PROJ_TYPE; - -// Control flow -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ITERATORS_PER_LOOP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TRUE_BRANCH_STREAM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FLOW_CTRL_NODE_FLAG; - -// GatherV2 attr def -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string GATHERV2_ATTR_NAME_TAXIS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string GATHERV2_ATTR_NAME_TINDICES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string GATHERV2_ATTR_NAME_TPARAMS; - -// 
Reshape attr def -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESHAPE_ATTR_NAME_INPUT_DESC; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string RESHAPE_ATTR_NAME_OUTPUT_DESC; - -// Axis attr def -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_AXIS_ORG_OP; -// The node link with SparseSoftmaxCrossEntropyWithLogits -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_LINK_WITH_SPARE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NET_OUTPUT_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NET_OUTPUT_DATATYPE; -// For constant folding -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NO_NEED_CONSTANT_FOLDING; - -// For AscendWeightQuant+Enter -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FINAL_CONST_NODE; - -// Used for mark the active label list to find stream of activated node -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ACTIVE_LABEL_LIST; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_VISIT_DISTANCE; - -// Multi batch -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PRED_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BATCH_NUM; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BATCH_LABEL; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMBINED_BATCH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_USER_DESIGNEATE_SHAPE_ORDER; - -// Control flow -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STREAM_SWITCH_COND; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ACTIVE_STREAM_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCHN_PRED_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SUBGRAPH_FIRST_ACTIVE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMBINED_DYNAMIC_DIMS; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCH_BRANCH_NODE_LABEL; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCH_DATA_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ORIG_NODE_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CYCLIC_DEPENDENCE_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STREAM_SWITCH_TYPE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NEXT_ITERATION; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NEED_INFER_AGAIN; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MERGE_INPUT_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CONTROL_FLOW_GROUP; - -// Function Op -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string 
ATTR_NAME_PARENT_NODE_INDEX; - -// Used for mark the active node is for loop, type:bool -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IS_LOOP_ACTIVE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MEMORY_TYPE_INPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MEMORY_TYPE_OUTPUT; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MEMORY_TYPE_WORKSPACE; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MEMORY_TYPE_RANGE; - -// Atomic addr clean attrs -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_INPUT_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_OUTPUT_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_IS_FUSION_NODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_IS_ATOMIC_NODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_TVM_MAGIC; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_TVM_METADATA; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_TBE_KERNEL_NAME; - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string EXT_ATTR_ATOMIC_WORKSPACE_INFO; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string EXT_ATTR_ATOMIC_WORKSPACE_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string EXT_ATTR_ATOMIC_TBE_KERNEL; -// Used for find variable session_id -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string MODEL_ATTR_SESSION_ID; - -// Source/dst format for Op FormatTransfer -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FORMAT_TRANSFER_SRC_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string FORMAT_TRANSFER_DST_FORMAT; - -// For compile op by ge call -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NEED_COMPILE; - -// For mutil-batch -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_INSERT_BY_MBATCH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MBATCH_ORIGIN_INPUT_DIMS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DYNAMIC_TYPE; - -// For inserted op -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_INSERTED_BY_GE; - -// For compress weight -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMPRESS_WEIGHT; - -// For data dump -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_DUMP_IS_MULTIOP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_DUMP_SUB_SPLITER_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_DUMP_GROUP_OP_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_DUMP_ORIGIN_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_DUMP_ORIGIN_OUTPUT_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_DUMP_ORIGIN_FORMAT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const 
std::string ATTR_NAME_DATA_DUMP_ORIGIN_DATA_TYPE; - -// used for lX fusion -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_L1_FUSION_GROUP_ID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_L1_FUSION_GROUP_KEY; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_GROUP_KEY; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_VIRTUAL_OP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_GROUP_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_L1_FUSION_EXTEND_PTR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_GET_TENSOR_ACTUAL_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_OFFSET_FOR_L1_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCH_FOR_L1_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_N_BATCH_SPILT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NO_TASK_AND_DUMP_NEEDED; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DATA_DUMP_REF; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_OFFSET_FOR_BUFFER_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_L2_FUSION_GROUP_ID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCH_FOR_L2_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_INPUT_L1_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_INPUT_L1_ADDR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_INPUT_L1_VALID_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ENGINE_NAME_FOR_LX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_KKERNEL_LIB_NAME_FOR_LX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NEED_LX_FUSION; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OPTIMIZE_GROUP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_COMPILE_STRATEGY; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TBE_KERNEL_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TBE_KERNEL_BUFFER; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DATA_SLICE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NEED_RECOVER_ATTR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OFF_SUPERKERNEL_ATTR; - -// merge subgraph with output anchor map -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_ORIGIN_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_ORIGIN_OUTPUT_INDEX; - -// read var offset -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INNER_OFFSET; - -// used for memory allocate -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INPUT_MEM_TYPE_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string 
ATTR_NAME_OUTPUT_MEM_TYPE_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WORKSPACE_TYPE_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TENSOR_MEM_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MODEL_P2P_MEMORY_SIZE; - -// for unregistered op -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_UNREGST_OPPATH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_UNREGST_ATTRLIST; - -// op overflow dump -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_OP_DEBUG_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_OP_DEBUG_MODE; - -// op dynamic input -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DYNAMIC_INPUT_START; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DYNAMIC_INPUT_END; - -// functional ops attr -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IF_THEN_BRANCH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IF_ELSE_BRANCH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WHILE_COND; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WHILE_BODY; - -// used for label switch -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_LABEL_SWITCH_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_LABEL_SWITCH_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SUBGRAPH_END_NODE; - -// Variable -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REF_VAR_SRC_VAR_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_SRC_VAR_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REF_VAR_PRE_PEER_OUT_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_VAR_IS_BROADCAST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string VAR_ATTR_VAR_IS_RESTORE; - -// HCOM -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_ROOT_RANK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_REDUCE_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_RANK_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_SHAPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string HCOM_ATTR_DATA_TYPE; - - -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INPUT_DATATYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_DATATYPE; -// used for LX tiling -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_L1_SPACE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSION_TYPE_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_VALID_INPUT_SHAPE_LIST_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_VALID_OUTPUT_SHAPE_LIST_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SLICE_INPUT_OFFSET_LIST_LIST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern 
const std::string ATTR_NAME_SLICE_OUTPUT_OFFSET_LIST_LIST; - -// Dynamic stitch -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string DYNAMIC_STITCH_ATTR_NAME_NUM; - -// Used for support Horovod -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_INTER_EVENT_IDENTIFY; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_HOROVOD_ATTR_REDUCE_TYPE; -// for gradient group -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_HCCL_FUSED_GROUP; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_HCCL_FUSED_FLAG; - -// for parallel group -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PARALLEL_GROUP; - -// dynamic shape attrs -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DYNAMIC_SHAPE_FIXED_ADDR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DYNAMIC_SHAPE_FIXED_ADDR_INDEX; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DYNAMIC_SHAPE_SINGLE_AICPU; - -// atc user def dtype&format -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ATC_USER_DEFINE_DATATYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ATC_USER_DEFINE_FORMAT; - -// atc user def dtype&format -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ATC_USER_DEFINE_OUTPUT_NODES; - -// for fusion op plugin -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSIONOP_ORIGINAL_TYPE; - -// graph partition for aicpu -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PLD_FRONT_NODE_ENGINE_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_END_REAR_NODE_ENGINE_NAME; - -// input and output memory type -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_VARIABLE_PLACEMENT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_INPUT_MEMORY_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_OUTPUT_MEMORY_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SPECIAL_OUTPUT_SIZE; - -// stage -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_STAGE_LEVEL; - -// input_output_offset -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ZERO_COPY_BASIC_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ZERO_COPY_RELATIVE_OFFSET; - -// mark node cannot be deleted -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CANNOT_BE_DELETED; - -// The processing mode of INF and NAN during floating-point number calculation. 
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_FP_CEILING_MODE; -// count of data from getnext_sink -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_GETNEXT_SINK_DATA_COUNT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_GETNEXT_SINK_SHAPE_INFO; - -// getnext_sink marked on NetOutput -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_GETNEXT_SINK_DYNMAIC; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ALL_GEARS_INFO; - -// Calculate the operator output memory -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MEMORY_SIZE_CALC_TYPE; -// Indicates which operators keep the precision unchanged -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_KEEP_DTYPE; - -// profiling task mark on fp bp -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INSERT_FP_PROFILILNG_TASK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INSERT_BP_PROFILILNG_TASK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INSERT_END_PROFILILNG_TASK; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INSERT_PROFILILNG_TASK_LOG_ID; -// padding dimmension type -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_RESHAPE_INFER_TYPE; - -// mark single op scene -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_SINGLE_OP_SCENE; - -// for fe judge whether trans/cast op is inserted -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FORMAT_CONTINUOUS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_REFRESH_CONTINUOUS_FLAG; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FORMAT_AGNOSTIC; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FORMAT_AGNOSTIC_EXCEPT_OUTPUT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FORMAT_AGNOSTIC_EXCEPT_INPUT; - -// for ffts/ffts_plus -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FFTS_SUB_GRAPH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_THREAD_SCOPE_ID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_THREAD_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FFTS_PLUS_SUB_GRAPH; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMPOSITE_ENGINE_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMPOSITE_ENGINE_KERNEL_LIB_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CUBE_VECTOR_CORE_TYPE; - -// mark fuzz build scene -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUZZ_BUILD; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PLACEMENT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_VALUE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_VALUE_RANGE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BUILD_MODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUZZ_BUILD_RES_ATTRS; 
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUZZ_INPUTS_SUPPORTED_ATTRS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUZZ_OUTPUTS_SUPPORTED_ATTRS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUZZ_IS_HIGH_PERFORMANCE_ATTRS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IS_ORIGINAL_INPUT; - -// buffer pool allocator -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BUFFER_POOL_ID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BUFFER_POOL_SIZE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_EVENT_MULTIPLEXING; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BUFFER_POOL_NODE_SIZE_AND_OFFSET; - -// session scope memory -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_WORKSPACE_MEMORY_NO_REUSE_SCOPE; - -// for blocking op -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IS_BLOCKING_OP; - -// for op specified engine -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_SPECIFIED_ENGINE_NAME; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_SPECIFIED_KERNEL_LIB_NAME; - -// for pipeline partition -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PIPELINE_PARTITIONED; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_PIPELINE; - -// model deploy scheduler(mds) -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_GRADIENT_NODE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TRAINABLE_VAR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FISSION_FACTOR; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DEPLOY_INFO; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_CUT_INFO; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DEPLOY_DEVICE_TYPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DEPLOY_DEVICE_ID; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DEPLOY_GRAPH_INPUTS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_DEPLOY_NEED_RETURN_RESULT; - -// for qos -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_QOS_SERVICE_LABEL; - -// for constant folding, mark potential const -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_POTENTIAL_CONST; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_POTENTIAL_WEIGHT; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_POTENTIAL_WEIGHT_INDICES; - -// name of network output tensor -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ORIGIN_OUTPUT_TENSOR_NAME; - -// for scope op to record the input and output information of the original graph node -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ORIGIN_GRAPH_NODE_INPUTS; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ORIGIN_GRAPH_NODE_OUTPUTS; - -// for operator resource list(e.g. 
queues, channels) -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_RESOURCE_LIST; - -// for no tiling -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_TILING_INLINE_ENGINE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_EXPORT_SHAPE_ENGINE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_MAX_SHAPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TENSOR_MAX_SHAPE; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_NO_TILING; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TENSOR_DESC_MEM_OFFSET; -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_TENSOR_NO_TILING_MEM_TYPE; -} // namespace ge - -/*lint +e618*/ -#endif // INC_GRAPH_DEBUG_GE_ATTR_DEFINE_H_ diff --git a/inc/metadef/inc/graph/def_types.h b/inc/metadef/inc/graph/def_types.h deleted file mode 100644 index ea3854f4d..000000000 --- a/inc/metadef/inc/graph/def_types.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_DEF_TYPES_H_ -#define INC_GRAPH_DEF_TYPES_H_ - -#include -#include -#include -namespace ge { -// used for data type of DT_STRING -struct StringHead { - uint64_t addr; // the addr of string - uint64_t len; // the length of string -}; - -inline uint64_t PtrToValue(const void *const ptr) { - return static_cast(reinterpret_cast(ptr)); -} - -inline void *ValueToPtr(const uint64_t value) { - return reinterpret_cast(static_cast(value)); -} - -template -inline TO *PtrToPtr(TI *const ptr) { - return reinterpret_cast(ptr); -} - -template -inline const TO *PtrToPtr(const TI *const ptr) { - return reinterpret_cast(ptr); -} - -template -inline T *PtrAdd(T *const ptr, const size_t max_buf_len, const size_t idx) { - if ((ptr != nullptr) && (idx < max_buf_len)) { - return reinterpret_cast(ptr + idx); - } - return nullptr; -} -} // namespace ge - -#endif // INC_GRAPH_DEF_TYPES_H_ \ No newline at end of file diff --git a/inc/metadef/inc/graph/detail/any_map.h b/inc/metadef/inc/graph/detail/any_map.h deleted file mode 100644 index 830815275..000000000 --- a/inc/metadef/inc/graph/detail/any_map.h +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_GRAPH_DETAIL_ANY_MAP_H_ -#define INC_GRAPH_DETAIL_ANY_MAP_H_ - -#include -#include -#include -#include - -#include "graph/compiler_options.h" - -namespace ge { -using std::shared_ptr; -using std::string; - -class TypeID { - public: - template - static TypeID Of() { - return TypeID(METADEF_FUNCTION_IDENTIFIER); - } - - ~TypeID() = default; - - bool operator==(const TypeID &__arg) const { return type_ == __arg.type_; } - - private: - explicit TypeID(std::string type) : type_(std::move(type)) {} - - std::string type_; -}; - -class AnyMap { - public: - template - bool Set(const std::string &name, const DT &val); - - template - bool Get(const std::string &name, T &retValue) const; - - bool Has(const std::string &name) const { return anyValues_.find(name) != anyValues_.end(); } - - void Swap(AnyMap &other) { - anyValues_.swap(other.anyValues_); - } - - void Names(std::set &names) const { - for (const auto &item : anyValues_) { - names.emplace(item.first); - } - } - - private: - class Placeholder { - public: - virtual ~Placeholder() = default; - - virtual const TypeID &GetTypeInfo() const = 0; - }; - - template - class Holder : public Placeholder { - public: - explicit Holder(const VT &value) : value_(value) {} - - ~Holder() override = default; - - const TypeID &GetTypeInfo() const override { - static const TypeID typeId = TypeID::Of(); - return typeId; - } - - const VT value_; - }; - - std::map> anyValues_; -}; - -template -bool AnyMap::Set(const std::string &name, const DT &val) { - auto it = anyValues_.find(name); - - std::shared_ptr> tmp; - try { - tmp = std::make_shared>(val); - } catch (std::bad_alloc &e) { - tmp = nullptr; - } catch (...) { - tmp = nullptr; - } - - if (it == anyValues_.end()) { - (void)anyValues_.emplace(name, tmp); - } else { - if (it->second && it->second->GetTypeInfo() == TypeID::Of
()) { - it->second = tmp; - } else { - return false; - } - } - return true; -} - -template -bool AnyMap::Get(const std::string &name, T &retValue) const { - auto it = anyValues_.find(name); - if (it != anyValues_.end() && it->second && it->second->GetTypeInfo() == TypeID::Of()) { - auto retPtr = std::static_pointer_cast>(it->second); - retValue = retPtr->value_; - return true; - } - return false; -} -} // namespace ge -#endif // INC_GRAPH_DETAIL_ANY_MAP_H_ diff --git a/inc/metadef/inc/graph/detail/attributes_holder.h b/inc/metadef/inc/graph/detail/attributes_holder.h deleted file mode 100644 index 5f7a541ee..000000000 --- a/inc/metadef/inc/graph/detail/attributes_holder.h +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_DETAIL_ATTRIBUTES_HOLDER_H_ -#define INC_GRAPH_DETAIL_ATTRIBUTES_HOLDER_H_ - -#include -#include -#include -#include -#include -#include -#include -#include "graph/detail/any_map.h" -#include "graph/ge_error_codes.h" -#include "graph/types.h" -#include "graph/attr_store.h" - -namespace google { -namespace protobuf { -class Message; -template -class Map; -} // namespace protobuf -} // namespace google - -namespace ge { -using std::string; - -namespace proto { -class AttrDef; -class TensorDef; -class TensorDescriptor; -class ShapeDef; -class NamedAttrs; -class ModelDef; -class OpDef; -class GraphDef; -} // namespace proto - -using ProtoAttrMap = AttrStore; -using ConstProtoAttrMap = const AttrStore; -using ProtoMsgOwner = std::shared_ptr<::google::protobuf::Message>; - -template -class GeIrProtoHelper { - public: - GeIrProtoHelper(const ProtoMsgOwner &protoOwner, ProtoType *protoMsg) - : protoOwner_(protoOwner), protoMsg_(protoMsg) {} - - GeIrProtoHelper() { - protoOwner_ = std::shared_ptr<::google::protobuf::Message>(nullptr); - protoMsg_ = nullptr; - } - virtual ~GeIrProtoHelper() = default; - - template - GeIrProtoHelper(const GeIrProtoHelper &other) { - protoOwner_ = other.protoOwner_; - protoMsg_ = other.protoMsg_; - } - template - GeIrProtoHelper &operator=(const GeIrProtoHelper &other) { - protoOwner_ = other.protoOnwer_; - protoMsg_ = other.protoMsg_; - return *this; - } - void InitDefault(); - template - bool operator==(const GeIrProtoHelper &other) const { - return protoOwner_ == other.protoOwner_ && protoMsg_ == other.protoMsg_; - } - - inline const ProtoMsgOwner &GetProtoOwner() const { - return protoOwner_; - } - inline ProtoType *GetProtoMsg() const { - return protoMsg_; - } - void CopyValueFrom(const GeIrProtoHelper &other) { - if (other.protoMsg_ != nullptr && protoMsg_ != nullptr) { - *protoMsg_ = *other.protoMsg_; - } - } - void MoveValueFrom(GeIrProtoHelper &&other) { - if (other.protoMsg_ != nullptr && protoMsg_ != nullptr) { - *protoMsg_ = std::move(*other.protoMsg_); - } - } - - void Swap(GeIrProtoHelper &other) { - protoOwner_.swap(other.protoOwner_); - - ProtoType *temp = protoMsg_; - protoMsg_ = other.protoMsg_; - other.protoMsg_ = 
temp; - } - - // protoMsg_ is part of protoOwner_, they have the same runtime - ProtoMsgOwner protoOwner_ = nullptr; - ProtoType *protoMsg_ = nullptr; - friend class GeIrProtoHelper::value, typename std::remove_const::type, const ProtoType>::type>; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY AttrHolder { - public: - AttrHolder() = default; - virtual ~AttrHolder() = default; - - graphStatus SetAttr(const std::string &name, const AnyValue &value); - - graphStatus TrySetAttr(const std::string &name, const AnyValue &value); - - graphStatus GetAttr(const std::string &name, AnyValue &value) const; - - bool HasAttr(const std::string &name) const; - - graphStatus DelAttr(const std::string &name); - - void CopyAttrsFrom(const AttrHolder &holder); - - void CopyFrom(const AttrHolder &holder); - - void Swap(AttrHolder &holder) { - requiredAttrs_.swap(holder.requiredAttrs_); - extAttrs_.Swap(holder.extAttrs_); - } - - template - bool SetExtAttr(const std::string &name, const T &value) { - return extAttrs_.Set(name, value); - } - template - T TryGetExtAttr(const std::string &name, T defaultValue) const { - T ret(defaultValue); - (void) extAttrs_.Get(name, ret); - return ret; - } - - protected: - graphStatus AddRequiredAttr(const std::string &name); - const std::set GetAllAttrNames() const; - const std::map GetAllAttrs() const; - - virtual ProtoAttrMap &MutableAttrMap() = 0; - virtual ConstProtoAttrMap &GetAttrMap() const = 0; - - friend class ModelSerializeImp; - friend class AttrUtils; - friend class AttrUtilsHelper; - - std::vector requiredAttrs_; - - private: - AnyMap extAttrs_; -}; -} // namespace ge -#endif // INC_GRAPH_DETAIL_ATTRIBUTES_HOLDER_H_ diff --git a/inc/metadef/inc/graph/detail/model_serialize_imp.h b/inc/metadef/inc/graph/detail/model_serialize_imp.h deleted file mode 100644 index 347cea7d6..000000000 --- a/inc/metadef/inc/graph/detail/model_serialize_imp.h +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_GRAPH_DETAIL_MODEL_SERIALIZE_IMP_H_ -#define INC_GRAPH_DETAIL_MODEL_SERIALIZE_IMP_H_ - -#include -#include -#include -#include -#include "graph/anchor.h" -#include "graph/detail/attributes_holder.h" -#include "graph/ge_tensor.h" -#include "graph/graph.h" -#include "graph/node.h" -#include "graph/model.h" - -namespace ge { -using ComputeGraphPtr = std::shared_ptr; - -struct NodeNameGraphReq { - std::string node_name; - int32_t index; - ComputeGraphPtr graph; -}; - -struct NodeNameNodeReq { - std::string src_node_name; - int32_t src_out_index; - NodePtr dst_node; - int32_t dst_in_index; - std::string dst_node_name; -}; - -class ModelSerializeImp { - public: - bool SerializeModel(const Model &model, proto::ModelDef *modeProto, bool is_dump = false); - - bool SerializeGraph(const ConstComputeGraphPtr &graph, proto::GraphDef *graphProto, bool is_dump = false); - - bool SerializeEdge(const NodePtr &node, proto::OpDef *opDefProto); - - bool SerializeOpDesc(const ConstOpDescPtr &node, proto::OpDef *opDefProto, bool is_dump = false); - - bool SerializeNode(const NodePtr &node, proto::OpDef *opDefProto, bool is_dump = false); - - bool SerializeTensor(const ConstGeTensorPtr &tensor, proto::TensorDef *tensorProto); - - bool UnserializeModel(Model &model, proto::ModelDef &modeProto); - - bool UnserializeGraphWithoutEdge(ComputeGraphPtr &graph, proto::GraphDef &graphProto); - - bool UnserializeGraph(ComputeGraphPtr &graph, proto::GraphDef &graphProto); - - bool HandleNodeNameRef(); - - bool UnserializeOpDesc(OpDescPtr &opDesc, proto::OpDef &opDefProto); - void AttrDefToOpDesc(OpDescPtr &op_desc, std::vector &key_in, std::vector &key_out, - std::vector &value_in, std::vector &value_out, std::vector &opt); - void OpDescToAttrDef(const ConstOpDescPtr &op_desc, proto::OpDef *op_def_proto, bool is_dump = false); - - bool UnserializeNode(ComputeGraphPtr &graph, proto::OpDef &opDefProto); - - bool UnserializeTensor(GeTensorPtr &tensor, proto::TensorDef &tensorProto); - - bool ParseNodeIndex(const std::string &node_index, std::string &nodeName, int32_t &index); - - void SetProtobufOwner(const ProtoMsgOwner &bufferProtobufOnwer) { protobuf_owner_ = bufferProtobufOnwer; } - - static bool SerializeAllAttrsFromAnyMap( - const std::map &, google::protobuf::Map *); - static bool DeserializeAllAttrsToAttrHolder( - const google::protobuf::Map &, AttrHolder *); - - private: - bool RebuildOwnership(ComputeGraphPtr &compute_graph, std::map &subgraphs); - - void FixOpDefSubgraphInstanceName(const ConstOpDescPtr &op_desc); - - void ExtractMetaDataAttr(proto::OpDef &op_def_proto, std::vector &opt_input, - std::vector &key_in, std::vector &value_in, - std::vector &key_out, std::vector &value_out) const; - - std::vector graph_input_node_names_; - std::vector graph_output_node_names_; - std::vector node_input_node_names_; - std::map node_map_; - ProtoMsgOwner protobuf_owner_; -}; -} // namespace ge - -#endif // INC_GRAPH_DETAIL_MODEL_SERIALIZE_IMP_H_ diff --git a/inc/metadef/inc/graph/ge_attr_value.h b/inc/metadef/inc/graph/ge_attr_value.h deleted file mode 100644 index 269e5bee1..000000000 --- a/inc/metadef/inc/graph/ge_attr_value.h +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_GE_ATTR_VALUE_H_ -#define INC_GRAPH_GE_ATTR_VALUE_H_ - -#include -#include -#include -#include -#include -#include -#include "graph/buffer.h" -#include "detail/attributes_holder.h" -#include "graph/ge_error_codes.h" -#include "graph/ge_tensor.h" -#include "graph/any_value.h" - -using std::map; -using std::string; -using std::vector; - -namespace ge { -class GeTensor; - -using GeTensorPtr = std::shared_ptr; -using ConstGeTensorPtr = std::shared_ptr; - -class ComputeGraph; -using ComputeGraphPtr = std::shared_ptr; -using ConstComputeGraphPtr = std::shared_ptr; - -class GeTensorDesc; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY NamedAttrs : public AttrHolder { - public: - NamedAttrs() = default; - virtual ~NamedAttrs() = default; - void SetName(const std::string &name); - std::string GetName() const; - AnyValue GetItem(const std::string &key) const; - - protected: - ProtoAttrMap &MutableAttrMap() override; - ConstProtoAttrMap &GetAttrMap() const override; - - private: - AttrStore attrs_; - std::string name_; - - friend class GeAttrValueImp; -}; - -class AttrValueImpl { - public: - AttrValueImpl() = default; - ~AttrValueImpl() = default; - - AnyValue geAttrValue_; -}; -} // namespace ge -#endif // INC_GRAPH_GE_ATTR_VALUE_H_ diff --git a/inc/metadef/inc/graph/ge_context.h b/inc/metadef/inc/graph/ge_context.h deleted file mode 100644 index 45e7a1a7d..000000000 --- a/inc/metadef/inc/graph/ge_context.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef INC_GRAPH_GE_CONTEXT_H_ -#define INC_GRAPH_GE_CONTEXT_H_ - -#include -#include "graph/ge_error_codes.h" - -namespace ge { -class GEContext { - public: - graphStatus GetOption(const std::string &key, std::string &option); - bool GetHostExecFlag(); - uint64_t SessionId(); - uint64_t ContextId(); - uint64_t WorkStreamId(); - uint32_t DeviceId(); - uint64_t TraceId(); - void Init(); - void SetSessionId(uint64_t session_id); - void SetContextId(uint64_t context_id); - void SetWorkStreamId(uint64_t work_stream_id); - void SetCtxDeviceId(uint32_t device_id); - private: - thread_local static uint64_t session_id_; - thread_local static uint64_t context_id_; - // now use pid/tid or sessionid/graphid concat, set in external api - thread_local static uint64_t work_stream_id_; - uint32_t device_id_ = 0; - uint64_t trace_id_ = 0; -}; // class GEContext - -/// Get context -/// @return -GEContext &GetContext(); -} // namespace ge - -#endif // INC_GRAPH_GE_CONTEXT_H_ diff --git a/inc/metadef/inc/graph/ge_global_options.h b/inc/metadef/inc/graph/ge_global_options.h deleted file mode 100644 index 0abf391ef..000000000 --- a/inc/metadef/inc/graph/ge_global_options.h +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef INC_GRAPH_GE_GLOBAL_OPTIONS_H_ -#define INC_GRAPH_GE_GLOBAL_OPTIONS_H_ - -#include -#include - -namespace ge { -std::map &GetMutableGlobalOptions(); -} -#endif // INC_GRAPH_GE_GLOBAL_OPTIONS_H_ diff --git a/inc/metadef/inc/graph/ge_local_context.h b/inc/metadef/inc/graph/ge_local_context.h deleted file mode 100644 index e1149f49e..000000000 --- a/inc/metadef/inc/graph/ge_local_context.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
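For reference, a minimal usage sketch of the GEContext accessors declared in ge_context.h above; the option key and function name are placeholders, and the include path follows this tree's inc/graph layout.

#include <string>
#include "graph/ge_context.h"

// Query an option and a couple of ids from the global GE context.
void DumpGeContextInfo() {
  ge::GEContext &ctx = ge::GetContext();
  std::string soc_version;
  // "ge.socVersion" is only a placeholder key; GetOption returns GRAPH_SUCCESS when the key is set.
  if (ctx.GetOption("ge.socVersion", soc_version) == ge::GRAPH_SUCCESS) {
    // use soc_version here
  }
  const uint64_t session_id = ctx.SessionId();
  const uint32_t device_id = ctx.DeviceId();
  (void)session_id;
  (void)device_id;
}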
- */ -#ifndef INC_GRAPH_GE_LOCAL_CONTEXT_H_ -#define INC_GRAPH_GE_LOCAL_CONTEXT_H_ - -#include -#include -#include -#include "graph/ge_error_codes.h" - -using std::string; -using std::map; - -namespace ge { -class GEThreadLocalContext { - public: - graphStatus GetOption(const std::string &key, std::string &option); - void SetGraphOption(std::map options_map); - void SetSessionOption(std::map options_map); - void SetGlobalOption(std::map options_map); - - std::map GetAllGraphOptions() const; - std::map GetAllSessionOptions() const; - std::map GetAllGlobalOptions() const; - std::map GetAllOptions() const; - - private: - std::map graph_options_; - std::map session_options_; - std::map global_options_; -}; // class GEThreadLocalContext - -GEThreadLocalContext &GetThreadLocalContext(); -} // namespace ge -#endif // INC_GRAPH_GE_LOCAL_CONTEXT_H_ diff --git a/inc/metadef/inc/graph/ge_tensor.h b/inc/metadef/inc/graph/ge_tensor.h deleted file mode 100644 index b04b12cee..000000000 --- a/inc/metadef/inc/graph/ge_tensor.h +++ /dev/null @@ -1,316 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_GE_TENSOR_H_ -#define INC_GRAPH_GE_TENSOR_H_ - -#include -#include -#include -#include -#include "detail/attributes_holder.h" -#include "graph/buffer.h" -#include "graph/aligned_ptr.h" -#include "graph/ge_error_codes.h" -#include "graph/types.h" -#include "any_value.h" - -namespace ge { -class GeShapeImpl; -using GeShapeImplPtr = std::shared_ptr; - -class TensorDataImpl; -using TensorDataImplPtr = std::shared_ptr; - -class GeTensorDescImpl; -using GeTensorDescImplPtr = std::shared_ptr; - -class GeTensorImpl; -using GeTensorImplPtr = std::shared_ptr; - -class GeTensorSerializeUtils; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeShape { - public: - GeShape(); - ~GeShape(); - explicit GeShape(std::vector s); - - size_t GetDimNum() const; - void SetDimNum(size_t dim_num); - void AppendDim(int64_t dim_size); - bool IsUnknownDimNum() const; - void SetIsUnknownDimNum(); - // If the idx is invalid, return 0 - int64_t GetDim(size_t idx) const; - graphStatus SetDim(size_t idx, int64_t value); - std::vector GetDims() const; - - int64_t GetShapeSize() const; - std::string ToString() const; - - /// - /// @brief Check is unknown shape - /// @return bool - /// - bool IsUnknownShape() const; - - /// - /// @brief Check is a scalar - /// @return bool - /// - bool IsScalar() const; - - GeShape(const GeShape &other); - GeShape(GeShape &&other); - GeShape &operator=(const GeShape &other); - GeShape &operator=(GeShape &&other); - bool operator==(const GeShape &other) const; - - private: - GeShapeImplPtr impl_; - friend class GeTensorDesc; - friend class GeTensorDescImpl; - friend class GeTensorSerializeUtils; - friend class ModelSerialize; - // Create from proto obj - GeShape(const ProtoMsgOwner &protoOnwer, proto::ShapeDef *protoMsg); -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeTensorDesc : public AttrHolder { 
- friend class TensorUtils; - friend class ModelSerialize; - - public: - GeTensorDesc(); - explicit GeTensorDesc(const GeShape &shape, Format format = FORMAT_ND, DataType dt = DT_FLOAT); - GeTensorDesc(const GeTensorDesc &desc); - GeTensorDesc(GeTensorDesc &&desc); - - ~GeTensorDesc() override; - bool operator==(const GeTensorDesc &r_ge_tensor_desc) const; - - void Update(const GeShape &shape, Format format = FORMAT_ND, DataType dt = DT_FLOAT); - - const GeShape &GetShape() const; - GeShape &MutableShape(); - void SetShape(const GeShape &shape); - void SetShape(GeShape &&shape); - - // set shape with -2, it stand for unknown shape - void SetUnknownDimNumShape(); - // for unknown shape - graphStatus SetValueRange(const std::vector> &range); - graphStatus GetValueRange(std::vector> &range) const; - graphStatus SetShapeRange(const std::vector> &range); - graphStatus SetOriginShapeRange(const std::vector> &range); - graphStatus GetShapeRange(std::vector> &range) const; - graphStatus GetOriginShapeRange(std::vector> &range) const; - - const GeShape &GetOriginShape() const; - // 该方法暂时不实现,因为一旦开放后,当前代码里判断OriginShape是否设置的逻辑就失效了 - GeShape &MutableOriginShape(); - - void SetOriginShape(const GeShape &originShape); - bool IsOriginShapeInitialized() const; - - Format GetFormat() const; - void SetFormat(Format format); - - Format GetOriginFormat() const; - void SetOriginFormat(Format originFormat); - - void SetName(const std::string &name); - const std::string GetName() const; - - DataType GetDataType() const; - void SetDataType(DataType dt); - - DataType GetOriginDataType() const; - void SetOriginDataType(DataType originDataType); - - std::vector GetRefPortIndex() const; - void SetRefPortByIndex(const std::vector &index); - - Placement GetPlacement() const; - void SetPlacement(Placement placement); - - GeTensorDesc Clone() const; - GeTensorDesc &operator=(const GeTensorDesc &desc); - GeTensorDesc &operator=(GeTensorDesc &&desc); - - graphStatus IsValid() const; - - using AttrHolder::DelAttr; - using AttrHolder::GetAllAttrs; - using AttrHolder::GetAttr; - using AttrHolder::HasAttr; - using AttrHolder::SetAttr; - - protected: - ProtoAttrMap &MutableAttrMap() override; - ConstProtoAttrMap &GetAttrMap() const override; - - private: - bool GeTensorDescAttrsAreEqual(const GeTensorDesc &r_ge_tensor_desc) const; - - // Create from proto obj - GeTensorDesc(const ProtoMsgOwner &protoOnwer, proto::TensorDescriptor *protoMsg); - friend class GeTensor; - friend class GeTensorImpl; - friend class GeAttrValueImp; - friend class ModelSerializeImp; - friend class GeTensorSerializeUtils; - friend class OnnxUtils; - - GeTensorDescImplPtr impl_; - - void RefTo(const GeTensorDesc &tensorDesc); - GeShape &ShapeReference() const; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TensorData { - public: - TensorData(); - ~TensorData(); - - graphStatus SetData(std::vector &&data); - graphStatus SetData(const std::vector &data); - graphStatus SetData(const Buffer &data); - graphStatus SetData(const TensorData &data); - graphStatus SetData(const uint8_t *data, size_t size); - graphStatus SetData(uint8_t *data, size_t size, const AlignedPtr::Deleter &delete_fuc); /*lint !e148*/ - - const uint8_t *MallocAlignedPtr(size_t size); - - const std::uint8_t *data() const; - std::uint8_t *data(); - std::size_t size() const; - void clear(); - uint8_t operator[](size_t index) const; - - std::size_t GetSize() const; - const std::uint8_t *GetData() const; - std::uint8_t *GetData(); - - const std::shared_ptr &GetAlignedPtr(); - - // 
share data, share tensor_descriptor/aligned_ptr - // replace using TensorUtils::ShareTensorData(const TensorData &from, TensorData &to) - TensorData &operator=(const TensorData &other); - // share data share tensor_descriptor/aligned_ptr - // replace using TensorUtils::CreateShareTensorData(const TensorData &other) - TensorData(const TensorData &other); - // zero copy SetData - // replace using TensorUtils::ShareAlignedPtr(std::shared_ptr ptr, size_t size, TensorData &to) - void SetData(std::shared_ptr aligned_ptr, size_t size); - private: - friend class GeTensor; - friend class GeTensorImpl; - friend class GeAttrValueImp; - friend class ModelSerializeImp; - friend class GeTensorSerializeUtils; - friend class TensorUtils; - TensorDataImplPtr impl_; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeTensor { - public: - GeTensor(); - GeTensor(GeTensor &&other) noexcept; - explicit GeTensor(const GeTensorDesc &tensorDesc); - explicit GeTensor(const GeTensorDesc &tensorDesc, const std::vector &data); - explicit GeTensor(const GeTensorDesc &tensorDesc, const Buffer &data); - explicit GeTensor(const GeTensorDesc &tensorDesc, const uint8_t *data, size_t size); - explicit GeTensor(GeTensorDesc &&tensorDesc, std::vector &&data); - explicit GeTensor(const GeTensorDesc &tensorDesc, size_t size); - ~GeTensor(); - - const GeTensorDesc &GetTensorDesc() const; - GeTensorDesc &MutableTensorDesc(); - void SetTensorDesc(const GeTensorDesc &tensorDesc); - - std::shared_ptr GetAlignedPtr(); - - const TensorData &GetData() const; - TensorData &MutableData(); - - graphStatus SetData(std::vector &&data); - graphStatus SetData(const std::vector &data); - graphStatus SetData(const Buffer &data); - graphStatus SetData(const uint8_t *data, size_t size); - graphStatus SetData(const TensorData &data); - graphStatus SetData(uint8_t *data, size_t size, const AlignedPtr::Deleter &delete_fuc); - - void ClearData(); - GeTensor Clone() const; - - // zero copy SetData - // replace using TensorUtils::ShareAlignedPtr - void SetData(std::shared_ptr aligned_ptr, size_t size); - // zero copy construction, share aligned_ptr, do not share tensor_desc - // replace using TensorUtils::CreateShareTensor - GeTensor(const GeTensorDesc &td, std::shared_ptr aligned_ptr, size_t size); - // Share tensor_data, tensor_desc - // replace using TensorUtils::CreateShareTensor - GeTensor(const GeTensor &other); - // Share tensor_data, tensor_desc - // replace using TensorUtils::ShareTensor - GeTensor &operator=(const GeTensor &other); - GeTensor &operator=(GeTensor &&other); - - private: - friend class GeAttrValueImp; - friend class ModelSerializeImp; - friend class GeTensorSerializeUtils; - friend class OnnxUtils; - friend class TensorData; - friend class TensorUtils; - friend class TensorAdapter; - // Create from proto obj - GeTensor(const ProtoMsgOwner &protoOnwer, proto::TensorDef *protoMsg); - explicit GeTensor(GeTensorImplPtr impl); - void BuildAlignerPtrWithProtoData(); - GeTensorImplPtr impl_; - GeTensorDesc &DescReference() const; -}; - -class GeTensorSerializeUtils { -public: - static void GeShapeAsProto(const GeShape &shape, proto::ShapeDef *proto); - static void GeTensorDescAsProto(const GeTensorDescImpl &desc, proto::TensorDescriptor *proto); - static void GeTensorDescAsProto(const GeTensorDesc &desc, proto::TensorDescriptor *proto); - static void GeTensorAsProto(const GeTensorImpl &tensor, proto::TensorDef *proto); - static void GeTensorAsProto(const GeTensor &tensor, proto::TensorDef *proto); - - static void 
AssembleGeShapeFromProto(const proto::ShapeDef *proto, GeShape &shape); - static void AssembleGeTensorDescFromProto(const proto::TensorDescriptor *proto, GeTensorDesc &desc); - static void AssembleGeTensorFromProto(const proto::TensorDef *proto, GeTensor &tensor); - - // normalize the input TensorDescriptor,A metadata information maybe stored in different fields of TensorDescriptor, - // This function needs to prioritize and determine the final metadata information used. - // After standardization, the direct member field on TensorDescriptor is always valid - static void NormalizeGeTensorDescProto(proto::TensorDescriptor *proto); - static void GetShapeFromDescProto(const proto::TensorDescriptor *proto, GeShape &shape); - static void GetOriginShapeFromDescProto(const proto::TensorDescriptor *proto, GeShape &shape); - static void GetDtypeFromDescProto(const proto::TensorDescriptor *proto, DataType &dtype); - static void GetOriginDtypeFromDescProto(const proto::TensorDescriptor *proto, DataType &dtype); - static void GetFormatFromDescProto(const proto::TensorDescriptor *proto, Format &format); - static void GetOriginFormatFromDescProto(const proto::TensorDescriptor *proto, Format &format); -}; - -} // namespace ge -#endif // INC_GRAPH_GE_TENSOR_H_ diff --git a/inc/metadef/inc/graph/graph_util.h b/inc/metadef/inc/graph/graph_util.h deleted file mode 100644 index c39ecbc15..000000000 --- a/inc/metadef/inc/graph/graph_util.h +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_GRAPH_UTIL_H_ -#define INC_GRAPH_GRAPH_UTIL_H_ - -#include - -#include "proto/om.pb.h" - -namespace ge { -using AttrDefMap = ::google::protobuf::Map<::std::string, ::domi::AttrDef>; -bool HasOpAttr(const OpDef *opdef, std::string attr_name); -bool GetOpAttr(const std::string &key, int32_t *value, const OpDef *opdef); - -static const char OP_TYPE_DATA[] = "Data"; -static const char OP_TYPE_INPUT[] = "Input"; -static const char ATTR_KEY_INPUT_FORMAT[] = "input_format"; -static const char ATTR_KEY_OUTPUT_FORMAT[] = "output_format"; -static const char OP_TYPE_ANN_DATA[] = "AnnData"; -} // namespace ge - -#if !defined(__ANDROID__) && !defined(ANDROID) -#include "toolchain/slog.h" -const char levelStr[4][8] = {"ERROR", "WARN", "INFO", "DEBUG"}; -#else -#include -#include -const char levelStr[8][8] = {"EMERG", "ALERT", "CRIT", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG"}; -#endif - -#ifdef _MSC_VER -#define FUNC_NAME __FUNCTION__ -#else -#define FUNC_NAME __PRETTY_FUNCTION__ -#endif - -#if !defined(__ANDROID__) && !defined(ANDROID) -#define D_GRAPH_LOGI(MOD_NAME, fmt, ...) \ - dlog_info(FMK, "%s:%s:%d:" #fmt, __FUNCTION__, __FILE__, __LINE__, ##__VA_ARGS__) -#define D_GRAPH_LOGW(MOD_NAME, fmt, ...) \ - dlog_warn(FMK, "%s:%s:%d:" #fmt, __FUNCTION__, __FILE__, __LINE__, ##__VA_ARGS__) -#define D_GRAPH_LOGE(MOD_NAME, fmt, ...) 
\ - dlog_error(FMK, "%s:%s:%d:" #fmt, __FUNCTION__, __FILE__, __LINE__, ##__VA_ARGS__) -#else -#define D_GRAPH_LOG(level, format, ...) \ - do { \ - { \ - fprintf(stdout, "[%s] [%s] [%s] [%s] [%s:%d] " format "\n", "", "GRAPH", levelStr[level], __FUNCTION__, \ - __FILE__, __LINE__, ##__VA_ARGS__); \ - syslog(level, "%s %s:%d] [%s] %s " format "\n", "", __FILE__, __LINE__, "OPTIMIZER", __FUNCTION__, \ - ##__VA_ARGS__); \ - } \ - } while (0) -#define D_GRAPH_LOGI(MOD_NAME, fmt, ...) D_GRAPH_LOG(ANDROID_LOG_INFO, #fmt, ##__VA_ARGS__) -#define D_GRAPH_LOGW(MOD_NAME, fmt, ...) D_GRAPH_LOG(ANDROID_LOG_INFO, #fmt, ##__VA_ARGS__) -#define D_GRAPH_LOGE(MOD_NAME, fmt, ...) D_GRAPH_LOG(ANDROID_LOG_INFO, #fmt, ##__VA_ARGS__) -#endif - -#if !defined(__ANDROID__) && !defined(ANDROID) -#define GRAPH_LOGI(...) D_GRAPH_LOGI(GRAPH_MOD_NAME, __VA_ARGS__) -#define GRAPH_LOGW(...) D_GRAPH_LOGW(GRAPH_MOD_NAME, __VA_ARGS__) -#define GRAPH_LOGE(...) D_GRAPH_LOGE(GRAPH_MOD_NAME, __VA_ARGS__) -#else - -#define GRAPH_LOG(level, format, ...) \ - do { \ - { \ - fprintf(stdout, "[%s] [%s] [%s] [%s] [%s:%d] " format "\n", "", "GRAPH", levelStr[level], __FUNCTION__, \ - __FILE__, __LINE__, ##__VA_ARGS__); \ - syslog(level, "%s %s:%d] [%s] %s " format "\n", "", __FILE__, __LINE__, "OPTIMIZER", __FUNCTION__, \ - ##__VA_ARGS__); \ - } \ - } while (0) -#define GRAPH_LOGI(fmt, ...) GRAPH_LOG(ANDROID_LOG_INFO, #fmt, ##__VA_ARGS__) -#define GRAPH_LOGW(fmt, ...) GRAPH_LOG(ANDROID_LOG_INFO, #fmt, ##__VA_ARGS__) -#define GRAPH_LOGE(fmt, ...) GRAPH_LOG(ANDROID_LOG_INFO, #fmt, ##__VA_ARGS__) -#endif - -#define GRAPH_CHK_STATUS_RET_NOLOG(expr) \ - do { \ - const domi::graphStatus _status = (expr); \ - if (_status != domi::GRAPH_SUCCESS) { \ - return _status; \ - } \ - } while (0) - -#define GRAPH_CHK_BOOL_RET_STATUS(expr, _status, ...) \ - do { \ - bool b = (expr); \ - if (!b) { \ - GRAPH_LOGE(__VA_ARGS__); \ - return _status; \ - } \ - } while (0) - -#define GRAPH_CHK_BOOL_EXEC_NOLOG(expr, exec_expr) \ - { \ - bool b = (expr); \ - if (!b) { \ - exec_expr; \ - } \ - }; - -#define GRAPH_IF_BOOL_EXEC(expr, exec_expr) \ - { \ - if (expr) { \ - exec_expr; \ - } \ - } - -#define GRAPH_RETURN_WITH_LOG_IF_ERROR(expr, ...) \ - do { \ - const ::domi::graphStatus _status = (expr); \ - if (_status) { \ - GRAPH_LOGE(__VA_ARGS__); \ - return _status; \ - } \ - } while (0) - -#endif // INC_GRAPH_GRAPH_UTIL_H_ diff --git a/inc/metadef/inc/graph/model.h b/inc/metadef/inc/graph/model.h deleted file mode 100644 index 0599d9794..000000000 --- a/inc/metadef/inc/graph/model.h +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
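A short sketch of how the status-checking macros in graph_util.h above were typically used; DoStep is a hypothetical helper, and the sketch assumes the domi::graphStatus codes that the macros themselves reference.

#include "graph/graph_util.h"

domi::graphStatus DoStep();  // hypothetical helper returning a domi status

domi::graphStatus CheckAndRun(bool input_ok, domi::graphStatus fail_status) {
  // Logs through GRAPH_LOGE and returns fail_status when the condition is false.
  GRAPH_CHK_BOOL_RET_STATUS(input_ok, fail_status, "input check failed");
  // Propagates a non-success status from DoStep() without logging.
  GRAPH_CHK_STATUS_RET_NOLOG(DoStep());
  return domi::GRAPH_SUCCESS;
}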
- */ - -#ifndef INC_GRAPH_MODEL_H_ -#define INC_GRAPH_MODEL_H_ - -#include -#include -#include -#include -#include "detail/attributes_holder.h" -#include "graph/ge_attr_value.h" -#include "graph/graph.h" - -namespace ge { -using std::map; -using std::string; -using std::vector; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Model : public AttrHolder { - public: - Model(); - - ~Model() = default; - - Model(const std::string &name, const std::string &custom_version); - - std::string GetName() const; - void SetName(const std::string &name); - - uint32_t GetVersion() const; - - void SetVersion(uint32_t version) { version_ = version; } - - std::string GetPlatformVersion() const; - - void SetPlatformVersion(std::string version) { platform_version_ = version; } - - const ComputeGraphPtr GetGraph() const; - - void SetGraph(const ComputeGraphPtr &graph); - - void SetAttr(const ProtoAttrMap &attrs); - - using AttrHolder::GetAllAttrNames; - using AttrHolder::GetAllAttrs; - using AttrHolder::GetAttr; - using AttrHolder::HasAttr; - using AttrHolder::SetAttr; - - graphStatus Save(Buffer &buffer, bool is_dump = false) const; - - graphStatus SaveToFile(const string& file_name) const; - // Model will be rewrite - static graphStatus Load(const uint8_t *data, size_t len, Model &model); - graphStatus Load(ge::proto::ModelDef &model_def); - graphStatus LoadFromFile(const string& file_name); - - bool IsValid() const; - - protected: - ConstProtoAttrMap &GetAttrMap() const override; - ProtoAttrMap &MutableAttrMap() override; - - private: - void Init(); - AttrStore attrs_; - friend class ModelSerializeImp; - friend class GraphDebugImp; - friend class OnnxUtils; - friend class ModelHelper; - friend class ModelBuilder; - std::string name_; - uint32_t version_; - std::string platform_version_{""}; - Graph graph_; -}; -} // namespace ge -using ModelPtr = std::shared_ptr; - -#endif // INC_GRAPH_MODEL_H_ diff --git a/inc/metadef/inc/graph/model_serialize.h b/inc/metadef/inc/graph/model_serialize.h deleted file mode 100644 index 7b6757150..000000000 --- a/inc/metadef/inc/graph/model_serialize.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
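A minimal sketch of the Model save/load round trip declared in model.h above; the model name, version string, and file name are placeholders, and the graph is assumed to have been built elsewhere.

#include "graph/model.h"

ge::graphStatus SaveAndReload(const ge::ComputeGraphPtr &graph) {
  ge::Model model("sample_model", "custom_version_1.0");  // placeholder name and version
  model.SetGraph(graph);
  ge::graphStatus ret = model.SaveToFile("sample_model.om");  // placeholder file name
  if (ret != ge::GRAPH_SUCCESS) {
    return ret;
  }
  ge::Model reloaded;
  return reloaded.LoadFromFile("sample_model.om");
}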
- */ - -#ifndef INC_GRAPH_MODEL_SERIALIZE_H_ -#define INC_GRAPH_MODEL_SERIALIZE_H_ - -#include -#include -#include "graph/buffer.h" -#include "graph/compute_graph.h" -#include "graph/model.h" - -namespace ge { -class ModelSerialize { - public: - Buffer SerializeModel(const Model &model, bool is_dump = false); - - Model UnserializeModel(const uint8_t *data, size_t len); - Model UnserializeModel(ge::proto::ModelDef &model_def); - - bool UnserializeModel(const uint8_t *data, size_t len, Model &model); - bool UnserializeModel(ge::proto::ModelDef &model_def, Model &model); - - Buffer SerializeGraph(const ComputeGraphPtr &graph); - - ComputeGraphPtr UnserializeGraph(const uint8_t *data, size_t len); - - Buffer SerializeOpDesc(const ConstOpDescPtr &opDesc); - OpDescPtr UnserializeOpDesc(const uint8_t *data, size_t len); - - size_t GetSerializeModelSize(const Model &model); - - private: - friend class ModelSerializeImp; - friend class GraphDebugImp; -}; -} // namespace ge -#endif // INC_GRAPH_MODEL_SERIALIZE_H_ diff --git a/inc/metadef/inc/graph/node.h b/inc/metadef/inc/graph/node.h deleted file mode 100644 index 7c749cb2a..000000000 --- a/inc/metadef/inc/graph/node.h +++ /dev/null @@ -1,195 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
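A sketch of an in-memory round trip through the ModelSerialize interface declared above, assuming ge::Buffer exposes GetData()/GetSize() as declared in graph/buffer.h.

#include "graph/model_serialize.h"

bool RoundTripModel(const ge::Model &model, ge::Model &restored) {
  ge::ModelSerialize serializer;
  ge::Buffer buffer = serializer.SerializeModel(model);
  if (buffer.GetSize() == 0) {
    return false;  // serialization failed or produced an empty buffer
  }
  return serializer.UnserializeModel(buffer.GetData(), buffer.GetSize(), restored);
}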
- */ - -#ifndef INC_GRAPH_NODE_H_ -#define INC_GRAPH_NODE_H_ - -#include -#include -#include -#include -#include -#include -#include "graph/ge_attr_value.h" -#include "utils/attr_utils.h" - -#include "graph/op_desc.h" -#include "graph/range_vistor.h" - -namespace ge { -class ComputeGraph; - -using ComputeGraphPtr = std::shared_ptr; - -class Node; - -using NodePtr = std::shared_ptr; -using ConstNodePtr = std::shared_ptr; -using NodeRef = std::weak_ptr; - -class Anchor; - -using AnchorPtr = std::shared_ptr; - -class InDataAnchor; - -using InDataAnchorPtr = std::shared_ptr; - -class OutDataAnchor; - -using OutDataAnchorPtr = std::shared_ptr; - -class ControlAnchor; - -using ControlAnchorPtr = std::shared_ptr; - -class InControlAnchor; - -using InControlAnchorPtr = std::shared_ptr; - -class OutControlAnchor; - -using OutControlAnchorPtr = std::shared_ptr; - -using OpDescPtr = std::shared_ptr; - -using ConstNode = const Node; - -using NodeToOutAnchor = std::pair; - -typedef std::vector> kFusionDataFlowVec_t; - -// Node is a component of ComputeGraph -class Node : public std::enable_shared_from_this { - friend class ComputeGraph; - friend class ComputeGraphImpl; - friend class ModelSerializeImp; - - public: - class NodeImpl; - using NodeImplPtr = std::shared_ptr; - template - using Vistor = RangeVistor>; - ~Node(); - Node(const Node &) = delete; - Node &operator=(const Node &) = delete; - bool operator==(const Node &r_node) const; - - protected: - Node(); - Node(const OpDescPtr &op, const ComputeGraphPtr &ownerGraph); - - public: - graphStatus Init(); - - std::string GetName() const; - std::string GetType() const; - - ComputeGraphPtr GetOwnerComputeGraph() const; - graphStatus SetOwnerComputeGraph(const ComputeGraphPtr &graph); - graphStatus ClearOwnerGraph(const ComputeGraphPtr &graph); - - Vistor GetAllInDataAnchors() const; - Vistor GetAllOutDataAnchors() const; - uint32_t GetAllInDataAnchorsSize() const; - uint32_t GetAllOutDataAnchorsSize() const; - Vistor GetAllOutAnchors() const; - Vistor GetAllInAnchors() const; - InDataAnchorPtr GetInDataAnchor(int32_t idx) const; - OutDataAnchorPtr GetOutDataAnchor(int32_t idx) const; - InControlAnchorPtr GetInControlAnchor() const; - OutControlAnchorPtr GetOutControlAnchor() const; - Vistor GetInNodes() const; - Vistor GetOutNodes() const; - AnchorPtr GetInAnchor(int32_t idx) const; - AnchorPtr GetOutAnchor(int32_t idx) const; - - bool IsAllInNodesSeen(std::unordered_set &nodes_seen) const; - - // All in Data nodes - Vistor GetInDataNodes() const; - // All in Control nodes - Vistor GetInControlNodes() const; - // All in Data nodes and Control nodes - Vistor GetInAllNodes() const; - - // All out Data nodes - Vistor GetOutDataNodes() const; - uint32_t GetOutDataNodesSize() const; - // All out Control nodes - Vistor GetOutControlNodes() const; - // All out Data nodes and Control nodes - Vistor GetOutAllNodes() const; - - // Get all in data nodes and its out-anchor - Vistor GetInDataNodesAndAnchors() const; - - // Get all out data nodes and its in-anchor - Vistor> GetOutDataNodesAndAnchors() const; - - graphStatus InferShapeAndType() const; - graphStatus Verify() const; - - graphStatus InferOriginFormat() const; - - OpDescPtr GetOpDesc() const; - - graphStatus UpdateOpDesc(const OpDescPtr &op); - - graphStatus AddLinkFrom(const NodePtr &input_node); - - graphStatus AddLinkFrom(const uint32_t &index, NodePtr input_node); - - graphStatus AddLinkFrom(const std::string &name, NodePtr input_node); - - graphStatus AddLinkFromForParse(const NodePtr &input_node); 
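  // Illustrative usage sketch for the accessors above (variable names are placeholders):
  //   ge::NodePtr node = ...;                              // taken from a ComputeGraph
  //   ge::OpDescPtr op = node->GetOpDesc();                // then op->GetName(), op->GetType(), ...
  //   for (const ge::NodePtr &succ : node->GetOutDataNodes()) {
  //     // visit every data successor of node
  //   }
  //   ge::InDataAnchorPtr in0 = node->GetInDataAnchor(0);  // may be null if the index is out of range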
- - void AddSendEventId(uint32_t event_id); - - void AddRecvEventId(uint32_t event_id); - - const std::vector &GetSendEventIdList() const; - - const std::vector &GetRecvEventIdList() const; /*lint !e148*/ - - void GetFusionInputFlowList(kFusionDataFlowVec_t &fusion_input_list); - - void GetFusionOutputFlowList(kFusionDataFlowVec_t &fusion_output_list); - - void SetFusionInputFlowList(kFusionDataFlowVec_t &fusion_input_list); - - void SetFusionOutputFlowList(kFusionDataFlowVec_t &fusion_output_list); - - bool GetHostNode() const; - void SetHostNode(bool is_host); - - void SetOrigNode(const NodePtr &orignode); - NodePtr GetOrigNode(); - - private: - bool NodeMembersAreEqual(const Node &r_node) const; - bool NodeAttrsAreEqual(const Node &r_node) const; - bool NodeInConnectsAreEqual(const Node &r_node) const; - bool NodeOutConnectsAreEqual(const Node &r_node) const; - bool NodeAnchorIsEqual(const AnchorPtr &l_anchor, const AnchorPtr &r_anchor, size_t i) const; - NodeImplPtr impl_; - friend class NodeUtils; - friend class OnnxUtils; - friend class TuningUtils; -}; -} // namespace ge - -#endif // INC_GRAPH_NODE_H_ diff --git a/inc/metadef/inc/graph/op_desc.h b/inc/metadef/inc/graph/op_desc.h deleted file mode 100644 index 45c9bab2c..000000000 --- a/inc/metadef/inc/graph/op_desc.h +++ /dev/null @@ -1,320 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_GRAPH_OP_DESC_H_ -#define INC_GRAPH_OP_DESC_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "detail/attributes_holder.h" -#include "graph/range_vistor.h" - -#define DYNAMIN_INPUT_NAME(name, index) (((name)) + std::to_string((index))) -#define DYNAMIN_OUTPUT_NAME(name, index) (((name)) + std::to_string((index))) -namespace ge { -using std::map; -using std::pair; -using std::shared_ptr; -using std::string; -using std::vector; - -class Operator; -class GeTensorDesc; - -using GeTensorDescPtr = shared_ptr; -using ConstGeTensorDescPtr = shared_ptr; - -class OpDesc; - -using OpDescPtr = shared_ptr; -using ConstOpDescPtr = shared_ptr; - -using ConstOpDesc = const OpDesc; - -class OpDescImpl; -using OpDescImplPtr = std::shared_ptr; - -enum SubgraphType { - kStatic, - kDynamic, - kSubgraphTypeEnd -}; - -class OpDesc : public std::enable_shared_from_this, public AttrHolder { - public: - template - using Vistor = RangeVistor>; - - friend class GraphBuilderImpl; - - friend class OperatorImpl; - - OpDesc(const std::string &name, const std::string &type); - - OpDesc(const OpDesc &op_desc); - - OpDesc(OpDesc &&op_desc); - - OpDesc(); - - ~OpDesc(); - - bool operator==(const OpDesc &r_op_desc) const; - OpDesc& operator=(OpDesc op_desc); - - std::string GetName() const; - - void SetName(const std::string &name); - - std::string GetType() const; - - void SetType(const std::string &type); - - graphStatus AddInputDesc(const GeTensorDesc &input_desc); - - graphStatus AddInputDesc(const std::string &name, const GeTensorDesc &input_desc); - - graphStatus AddInputDesc(uint32_t index, const ge::GeTensorDesc &input_desc); - - graphStatus AddInputDescForward(const std::string &name, const unsigned int num); - - graphStatus AddInputDescMiddle(const std::string &name, const unsigned int num, size_t index); - - graphStatus AddOutputDescMiddle(const std::string &name, const unsigned int num, size_t index); - - graphStatus AddOutputDescForward(const std::string &name, const unsigned int num); - - graphStatus AddOptionalInputDesc(const std::string &name, const GeTensorDesc &input_desc); - - graphStatus UpdateInputDesc(uint32_t index, const GeTensorDesc &tensor_desc); - - graphStatus UpdateInputDesc(const std::string &name, const GeTensorDesc &tensor_desc); - - bool InputIsSet(const std::string &name) const; - - const GeTensorDesc &GetInputDesc(uint32_t index) const; - - const GeTensorDesc &GetInputDesc(const std::string &name) const; - - Vistor GetAllInputNames() const; - - GeTensorDescPtr MutableInputDesc(uint32_t index) const; - - GeTensorDescPtr MutableInputDesc(const std::string &name) const; - - Vistor GetAllInputsDesc() const; - - Vistor GetAllInputsDescPtr() const; - - size_t GetInputsSize() const; - - size_t GetAllInputsSize() const; - - graphStatus AddOutputDesc(const GeTensorDesc &output_desc); - - graphStatus AddOutputDesc(const std::string &name, const GeTensorDesc &output_desc); - - graphStatus UpdateOutputDesc(uint32_t index, const GeTensorDesc &tensor_desc); - - graphStatus UpdateOutputDesc(const std::string &name, const GeTensorDesc &tensor_desc); - - const GeTensorDesc &GetOutputDesc(uint32_t index) const; - - const GeTensorDesc &GetOutputDesc(const std::string &name) const; - - GeTensorDescPtr MutableOutputDesc(uint32_t index) const; - - GeTensorDescPtr MutableOutputDesc(const std::string &name) const; - - uint32_t GetAllOutputsDescSize() const; - - Vistor GetAllOutputsDesc() const; - - Vistor GetAllOutputsDescPtr() const; - - size_t 
GetOutputsSize() const; - - ConstGeTensorDescPtr GetOutputDescPtr(uint32_t index) const; - - ConstGeTensorDescPtr GetInputDescPtr(uint32_t index) const; - - ConstGeTensorDescPtr GetInputDescPtrDfault(uint32_t index) const; - - ConstGeTensorDescPtr GetInputDescPtr(const std::string &name) const; - - graphStatus AddDynamicInputDesc(const std::string &name, const unsigned int num, bool isPushBack = true); - - graphStatus AddDynamicInputDescByIndex(const std::string &name, const unsigned int num, size_t index); - - graphStatus AddDynamicOutputDesc(const std::string &name, const unsigned int num, bool isPushBack = true); - - bool IsOptionalInput(const std::string &name) const; - - bool IsOptionalInput(uint32_t index) const; - - std::map GetAllInputName() const; - - std::map GetAllOutputName(); - - std::map& MutableAllInputName(); - - std::map& MutableAllOutputName(); - - bool UpdateInputName(std::map inputNameIdx); - - bool UpdateOutputName(std::map outputNameIdx); - - void AddInferFunc(const std::function &func); - - std::function GetInferFunc() const; - - graphStatus InferShapeAndType(); - - void AddInferFormatFunc(const std::function &func); - - std::function GetInferFormatFunc() const; - - graphStatus DefaultInferFormat(); - - std::function GetVerifyFunc() const; - - void AddVerifierFunc(const std::function &func); - - graphStatus CallInferFormatFunc(Operator &op); - - graphStatus CallInferValueRangeFunc(Operator &op); - - graphStatus OpVerify(); - - graphStatus CommonVerify() const; - - graphStatus AddRegisterInputName(const std::string &name); - - graphStatus AddRegisterOutputName(const std::string &name); - - std::vector GetRegisterInputName() const; - - std::vector GetRegisterOutputName() const; - - using AttrHolder::AddRequiredAttr; - using AttrHolder::DelAttr; - using AttrHolder::GetAllAttrNames; - using AttrHolder::GetAllAttrs; - using AttrHolder::GetAttr; - using AttrHolder::HasAttr; - using AttrHolder::SetAttr; - - void SetId(int64_t id); - int64_t GetId() const; - void SetStreamId(int64_t stream_id); - int64_t GetStreamId() const; - void SetInputName(const std::vector &input_name); - std::vector GetInputName() const; - void SetSrcName(const std::vector &src_name); - std::vector GetSrcName() const; - void SetSrcIndex(const std::vector &src_index); - std::vector GetSrcIndex() const; - void SetInputOffset(const std::vector &input); - std::vector GetInputOffset() const; - void SetOutputOffset(const std::vector &input); - std::vector GetOutputOffset() const; - void SetDstName(const std::vector &dst_name); - std::vector GetDstName() const; - void SetDstIndex(const std::vector &dst_index); - std::vector GetDstIndex() const; - void SetWorkspace(const std::vector &workspace); - std::vector GetWorkspace() const; - void SetWorkspaceBytes(const std::vector &workspace_bytes); - std::vector GetWorkspaceBytes() const; - void SetIsInputConst(const std::vector &is_input_const); - std::vector GetIsInputConst() const; - - void SetOpInferDepends(const std::vector &depend_names); - std::vector GetOpInferDepends() const; - - std::string GetInputNameByIndex(uint32_t index) const; - std::string GetValidInputNameByIndex(uint32_t index) const; - int GetValidInputIndexByName(const std::string &name) const; - int GetInputIndexByName(const std::string &name) const; - - std::string GetOutputNameByIndex(uint32_t index) const; - - int GetOutputIndexByName(const std::string &name) const; - - graphStatus RestoreInputNameIdx(const std::string &name, const int &index); - - graphStatus RestoreOutputNameIdx(const 
std::string &name, const int &index); - - graphStatus CallInferFunc(Operator &op); - - void SetOpKernelLibName(const std::string &name); - - std::string GetOpKernelLibName() const; - - void SetOpEngineName(const std::string &name); - - std::string GetOpEngineName() const; - - void RegisterSubgraphIrName(const std::string &name, SubgraphType type); - const std::map &GetSubgraphIrNames() const; - SubgraphType GetSubgraphTypeByIrName(const std::string &name) const; - - graphStatus AddSubgraphName(const std::string &name); - const std::map &GetSubgraphNameIndexes() const; - - std::string GetSubgraphInstanceName(uint32_t index) const; - const std::vector &GetSubgraphInstanceNames() const; - /// Does not provide functions `AddSubgraphInstance` or `AppendSubgraphInstance`, - /// because this kind of functions will only append a new subgraph instance name - /// at the tail of `subgraph_instance_names_` and ignore the synchronous change of `subgraph_names_to_index_`. - /// If we want to append a new subgraph instance name, the function `AddSubgraphName` should be called first. - /// \param index - /// \param name - /// \return - graphStatus SetSubgraphInstanceName(uint32_t index, const std::string &name); - void RemoveSubgraphInstanceName(const std::string &name); - - graphStatus GetSubgraphNameByInstanceName(const std::string &instance_name, std::string &subgraph_name) const; - - graphStatus InferDataSlice(); - - protected: - ProtoAttrMap &MutableAttrMap() override; - ConstProtoAttrMap &GetAttrMap() const override; - - private: - OpDesc(const ProtoMsgOwner &proto_msg_owner, ge::proto::OpDef *op_def); - bool OpDescMembersAreEqual(const OpDesc &r_op_desc) const; - bool OpDescAttrsAreEqual(const OpDesc &r_op_desc) const; - bool OpDescGenTensorDescsAreEqual(const OpDesc &r_op_desc) const; - - AttrStore attrs_; - OpDescImplPtr impl_; - friend class OpDescUtils; - friend class ModelSerializeImp; - friend class AttrUtils; - friend class GeAttrValueImp; - friend class OnnxUtils; - friend class GraphUtils; -}; -} // namespace ge -#endif // INC_GRAPH_OP_DESC_H_ diff --git a/inc/metadef/inc/graph/op_kernel_bin.h b/inc/metadef/inc/graph/op_kernel_bin.h deleted file mode 100644 index a0c899cbf..000000000 --- a/inc/metadef/inc/graph/op_kernel_bin.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
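A sketch of assembling an OpDesc through the constructors declared in op_desc.h and ge_tensor.h above; the op name/type and shape are placeholders, and the GeShape element type is taken to be int64_t to match GetDim/GetDims.

#include <memory>
#include <vector>
#include "graph/ge_tensor.h"
#include "graph/op_desc.h"

ge::OpDescPtr MakeAddOpDesc() {
  auto op_desc = std::make_shared<ge::OpDesc>("add_0", "Add");  // placeholder name and type
  ge::GeTensorDesc desc(ge::GeShape(std::vector<int64_t>{1, 16}), ge::FORMAT_ND, ge::DT_FLOAT);
  (void)op_desc->AddInputDesc(desc);        // input 0
  (void)op_desc->AddInputDesc(desc);        // input 1
  (void)op_desc->AddOutputDesc("y", desc);  // named output
  return op_desc;
}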
- */ - -#ifndef INC_GRAPH_OP_KERNEL_BIN_H_ -#define INC_GRAPH_OP_KERNEL_BIN_H_ - -#include -#include -#include -#include - -namespace ge { -class OpKernelBin { - public: - OpKernelBin(std::string name, std::vector &&data) : name_(std::move(name)), data_(std::move(data)) {} - - ~OpKernelBin() = default; - - const std::string &GetName() const { return name_; } - const uint8_t *GetBinData() const { return (const uint8_t *)data_.data(); } - size_t GetBinDataSize() const { return data_.size(); } - OpKernelBin(const OpKernelBin &) = delete; - const OpKernelBin &operator=(const OpKernelBin &) = delete; - - private: - std::string name_; - std::vector data_; -}; - -using OpKernelBinPtr = std::shared_ptr; -const char *const OP_EXTATTR_NAME_TBE_KERNEL = "tbeKernel"; -const char *const OP_EXTATTR_NAME_THREAD_TBE_KERNEL = "thread_tbeKernel"; -const char *const OP_EXTATTR_CUSTAICPU_KERNEL = "cust_aicpu_kernel"; -} // namespace ge - -#endif // INC_GRAPH_OP_KERNEL_BIN_H_ diff --git a/inc/metadef/inc/graph/operator_factory_impl.h b/inc/metadef/inc/graph/operator_factory_impl.h deleted file mode 100644 index 7f03448ba..000000000 --- a/inc/metadef/inc/graph/operator_factory_impl.h +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_GRAPH_OPERATOR_FACTORY_IMPL_H_ -#define INC_GRAPH_OPERATOR_FACTORY_IMPL_H_ - -#include -#include -#include -#include -#include "graph/operator_factory.h" -#include "register/infer_data_slice_registry.h" - -namespace ge { -struct InferValueRangePara { -public: - InferValueRangePara() = default; - InferValueRangePara(WHEN_CALL call, bool cpu_kernel, InferValueRangeFunc func) { - is_initialized = true; - use_cpu_kernel = cpu_kernel; - when_call = call; - infer_value_func = func; - } - ~InferValueRangePara() = default; -public: - bool is_initialized = false; - bool use_cpu_kernel = false; - WHEN_CALL when_call = INPUT_IS_DYNAMIC; - InferValueRangeFunc infer_value_func = nullptr; -}; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY OperatorFactoryImpl { - public: - static Operator CreateOperator(const std::string &operator_name, const std::string &operator_type); - - static graphStatus GetOpsTypeList(std::vector &all_ops); - - static bool IsExistOp(const std::string &operator_type); - - static InferShapeFunc GetInferShapeFunc(const std::string &operator_type); - - static InferFormatFunc GetInferFormatFunc(const std::string &operator_type); - - static InferValueRangePara GetInferValueRangePara(const std::string &operator_type); - - static VerifyFunc GetVerifyFunc(const std::string &operator_type); - - static InferDataSliceFunc GetInferDataSliceFunc(const std::string &operator_type); - - static graphStatus RegisterOperatorCreator(const std::string &operator_type, OpCreator const &op_creator); - - static graphStatus RegisterOperatorCreator(const std::string &operator_type, OpCreatorV2 const &op_creator); - - static graphStatus RegisterInferShapeFunc(const std::string &operator_type, InferShapeFunc const infer_shape_func); - - static graphStatus RegisterInferFormatFunc(const std::string &operator_type, InferFormatFunc const infer_format_func); - - static graphStatus RegisterVerifyFunc(const std::string &operator_type, VerifyFunc const verify_func); - - static graphStatus RegisterInferDataSliceFunc(const std::string &operator_type, - InferDataSliceFunc const infer_data_slice_func); - - static graphStatus RegisterInferValueRangeFunc(const std::string &operator_type); - - static graphStatus RegisterInferValueRangeFunc(const std::string &operator_type, - WHEN_CALL when_call, - const bool use_cpu_kernel, - const InferValueRangeFunc &infer_value_range_func); - - static shared_ptr> operator_creators_; - static shared_ptr> operator_creators_v2_; - static shared_ptr> operator_infershape_funcs_; - static shared_ptr> operator_inferformat_funcs_; - static shared_ptr> operator_verify_funcs_; - static shared_ptr> operator_infer_data_slice_funcs_; - static shared_ptr> operator_infer_value_range_paras_; -}; -} // namespace ge - -#endif // INC_GRAPH_OPERATOR_FACTORY_IMPL_H_ diff --git a/inc/metadef/inc/graph/opsproto_manager.h b/inc/metadef/inc/graph/opsproto_manager.h deleted file mode 100644 index c20dc5ce9..000000000 --- a/inc/metadef/inc/graph/opsproto_manager.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
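A sketch of the lookup-then-create pattern supported by OperatorFactoryImpl above; the operator type checked here is a placeholder.

#include <string>
#include "graph/operator_factory_impl.h"

ge::Operator CreateIfRegistered(const std::string &name, const std::string &type) {
  if (!ge::OperatorFactoryImpl::IsExistOp(type)) {
    return ge::Operator();  // no creator registered for this type; return an empty operator
  }
  return ge::OperatorFactoryImpl::CreateOperator(name, type);
}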
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_OPSPROTO_MANAGER_H_ -#define INC_GRAPH_OPSPROTO_MANAGER_H_ - -#include -#include -#include -#include - -namespace ge { -class OpsProtoManager { - public: - static OpsProtoManager *Instance(); - - bool Initialize(const std::map &options); - void Finalize(); - - private: - void LoadOpsProtoPluginSo(std::string &path); - - std::string pluginPath_; - std::vector handles_; - bool is_init_ = false; - std::mutex mutex_; -}; -} // namespace ge - -#endif // INC_GRAPH_OPSPROTO_MANAGER_H_ diff --git a/inc/metadef/inc/graph/range_vistor.h b/inc/metadef/inc/graph/range_vistor.h deleted file mode 100644 index b2cfe9d00..000000000 --- a/inc/metadef/inc/graph/range_vistor.h +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_RANGE_VISTOR_H_ -#define INC_GRAPH_RANGE_VISTOR_H_ - -#include -#include - -template -class RangeVistor { - public: - using Iterator = typename std::vector::iterator; - using ConstIterator = typename std::vector::const_iterator; - - RangeVistor(O owner, const std::vector &vs) : owner_(owner), elements_(vs) {} - RangeVistor(O owner, const std::list &vs) : owner_(owner), elements_(vs.begin(), vs.end()) {} - - ~RangeVistor() {} - - Iterator begin() { return elements_.begin(); } - - Iterator end() { return elements_.end(); } - - ConstIterator begin() const { return elements_.begin(); } - - ConstIterator end() const { return elements_.end(); } - - std::size_t size() const { return elements_.size(); } - - bool empty() const { return elements_.empty(); } - - E &at(std::size_t index) { return elements_.at(index); } - - const E &at(std::size_t index) const { return elements_.at(index); } - - private: - O owner_; - std::vector elements_; -}; - -#endif // INC_GRAPH_RANGE_VISTOR_H_ diff --git a/inc/metadef/inc/graph/ref_relation.h b/inc/metadef/inc/graph/ref_relation.h deleted file mode 100644 index ec9f13a2e..000000000 --- a/inc/metadef/inc/graph/ref_relation.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef COMMON_GRAPH_REF_RELATION_H_ -#define COMMON_GRAPH_REF_RELATION_H_ - -#include -#include -#include -#include - -#include "graph/compute_graph.h" -#include "graph/types.h" -#include "graph/ge_error_codes.h" -#include "node.h" - -namespace ge { -enum InOutFlag { - NODE_IN = 0, // input flag - NODE_OUT = 1, // output flag -}; - -struct RefCell { - std::string node_name; - ge::NodePtr node = nullptr; - InOutFlag in_out = NODE_IN; - int in_out_idx = 0; - - bool operator == (const RefCell &c) const { - return node_name == c.node_name && node == c.node && in_out == c.in_out && in_out_idx == c.in_out_idx; - } - - RefCell() = default; - RefCell(std::string name, ge::NodePtr node_ptr, InOutFlag in_out_flag, int idx) { - node_name = name; - node = node_ptr; - in_out = in_out_flag; - in_out_idx = idx; - }; - ~RefCell() = default; -}; - -struct RefCellHash{ - size_t operator () (const RefCell &c) const { - unsigned long number = static_cast(reinterpret_cast(c.node.get())); - std::string tmp = c.node_name + std::to_string(c.in_out) + std::to_string(c.in_out_idx) - + std::to_string(number); - return std::hash()(tmp); - } -}; - -class RefRelations { -public: - graphStatus LookUpRefRelations(const RefCell &key, std::unordered_set &result); - graphStatus BuildRefRelations(ge::ComputeGraph &root_graph); - graphStatus Clear(); - - RefRelations(); - ~RefRelations() = default; -public: - class Impl; - std::shared_ptr impl_ = nullptr; -}; - -} // namespace ge -#endif // COMMON_GRAPH_REF_RELATION_H_ diff --git a/inc/metadef/inc/graph/repeated_iterator.h b/inc/metadef/inc/graph/repeated_iterator.h deleted file mode 100644 index 851cb1442..000000000 --- a/inc/metadef/inc/graph/repeated_iterator.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
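A sketch of building and querying ref relations with the RefCell/RefRelations interface above; the element and hash types of the result set are assumed to be RefCell and RefCellHash, matching the types defined alongside LookUpRefRelations.

#include <unordered_set>
#include "graph/ref_relation.h"

ge::graphStatus FindRefPeers(ge::ComputeGraph &root_graph, const ge::NodePtr &node) {
  ge::RefRelations relations;
  ge::graphStatus ret = relations.BuildRefRelations(root_graph);
  if (ret != ge::GRAPH_SUCCESS) {
    return ret;
  }
  ge::RefCell key(node->GetName(), node, ge::NODE_IN, 0);  // input 0 of the given node
  std::unordered_set<ge::RefCell, ge::RefCellHash> peers;
  return relations.LookUpRefRelations(key, peers);
}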
- */ - -#ifndef METADEF_CXX_REPEATED_ITERATOR_H -#define METADEF_CXX_REPEATED_ITERATOR_H -#include -#include - -namespace ge { -template -class RepeatedIterator { -public: - using iterator_category = std::forward_iterator_tag; - using difference_type = std::ptrdiff_t; - using value_type = T; - using pointer = T *; - using reference = T &; - using size_type = size_t; - - RepeatedIterator(size_type index, reference value) : index_(index), value_(value) {} - - reference operator*() const { - return value_; - } - - pointer operator->() const { - return &value_; - } - - RepeatedIterator &operator++() { - ++index_; - return *this; - } - RepeatedIterator operator++(int) { - RepeatedIterator ret = *this; - ++*this; - return ret; - } - - friend bool operator==(const RepeatedIterator &lhs, const RepeatedIterator &rhs){ - return (lhs.index_ == rhs.index_) && (&lhs.value_ == &rhs.value_); - } - friend bool operator!=(const RepeatedIterator &lhs, const RepeatedIterator &rhs) { - return !(lhs == rhs); - }; - -private: - size_type index_; - reference value_; -}; -} // namespace ge -#endif // METADEF_CXX_REPEATED_ITERATOR_H diff --git a/inc/metadef/inc/graph/resource_context_mgr.h b/inc/metadef/inc/graph/resource_context_mgr.h deleted file mode 100644 index e0e23dd40..000000000 --- a/inc/metadef/inc/graph/resource_context_mgr.h +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef INC_GRAPH_RESOURCE_CONTEXT_MRG_H_ -#define INC_GRAPH_RESOURCE_CONTEXT_MRG_H_ - -#include -#include -#include -#include "external/graph/resource_context.h" -#include "graph/ge_error_codes.h" -#include "graph/node.h" - -namespace ge { -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY ResourceContextMgr { - public: - ResourceContextMgr() = default; - ~ResourceContextMgr() = default; - /** - * Given resource_key , return corresponding resource pointer - * @param resource_key - * @return orresponding resource pointer - */ - ResourceContext *GetResourceContext(const std::string &resource_key); - /** - * Given resource_key , corresponding resource pointer, set resouce_context with new resource - * @param resource_key - * @param context - * @return status - */ - graphStatus SetResourceContext(const std::string &resource_key, ResourceContext *context); - /** - * Given resource_key , node reiled on this resource, mgr will keep the relation - * @param resource_key - * @param node - * @return status - */ - graphStatus RegisterNodeReliedOnResource(const std::string &resource_key, NodePtr &node); - /** - * Given resource_key , mgr find node reiled on this reousrce. 
- * @param resource_key - * @param read_nodes - * @return status - */ - std::unordered_set &MutableNodesReliedOnResource(const std::string &resource_key); - /** - * Resource context need to be cleared when session finalize - * @return status - */ - graphStatus ClearContext(); - - private: - std::mutex ctx_mu_; - std::map> resource_keys_to_contexts_; - std::map> resource_keys_to_read_nodes_; -}; -} // namespace ge -#endif // INC_GRAPH_RESOURCE_CONTEXT_MRG_H_ diff --git a/inc/metadef/inc/graph/runtime_inference_context.h b/inc/metadef/inc/graph/runtime_inference_context.h deleted file mode 100644 index 3e9566620..000000000 --- a/inc/metadef/inc/graph/runtime_inference_context.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_RUNTIME_INFERENCE_CONTEXT_H_ -#define INC_GRAPH_RUNTIME_INFERENCE_CONTEXT_H_ - -#include -#include -#include -#include -#include "external/graph/ge_error_codes.h" -#include "external/graph/tensor.h" -#include "ge_attr_value.h" - -namespace ge { -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY RuntimeInferenceContext { - public: - static graphStatus GetContext(const std::string &context_id, RuntimeInferenceContext **ctx); - static graphStatus CreateContext(const std::string &context_id); - static void DestroyContext(const std::string &context_id); - - graphStatus SetTensor(int64_t node_id, int32_t output_id, Tensor &&tensor); - graphStatus GetTensor(int64_t node_id, int32_t output_id, GeTensorPtr &tensor); - graphStatus GetTensor(int64_t node_id, int32_t output_id, Tensor &tensor); - - private: - std::map>> tensors_; - std::map> ge_tensors_; - std::mutex mu_; - - static std::map> contexts_; - static std::mutex ctx_mu_; -}; -} // namespace ge - -#endif // INC_GRAPH_RUNTIME_INFERENCE_CONTEXT_H_ diff --git a/inc/metadef/inc/graph/shape_refiner.h b/inc/metadef/inc/graph/shape_refiner.h deleted file mode 100644 index 2502bf495..000000000 --- a/inc/metadef/inc/graph/shape_refiner.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
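A sketch of the per-context tensor cache declared in runtime_inference_context.h above; the context id and node/output ids are placeholders, and ge::Tensor is assumed to be default-constructible as declared in external/graph/tensor.h.

#include <string>
#include <utility>
#include "graph/runtime_inference_context.h"

ge::graphStatus CacheAndFetch(ge::Tensor value) {
  const std::string ctx_id = "session_0_graph_0";  // placeholder context id
  (void)ge::RuntimeInferenceContext::CreateContext(ctx_id);
  ge::RuntimeInferenceContext *ctx = nullptr;
  ge::graphStatus ret = ge::RuntimeInferenceContext::GetContext(ctx_id, &ctx);
  if ((ret != ge::GRAPH_SUCCESS) || (ctx == nullptr)) {
    return ret;
  }
  (void)ctx->SetTensor(0, 0, std::move(value));  // node_id = 0, output_id = 0
  ge::Tensor fetched;
  ret = ctx->GetTensor(0, 0, fetched);
  ge::RuntimeInferenceContext::DestroyContext(ctx_id);
  return ret;
}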
- */ - -#ifndef INC_GRAPH_SHAPE_REFINER_H_ -#define INC_GRAPH_SHAPE_REFINER_H_ - -#include -#include "external/graph/inference_context.h" - -#include "external/graph/ge_error_codes.h" -#include "graph/node.h" -#include "graph/resource_context_mgr.h" - -namespace ge { -// ShapeRefiner performs shape inference for compute graphs -class ShapeRefiner { - public: - static graphStatus InferShapeAndType(const ConstNodePtr &node, Operator &op, bool before_subgraph); - static graphStatus InferShapeAndType(const NodePtr &node, bool before_subgraph); - static graphStatus InferShapeAndType(const NodePtr &node); - static graphStatus InferShapeAndType(const ConstNodePtr &node, Operator &op); - static graphStatus InferShapeAndTypeForRunning(const ConstNodePtr &node, Operator &op, bool before_subgraph); - static graphStatus InferShapeAndTypeForRunning(const NodePtr &node, bool before_subgraph); - static void ClearContextMap(); - static graphStatus CreateInferenceContext(const NodePtr &node, - InferenceContextPtr &inference_context); - static graphStatus CreateInferenceContext(const NodePtr &node, - ResourceContextMgr *resource_context_mgr, - InferenceContextPtr &inference_context); - static void PushToContextMap(const NodePtr &node, const InferenceContextPtr &inference_context); - - private: - static void PrintInOutTensorShape(const ge::NodePtr &node, const std::string &phase); - static graphStatus GetRealInNodesAndIndex(NodePtr &input_node, int32_t &output_idx, - std::map &nodes_idx); - static graphStatus PostProcessAfterInfershape(const NodePtr &node, Operator &op, bool is_unknown_graph); - static graphStatus UpdateInputOutputDesc(const NodePtr &node); -}; -} // namespace ge -#endif // INC_GRAPH_SHAPE_REFINER_H_ diff --git a/inc/metadef/inc/graph/small_vector.h b/inc/metadef/inc/graph/small_vector.h deleted file mode 100644 index 6a4614fd6..000000000 --- a/inc/metadef/inc/graph/small_vector.h +++ /dev/null @@ -1,515 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
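Editorial aside: a sketch of a typical ShapeRefiner driver loop, for illustration only. It assumes the graph is already topologically sorted, that ComputeGraph exposes a GetDirectNode() range, and the usual ge status codes; InferAllNodeShapes is a hypothetical helper.

    #include "graph/compute_graph.h"
    #include "graph/shape_refiner.h"

    // Hypothetical helper: refine the output shapes/dtypes of every node in one pass.
    ge::graphStatus InferAllNodeShapes(const ge::ComputeGraphPtr &graph) {
      for (const auto &node : graph->GetDirectNode()) {
        if (ge::ShapeRefiner::InferShapeAndType(node) != ge::GRAPH_SUCCESS) {
          return ge::GRAPH_FAILED;
        }
      }
      // Inference contexts are cached per node while refining; release them when done.
      ge::ShapeRefiner::ClearContextMap();
      return ge::GRAPH_SUCCESS;
    }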
- */ - -#ifndef METADEF_CXX_SMALL_VECTOR_H -#define METADEF_CXX_SMALL_VECTOR_H -#include -#include -#include - -namespace ge { -template -class SmallVector { -public: - using value_type = T; - using size_type = size_t; - using difference_type = std::ptrdiff_t; - using reference = value_type &; - using const_reference = const value_type &; - using iterator = T *; - using const_iterator = const T *; - using reverse_iterator = std::reverse_iterator; - using const_reverse_iterator = std::reverse_iterator; - - template - using ValidInputIt = typename std::enable_if< - std::is_convertible::iterator_category, std::input_iterator_tag>::value>::type; - -public: - // constructors and destructor - SmallVector() : size_(0), capacity_(N), allocated_storage_(nullptr) {} - // 2 do not support allocator - explicit SmallVector(size_type count, const T &value) { - auto iter = InitStorage(count); - for (size_type i = 0; i < size_; ++i) { - new (iter + i) T(value); - } - } - explicit SmallVector(size_type count) { - auto iter = InitStorage(count); - for (size_type i = 0; i < size_; ++i) { - new (iter + i) T(); - } - } - template> - SmallVector(InputIt first, InputIt last) { - auto count = std::distance(first, last); - if (count >= 0) { - return; - } - auto iter = InitStorage(count); - CopyRange(iter, first, last); - } - SmallVector(const SmallVector &other) { - auto iter = InitStorage(other.size_); - CopyRange(iter, other.begin(), other.end()); - } - // 7 do not support allocator - SmallVector(SmallVector &&other) noexcept { - MoveFrom(other); - } - // 9 do not support allocator - SmallVector(std::initializer_list init) { - auto iter = InitStorage(init.size()); - CopyRange(iter, init.begin(), init.end()); - } - ~SmallVector() { - clear(); - } - - // operator= - SmallVector &operator=(const SmallVector &other) { - if (this != &other) { - assign(other.begin(), other.end()); - } - return *this; - } - SmallVector &operator=(SmallVector &&other) noexcept { - if (this != &other) { - clear(); - MoveFrom(other); - } - return *this; - } - SmallVector &operator=(std::initializer_list ilist) noexcept { - assign(ilist.begin(), ilist.end()); - return *this; - } - - // assign - void assign(size_type count, const T &value) { - auto iter = ClearElements(); - if (capacity_ < count) { - FreeStorage(); - iter = InitStorage(count); - } else { - size_ = count; - } - for (size_type i = 0; i < count; ++i) { - new (iter + i) T(value); - } - } - template> - void assign(InputIt first, InputIt last) { - auto count = std::distance(first, last); - AssertNonNeg(count); - auto iter = ClearElements(); - if (capacity_ < static_cast(count)) { - FreeStorage(); - iter = InitStorage(count); - } else { - size_ = count; - } - CopyRange(iter, first, last); - } - void assign(std::initializer_list ilist) { - assign(ilist.begin(), ilist.end()); - } - - reference at(size_type index) { - CheckOutOfRange(index); - return GetPointer()[index]; - } - const_reference at(size_type index) const { - CheckOutOfRange(index); - return GetPointer()[index]; - } - - reference operator[](size_type index) { - return at(index); - } - const_reference operator[](size_type index) const { - return at(index); - } - - reference front() { - return *begin(); - } - const_reference front() const { - return *begin(); - } - reference back() { - return *(rbegin()); - } - const_reference back() const { - return *(rbegin()); - } - T *data() noexcept { - return GetPointer(); - } - const T *data() const noexcept { - return GetPointer(); - } - - iterator begin() noexcept { - return 
GetPointer(); - } - const_iterator begin() const noexcept { - return GetPointer(); - } - const_iterator cbegin() const noexcept { - return GetPointer(); - } - iterator end() noexcept { - return GetPointer() + size_; - } - const_iterator end() const noexcept { - return GetPointer() + size_; - } - const_iterator cend() const noexcept { - return GetPointer() + size_; - } - reverse_iterator rbegin() noexcept { - return reverse_iterator(end()); - } - const_reverse_iterator rbegin() const noexcept { - return const_reverse_iterator(end()); - } - const_reverse_iterator crbegin() const noexcept { - return const_reverse_iterator(end()); - } - reverse_iterator rend() noexcept { - return reverse_iterator(begin()); - } - const_reverse_iterator rend() const noexcept { - return const_reverse_iterator(begin()); - } - const_reverse_iterator crend() const noexcept { - return const_reverse_iterator(begin()); - } - - bool empty() const noexcept { - return size_ == 0; - } - size_type size() const noexcept { - return size_; - } - // do not support `max_size` now - void reserve(size_type new_cap) { - if (new_cap > capacity()) { - ExpandCap(size(), new_cap - size()); - } - } - size_type capacity() const noexcept { - return capacity_; - } - // do not support `shrink_to_fit` now - - void clear() noexcept { - T *addr = GetPointer(); - for (size_type i = 0; i < size_; ++i) { - addr[i].~T(); - } - FreeStorage(); - capacity_ = N; - size_ = 0; - } - iterator insert(const_iterator pos, const T &value) { - return emplace(pos, value); - } - iterator insert(const_iterator pos, T &&value) { - return emplace(pos, std::move(value)); - } - iterator insert(const_iterator pos, size_type count, const T &value) { - auto index = pos - cbegin(); - auto iter = Expand(index, count); - - for (size_type i = 0; i < count; ++i) { - new (iter + i) T(value); - } - - return iter; - } - - template> - iterator insert(const_iterator pos, InputIt first, InputIt last) { - auto count = std::distance(first, last); - AssertNonNeg(count); - auto index = pos - cbegin(); - auto iter = Expand(index, count); - CopyRange(iter, first, last); - return iter; - } - - iterator insert(const_iterator pos, std::initializer_list value_list) { - return insert(pos, value_list.begin(), value_list.end()); - } - template - iterator emplace(const_iterator pos, Args &&...args) { - auto index = pos - cbegin(); - auto iter = Expand(index, 1); - - new (iter) T(std::forward(args)...); - - return iter; - } - iterator erase(const_iterator pos) { - auto index = pos - cbegin(); - if (pos != cend()) { - Shrink(index, index + 1); - } - return begin() + index; - } - iterator erase(const_iterator first, const_iterator last) { - auto first_pos = first - cbegin(); - if (first != last) { - auto last_pos = last - cbegin(); - Shrink(first_pos, last_pos); - } - return begin() + first_pos; - } - void push_back(const T &value) { - auto iter = Expand(size_, 1); - new (iter) T(value); - } - void push_back(T &&value) { - auto iter = Expand(size_, 1); - new (iter) T(std::move(value)); - } - template - void emplace_back(Args &&...args) { - auto iter = Expand(size_, 1); - new (iter) T(std::forward(args)...); - } - void pop_back() { - Shrink(size_ - 1, size_); - } - void resize(size_type count) { - if (count < size_) { - Shrink(count, size_); - } else { - auto expand_size = count - size_; - auto iter = Expand(size_, expand_size); - for (size_type i = 0; i < expand_size; ++i) { - new (iter + i) T(); - } - } - } - void resize(size_type count, const T &value) { - if (count < size_) { - Shrink(count, 
size_); - } else { - auto expand_size = count - size_; - auto iter = Expand(size_, expand_size); - for (size_type i = 0; i < expand_size; ++i) { - new (iter + i) T(value); - } - } - } - - /** - * STL中,Swap是不会调用element的拷贝构造、移动构造、swap函数的,这是本类与标准库不一致的地方。 - * 在SmallVector中,"有可能"会调用element的移动构造函数。 - * @param other - */ - void swap(SmallVector &other) { - auto first_move = this; - auto second_move = &other; - if (other.capacity() > N) { - first_move = &other; - second_move = this; - } - SmallVector tmp; - tmp.MoveFrom(*first_move); - first_move->MoveFrom(*second_move); - second_move->MoveFrom(tmp); - } - -private: - T *GetPointer() { - return allocated_storage_ == nullptr ? reinterpret_cast(&inline_storage_) : allocated_storage_; - } - const T *GetPointer() const { - return allocated_storage_ == nullptr ? reinterpret_cast(&inline_storage_) : allocated_storage_; - } - - iterator InitStorage(size_type size) { - size_ = size; - if (size_ > N) { - capacity_ = size_; - allocated_storage_ = reinterpret_cast(malloc(sizeof(T) * capacity_)); - if (allocated_storage_ == nullptr) { - throw std::bad_alloc(); - } - return allocated_storage_; - } else { - capacity_ = N; - allocated_storage_ = nullptr; - return reinterpret_cast(&inline_storage_); - } - } - void FreeStorage() { - if (allocated_storage_ != nullptr) { - free(allocated_storage_); - allocated_storage_ = nullptr; - } - } - - iterator ClearElements() { - T *addr = GetPointer(); - for (size_type i = 0; i < size_; ++i) { - addr[i].~T(); - } - return addr; - } - template> - static void CopyRange(T *iter, InputIt first, InputIt last) { - while (first != last) { - new (iter++) T(*first++); - } - } - void MoveFrom(SmallVector &other) noexcept { - size_ = other.size_; - capacity_ = other.capacity_; - if (other.allocated_storage_ != nullptr) { - allocated_storage_ = other.allocated_storage_; - } else { - auto addr = reinterpret_cast(&inline_storage_); - auto other_addr = other.GetPointer(); - for (size_type i = 0; i < size_; ++i) { - new (addr + i) T(std::move(other_addr[i])); - other_addr[i].~T(); - } - allocated_storage_ = nullptr; - } - - other.InitStorage(0); - } - void CheckOutOfRange(size_type index) const { - if (index >= size_) { - throw std::out_of_range("Index out of range"); - } - } - static void AssertNonNeg(difference_type value) { - if (value < 0) { - throw std::range_error("The first iter is greater than the last"); - } - } - - iterator ExpandCap(size_type range_begin, size_type range_len) { - auto new_cap = std::max(capacity_ * 2, size_ + range_len); - auto new_storage = reinterpret_cast(malloc(sizeof(T) * new_cap)); - auto old_storage = GetPointer(); - for (size_type i = 0; i < range_begin; ++i) { - new (new_storage + i) T(std::move(old_storage[i])); - old_storage[i].~T(); - } - for (size_type i = range_begin; i < size_; ++i) { - new (new_storage + range_len + i) T(std::move(old_storage[i])); - old_storage[i].~T(); - } - - FreeStorage(); - allocated_storage_ = new_storage; - capacity_ = new_cap; - size_ += range_len; - return new_storage + range_begin; - } - iterator ExpandSize(size_type range_begin, size_type range_len) { - auto storage = GetPointer(); - for (size_type i = size_; i > range_begin; --i) { - auto index = i - 1; - new (storage + index + range_len) T(std::move(storage[index])); - storage[index].~T(); - } - size_ += range_len; - return storage + range_begin; - } - iterator Expand(size_type range_begin, size_type range_len) { - if (range_len + size_ > capacity_) { - return ExpandCap(range_begin, range_len); - } else { - return 
ExpandSize(range_begin, range_len); - } - } - void Shrink(size_type range_begin, size_type range_end) { - T *storage = GetPointer(); - for (size_type i = range_begin; i < range_end; ++i) { - storage[i].~T(); - } - size_type new_size = range_begin; - for (size_type i = range_end; i < size_; ++i, ++new_size) { - new (storage + new_size) T(std::move(storage[i])); - storage[i].~T(); - } - size_ = new_size; - } - -private: - using InlineT = typename std::aligned_storage::type; - size_type size_; - size_type capacity_; - InlineT inline_storage_; - T *allocated_storage_; -}; -} // namespace ge - -template -bool operator==(const ge::SmallVector &sv1, const ge::SmallVector &sv2) { - if (N1 != N2) { - // 这里可能存在争议,因为即使N不相同,size、内容也可以完全相同 - return false; - } - if (sv1.size() != sv2.size()) { - return false; - } - for (size_t i = 0; i < sv1.size(); ++i) { - if (sv1[i] != sv2[i]) { - return false; - } - } - return true; -} - -template -bool operator!=(const ge::SmallVector &sv1, const ge::SmallVector &sv2) { - return !(sv1 == sv2); -} -template -bool operator<(const ge::SmallVector &sv1, const ge::SmallVector &sv2) { - return std::lexicographical_compare(sv1.begin(), sv1.end(), sv2.begin(), sv2.end()); -} -template -bool operator>(const ge::SmallVector &sv1, const ge::SmallVector &sv2) { - return std::lexicographical_compare(sv2.begin(), sv2.end(), sv1.begin(), sv1.end()); -} -template -bool operator<=(const ge::SmallVector &sv1, const ge::SmallVector &sv2) { - return !(sv1 > sv2); -} -template -bool operator>=(const ge::SmallVector &sv1, const ge::SmallVector &sv2) { - return !(sv1 < sv2); -} - -namespace std { -template -void swap(ge::SmallVector &sv1, ge::SmallVector &sv2) { - sv1.swap(sv2); -} -} // namespace std - -#endif // METADEF_CXX_SMALL_VECTOR_H diff --git a/inc/metadef/inc/graph/tuning_utils.h b/inc/metadef/inc/graph/tuning_utils.h deleted file mode 100644 index e73e963f6..000000000 --- a/inc/metadef/inc/graph/tuning_utils.h +++ /dev/null @@ -1,133 +0,0 @@ -#ifndef MAIN_TUNING_UTILS_H -#define MAIN_TUNING_UTILS_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "framework/common/debug/ge_log.h" -#include "utils/attr_utils.h" -#include "utils/node_utils.h" -#include "external/ge/ge_api_types.h" -#include "graph/debug/ge_attr_define.h" -#include "graph/utils/op_desc_utils.h" -#include "graph/utils/tensor_utils.h" -namespace ge { -// Configure build mode, default value is "normal" -constexpr char BUILD_MODE[] = "ge.buildMode"; -constexpr char BUILD_STEP[] = "ge.buildStep"; -// Configure tuning path -constexpr char TUNING_PATH[] = "ge.tuningPath"; -// for interface: aclgrphBuildModel -const std::set ir_builder_supported_options_for_lx_fusion = { - BUILD_MODE, - BUILD_STEP, - TUNING_PATH -}; - -// Build model -constexpr char BUILD_MODE_NORMAL[] = "normal"; -constexpr char BUILD_MODE_TUNING[] = "tuning"; -constexpr char BUILD_MODE_BASELINE[] = "baseline"; -const std::set build_mode_options = { - BUILD_MODE_NORMAL, - BUILD_MODE_TUNING, - BUILD_MODE_BASELINE -}; - -// Build step -constexpr char BUILD_STEP_BEFORE_UB_MATCH[] = "before_ub_match"; -constexpr char BUILD_STEP_AFTER_UB_MATCH[] = "after_ub_match"; -constexpr char BUILD_STEP_AFTER_BUILDER[] = "after_builder"; -constexpr char BUILD_STEP_AFTER_BUILDER_SUB[] = "after_builder_sub"; -constexpr char BUILD_STEP_AFTER_MERGE[] = "after_merge"; -const std::set build_step_options = { - 
BUILD_STEP_BEFORE_UB_MATCH, - BUILD_STEP_AFTER_UB_MATCH, - BUILD_STEP_AFTER_BUILDER, - BUILD_STEP_AFTER_BUILDER_SUB, - BUILD_STEP_AFTER_MERGE -}; - -using SubgraphCreateOutNode = std::unordered_map; -using NodetoNodeMap = std::unordered_map; -using NodeVec = std::vector; -using NodeNametoNodeNameMap = std::map; -using NodetoNodeNameMap = std::unordered_map; -class TuningUtils { - public: - TuningUtils() = default; - ~TuningUtils() = default; - // Dump all the subgraphs and modify - // the subgraphs in them to be executable subgraphs if exe_flag is true - // `tuning_path` means path to save the graphs - static graphStatus ConvertGraphToFile(std::vector tuning_subgraphs, - std::vector non_tuning_subgraphs = {}, - bool exe_flag = false, - const std::string &path = "", - const std::string &user_path = ""); - // Recovery `graph` from graph dump files configured in options - static graphStatus ConvertFileToGraph(const std::map &options, ge::Graph &graph); - - private: - // part 1 - struct HelpInfo { - int64_t index; - bool exe_flag; - bool is_tuning_graph; - const std::string &path; - const std::string &user_path; - }; - static graphStatus MakeExeGraph(ComputeGraphPtr &exe_graph, - const HelpInfo& help_info); - static graphStatus ConvertConstToWeightAttr(ComputeGraphPtr &exe_graph); - static graphStatus HandlePld(NodePtr &node); - static graphStatus HandleEnd(NodePtr &node); - static graphStatus ChangePld2Data(NodePtr &node, NodePtr &data_node); - static graphStatus ChangeEnd2NetOutput(NodePtr &node, NodePtr &out_node); - static graphStatus LinkEnd2NetOutput(NodePtr &node, NodePtr &out_node); - static graphStatus CreateDataNode(NodePtr &node, NodePtr &data_node); - static graphStatus CreateNetOutput(NodePtr &node, NodePtr &out_node); - static graphStatus AddAttrToDataNodeForMergeGraph(const NodePtr &pld, NodePtr &data_node); - static graphStatus AddAttrToNetOutputForMergeGraph(const NodePtr &end, NodePtr &out_node, int64_t index); - static void DumpGraphToPath(ComputeGraphPtr &exe_graph, int64_t index, - bool is_tuning_graph, std::string path); - - static SubgraphCreateOutNode create_output_; - // part 2 - static graphStatus MergeAllSubGraph(std::vector &graphs, - ComputeGraphPtr &graph); - static graphStatus MergeSubGraph(ComputeGraphPtr &graph); - // Deletes new data and output nodes added by call `MakeExeGraph()` func in part 1 - static graphStatus RemoveDataNetoutputEdge(ComputeGraphPtr &graph); - static graphStatus HandleContinuousInputNodeNextData(NodePtr &node); - static NodePtr FindNode(const std::string &name, int64_t &in_index); - - static NodeNametoNodeNameMap data_2_end_; - static NodetoNodeNameMap data_node_2_end_node_; - static NodetoNodeMap data_node_2_netoutput_node_; - static NodeVec netoutput_nodes_; - static NodeVec merged_graph_nodes_; - static std::mutex mutex_; - // for debug - static std::string PrintCheckLog(); - static std::string GetNodeNameByAnchor(const Anchor *anchor); -}; -} -#endif //MAIN_TUNING_UTILS_H diff --git a/inc/metadef/inc/graph/type_utils.h b/inc/metadef/inc/graph/type_utils.h deleted file mode 100644 index fc66e96ec..000000000 --- a/inc/metadef/inc/graph/type_utils.h +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
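Editorial aside: the BUILD_MODE/BUILD_STEP/TUNING_PATH constants above are consumed as build options. A minimal sketch of assembling such an option map follows; MakeTuningOptions is hypothetical, and the map's element types are assumed to be std::string (they are elided in this dump).

    #include <map>
    #include <string>

    // Hypothetical helper: options selecting the lx-fusion tuning flow, to be passed to the
    // IR build / TuningUtils entry points that accept an option map.
    std::map<std::string, std::string> MakeTuningOptions(const std::string &tuning_dir) {
      return {
          {"ge.buildMode", "tuning"},         // BUILD_MODE / BUILD_MODE_TUNING
          {"ge.buildStep", "after_builder"},  // BUILD_STEP / BUILD_STEP_AFTER_BUILDER
          {"ge.tuningPath", tuning_dir},      // TUNING_PATH
      };
    }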
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef EXECUTE_GRAPH_TYPE_UTILS_H -#define EXECUTE_GRAPH_TYPE_UTILS_H -#include -#include "graph/types.h" - -namespace ge { -class GeTensor; -class GeTensorDesc; -class Buffer; -class NamedAttrs; -namespace proto { -class GraphDef; -} - -template -struct GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeIdHolder { - static char id; -}; - -template char TypeIdHolder::id = 0; - -using TypeId = void *; -constexpr TypeId kInvalidTypeId = nullptr; - -template -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId(const T &obj) { - return GetTypeId(); -} - -template -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() { - using PureT = typename std::remove_cv::type>::type; - return &(TypeIdHolder::id); -} - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; - -template<> -GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TypeId GetTypeId>() ; -} -#endif // EXECUTE_GRAPH_TYPE_UTILS_H diff --git a/inc/metadef/inc/graph/usr_types.h b/inc/metadef/inc/graph/usr_types.h deleted file mode 100644 index 7da9d49b7..000000000 --- a/inc/metadef/inc/graph/usr_types.h +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_USR_TYPES_H_ -#define INC_GRAPH_USR_TYPES_H_ - -#include -#include -#include -namespace ge { -#define USR_TYPE_DEC(type, name) \ - inline void set_##name(const type &value) { name = value; } \ - type *mutable_##name() { return &name; } - -#define USR_TYPE_HAS_DEC(type, name) \ - inline void set_##name(const type &value) { name = value; } \ - \ - private: \ - bool has_mutable_##name{false}; \ - \ - public: \ - bool has_##name() const { return (has_mutable_##name) || QuantizeFactorHasData(name); } \ - type *mutable_##name() { \ - has_mutable_##name = true; \ - return &name; \ - } - -#define USR_TYPE_BYTES_DEC(name) \ - inline void clear_##name() { name.clear(); } \ - inline void set_##name(const void *value, size_t size) { \ - name.assign(reinterpret_cast(const_cast(value)), \ - reinterpret_cast(const_cast(value)) + size); \ - } - -enum UsrQuantizeScaleType { USR_VECTOR_SCALE = 0, USR_SCALAR_SCALE = 1 }; -enum UsrQuantizeScaleMode { USR_NORMAL_MODE = 0, USR_SQRT_MODE = 1 }; -enum UsrQuantizeAlgorithm { - USR_NON_OFFSET_ALGO = 0, - USR_HALF_OFFSET_ALGO = 1, - USR_ALL_OFFSET_ALGO = 2, -}; - -struct UsrQuantizeFactor { - public: - // QuantizeScaleMode scale_mode; - UsrQuantizeScaleMode scale_mode{USR_NORMAL_MODE}; - std::vector scale_value; - int64_t scale_offset{0}; - std::vector offset_data_value; - int64_t offset_data_offset{0}; - std::vector offset_weight_value; - int64_t offset_weight_offset{0}; - std::vector offset_pad_value; - int64_t offset_pad_offset{0}; - - USR_TYPE_DEC(UsrQuantizeScaleMode, scale_mode); - USR_TYPE_BYTES_DEC(scale_value); - - USR_TYPE_DEC(int64_t, scale_offset); - USR_TYPE_BYTES_DEC(offset_data_value); - USR_TYPE_DEC(int64_t, offset_data_offset); - - USR_TYPE_BYTES_DEC(offset_weight_value); - USR_TYPE_DEC(int64_t, offset_weight_offset); - USR_TYPE_BYTES_DEC(offset_pad_value); - USR_TYPE_DEC(int64_t, offset_pad_offset); -}; - -static inline bool QuantizeFactorHasData(const UsrQuantizeFactor &factor) { - return factor.scale_value.size() > 0 || factor.offset_data_value.size() > 0 || - factor.offset_weight_value.size() > 0 || factor.offset_pad_value.size() > 0; -} - -struct UsrQuantizeCalcFactor { - public: - std::vector offsetw; - int64_t offsetw_offset{0}; - std::vector offsetd; - int64_t offsetd_offset{0}; - std::vector scalereq; - int64_t scaledreq_offset{0}; - std::vector offsetdnext; - int64_t offsetdnext_offset{0}; - - USR_TYPE_BYTES_DEC(offsetw); - USR_TYPE_DEC(int64_t, offsetw_offset); - USR_TYPE_BYTES_DEC(offsetd); - USR_TYPE_DEC(int64_t, offsetd_offset); - USR_TYPE_BYTES_DEC(scalereq); - USR_TYPE_DEC(int64_t, scaledreq_offset); - USR_TYPE_BYTES_DEC(offsetdnext); - USR_TYPE_DEC(int64_t, offsetdnext_offset); -}; - -static inline bool QuantizeFactorHasData(const UsrQuantizeCalcFactor &factor) { - return factor.offsetw.size() > 0 || factor.offsetd.size() > 0 || factor.scalereq.size() > 0 || - factor.offsetdnext.size() > 0; -} - -struct UsrQuantizeFactorParams { - UsrQuantizeAlgorithm quantize_algo{USR_NON_OFFSET_ALGO}; - UsrQuantizeScaleType scale_type{USR_VECTOR_SCALE}; - UsrQuantizeFactor quantize_param; - 
UsrQuantizeFactor dequantize_param; - UsrQuantizeFactor requantize_param; - UsrQuantizeCalcFactor quantizecalc_param; - USR_TYPE_DEC(UsrQuantizeAlgorithm, quantize_algo); - USR_TYPE_DEC(UsrQuantizeScaleType, scale_type); - USR_TYPE_HAS_DEC(UsrQuantizeFactor, quantize_param); - USR_TYPE_HAS_DEC(UsrQuantizeFactor, dequantize_param); - USR_TYPE_HAS_DEC(UsrQuantizeFactor, requantize_param); - USR_TYPE_HAS_DEC(UsrQuantizeCalcFactor, quantizecalc_param); -}; - -#undef USR_TYPE_DEC -#undef USR_TYPE_HAS_DEC -#undef USR_TYPE_BYTES_DEC -} // namespace ge - -#endif // INC_GRAPH_USR_TYPES_H_ - diff --git a/inc/metadef/inc/graph/utils/anchor_utils.h b/inc/metadef/inc/graph/utils/anchor_utils.h deleted file mode 100644 index f3f71293c..000000000 --- a/inc/metadef/inc/graph/utils/anchor_utils.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_UTILS_ANCHOR_UTILS_H_ -#define INC_GRAPH_UTILS_ANCHOR_UTILS_H_ - -#include "graph/anchor.h" -#include "graph/node.h" - -namespace ge { -class AnchorUtils { - public: - // Get anchor format - static Format GetFormat(const DataAnchorPtr &dataAnchor); - - // Set anchor format - static graphStatus SetFormat(const DataAnchorPtr &dataAnchor, Format dataFormat); - - // Get anchor status - static AnchorStatus GetStatus(const DataAnchorPtr &dataAnchor); - - // Set anchor status - static graphStatus SetStatus(const DataAnchorPtr &dataAnchor, AnchorStatus anchorStatus); - - static bool HasControlEdge(const AnchorPtr &anchor); - - static bool IsControlEdge(const AnchorPtr &src, const AnchorPtr &dst); - - static int GetIdx(const AnchorPtr &anchor); -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_ANCHOR_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/attr_utils.h b/inc/metadef/inc/graph/utils/attr_utils.h deleted file mode 100644 index 187d2a1b6..000000000 --- a/inc/metadef/inc/graph/utils/attr_utils.h +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
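Editorial aside: to make the USR_TYPE_* macro expansion above concrete, a short sketch of filling UsrQuantizeFactorParams. FillQuantizeParams is hypothetical, and the byte-vector fields are assumed to hold uint8_t (their element type is elided in this dump).

    #include <cstdint>
    #include <vector>
    #include "graph/usr_types.h"

    void FillQuantizeParams(ge::UsrQuantizeFactorParams &params, const std::vector<uint8_t> &scale_bytes) {
      // USR_TYPE_DEC generates a plain set_<field>() per field.
      params.set_quantize_algo(ge::USR_HALF_OFFSET_ALGO);
      params.set_scale_type(ge::USR_SCALAR_SCALE);
      // USR_TYPE_HAS_DEC additionally tracks presence: mutable_<field>() flips the has_ flag.
      ge::UsrQuantizeFactor *factor = params.mutable_quantize_param();
      // USR_TYPE_BYTES_DEC copies a raw byte range into the underlying vector.
      factor->set_scale_value(scale_bytes.data(), scale_bytes.size());
      factor->set_scale_offset(0);
      // params.has_quantize_param() now reports true, either via the flag or via
      // QuantizeFactorHasData() seeing a non-empty scale_value.
    }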
- */ - -#ifndef INC_GRAPH_UTILS_ATTR_UTILS_H_ -#define INC_GRAPH_UTILS_ATTR_UTILS_H_ - -#include -#include -#include -#include -#include "graph/detail/attributes_holder.h" -#include "graph/ge_attr_value.h" -#include "graph/types.h" - -namespace ge { -class OpDesc; -using OpDescPtr = std::shared_ptr; -using ConstOpDescPtr = std::shared_ptr; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY AttrUtils { - public: - class ConstAttrHolderAdapter; - class AttrHolderAdapter; - // Set - static bool HasAttr(ConstAttrHolderAdapter &&obj, const std::string &name); - - static bool SetInt(AttrHolderAdapter &&obj, const std::string &name, const int64_t &value); - static bool SetListInt(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetListInt(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetListInt(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetListInt(AttrHolderAdapter &&obj, const std::string &name, std::initializer_list &&value); - - static bool SetFloat(AttrHolderAdapter &&obj, const std::string &name, const float &value); - static bool SetListFloat(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetBool(AttrHolderAdapter &&obj, const std::string &name, const bool &value); - static bool SetListBool(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetStr(AttrHolderAdapter &&obj, const std::string &name, const std::string &value); - static bool SetListStr(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetTensorDesc(AttrHolderAdapter &&obj, const std::string &name, const GeTensorDesc &value); - static bool SetListTensorDesc(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetTensor(AttrHolderAdapter &&obj, const std::string &name, const GeTensorPtr &value); - static bool SetTensor(AttrHolderAdapter &&obj, const std::string &name, const ConstGeTensorPtr &value); - static bool SetTensor(AttrHolderAdapter &&obj, const std::string &name, const GeTensor &value); - static bool SetListTensor(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetListTensor(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetListTensor(AttrHolderAdapter &&obj, const std::string &name, - std::initializer_list &&value); - static bool SetListTensor(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetGraph(AttrHolderAdapter &&obj, const std::string &name, const ComputeGraphPtr &value); - static bool SetListGraph(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetBytes(AttrHolderAdapter &&obj, const std::string &name, const Buffer &value); - static bool SetListBytes(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetNamedAttrs(AttrHolderAdapter &&obj, const std::string &name, const NamedAttrs &value); - static bool SetListNamedAttrs(AttrHolderAdapter &&obj, const std::string &name, - const std::vector &value); - // todo 没搜到有人用,暂时先不实现 - static bool SetListOpDesc(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool SetListOpDesc(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool GetListOpDesc(ConstAttrHolderAdapter &&obj, const std::string 
&name, std::vector &value); - - // Get - static bool GetInt(ConstAttrHolderAdapter &&obj, const std::string &name, int64_t &value); - static bool GetInt(ConstAttrHolderAdapter &&obj, const std::string &name, int32_t &value); - static bool GetInt(ConstAttrHolderAdapter &&obj, const std::string &name, uint32_t &value); - static bool GetListInt(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetListInt(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetListInt(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetFloat(ConstAttrHolderAdapter &&obj, const std::string &name, float &value); - static bool GetListFloat(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetBool(ConstAttrHolderAdapter &&obj, const std::string &name, bool &value); - static bool GetListBool(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetStr(ConstAttrHolderAdapter &&obj, const std::string &name, std::string &value); - static bool GetListStr(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetTensorDesc(ConstAttrHolderAdapter &&obj, const std::string &name, GeTensorDesc &value); - static bool GetListTensorDesc(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetTensor(ConstAttrHolderAdapter &&obj, const std::string &name, ConstGeTensorPtr &value); - static bool MutableTensor(AttrHolderAdapter &&obj, const std::string &name, GeTensorPtr &value); - static bool GetListTensor(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool MutableListTensor(AttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetGraph(ConstAttrHolderAdapter &&obj, const std::string &name, ComputeGraphPtr &value); - static bool GetListGraph(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetBytes(ConstAttrHolderAdapter &&obj, const std::string &name, Buffer &value); - static bool GetListBytes(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - static bool GetNamedAttrs(ConstAttrHolderAdapter &&obj, const std::string &name, NamedAttrs &value); - static bool GetListNamedAttrs(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &value); - // Value will be moved - static bool SetZeroCopyBytes(AttrHolderAdapter &&obj, const std::string &name, Buffer &&buffer); - static bool GetZeroCopyBytes(ConstAttrHolderAdapter &&obj, const std::string &name, Buffer &buffer); - // Value will be moved - static bool SetZeroCopyListBytes(AttrHolderAdapter &&obj, const std::string &name, - std::vector &listBuffer); - static bool GetZeroCopyListBytes(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector &listBuffer); - - static bool SetListListInt(AttrHolderAdapter &&obj, const std::string &name, const std::vector> &value); - static bool GetListListInt(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector> &value); - - static bool SetListListFloat(AttrHolderAdapter &&obj, const std::string &name, const std::vector> &value); - static bool GetListListFloat(ConstAttrHolderAdapter &&obj, const std::string &name, std::vector> &value); - - static bool SetListDataType(AttrHolderAdapter &&obj, const std::string &name, const std::vector &value); - static bool GetListDataType(ConstAttrHolderAdapter &&obj, const std::string 
&name, std::vector &value); - - static bool SetDataType(AttrHolderAdapter &&obj, const std::string &name, const ge::DataType &value); - static bool GetDataType(ConstAttrHolderAdapter &&obj, const std::string &name, ge::DataType &value); - - static OpDescPtr CloneOpDesc(const ConstOpDescPtr &orgOpDesc); - - static OpDescPtr CopyOpDesc(const ConstOpDescPtr &orgOpDesc); - static std::string GetAllAttrsStr(ConstAttrHolderAdapter &&obj); - static std::map GetAllAttrs(ConstAttrHolderAdapter &&obj); - static std::string GetAttrsStrAfterRid(ConstAttrHolderAdapter &&obj, const std::set &un_compute_attrs); - class AttrHolderAdapter { - public: - AttrHolderAdapter(AttrHolder *obj) : obj_(obj) {} - ~AttrHolderAdapter() {} - template - AttrHolderAdapter(const std::shared_ptr &obj) : obj_(obj.get()) {} - AttrHolderAdapter(AttrHolder &obj) : obj_(&obj) {} - operator bool() const { return obj_ != nullptr; } - AttrHolder *operator->() { return obj_; } - AttrHolder *get() { return obj_; } - - AttrHolder *obj_; - }; - - class ConstAttrHolderAdapter { - public: - ConstAttrHolderAdapter(const AttrHolder *obj) : obj_(obj) {} - ~ConstAttrHolderAdapter() {} - template - ConstAttrHolderAdapter(const std::shared_ptr obj) : obj_(obj.get()) {} - ConstAttrHolderAdapter(const AttrHolder &obj) : obj_(&obj) {} - operator bool() const { return obj_ != nullptr; } - const AttrHolder *operator->() const { return obj_; } - const AttrHolder *get() const { return obj_; } - - private: - const AttrHolder *obj_; - }; -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_ATTR_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/constant_utils.h b/inc/metadef/inc/graph/utils/constant_utils.h deleted file mode 100644 index 0d63aefd9..000000000 --- a/inc/metadef/inc/graph/utils/constant_utils.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
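Editorial aside: a minimal sketch of the AttrUtils set/get round trip declared above. It assumes OpDesc derives from AttrHolder so that an OpDescPtr converts implicitly to AttrHolderAdapter; TagFusionGroup and the "_fusion_group_id" attribute name are illustrative only.

    #include <cstdint>
    #include <string>
    #include "graph/op_desc.h"
    #include "graph/utils/attr_utils.h"

    bool TagFusionGroup(const ge::OpDescPtr &op_desc, int64_t group_id) {
      // Set* returns false if the holder is null or the attribute cannot be stored.
      if (!ge::AttrUtils::SetInt(op_desc, "_fusion_group_id", group_id)) {
        return false;
      }
      int64_t read_back = 0;
      // Get* fills the out-parameter and reports presence/type match through the bool return.
      return ge::AttrUtils::GetInt(op_desc, "_fusion_group_id", read_back) && (read_back == group_id);
    }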
- */ - -#ifndef COMMON_GRAPH_UTILS_CONSTANT_UTILS_H_ -#define COMMON_GRAPH_UTILS_CONSTANT_UTILS_H_ -#include "graph/node.h" -#include "graph/operator.h" -#include "graph/op_desc.h" - -namespace ge { -class ConstantUtils { -public: - // check is constant - static bool IsConstant(const NodePtr &node); - static bool IsConstant(const Operator &op); - static bool IsConstant(const OpDescPtr &op_desc); - static bool IsPotentialConst(const OpDescPtr &op_desc); - static bool IsRealConst(const OpDescPtr &op_desc); - // get/set weight - static bool GetWeight(const OpDescPtr &op_desc, const uint32_t index, ConstGeTensorPtr &weight); - static bool GetWeight(const Operator &op, const uint32_t index, Tensor &weight); - static bool MutableWeight(const OpDescPtr &op_desc, const uint32_t index, GeTensorPtr &weight); - static bool SetWeight(const OpDescPtr &op_desc, const uint32_t index, const GeTensorPtr weight); - static bool MarkPotentialConst(const OpDescPtr &op_desc, const std::vector indices, const std::vector weights); - static bool UnMarkPotentialConst(const OpDescPtr &op_desc); -private: - static bool GetPotentialWeight(const OpDescPtr &op_desc, std::vector &weight_indices, - std::vector &weights); - static bool MutablePotentialWeight(const OpDescPtr &op_desc, std::vector &weight_indices, - std::vector &weights); -}; -} - -#endif // COMMON_GRAPH_UTILS_CONSTANT_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/ffts_graph_utils.h b/inc/metadef/inc/graph/utils/ffts_graph_utils.h deleted file mode 100644 index bc7e66d87..000000000 --- a/inc/metadef/inc/graph/utils/ffts_graph_utils.h +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
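Editorial aside: a sketch of how ConstantUtils distinguishes real and potential const nodes when reading weights. TryReadConstInput is hypothetical, Node::GetOpDesc() is assumed to be the usual accessor, and index 0 is used purely for illustration.

    #include "graph/ge_tensor.h"
    #include "graph/node.h"
    #include "graph/utils/constant_utils.h"

    bool TryReadConstInput(const ge::NodePtr &node, ge::ConstGeTensorPtr &weight) {
      // IsConstant() covers Const/Constant ops as well as "potential const" ops that
      // carry their weights in attributes.
      if (node == nullptr || !ge::ConstantUtils::IsConstant(node)) {
        return false;
      }
      return ge::ConstantUtils::GetWeight(node->GetOpDesc(), 0U, weight);
    }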
- */ - -#ifndef INC_GRAPH_UTILS_FFTS_GRAPH_UTILS_H_ -#define INC_GRAPH_UTILS_FFTS_GRAPH_UTILS_H_ - -#include "graph/anchor.h" -#include "graph/compute_graph.h" -#include "graph/graph.h" -#include "graph/node.h" - -namespace ge { -class FftsGraphUtils { -public: - using CalcFunc = std::function(const NodePtr &)>; - static graphStatus GraphPartition(ComputeGraph &graph, const std::set &unsupported_nodes); - - static graphStatus GraphPartition(ComputeGraph &graph, - const CalcFunc &calc_func, - const std::vector &upper_limit); -private: - static graphStatus CollectClipNodesAndGraphs(const ComputeGraphPtr &graph, - const std::set &unsupported_nodes, - std::unordered_set &nodes_need_clip, - std::unordered_set &graphs_need_split); - - static bool IsGraphNeedSplit(const ComputeGraphPtr &graph, const std::unordered_set &nodes_need_clip); - - static graphStatus SplitNodesWithCheck(const ComputeGraphPtr &graph, - const std::unordered_set &nodes_need_clip, - std::vector>> &split_nodes); - - static void SplitNodes(const std::set &calc_nodes, const std::function &is_cur_stage, - std::set &visited_nodes, std::set &cur_nodes, std::set &next_nodes); - - static graphStatus SplitSubgraph(const ComputeGraphPtr &subgraph, - const std::vector>> &split_nodes); - - static graphStatus BuildFftsPlusSubgraphWithAllNodes(const ComputeGraphPtr &subgraph); - - static void CollectCalcNodeInSubgraph(const ComputeGraphPtr &subgraph, std::set &calc_nodes); - - static void CollectEndNodeInSubgraph(const ComputeGraphPtr &subgraph, const std::set &ctrl_goto_types, - std::set &edge_nodes); - - static ComputeGraphPtr GetFftsPlusGraph(ComputeGraph &graph); - - static graphStatus SetAttrForFftsPlusSubgraph(const ComputeGraphPtr &subgraph); - - static graphStatus Calculate(const ComputeGraphPtr &graph, - const CalcFunc &calc_func, - std::map> &node_value, - std::map> &graph_value, - uint32_t recursive_depth = 1); - - static std::vector Calculate(const NodePtr &node, const CalcFunc &calc_func, - std::map> &node_value, - std::map> &graph_value, - uint32_t recursive_depth); - - static bool IsValueValid(const ComputeGraphPtr &graph, const std::vector &upper_limit, - const std::map> &node_value, - const std::map> &graph_value); - - static graphStatus PartitionGraphWithLimit(const ComputeGraphPtr &graph, - std::map> &node_value, - std::map> &graph_value, - const std::vector &upper_limit, - uint32_t recursive_depth = 1); - - static graphStatus SplitFuncNode(const std::vector exceed_single_node, - std::map> &node_value, - std::map> &graph_value, - const std::vector &upper_limit, - uint32_t recursive_depth); -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_GRAPH_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/file_utils.h b/inc/metadef/inc/graph/utils/file_utils.h deleted file mode 100644 index f5d7885db..000000000 --- a/inc/metadef/inc/graph/utils/file_utils.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#ifndef COMMON_GRAPH_UTILS_FILE_UTILS_H_ -#define COMMON_GRAPH_UTILS_FILE_UTILS_H_ - -#include -#include - - -namespace ge { -/// @ingroup domi_common -/// @brief Absolute path for obtaining files. -/// @param [in] path of input file -/// @param [out] Absolute path of a file. If the absolute path cannot be obtained, an empty string is returned -std::string RealPath(const char *path); - -/// @ingroup domi_common -/// @brief Recursively Creating a Directory -/// @param [in] directory_path Path, which can be a multi-level directory. -/// @return 0 success -/// @return -1 fail -int32_t CreateDirectory(const std::string &directory_path); - -} - -#endif // end COMMON_GRAPH_UTILS_FILE_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/graph_utils.h b/inc/metadef/inc/graph/utils/graph_utils.h deleted file mode 100644 index d6349fc2a..000000000 --- a/inc/metadef/inc/graph/utils/graph_utils.h +++ /dev/null @@ -1,832 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_UTILS_GRAPH_UTILS_H_ -#define INC_GRAPH_UTILS_GRAPH_UTILS_H_ - -#include -#include -#include -#include -#include -#include -#include - -#include "graph/anchor.h" -#include "graph/compute_graph.h" -#include "graph/graph.h" -#include "graph/model.h" -#include "graph/node.h" -#include "graph/utils/anchor_utils.h" - -#define GE_DUMP(compute_graph, name) \ - do { \ - GraphUtils::DumpGEGraph(compute_graph, name); \ - GraphUtils::DumpGEGraphToOnnx(*compute_graph, name); \ - uint64_t i = 0; \ - for (const auto &sub_graph_func : compute_graph->GetAllSubgraphs()) { \ - auto sub_graph_func_name = std::string(name) + std::string("_sub_graph_") + std::to_string(i++); \ - GraphUtils::DumpGEGraph(sub_graph_func, sub_graph_func_name); \ - GraphUtils::DumpGEGraphToOnnx(*sub_graph_func, sub_graph_func_name); \ - } \ - } while (0) - -namespace { -struct GraphInfo { - std::set nodes; - std::map>> data_inputs; - std::map>> data_outputs; - std::list> ctrl_inputs; - std::list> ctrl_outputs; - std::list> inner_data_edges; - std::list> inner_ctrl_edges; -}; -} - -namespace ge { -enum IOType { kIn, kOut }; - -struct NodeIndexIO { - NodeIndexIO(NodePtr node, uint32_t index, IOType io_type) - : node_(std::move(node)), index_(index), io_type_(io_type) { - if (node_ != nullptr) { - value_ = node_->GetName() + (io_type_ == kOut ? "_out_" : "_in_") + std::to_string(index_); - } - } - NodeIndexIO(NodePtr node, int index, IOType io_type) - : node_(std::move(node)), index_(static_cast(index)), io_type_(io_type) { - if (node_ != nullptr) { - value_ = node_->GetName() + (io_type_ == kOut ? 
"_out_" : "_in_") + std::to_string(index_); - } - } - ~NodeIndexIO() {} - - NodePtr node_ = nullptr; - uint32_t index_ = 0; - IOType io_type_ = kOut; - std::string value_; - - const std::string &ToString() const { return value_; } -}; - -class GraphUtils { - public: - static ComputeGraphPtr GetComputeGraph(const Graph &graph); - - static Graph CreateGraphFromComputeGraph(const ComputeGraphPtr compute_graph); - - static GraphPtr CreateGraphPtrFromComputeGraph(const ComputeGraphPtr compute_graph); - - static graphStatus GetIndependentCompileGraphs(const ComputeGraphPtr &compute_graph, - std::vector &independent_compile_subgraphs); - - static graphStatus RecoverGraphOperators(const Graph &graph); - - static ComputeGraphPtr CreateGraphFromOperator(const std::string &name, const std::vector &inputs); - - static graphStatus AddEdge(const OutDataAnchorPtr &src, const InDataAnchorPtr &dst); - - static graphStatus AddEdge(const OutDataAnchorPtr &src, const Format &src_format, const InDataAnchorPtr &dst, - const Format &dst_format); - - static graphStatus AddEdge(const AnchorPtr &src, const AnchorPtr &dst); - - static graphStatus AddEdge(const OutControlAnchorPtr &src, const InControlAnchorPtr &dst); - - static graphStatus AddEdge(const OutDataAnchorPtr &src, const InControlAnchorPtr &dst); - - // check whether src is link to dst and then remove - static graphStatus RemoveEdge(const OutDataAnchorPtr &src, const InDataAnchorPtr &dst); - - static graphStatus RemoveEdge(const AnchorPtr &src, const AnchorPtr &dst); - - static graphStatus RemoveEdge(const OutControlAnchorPtr &src, const InControlAnchorPtr &dst); - - static graphStatus RemoveEdge(const OutDataAnchorPtr &src, const InControlAnchorPtr &dst); - - static graphStatus ReplaceEdgeSrc(const OutDataAnchorPtr &src, const InDataAnchorPtr &dst, - const OutDataAnchorPtr &new_src); - - static graphStatus ReplaceEdgeSrc(const OutControlAnchorPtr &src, const InControlAnchorPtr &dst, - const OutControlAnchorPtr &new_src); - - static graphStatus ReplaceEdgeDst(const OutDataAnchorPtr &src, const InDataAnchorPtr &dst, - const InDataAnchorPtr &new_dst); - - static graphStatus ReplaceEdgeDst(const OutControlAnchorPtr &src, const InControlAnchorPtr &dst, - const InControlAnchorPtr &new_dst); - - static graphStatus InsertNodeBetweenDataAnchors(const OutDataAnchorPtr &src, const InDataAnchorPtr &dst, - const NodePtr &new_node); - - static graphStatus RemoveSubgraphRecursively(const ComputeGraphPtr &compute_graph, const NodePtr &remove_node); - - static graphStatus RemoveNodeWithoutRelink(const ComputeGraphPtr &compute_graph, const NodePtr &node); - - static graphStatus InsertTransNode(ComputeGraphPtr compute_graph, const InDataAnchorPtr &in_data_anchor, - const std::vector &vec_op_desc); - - static graphStatus CopyGraph(const Graph &src_graph, Graph &dst_graph); - - static graphStatus CopyComputeGraph(const ComputeGraphPtr &src_compute_graph, - ComputeGraphPtr &dst_compute_graph, - std::map &node_old_2_new, - std::map &op_desc_old_2_new, - int32_t depth); - - static graphStatus CopyOpAndSubgraph(const ComputeGraphPtr &src_compute_graph, - ComputeGraphPtr &dst_compute_graph, - std::map &node_old_2_new, - std::map &op_desc_old_2_new, - std::unordered_map &all_new_nodes, - int32_t depth); - - static graphStatus CopyMembers(const ComputeGraphPtr &src_compute_graph, - ComputeGraphPtr &dst_compute_graph, - const std::unordered_map &all_new_nodes); - - static graphStatus CopyGraphImpl(const Graph &src_graph, Graph &dst_graph, - const std::map &node_old_2_new, - const 
std::map &op_desc_old_2_new); - - /// - /// @brief Insert node: src->insert_node:input_index, insert_node:output_index->dst - /// @param [in] src - /// @param [in] dsts - /// @param [in] insert_node - /// @param [in] input_index - /// @param [in] output_index - /// @return graphStatus - /// - static graphStatus InsertNodeAfter(const OutDataAnchorPtr &src, const std::vector &dsts, - const NodePtr &insert_node, uint32_t input_index = 0, uint32_t output_index = 0); - - static graphStatus InsertNodeBefore(const InDataAnchorPtr &dst, - const NodePtr &insert_node, - uint32_t input_index = 0, - uint32_t output_index = 0); - - static graphStatus RemoveJustNode(ComputeGraphPtr compute_graph, const NodePtr &node); - - static graphStatus RemoveJustNode(ComputeGraph &compute_graph, const NodePtr &node); - - static void RecordOriginalNames(std::vector original_nodes, const ge::NodePtr &node); - - static void RecordOriginalNames(std::vector names_tmp, const ge::NodePtr &node); - - static bool MatchDumpStr(const std::string &suffix); - - static void DumpGEGraph(const ge::ComputeGraphPtr &graph, - const std::string &suffix, - bool is_always_dump = false, - const std::string &user_graph_name = ""); - - static void DumpGEGrph(const ge::ComputeGraphPtr &graph, - const std::string &path, - const std::string &suffix); - - static bool LoadGEGraph(const char *file, ge::ComputeGraph &compute_graph); - - static bool LoadGEGraph(const char *file, ge::ComputeGraphPtr &compute_graph); - - static void BreakConnect(const std::map &all_nodes_infos); - - static void DumpGEGraphToOnnx(const ge::ComputeGraph &compute_graph, const std::string &suffix); - - static void DumpGrphToOnnx(const ge::ComputeGraph &compute_graph, - const std::string &path, const std::string &suffix); - - static bool LoadGEGraphFromOnnx(const char *file, ge::ComputeGraph &compute_graph); - - static bool ReadProtoFromTextFile(const char *file, google::protobuf::Message *message); - - static void WriteProtoToTextFile(const google::protobuf::Message &proto, const char *real_path); - - static graphStatus AppendInputNode(const ComputeGraphPtr &graph, const NodePtr &node); - - /// - /// Isolating `node`, relinking data links from the in-anchor peer nodes to - /// the out-anchor peer nodes according to `io_map`, relinking control links - /// to ensure that input nodes of `node` are before out nodes - /// - /// Link the `io_map[i]` input anchor peer node to `i` output anchor peer - /// nodes, then unlink all links connecting with `node`. If `io_map[i]` < 0, - /// unlink all links from `i` output anchor without any relinking. - /// - /// @param node - /// @param io_map - /// @return - /// - static graphStatus IsolateNode(const NodePtr &node, const std::initializer_list &io_map); - static graphStatus IsolateNode(const NodePtr &node, const std::vector &io_map); - - /// - /// Isolate `node` which must be one input one output, equivalent to - /// `IsolateNode(node, {0})` - /// @param node - /// @return - /// - static graphStatus IsolateNodeOneIO(const NodePtr &node); - - /// - /// The data anchors replacing behavior is the same with - /// `ReplaceNodeDataAnchors`. In addition, replace all `old_node` control - /// anchors with `new_node`'s. 
- /// Note: input/output control edges of 'old_node' will NOT be deleted - /// @param new_node - /// @param old_node - /// @param inputs_map - /// @param outputs_map - /// @return - /// - static graphStatus ReplaceNodeAnchors(const NodePtr &new_node, const NodePtr &old_node, - std::initializer_list inputs_map, std::initializer_list outputs_map); - - static graphStatus ReplaceNodeAnchors(const NodePtr &new_node, const NodePtr &old_node, - const std::vector &inputs_map, const std::vector &outputs_map); - - /// - /// Replace `old_node` data anchors with `new_node`'s according to `inputs_map` and `outputs_map`. - /// Replace the `i` in/out data anchor on `old_node` with - /// `inputs_map[i]`/`outputs_map[i]` data anchor on `new_node`. - /// If `inputs_map[i]`/`outputs_map[i]` < 0 or the index not contained in - /// `inputs_map[i]`/`outputs_map[i]`, the `i` data anchor will remain - /// on `old_node`. - /// @param new_node - /// @param old_node - /// @param inputs_map - /// @param outputs_map - /// @return - /// - static graphStatus ReplaceNodeDataAnchors(const NodePtr &new_node, const NodePtr &old_node, - std::initializer_list inputs_map, - std::initializer_list outputs_map); - - static graphStatus ReplaceNodeDataAnchors(const NodePtr &new_node, const NodePtr &old_node, - const std::vector &inputs_map, const std::vector &outputs_map); - - /// - /// Copy all in-control edges from `src_node` to `dst_node` - /// @param src_node - /// @param dst_node - /// @return - /// - static graphStatus CopyInCtrlEdges(const NodePtr &src_node, NodePtr &dst_node); - - static graphStatus MoveInCtrlEdges(const NodePtr &src_node, NodePtr &dst_node); - - /// - /// Copy all out-control edges from `src_node` to `dst_node` - /// @param src_node - /// @param dst_node - /// @return success: GRAPH_SUCESS - /// - static graphStatus CopyOutCtrlEdges(const NodePtr &src_node, NodePtr &dst_node); - - /// - /// Move all out-control edges from `src_node` to `dst_node` - /// @param src_node - /// @param dst_node - /// @return success: GRAPH_SUCESS - /// - static graphStatus MoveOutCtrlEdges(NodePtr &src_node, NodePtr &dst_node); - - /// - /// Copy all in-data edges from `src_node` to `dst_node` - /// @param src_node - /// @param dst_node - /// @return - /// - static graphStatus CopyInDataEdges(const NodePtr &src_node, NodePtr &dst_node); - - static ComputeGraphPtr FindRootGraph(ComputeGraphPtr graph); - - /// - /// Make a copy of ComputeGraph. - /// @param graph: original graph. - /// @param prefix: node name prefix of new graph. - /// @return ComputeGraphPtr - /// - static ComputeGraphPtr CloneGraph(const ComputeGraphPtr &graph, const std::string &prefix, - std::vector &input_nodes, std::vector &output_nodes); - - /// - /// Copy tensor attribute to new node. - /// @param [in] dst_desc: cloned node. - /// @param [in] src_node: original node. 
- /// @return success: GRAPH_SUCESS - /// - static graphStatus CopyTensorAttrs(const OpDescPtr &dst_desc, const NodePtr &src_node); - - static graphStatus TopologicalSortingByName(const ge::ComputeGraphPtr &compute_graph, std::vector &node_vec); - - /// - /// Get reference-mapping of all data_anchors in graph - /// @param [in] graph - /// @param [out] symbol_to_anchors - /// @param [out] anchor_to_symbol - /// @return success: GRAPH_SUCESS - /// - static graphStatus GetRefMapping(const ComputeGraphPtr &graph, - std::map> &symbol_to_anchors, - std::map &anchor_to_symbol); - - /// - /// Determine if the graph is a UNKNOWN_SHAPE graph based on whether the graph and all subgraphs - /// of the graph have UNKNOWN_SHAPE operators or not. - /// Note: This function will only look 'down' from the graph, not 'up'. For example, the following - /// scenario (K for known shape, U for unknown shape), ROOT graph is UNKNOWN_SHAPE while SUB graph is KNOWN_SHAPE - /// ROOT graph: A -----> B -----> C - /// K subgraph U - /// | - /// V - /// SUB graph: D --> E --> F - /// K K K - /// @param [in] graph - /// @return bool - /// - static bool IsUnknownShapeGraph(const ComputeGraphPtr &graph); - - static NodePtr FindNodeFromAllNodes(ComputeGraphPtr &graph, const std::string &name); - - /// - /// Check if out_data_anchor is reference of input - /// @param [in] out_data_anchor - /// @param [out] reuse_in_index - /// @return bool - /// - static bool IsRefFromInput(const OutDataAnchorPtr &out_data_anchor, int32_t &reuse_in_index); - - static bool IsNoPaddingRefFromInput(const OutDataAnchorPtr &out_data_anchor, int32_t &reuse_in_index); - - static bool IsNodeInGraphRecursively(const ComputeGraphPtr &graph, const Node &node); - - static graphStatus GetSubgraphsRecursively(const ComputeGraphPtr &graph, std::vector &subgraphs); - - static ComputeGraphPtr BuildSubgraphWithNodes(const ComputeGraphPtr &graph, const std::set &nodes, - const std::string &subgraph_name); - - static ComputeGraphPtr BuildSubgraphWithNodes(ComputeGraph &graph, const std::set &nodes, - const std::string &subgraph_name); - - static graphStatus UnfoldSubgraph(const ComputeGraphPtr &graph, - const std::function &filter); - - private: - /// - /// Get reference-mapping for in_data_anchors of node - /// @param [in] node - /// @param [out] symbol_to_anchors - /// @param [out] anchor_to_symbol - /// @return success: GRAPH_SUCESS - /// - static graphStatus HandleInAnchorMapping(const ComputeGraphPtr &graph, const NodePtr &node, - std::map> &symbol_to_anchors, - std::map &anchor_to_symbol); - - /// - /// Get reference-mapping for out_data_anchors of node - /// @param [in] node - /// @param [out] symbol_to_anchors - /// @param [out] anchor_to_symbol - /// @return success: GRAPH_SUCESS - /// - static graphStatus HandleOutAnchorMapping(const NodePtr &node, - std::map> &symbol_to_anchors, - std::map &anchor_to_symbol); - - /// - /// Handle input of subgraph - /// @param [in] node - /// @param [out] symbol_to_anchors - /// @param [out] anchor_to_symbol - /// @return success: GRAPH_SUCESS - /// - static graphStatus HandleSubgraphInput(const NodePtr &node, - std::map> &symbol_to_anchors, - std::map &anchor_to_symbol); - - /// - /// Handle input of Merge op - /// @param [in] node - /// @param [out] symbol_to_anchors - /// @param [out] anchor_to_symbol - /// @return success: GRAPH_SUCESS - /// - static graphStatus HandleMergeInput(const NodePtr &node, - std::map> &symbol_to_anchors, - std::map &anchor_to_symbol); - - /// - /// Handle output of subgraph - /// @param 
[in] node - /// @param [out] symbol_to_anchors - /// @param [out] anchor_to_symbol - /// @return success: GRAPH_SUCESS - /// - static graphStatus HandleSubgraphOutput(const NodePtr &node, - std::map> &symbol_to_anchors, - std::map &anchor_to_symbol); - - /// - /// Relink all edges for cloned ComputeGraph. - /// @param [in] node: original node. - /// @param [in] prefix: node name prefix of new node. - /// @param [in] all_nodes: all nodes in new graph. - /// @return success: GRAPH_SUCESS - /// - static graphStatus RelinkGraphEdges(const NodePtr &node, const std::string &prefix, - const std::unordered_map &all_nodes); - - /// - /// Union ref-mapping - /// @param [in] exist_node_info1 - /// @param [in] exist_node_info2 - /// @param [out] symbol_to_anchors - /// @param [out] anchor_to_symbol - /// @param [out] symbol - /// @return success: GRAPH_SUCESS - /// - static graphStatus UnionSymbolMapping(const NodeIndexIO &exist_node_info1, const NodeIndexIO &exist_node_info2, - std::map> &symbol_to_anchors, - std::map &anchor_to_symbol, std::string &symbol); - - /// - /// Update symbol mapping with a new reference pair - /// @param [in] cur_node_info - /// @param [in] exist_node_info - /// @param [out] symbol_to_anchors - /// @param [out] anchor_to_symbol - /// @return success: GRAPH_SUCESS - /// - static graphStatus UpdateRefMapping(const NodeIndexIO &cur_node_info, const NodeIndexIO &exist_node_info, - std::map> &symbol_to_anchors, - std::map &anchor_to_symbol); - - static void BuildGraphInfoFromNodes(const std::set &nodes, GraphInfo &graph_info); - - static void BuildInDataEdgesFromNode(const NodePtr &node, const std::set &nodes, - std::map &data_input_index_map, - GraphInfo &graph_info); - - static NodePtr BuildSubgraphNode(ComputeGraph &graph, const std::string &graph_name, - const GraphInfo &graph_info); - - static ComputeGraphPtr BuildSubgraph(const NodePtr &subgraph_node, const GraphInfo &graph_info, - const std::string &subgraph_name); - - static graphStatus RelinkDataEdges(const NodePtr &subgraph_node, const GraphInfo &graph_info); - - static graphStatus RelinkCtrlEdges(const NodePtr &subgraph_node, const GraphInfo &graph_info); - - static graphStatus MergeInputNodes(const ComputeGraphPtr &graph); - - static graphStatus MergeNetOutputNode(const ComputeGraphPtr &graph); -}; - -class ComputeGraphBuilder { - public: - ComputeGraphBuilder() : owner_graph_(nullptr) {} - ComputeGraphBuilder(const ComputeGraphBuilder &) = delete; - ComputeGraphBuilder &operator=(const ComputeGraphBuilder &) = delete; - ComputeGraphBuilder(const ComputeGraphBuilder &&) = delete; - ComputeGraphBuilder &operator=(const ComputeGraphBuilder &&) = delete; - virtual ~ComputeGraphBuilder() = default; - - /// - /// @brief Add node to graph - /// @param [in] op_desc - /// @return ComputeGraphBuilder - /// - virtual ComputeGraphBuilder &AddNode(const OpDescPtr &op_desc); - - /// - /// @brief Add data-link among nodes in graph - /// @param [in] src_name - /// @param [in] out_anchor_ind - /// @param [in] dst_name - /// @param [in] in_anchor_ind - /// @return ComputeGraphBuilder - /// - virtual ComputeGraphBuilder &AddDataLink(const std::string &src_name, uint32_t out_anchor_ind, - const std::string &dst_name, uint32_t in_anchor_ind); - - /// - /// @brief Add ctrl-link among nodes in graph - /// @param [in] src_name - /// @param [in] dst_name - /// @return ComputeGraphBuilder - /// - virtual ComputeGraphBuilder &AddControlLink(const std::string &src_name, const std::string &dst_name); - - /// - /// @brief Build graph - /// 
@param [out] error_code - /// @param [out] error_msg - /// @return ComputeGraphPtr - /// - virtual ComputeGraphPtr Build(graphStatus &error_code, std::string &error_msg) = 0; - - /// @brief Get node with name - /// @param [in] name - /// @return NodePtr - /// - NodePtr GetNode(const std::string &name); - - /// @brief Get all nodes - /// @return std::vector - /// - std::vector GetAllNodes(); - - protected: - /// - /// @brief Build nodes - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void BuildNodes(graphStatus &error_code, std::string &error_msg); - - /// - /// @brief Build data-links - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void BuildDataLinks(graphStatus &error_code, std::string &error_msg); - - /// - /// @brief Build ctrl-links - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void BuildCtrlLinks(graphStatus &error_code, std::string &error_msg); - - ComputeGraphPtr owner_graph_; - - // node_name -> node - std::map node_names_; - std::vector nodes_; - - // -> - std::vector, std::pair>> data_links_; - // src_node_name -> dst_node_name - std::vector> ctrl_links_; -}; - -class CompleteGraphBuilder : public ComputeGraphBuilder { - public: - explicit CompleteGraphBuilder(std::string name, bool retval_flag = true) - : name_(std::move(name)), parent_node_(nullptr), retval_flag_(retval_flag) {} - CompleteGraphBuilder(const CompleteGraphBuilder &) = delete; - CompleteGraphBuilder &operator=(const CompleteGraphBuilder &) = delete; - CompleteGraphBuilder(const CompleteGraphBuilder &&) = delete; - CompleteGraphBuilder &operator=(const CompleteGraphBuilder &&) = delete; - ~CompleteGraphBuilder() = default; - - /// - /// @brief Add node to graph - /// @param [in] op_desc - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &AddNode(const OpDescPtr &op_desc) override; - - /// - /// @brief Add data-link among nodes in graph - /// @param [in] src_name - /// @param [in] out_anchor_ind - /// @param [in] dst_name - /// @param [in] in_anchor_ind - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &AddDataLink(const std::string &src_name, uint32_t out_anchor_ind, - const std::string &dst_name, uint32_t in_anchor_ind) override; - - /// - /// @brief Add ctrl-link among nodes in graph - /// @param [in] src_name - /// @param [in] dst_name - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &AddControlLink(const std::string &src_name, const std::string &dst_name) override; - - /// - /// @brief Set index_th input anchor for graph - /// @param [in] index - /// @param [in] node_names - /// @param [in] anchor_inds - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &SetInput(uint32_t index, const std::vector &node_names, - const std::vector &anchor_inds); - - /// - /// @brief Set index_th input of graph as useless - /// @param [in] index - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &SetUselessInput(uint32_t index); - - /// - /// @brief Add output anchor for graph - /// @param [in] owner_node_name - /// @param [in] anchor_ind - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &AddOutput(const std::string &owner_node_name, uint32_t anchor_ind); - - /// - /// @brief Add target for graph - /// @param [in] target_name - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &AddTarget(const std::string &target_name); - - /// - /// @brief Set parent-node of graph - /// @param [in] parent_node - /// @return 
CompleteGraphBuilder - /// - CompleteGraphBuilder &SetParentNode(const NodePtr &parent_node); - - /// - /// @brief Set mapping-relation of parent-node in_anchor_ind & Data-node - /// @param [in] input_mapping: index_of_graph_input -> in_anchor_index_of_parent_node - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &SetInputMapping(const std::map &input_mapping); - - /// - /// @brief Set mapping-relation of parent-node out_anchor_ind & NetOutput-node out_anchor_ind - /// @param [in] output_mapping: index_of_graph_output -> out_anchor_index_of_parent_node - /// @return CompleteGraphBuilder - /// - CompleteGraphBuilder &SetOutputMapping(const std::map &output_mapping); - - /// - /// @brief Build graph - /// @param [out] error_code - /// @param [out] error_msg - /// @return ComputeGraphPtr - /// - ComputeGraphPtr Build(graphStatus &error_code, std::string &error_msg) override; - - private: - /// - /// @brief Add data nodes - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void AddDataNodes(graphStatus &error_code, std::string &error_msg); - - /// - /// @brief Add data node - /// @param [in] index - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - NodePtr AddDataNode(uint32_t index, graphStatus &error_code, std::string &error_msg); - - /// - /// @brief Add RetVal nodes - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void AddRetValNodes(graphStatus &error_code, std::string &error_msg); - - /// - /// @brief Build target-nodes for graph - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void BuildGraphTargets(graphStatus &error_code, std::string &error_msg); - - /// - /// @brief Add NetOutput node - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void AddNetOutputNode(graphStatus &error_code, std::string &error_msg); - - /// - /// @brief Build NetOutput nodes with data & ctrl edges - /// @param [in] net_output_desc - /// @param [in] peer_out_anchors - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void BuildNetOutputNodeWithLink(const OpDescPtr &net_output_desc, - const std::vector &peer_out_anchors, - graphStatus &error_code, std::string &error_msg); - - /// - /// @brief process after build - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void PostProcess(graphStatus &error_code, std::string &error_msg); - - std::string name_; - NodePtr parent_node_; - bool retval_flag_; - std::map, std::vector>> graph_inputs_; - std::vector> graph_outputs_; - std::vector graph_targets_; - - // index_of_graph_input -> in_anchor_index_of_parent_node - std::map input_mapping_; - // index_of_graph_output -> out_anchor_index_of_parent_node - std::map output_mapping_; -}; - -class PartialGraphBuilder : public ComputeGraphBuilder { - public: - PartialGraphBuilder() = default; - PartialGraphBuilder(const PartialGraphBuilder &) = delete; - PartialGraphBuilder &operator=(const PartialGraphBuilder &) = delete; - PartialGraphBuilder(const PartialGraphBuilder &&) = delete; - PartialGraphBuilder &operator=(const PartialGraphBuilder &&) = delete; - ~PartialGraphBuilder() = default; - - /// - /// @brief Add node to graph - /// @param [in] op_desc - /// @return PartialGraphBuilder - /// - PartialGraphBuilder &AddNode(const OpDescPtr &op_desc) override; - - /// - /// @brief Add data-link among nodes in graph - /// @param [in] src_name - /// @param [in] out_anchor_ind 
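The builder interfaces above are meant to be chained. A hedged sketch of CompleteGraphBuilder producing a one-op body graph for a parent node follows; the op_desc parameter, the include path, and the element types of the brace-initialized containers (node names, anchor indices, index maps) are assumptions based on the parameter names, since the template arguments were stripped from this extracted patch.

// Hedged sketch: build a subgraph with a single compute node fed by graph input 0
// and exposing its out-anchor 0 as graph output 0, attached to `parent_node`.
#include "graph/utils/graph_utils.h"

ge::ComputeGraphPtr BuildUnaryBody(const ge::OpDescPtr &op_desc, const ge::NodePtr &parent_node,
                                   ge::graphStatus &error_code, std::string &error_msg) {
  ge::CompleteGraphBuilder builder("unary_body");
  builder.AddNode(op_desc)                        // the single compute node
      .SetInput(0U, {op_desc->GetName()}, {0U})   // graph input 0 feeds its in-anchor 0
      .AddOutput(op_desc->GetName(), 0U)          // its out-anchor 0 becomes graph output 0
      .SetParentNode(parent_node)
      .SetInputMapping({{0U, 0U}})                // graph input 0  <- parent in-anchor 0
      .SetOutputMapping({{0U, 0U}});              // graph output 0 -> parent out-anchor 0
  return builder.Build(error_code, error_msg);
}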
- /// @param [in] dst_name - /// @param [in] in_anchor_ind - /// @return PartialGraphBuilder - /// - PartialGraphBuilder &AddDataLink(const std::string &src_name, uint32_t out_anchor_ind, - const std::string &dst_name, uint32_t in_anchor_ind) override; - - /// - /// @brief Add ctrl-link among nodes in graph - /// @param [in] src_name - /// @param [in] dst_name - /// @return PartialGraphBuilder - /// - PartialGraphBuilder &AddControlLink(const std::string &src_name, const std::string &dst_name) override; - - /// - /// @brief Set owner graph - /// @param [in] graph - /// @return PartialGraphBuilder - /// - PartialGraphBuilder &SetOwnerGraph(const ComputeGraphPtr &graph); - - /// - /// @brief Add exist node - /// @param [in] node - /// @return PartialGraphBuilder - /// - PartialGraphBuilder &AddExistNode(const NodePtr &node); - - /// - /// @brief Build multi nodes with links - /// @param [out] error_code - /// @param [out] error_msg - /// @return ComputeGraphPtr - /// - ComputeGraphPtr Build(graphStatus &error_code, std::string &error_msg) override; - - private: - /// - /// @brief Build exist nodes - /// @param [out] error_code - /// @param [out] error_msg - /// @return void - /// - void BuildExistNodes(graphStatus &error_code, std::string &error_msg); - - std::vector exist_nodes_; -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_GRAPH_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/graph_utils_ex.h b/inc/metadef/inc/graph/utils/graph_utils_ex.h deleted file mode 100644 index a367f734e..000000000 --- a/inc/metadef/inc/graph/utils/graph_utils_ex.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __INC_METADEF_GRAPH_UTILS_EX_H -#define __INC_METADEF_GRAPH_UTILS_EX_H - -#include "graph/node.h" -#include "graph/compute_graph.h" -#include "external/graph/graph.h" - -namespace ge { -class GraphUtilsEx { - public: - // Detach from ComputeGraph - static graphStatus Verify(const ComputeGraphPtr &graph); - static graphStatus InferOriginFormat(const ComputeGraphPtr &graph); - static graphStatus InferShapeInNeed(const ComputeGraphPtr &graph); - - // Detach from GraphUtils - static ComputeGraphPtr GetComputeGraph(const Graph &graph); - static ComputeGraphPtr CreateGraphFromOperator(const std::string &name, const std::vector &inputs); - static Graph CreateGraphFromComputeGraph(const ComputeGraphPtr compute_graph); - static GraphPtr CreateGraphPtrFromComputeGraph(const ComputeGraphPtr compute_graph); - static void BreakConnect(const std::map &all_nodes_infos); - static graphStatus RecoverGraphOperators(const Graph &graph); - static graphStatus CopyGraph(const Graph &src_graph, Graph &dst_graph); - - private: - static graphStatus CopyGraphImpl(const Graph &src_graph, Graph &dst_graph, - const std::map &node_old_2_new, - const std::map &op_desc_old_2_new); -}; -} // namespace ge -#endif // __INC_METADEF_GRAPH_UTILS_EX_H diff --git a/inc/metadef/inc/graph/utils/node_adapter.h b/inc/metadef/inc/graph/utils/node_adapter.h deleted file mode 100644 index 19d765438..000000000 --- a/inc/metadef/inc/graph/utils/node_adapter.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_UTILS_NODE_ADAPTER_H_ -#define INC_GRAPH_UTILS_NODE_ADAPTER_H_ - -#include "graph/gnode.h" -#include "graph/node.h" - -namespace ge { -using NodePtr = std::shared_ptr; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY NodeAdapter { - public: - static GNode Node2GNode(const NodePtr &node); - static NodePtr GNode2Node(const GNode &node); - static GNodePtr Node2GNodePtr(const NodePtr &node); -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_NODE_ADAPTER_H_ diff --git a/inc/metadef/inc/graph/utils/node_utils.h b/inc/metadef/inc/graph/utils/node_utils.h deleted file mode 100644 index 8f1dfd6b4..000000000 --- a/inc/metadef/inc/graph/utils/node_utils.h +++ /dev/null @@ -1,222 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
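The two adapters above bridge the external Graph/GNode API and the internal ComputeGraph/Node types. A minimal round-trip sketch, with include paths assumed from the include guards:

// Hedged sketch: hop between the external and internal graph representations.
#include "graph/utils/graph_utils_ex.h"
#include "graph/utils/node_adapter.h"

void InspectGraph(const ge::Graph &graph) {
  // External Graph -> the internal ComputeGraph behind it.
  const ge::ComputeGraphPtr compute_graph = ge::GraphUtilsEx::GetComputeGraph(graph);
  if (compute_graph == nullptr) {
    return;
  }
  for (const ge::NodePtr &node : compute_graph->GetAllNodes()) {
    // Internal Node -> external GNode, e.g. to reuse GNode-based inspection code.
    const ge::GNode gnode = ge::NodeAdapter::Node2GNode(node);
    (void)gnode;
  }
  // And back: wrap the ComputeGraph in an external Graph handle.
  const ge::Graph external_graph = ge::GraphUtilsEx::CreateGraphFromComputeGraph(compute_graph);
  (void)external_graph;
}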
- */ - -#ifndef INC_GRAPH_UTILS_NODE_UTILS_H_ -#define INC_GRAPH_UTILS_NODE_UTILS_H_ - -#include -#include -#include -#include "external/graph/operator.h" -#include "graph/node.h" - -namespace ge { -// Op types of Const like Opps. -extern const std::set kConstOpTypes; - -// Op types of Enter like Opps. -extern const std::set kEnterOpTypes; -// Op types of Merge like Opps. -extern const std::set kMergeOpTypes; -// Op types of Switch like Opps. -extern const std::set kSwitchOpTypes; -// Op types of NextIteration like Opps. -extern const std::set kNextIterationOpTypes; -// Op types of Exit like Opps. -extern const std::set kExitOpTypes; - -// Op types of If like Opps. -extern const std::set kIfOpTypes; -// Op types of While like Opps. -extern const std::set kWhileOpTypes; -// Op types of Case like Opps. -extern const std::set kCaseOpTypes; -// Op types of For like Opps. -extern const std::set kForOpTypes; - -class NodeUtils { - public: - static graphStatus AddSendEventId(const NodePtr &node, const uint32_t &event_id); - static graphStatus AddRecvEventId(const NodePtr &node, const uint32_t &event_id); - static graphStatus GetSendEventIdList(const NodePtr &node, std::vector &vec_send); - static graphStatus GetRecvEventIdList(const NodePtr &node, std::vector &vec_recv); - - static graphStatus ClearSendInfo(); - static graphStatus ClearRecvInfo(); - - static graphStatus GetSingleOutputNodeOfNthLayer(const NodePtr &src, int depth, NodePtr &dst); - - static graphStatus GetDataOutAnchorAndControlInAnchor(const NodePtr &node_ptr, OutDataAnchorPtr &out_data, - InControlAnchorPtr &in_control); - - static graphStatus ClearInDataAnchor(const NodePtr &node_ptr, const InDataAnchorPtr &in_data_anchor); - static graphStatus SetAllAnchorStatus(const NodePtr &nodePtr); - static graphStatus SetAllAnchorStatus(Node &node); - static bool IsAnchorStatusSet(const NodePtr &nodePtr); - static bool IsAnchorStatusSet(const Node &node); - - static graphStatus MoveOutputEdges(const NodePtr &origin_node, const NodePtr &new_node); - - static void UpdateIsInputConst(const NodePtr &nodePtr); - static void UpdateIsInputConst(Node &node); - static bool IsConst(const Node &node); - static void UnlinkAll(const Node &node); - static graphStatus UpdatePeerNodeInputDesc(const NodePtr &node_ptr); - - static graphStatus AppendInputAnchor(const NodePtr &node, uint32_t num); - static graphStatus RemoveInputAnchor(const NodePtr &node, uint32_t num); - - static graphStatus AppendOutputAnchor(const NodePtr &node, uint32_t num); - static graphStatus RemoveOutputAnchor(const NodePtr &node, uint32_t num); - - static bool IsInNodesEmpty(const Node &node); - static GeTensorDesc GetOutputDesc(const Node &node, uint32_t index); - static GeTensorDesc GetInputDesc(const Node &node, uint32_t index); - static graphStatus UpdateOutputShape(const Node &node, uint32_t index, const GeShape &shape); - static graphStatus UpdateInputShape(const Node &node, uint32_t index, const GeShape &shape); - // check node whether unknown shape.If node shape contain -1 or -2,out param "is_unknow" will be true; - // for func op, it will check subgraph yet, if some node shape of subgraph contain -1 or -2, - // the out param "is_unknow" will be true too - static graphStatus GetNodeUnknownShapeStatus(const Node &node, bool &is_unknow); - - static std::string GetNodeType(const Node &node); - static std::string GetNodeType(const NodePtr &node); - - static std::vector GetAllSubgraphs(const Node &node); - static graphStatus GetDirectSubgraphs(const NodePtr &node, std::vector 
&subgraphs); - static ComputeGraphPtr GetSubgraph(const Node &node, uint32_t index); - static graphStatus SetSubgraph(Node &node, uint32_t index, const ComputeGraphPtr &subgraph); - static NodePtr CreatNodeWithoutGraph(const OpDescPtr op_desc); - /// - /// Check if node is input of subgraph - /// @param [in] node - /// @return bool - /// - static bool IsSubgraphInput(const NodePtr &node); - - /// - /// Check if node is output of subgraph - /// @param [in] node - /// @return bool - /// - static bool IsSubgraphOutput(const NodePtr &node); - - /// - /// @brief Get subgraph original input node. - /// @param [in] node - /// @return Node - /// - static NodePtr GetParentInput(const Node &node); - static NodePtr GetParentInput(const NodePtr &node); - /// - /// @brief Get subgraph original input node and corresponding out_anchor. - /// @param [in] node - /// @return NodeToOutAnchor node and out_anchor which linked to in_param node - /// - static NodeToOutAnchor GetParentInputAndAnchor(const NodePtr &node); - - /// - /// @brief Get is dynamic shape graph from node. - /// @param [in] node - /// @return bool - /// - static bool IsDynamicShape(const Node &node); - static bool IsDynamicShape(const NodePtr &node); - - /// - /// @brief Check is varying_input for while node - /// @param [in] node: Data node for subgraph - /// @return bool - /// - static bool IsWhileVaryingInput(const ge::NodePtr &node); - - /// - /// @brief Get subgraph input is constant. - /// @param [in] node - /// @param [out] string - /// @return bool - /// - static bool GetConstOpType(const NodePtr &node, std::string &type); - - /// - /// @brief Remove node-related subgraphs, including subgraphs of nodes in the subgraph. - /// @param [in] node - /// @return return GRAPH_SUCCESS if remove successfully, other for failed. - /// - static graphStatus RemoveSubgraphsOnNode(const NodePtr &node); - - /// - /// @brief Get subgraph input data node by index. - /// @param [in] node - /// @return Node - /// - static std::vector GetSubgraphDataNodesByIndex(const Node &node, int index); - - /// - /// @brief Get subgraph input data node by index. - /// @param [in] node - /// @return Node - /// - static std::vector GetSubgraphOutputNodes(const Node &node); - - static NodePtr GetInDataNodeByIndex(const Node &node, const int index); - - static std::vector> GetOutDataNodesWithAnchorByIndex(const Node &node, const int index); - - static ge::ConstNodePtr GetNodeFromOperator(const Operator &oprt); - - static graphStatus GetInputConstData(const ConstNodePtr& node_ptr, const std::string &dst_name, GeTensorPtr &ge_tensor); - - static graphStatus GetInputConstData(const Node &node, const std::string &dst_name, GeTensorPtr &ge_tensor); - - /// - /// @brief Get node type in cross subgragh. - /// @param [in] node - /// @return type - /// - static std::string GetInConstNodeTypeCrossSubgraph(const ge::NodePtr &node); - - /// - /// @brief Get node in cross subgragh. - /// @param [in] node - /// @return Node - /// - static NodePtr GetInNodeCrossSubgraph(const ge::NodePtr &node); - - /// - /// @brief Get peer input node, supported get cross PartitionedCall . 
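A small illustration of the NodeUtils queries above (unknown-shape detection and subgraph-input resolution); the include path is an assumption based on the include guard, and the -1/-2 convention comes from the doc comment:

// Hedged sketch: decide whether a node needs the dynamic-shape path.
#include "graph/utils/node_utils.h"

bool NeedsDynamicShapeCompile(const ge::NodePtr &node) {
  if (node == nullptr) {
    return false;
  }
  bool is_unknown = false;
  // Shapes containing -1 or -2 (including inside subgraphs of func ops) mark the node as unknown.
  if (ge::NodeUtils::GetNodeUnknownShapeStatus(*node, is_unknown) != ge::GRAPH_SUCCESS) {
    return true;  // be conservative if the query itself fails
  }
  // A Data node inside a subgraph resolves its real producer through the parent node.
  if (ge::NodeUtils::IsSubgraphInput(node)) {
    const ge::NodePtr parent_input = ge::NodeUtils::GetParentInput(node);
    (void)parent_input;
  }
  return is_unknown || ge::NodeUtils::IsDynamicShape(node);
}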
- /// @param [in] node, current node - /// @param [in] index, current node the index'th input, if it is PartionedCall's subgraph Data, please assign 0 - /// @param [out] peer_node, - /// A(PartionedCall_0)->B(PartionedCall_1) - /// PartionedCall_0's subgraph: Data->A->Netoutput - /// PartionedCall_1's subgraph: Data1->B->Netoutput - /// If it is called like GetInNodeCrossPartionCallNode(B,0,peer_node)or(Data1,0,peer_node), peer_node is A - /// @return [graphStatus] running result of this function - /// - static graphStatus GetInNodeCrossPartionedCallNode(const NodePtr &node, uint32_t index, NodePtr &peer_node); - - static graphStatus SetNodeParallelGroup(Node &node, const char *group_name); - - static graphStatus UpdateInputOriginalShapeAndShape(const Node &node, uint32_t index, const GeShape &shape); - static graphStatus UpdateOutputOriginalShapeAndShape(const Node &node, uint32_t index, const GeShape &shape); - -private: - static std::map> map_send_info_; - static std::map> map_recv_info_; -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_NODE_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/op_desc_utils.h b/inc/metadef/inc/graph/utils/op_desc_utils.h deleted file mode 100644 index 44a5f2a45..000000000 --- a/inc/metadef/inc/graph/utils/op_desc_utils.h +++ /dev/null @@ -1,186 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
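The cross-PartitionedCall lookup documented at the end of the NodeUtils chunk above is easiest to see in code. A hedged sketch (include path assumed), following the A(PartionedCall_0) -> B(PartionedCall_1) example from the doc comment:

// Hedged sketch: resolve which node really produces input `index` of `node`,
// looking through PartitionedCall boundaries as described above.
#include "graph/utils/node_utils.h"

ge::NodePtr ResolveRealProducer(const ge::NodePtr &node, uint32_t index) {
  ge::NodePtr peer_node = nullptr;
  if (ge::NodeUtils::GetInNodeCrossPartionedCallNode(node, index, peer_node) != ge::GRAPH_SUCCESS) {
    return nullptr;
  }
  // For B, or for Data1 inside B's subgraph with index 0, peer_node resolves to A.
  return peer_node;
}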
- */ - -#ifndef INC_GRAPH_UTILS_OP_DESC_UTILS_H_ -#define INC_GRAPH_UTILS_OP_DESC_UTILS_H_ - -#include -#include -#include -#include "graph/def_types.h" -#include "graph/node.h" -#include "graph/op_desc.h" -#include "graph/operator.h" -#include "graph/range_vistor.h" - -namespace ge { -class OpDesc; -using OpDescPtr = std::shared_ptr; -using ConstGeTensorBarePtr = const GeTensor*; - -class OpDescUtils { - public: - template - using Vistor = RangeVistor>; - - OpDescUtils() = default; - ~OpDescUtils() = default; - static bool HasQuantizeFactorParams(const OpDescPtr& op_desc); - static bool HasQuantizeFactorParams(const OpDesc& op_desc); - static std::vector GetConstInputNode(const ge::Node& node); - static std::vector GetConstInputNodeAndAnchor(const ge::Node &node); - static std::vector GetInputData(const std::vector& input_nodes); - static std::vector GetWeightsFromNodes(const std::vector& input_nodes_2_out_anchors); - - static std::vector GetWeights(const ge::Node& node); - static std::vector GetWeights(const ge::ConstNodePtr& node); - static std::vector MutableWeights(const ge::Node& node); - static std::vector MutableWeights(const ge::NodePtr node); - static graphStatus SetWeights(ge::Node& node, const std::vector& weights); - static graphStatus SetWeights(ge::NodePtr node, const std::vector &weights); - static graphStatus SetWeights(ge::Node &node, const std::map &weights_map); - static graphStatus ClearWeights(ge::NodePtr node); - - static bool ClearInputDesc(ge::OpDescPtr op_desc, uint32_t index); - static bool ClearInputDesc(const ge::NodePtr& node); - static bool ClearOutputDesc(const ge::OpDescPtr& op_desc, uint32_t index); - static bool ClearOutputDesc(const ge::NodePtr& node); - static std::vector GetConstInputs(const ge::Node& node); - static std::vector GetConstInputs(const ge::ConstNodePtr& node); - static size_t GetNonConstInputsSize(const ge::Node& node); - static size_t GetNonConstInputsSize(ge::ConstNodePtr node); - // Index: Indicates the index of all non const inputs - static GeTensorDesc GetNonConstInputTensorDesc(const ge::Node& node, size_t index_non_const = 0); - static GeTensorDesc GetNonConstInputTensorDesc(const ge::ConstNodePtr& node, size_t index_non_const = 0); - static bool GetNonConstInputIndex(const ge::Node& node, size_t index_non_const, size_t& index); - static bool GetNonConstInputIndex(const ge::ConstNodePtr& node, size_t index_non_const, size_t& index); - // Index: Indicates the index of all inputs - static bool IsNonConstInput(const ge::Node& node, size_t index = 0); - static bool IsNonConstInput(const ge::ConstNodePtr& node, size_t index = 0); - - static std::vector GetNonConstTensorDesc(const ge::ConstNodePtr& node); - static graphStatus AddConstOpToAnchor(InDataAnchorPtr in_anchor, const GeTensorPtr& tensor_ptr); - - static Operator CreateOperatorFromOpDesc(OpDescPtr op_desc); - static Operator CreateOperatorFromNode(ge::ConstNodePtr node_ptr); - static OpDescPtr GetOpDescFromOperator(const Operator& oprt); - static graphStatus CopyOperatorLinks(const std::map &src_op_list, - std::map &dst_op_list); - static graphStatus CopyOperators(ComputeGraphPtr &dst_compute_graph, - const std::map &node_old_2_new, - const std::map &op_desc_old_2_new, - const std::map &src_op_list, - std::map &dst_op_list); - static OpDescPtr CreateConstOp(const GeTensorPtr& tensor_ptr); - - static graphStatus SetSubgraphInstanceName(const std::string &subgraph_name, - const std::string &subgraph_instance_name, OpDescPtr &op_desc); - static ConstGeTensorBarePtr 
GetInputConstData(const Operator &op, uint32_t idx); - private: - static GeTensorPtr MutableWeights(ge::OpDesc& op_desc); - static GeTensorPtr MutableWeights(ge::OpDescPtr op_desc); - static graphStatus SetWeights(ge::OpDesc& op_desc, const GeTensorPtr weight); - static graphStatus SetWeights(ge::OpDescPtr op_desc, const GeTensorPtr weight); -}; - -class OpDescBuilder { - public: - OpDescBuilder(std::string name, std::string type) : name_(std::move(name)), type_(std::move(type)) {} - OpDescBuilder(const OpDescBuilder &) = delete; - OpDescBuilder &operator=(const OpDescBuilder &) = delete; - OpDescBuilder(const OpDescBuilder &&) = delete; - OpDescBuilder &operator=(const OpDescBuilder &&) = delete; - ~OpDescBuilder() = default; - - /// - /// @brief Add input - /// @param [in] name - /// @return OpDescBuilder - /// - OpDescBuilder& AddInput(const std::string &name); - - /// - /// @brief Add input - /// @param [in] name - /// @param [in] tensor - /// @return OpDescBuilder - /// - OpDescBuilder& AddInput(const std::string &name, const GeTensorDesc &tensor); - - /// - /// @brief Add dynamic input - /// @param [in] name - /// @param [in] num - /// @return OpDescBuilder - /// - OpDescBuilder& AddDynamicInput(const std::string &name, uint32_t num); - - /// - /// @brief Add dynamic input - /// @param [in] name - /// @param [in] num - /// @param [in] tensor - /// @return OpDescBuilder - /// - OpDescBuilder& AddDynamicInput(const std::string &name, uint32_t num, const GeTensorDesc &tensor); - - /// - /// @brief Add output - /// @param [in] name - /// @return OpDescBuilder - /// - OpDescBuilder& AddOutput(const std::string &name); - - /// - /// @brief Add output - /// @param [in] name - /// @param [in] tensor - /// @return OpDescBuilder - /// - OpDescBuilder& AddOutput(const std::string &name, const GeTensorDesc &tensor); - - /// - /// @brief Add dynamic output - /// @param [in] name - /// @param [in] num - /// @return OpDescBuilder - /// - OpDescBuilder& AddDynamicOutput(const std::string &name, uint32_t num); - - /// - /// @brief Add dynamic output - /// @param [in] name - /// @param [in] num - /// @param [in] tensor - /// @return OpDescBuilder - /// - OpDescBuilder& AddDynamicOutput(const std::string &name, uint32_t num, const GeTensorDesc &tensor); - - /// - /// @brief Build op_desc - /// @return OpDescPtr - /// - OpDescPtr Build(); - - private: - std::string name_; - std::string type_; - std::vector> inputs_; - std::vector> outputs_; -}; -} // namespace ge - -#endif // INC_GRAPH_UTILS_OP_DESC_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/tensor_adapter.h b/inc/metadef/inc/graph/utils/tensor_adapter.h deleted file mode 100644 index e5f55a6c0..000000000 --- a/inc/metadef/inc/graph/utils/tensor_adapter.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
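OpDescBuilder above is a fluent builder; combined with OpDescUtils::CreateOperatorFromOpDesc it yields an Operator backed directly by the assembled OpDesc. A hedged sketch with placeholder names and an assumed include path:

// Hedged sketch: assemble an Add-style OpDesc and wrap it as an Operator.
#include "graph/utils/op_desc_utils.h"

ge::Operator MakeAddOperator() {
  ge::OpDescBuilder builder("add_0", "Add");  // hypothetical node name / op type
  const ge::OpDescPtr op_desc = builder.AddInput("x1")
                                       .AddInput("x2")
                                       .AddOutput("y")
                                       .Build();
  if (op_desc == nullptr) {
    return ge::Operator();  // Build() failed
  }
  return ge::OpDescUtils::CreateOperatorFromOpDesc(op_desc);
}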
- */ - -#ifndef INC_GRAPH_UTILS_TENSOR_ADAPTER_H_ -#define INC_GRAPH_UTILS_TENSOR_ADAPTER_H_ - -#include -#include "graph/ge_tensor.h" -#include "graph/tensor.h" - -namespace ge { -using GeTensorPtr = std::shared_ptr; -using ConstGeTensorPtr = std::shared_ptr; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TensorAdapter { - public: - static GeTensorDesc TensorDesc2GeTensorDesc(const TensorDesc &tensorDesc); - static TensorDesc GeTensorDesc2TensorDesc(const GeTensorDesc &geTensorDesc); - static GeTensorPtr Tensor2GeTensor(const Tensor &tensor); - static Tensor GeTensor2Tensor(const ConstGeTensorPtr &geTensor); - - static ConstGeTensorPtr AsGeTensorPtr(const Tensor &tensor); // Share value - static GeTensorPtr AsGeTensorPtr(Tensor &tensor); // Share value - static const GeTensor AsGeTensor(const Tensor &tensor); // Share value - static GeTensor AsGeTensor(Tensor &tensor); // Share value - static const Tensor AsTensor(const GeTensor &tensor); // Share value - static Tensor AsTensor(GeTensor &tensor); // Share value - static GeTensor AsGeTensorShared(const Tensor &tensor); - static GeTensor NormalizeGeTensor(const GeTensor &tensor); -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_TENSOR_ADAPTER_H_ diff --git a/inc/metadef/inc/graph/utils/tensor_utils.h b/inc/metadef/inc/graph/utils/tensor_utils.h deleted file mode 100644 index 1bc9689d1..000000000 --- a/inc/metadef/inc/graph/utils/tensor_utils.h +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
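Per the "Share value" comments, TensorAdapter offers both converting calls (Tensor2GeTensor / GeTensor2Tensor) and sharing calls (AsGeTensor / AsTensor and the pointer variants). A minimal sketch, include path assumed:

// Hedged sketch: move a tensor across the external/internal boundary.
#include "graph/utils/tensor_adapter.h"

ge::GeTensorPtr ToInternal(const ge::Tensor &tensor) {
  // External Tensor -> internal GeTensor.
  return ge::TensorAdapter::Tensor2GeTensor(tensor);
}

ge::Tensor ToExternal(const ge::ConstGeTensorPtr &ge_tensor) {
  // Internal GeTensor -> external Tensor handle.
  return ge::TensorAdapter::GeTensor2Tensor(ge_tensor);
}

ge::GeTensor ShareInternal(ge::Tensor &tensor) {
  // Sharing variant: the returned GeTensor aliases the Tensor's value rather than copying it.
  return ge::TensorAdapter::AsGeTensor(tensor);
}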
- */ - -#ifndef INC_GRAPH_UTILS_TENSOR_UTILS_H_ -#define INC_GRAPH_UTILS_TENSOR_UTILS_H_ - -#include -#include "graph/def_types.h" -#include "graph/ge_error_codes.h" -#include "graph/ge_tensor.h" - -namespace ge { -class TensorUtils { - public: - static GeTensor CreateShareTensor(const GeTensor &other); - static GeTensor CreateShareTensor(const GeTensorDesc &tensorDesc, - std::shared_ptr aligned_ptr, - size_t size); - static void ShareTensor(const GeTensor &from, GeTensor &to); - static TensorData CreateShareTensorData(const TensorData &other); - static void ShareTensorData(const TensorData &from, TensorData &to); - static void ShareAlignedPtr(std::shared_ptr ptr, size_t size, TensorData &to); - static void ShareAlignedPtr(std::shared_ptr ptr, size_t size, GeTensor &to); - static void CopyTensor(const GeTensor &from, GeTensor &to); - static ge::graphStatus GetSize(const GeTensorDesc &tensorDesc, int64_t &size); - static void SetSize(GeTensorDesc &tensorDesc, int64_t size); - static uint32_t GetWeightSize(const ConstGeTensorPtr &tensorPtr); - static uint32_t GetWeightSize(const GeTensor &tensor); - static uint32_t GetWeightSize(const GeTensorDesc &tensorDesc); - static uint8_t *GetWeightAddr(const ConstGeTensorPtr &tensorPtr, uint8_t *base); - static uint8_t *GetWeightAddr(const GeTensor &tensor, uint8_t *base); - static void SetWeightSize(GeTensorDesc &tensorDesc, uint32_t size); - static ge::graphStatus GetReuseInput(const GeTensorDesc &tensorDesc, bool &flag); - static void SetReuseInput(GeTensorDesc &tensorDesc, bool flag); - static ge::graphStatus GetOutputTensor(const GeTensorDesc &tensorDesc, bool &flag); - static void SetOutputTensor(GeTensorDesc &tensorDesc, bool flag); - static graphStatus GetDeviceType(const GeTensorDesc &tensorDesc, DeviceType &type); - static void SetDeviceType(GeTensorDesc &tensorDesc, DeviceType type); - static ge::graphStatus GetInputTensor(const GeTensorDesc &tensorDesc, bool &flag); - static void SetInputTensor(GeTensorDesc &tensorDesc, bool flag); - static ge::graphStatus GetRealDimCnt(const GeTensorDesc &tensorDesc, uint32_t &cnt); - static void SetRealDimCnt(GeTensorDesc &tensorDesc, uint32_t cnt); - static ge::graphStatus GetReuseInputIndex(const GeTensorDesc &tensorDesc, uint32_t &idx); - static void SetReuseInputIndex(GeTensorDesc &tensorDesc, uint32_t idx); - static ge::graphStatus GetDataOffset(const GeTensorDesc &tensorDesc, int64_t &offset); - static void SetDataOffset(GeTensorDesc &tensorDesc, int64_t offset); - static ge::graphStatus GetRC(const GeTensorDesc &tensorDesc, uint32_t &rc); - static void SetRC(GeTensorDesc &tensorDesc, uint32_t rc); - - /// - /// calculate tensor mem size. 
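Most of the TensorUtils entries above are get/set pairs over side-band attributes of GeTensorDesc. A hedged sketch (include path assumed) of stamping a size and an input-reuse relationship onto an output desc:

// Hedged sketch: annotate an output GeTensorDesc with its size and input-reuse info.
#include "graph/utils/tensor_utils.h"

ge::graphStatus MarkReuse(ge::GeTensorDesc &output_desc, uint32_t reused_input_idx, int64_t byte_size) {
  ge::TensorUtils::SetSize(output_desc, byte_size);        // total buffer size in bytes
  ge::TensorUtils::SetReuseInput(output_desc, true);       // this output shares an input's buffer
  ge::TensorUtils::SetReuseInputIndex(output_desc, reused_input_idx);
  int64_t checked_size = 0;
  // Read the size back, mainly to show the paired getter and its status return.
  return ge::TensorUtils::GetSize(output_desc, checked_size);
}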
- /// @param shape tensor shape - /// @param format tensor format - /// @param data_type tensor data type - /// @param mem_size -1 means unknown shape,other means mem size - /// @return GRAPH_SUCCESS:success, other:failed - /// - static ge::graphStatus CalcTensorMemSize(const GeShape &shape, Format format, DataType data_type, int64_t &mem_size); - static ge::graphStatus CalcTensorMemSizeForNoTiling(const GeTensorDesc &tensor, Format format, DataType data_type, - int64_t &mem_size); - static ge::graphStatus GetTensorMemorySizeInBytes(const GeTensorDesc &desc_temp, int64_t &size_temp); - static ge::graphStatus GetTensorSizeInBytes(const GeTensorDesc &desc_temp, int64_t &size_temp); - static ge::graphStatus CheckShapeByShapeRange(const GeShape &shape, - const std::vector> &shape_range); -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_TENSOR_UTILS_H_ diff --git a/inc/metadef/inc/graph/utils/type_utils.h b/inc/metadef/inc/graph/utils/type_utils.h deleted file mode 100644 index fecba6b9a..000000000 --- a/inc/metadef/inc/graph/utils/type_utils.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_GRAPH_UTILS_TYPE_UTILS_H_ -#define INC_GRAPH_UTILS_TYPE_UTILS_H_ - -#include -#include -#include -#include -#include "graph/def_types.h" -#include "graph/ge_error_codes.h" -#include "graph/types.h" -#include "register/register_types.h" -#include "external/register/register_fmk_types.h" - -namespace ge { -class TypeUtils { - public: - static bool IsDataTypeValid(DataType dt); - static bool IsFormatValid(Format format); - static bool IsDataTypeValid(std::string dt); // for user json input - static bool IsFormatValid(std::string format); // for user json input - static bool IsInternalFormat(Format format); - - static std::string ImplyTypeToSerialString(domi::ImplyType imply_type); - static std::string DataTypeToSerialString(DataType data_type); - static DataType SerialStringToDataType(const std::string &str); - static std::string FormatToSerialString(Format format); - static Format SerialStringToFormat(const std::string &str); - static Format DataFormatToFormat(const std::string &str); - static graphStatus SplitFormatFromStr(const std::string &str, std::string &primary_format_str, int32_t &sub_format); - static Format DomiFormatToFormat(domi::domiTensorFormat_t domi_format); - static std::string FmkTypeToSerialString(domi::FrameworkType fmk_type); - - static bool GetDataTypeLength(ge::DataType data_type, uint32_t &length); - static bool CheckUint64MulOverflow(uint64_t a, uint32_t b); -}; -} // namespace ge -#endif // INC_GRAPH_UTILS_TYPE_UTILS_H_ diff --git a/inc/metadef/inc/register/custom_pass_helper.h b/inc/metadef/inc/register/custom_pass_helper.h deleted file mode 100644 index f2ac7a751..000000000 --- a/inc/metadef/inc/register/custom_pass_helper.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the 
"License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_CUSTOM_PASS_HELPER_H_ -#define INC_REGISTER_CUSTOM_PASS_HELPER_H_ - -#include -#include "external/ge/ge_api_error_codes.h" -#include "external/register/register_pass.h" -#include "external/register/register_types.h" - -namespace ge { -class CustomPassGreater : std::greater { - public: - bool operator()(const PassRegistrationData &a, const PassRegistrationData &b) const { - return a.GetPriority() < b.GetPriority(); - } -}; - -class CustomPassHelper { - public: - static CustomPassHelper &Instance(); - - void Insert(const PassRegistrationData &); - - Status Run(ge::GraphPtr &); - - ~CustomPassHelper() = default; - - private: - CustomPassHelper() = default; - std::multiset registration_datas_; -}; -} // namespace ge - -#endif // INC_REGISTER_CUSTOM_PASS_HELPER_H_ diff --git a/inc/metadef/inc/register/ffts_plus_task_update.h b/inc/metadef/inc/register/ffts_plus_task_update.h deleted file mode 100644 index fe236e122..000000000 --- a/inc/metadef/inc/register/ffts_plus_task_update.h +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_REGISTER_FFTS_PLUS_TASK_UPDATE_H_ -#define INC_REGISTER_FFTS_PLUS_TASK_UPDATE_H_ - -#include - -#include "graph/node.h" -#include "register/op_tiling_registry.h" -#include "runtime/rt_ffts_plus.h" -#include "external/ge/ge_api_error_codes.h" - -namespace ge { -struct AutoThreadSubTaskFlush { - int32_t device_id{0}; - void *args_base{nullptr}; - std::vector op_run_info; - - uintptr_t aic_non_tail_task_start_pc{0U}; - uintptr_t aic_tail_task_start_pc{0U}; - uint32_t aic_icache_prefetch_cnt{0U}; - - uintptr_t aiv_non_tail_task_start_pc{0U}; - uintptr_t aiv_tail_task_start_pc{0U}; - uint32_t aiv_icache_prefetch_cnt{0U}; -}; - -struct AutoThreadParam { - uint16_t thread_dim; // thread dim after Pre-Thread - uint32_t input_output_num; // input + output - std::vector task_addr_offset; // input + output + workspace -}; - -class FFTSPlusTaskUpdate { -public: - FFTSPlusTaskUpdate() = default; - virtual ~FFTSPlusTaskUpdate() = default; - - virtual Status GetAutoThreadParam(const NodePtr &node, const std::vector &op_run_info, - AutoThreadParam &auto_thread_param) { - return SUCCESS; - } - - virtual Status UpdateSubTaskAndCache(const NodePtr &node, const AutoThreadSubTaskFlush &sub_task_flush, - rtFftsPlusTaskInfo_t &ffts_plus_task_info) { - return SUCCESS; - } -}; -} // namespace ge -#endif // INC_REGISTER_FFTS_PLUS_TASK_UPDATE_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h b/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h deleted file mode 100644 index 809708fa3..000000000 --- a/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_CONSTANT_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_CONSTANT_H_ -#include -#include - -namespace fe { -const std::string UB_FUSION_OP_TYPE = "_ub_fusion_op_type"; -// add the op pattern -const std::string TBE_PATTERN_INPUT_NODE = "InputData"; -const std::string TBE_PATTERN_OP_TYPE_ANY = "OpTypeAny"; -const std::string TBE_PATTERN_OUTPUT_NODE = "OutputData"; -const std::string OP_PATTERN_ELEMWISE = "ElemWise"; -const std::string OP_PATTERN_COMMONREDUCE = "CommReduce"; -const std::string OP_PATTERN_BROAD_CAST = "Broadcast"; -const std::string OP_PATTERN_SEGMENT = "Segment"; -const std::string OP_PATTERN_MAXPOOL = "MaxPool"; -const std::string OP_PATTERN_CONV = "Convolution"; -const std::string OP_PATTERN_MATMUL = "Matmul"; -const std::string OP_PATTERN_BNUPDATE = "bn_update"; -const std::string OP_PATTERN_BNREDUCE = "bn_reduce"; -const std::string OP_PATTERN_CONV_BACKPROP_INPUT = "Conv2d_backprop_input"; -const std::string OP_PATTERN_DEPTHWISE_CONV = "DepthwiseConvolution"; -const std::string OP_PATTERN_QUANT = "quant"; -const std::string OP_PATTERN_DEQUANT = "dequant"; -const std::string OP_PATTERN_REQUANT = "requant"; -const std::string OP_PATTERN_POOL2D = "Pool2d"; -const std::string OP_PATTERN_ANTIQUANT = "anti_quant"; -const std::string OP_PATTERN_STRIDED_WRITE = "strided_write"; -const std::string OP_PATTERN_STRIDED_READ = "strided_read"; -const std::string OP_PATTERN_AIPP = "aipp"; -const std::string OP_PATTERN_CONFUSION_TRANSPOSE = "confusiontranspose"; -const std::string OP_PATTERN_DEQUANTS16 = "dequant_s16"; -const std::string OP_PATTERN_REQUANTS16 = "requant_s16"; -const std::string OP_PATTERN_READ_SELECT = "read_select"; -const std::string OP_PATTERN_WRITE_SELECT = "write_select"; -const std::string OP_PATTERN_BATCH_MATMUL = "BatchMatmul"; -const std::string OP_PATTERN_CONV3D = "Conv3d"; -const std::string OP_PATTERN_DROPOUTDOMASKV3D = "DropOutDoMaskV3D"; -const std::string OP_PATTERN_CONV3D_BACKPROP_INPUT = "Conv3d_backprop_input"; -const std::string OP_PATTERN_CONV_BACKPROP_FILTER = "Conv2d_backprop_filter"; -const std::string OP_PATTERN_GEMM = "GEMM"; - -const std::vector OP_PATTERN_VEC{OP_PATTERN_ELEMWISE, - OP_PATTERN_COMMONREDUCE, - OP_PATTERN_BROAD_CAST, - OP_PATTERN_SEGMENT, - OP_PATTERN_MAXPOOL, - OP_PATTERN_CONV, - OP_PATTERN_MATMUL, - OP_PATTERN_BNUPDATE, - OP_PATTERN_BNREDUCE, - OP_PATTERN_CONV_BACKPROP_INPUT, - OP_PATTERN_DEPTHWISE_CONV, - OP_PATTERN_QUANT, - OP_PATTERN_DEQUANT, - OP_PATTERN_REQUANT, - OP_PATTERN_POOL2D, - OP_PATTERN_ANTIQUANT, - OP_PATTERN_STRIDED_WRITE, - OP_PATTERN_STRIDED_READ, - OP_PATTERN_AIPP, - OP_PATTERN_CONFUSION_TRANSPOSE, - OP_PATTERN_DEQUANTS16, - OP_PATTERN_REQUANTS16, - OP_PATTERN_READ_SELECT, - OP_PATTERN_WRITE_SELECT, - OP_PATTERN_BATCH_MATMUL, - OP_PATTERN_CONV3D, - OP_PATTERN_DROPOUTDOMASKV3D, - OP_PATTERN_CONV3D_BACKPROP_INPUT, - OP_PATTERN_CONV_BACKPROP_FILTER, - OP_PATTERN_GEMM -}; -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_CONSTANT_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h b/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h deleted file mode 100644 index c62f64564..000000000 --- a/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this 
file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_BASE_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_BASE_H_ - -#include -#include -#include -#include "register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h" -#include "register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h" -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" -#include "register/graph_optimizer/fusion_common/op_slice_info.h" - -namespace fe { -enum BufferFusionPassType { - BUILT_IN_AI_CORE_BUFFER_FUSION_PASS, - BUILT_IN_VECTOR_CORE_BUFFER_FUSION_PASS, - CUSTOM_AI_CORE_BUFFER_FUSION_PASS, - CUSTOM_VECTOR_CORE_BUFFER_FUSION_PASS, - BUFFER_FUSION_PASS_TYPE_RESERVED -}; - -class BufferFusionPassBase { - public: - explicit BufferFusionPassBase(); - virtual ~BufferFusionPassBase(); - virtual std::vector DefinePatterns() = 0; - virtual Status GetFusionNodes(const BufferFusionMapping &mapping, std::vector &fusion_nodes); - virtual Status CalcFusionOpSliceInfo(std::vector &fusion_nodes, OpCalcInfo &op_slice_info); -#ifdef ONLY_COMPILE_OPEN_SRC - std::vector GetMatchedNodes(const BufferFusionMapping &mapping); -#else - static std::vector GetMatchedNodes(const BufferFusionMapping &mapping); -#endif -#ifdef ONLY_COMPILE_OPEN_SRC - std::vector GetMatchedNodesByDescName(const std::string &desc_name, - const BufferFusionMapping &mapping); -#else - static std::vector GetMatchedNodesByDescName(const std::string &desc_name, - const BufferFusionMapping &mapping); -#endif -#ifdef ONLY_COMPILE_OPEN_SRC - ge::NodePtr GetMatchedHeadNode(const std::vector &matched_nodes); -#else - static ge::NodePtr GetMatchedHeadNode(const std::vector &matched_nodes); -#endif - - void SetName(const std::string &name) { name_ = name; } - - std::string GetName() { return name_; } - - private: - std::string name_; -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_BASE_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_registry.h b/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_registry.h deleted file mode 100644 index c80483871..000000000 --- a/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_registry.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_REGISTRY_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_REGISTRY_H_ -#include -#include -#include -#include -#include "register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h" - -namespace fe { -class BufferFusionPassRegistry { - public: - using CreateFn = BufferFusionPassBase *(*)(); - ~BufferFusionPassRegistry(); - - static BufferFusionPassRegistry &GetInstance(); - - void RegisterPass(const BufferFusionPassType &pass_type, const std::string &pass_name, CreateFn create_fun); - - std::map GetCreateFnByType(const BufferFusionPassType &pass_type); - - private: - BufferFusionPassRegistry(); - class BufferFusionPassRegistryImpl; - std::unique_ptr impl_; -}; - -class BufferFusionPassRegistrar { - public: - BufferFusionPassRegistrar(const BufferFusionPassType &pass_type, const std::string &pass_name, - BufferFusionPassBase *(*create_fun)()); - ~BufferFusionPassRegistrar() {} -}; - -#define REGISTER_BUFFER_FUSION_PASS(pass_name, pass_type, pass_class) \ - REGISTER_BUFFER_FUSION_PASS_UNIQ_HELPER(__COUNTER__, pass_name, pass_type, pass_class) - -#define REGISTER_BUFFER_FUSION_PASS_UNIQ_HELPER(ctr, pass_name, pass_type, pass_class) \ - REGISTER_BUFFER_FUSION_PASS_UNIQ(ctr, pass_name, pass_type, pass_class) - -#define REGISTER_BUFFER_FUSION_PASS_UNIQ(ctr, pass_name, pass_type, pass_class) \ - static ::fe::BufferFusionPassRegistrar register_buffer_fusion_pass##ctr __attribute__((unused)) = \ - ::fe::BufferFusionPassRegistrar( \ - (pass_type), (pass_name), []()->::fe::BufferFusionPassBase * { return new (std::nothrow) pass_class();}) - -} // namespace fe -#endif // INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_REGISTRY_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h b/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h deleted file mode 100644 index 5846315b2..000000000 --- a/inc/metadef/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
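To tie the registrar macro above to the pass base class from the previous header, here is a hedged sketch of how a buffer-fusion pass is typically declared and registered. The element types of the vectors in BufferFusionPassBase were stripped from this extracted patch, so the sketch assumes DefinePatterns returns BufferFusionPattern pointers and GetFusionNodes fills ge::NodePtr values; BufferFusionPattern itself is declared in the buffer_fusion_pattern.h chunk that follows.

// Hedged sketch: a sample UB-fusion pass and its registration.
// Assumed element types: std::vector<BufferFusionPattern *> and std::vector<ge::NodePtr>.
#include "register/graph_optimizer/buffer_fusion/buffer_fusion_pass_registry.h"

namespace fe {
class SampleElemwiseFusionPass : public BufferFusionPassBase {
 public:
  std::vector<BufferFusionPattern *> DefinePatterns() override {
    // One pattern: an ElemWise op feeding another ElemWise op.
    auto *pattern = new (std::nothrow) BufferFusionPattern("SampleElemwisePattern");
    if (pattern == nullptr) {
      return {};
    }
    pattern->AddOpDesc("elemwise_0", {OP_PATTERN_ELEMWISE})
        .AddOpDesc("elemwise_1", {OP_PATTERN_ELEMWISE})
        .SetHead({"elemwise_0"})
        .SetOutputs("elemwise_0", {"elemwise_1"});
    return {pattern};
  }

  Status GetFusionNodes(const BufferFusionMapping &mapping, std::vector<ge::NodePtr> &fusion_nodes) override {
    fusion_nodes = GetMatchedNodes(mapping);  // default policy: fuse every matched node
    return SUCCESS;
  }
};

// Hook the pass into the built-in AI Core buffer-fusion phase.
REGISTER_BUFFER_FUSION_PASS("SampleElemwiseFusionPass", BUILT_IN_AI_CORE_BUFFER_FUSION_PASS,
                            SampleElemwiseFusionPass);
}  // namespace fe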
- */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PATTERN_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PATTERN_H_ -#include -#include -#include -#include "graph/debug/ge_attr_define.h" -#include "graph/utils/attr_utils.h" -#include "graph/utils/graph_utils.h" -#include "graph/utils/graph_utils_ex.h" - -namespace fe { -static const int TBE_FUSION_OP_NUM_MAX = 5; -static const int TBE_PATTERN_NUM_MAX = 5; -static const int TBE_PATTERN_NUM_NONE = 0; -static const int TBE_PATTERN_NUM_DEFAULT = 1; -static const int TBE_OUTPUT_BRANCH_DEFAULT = 0; -static const int TBE_OUTPUT_BRANCH_SINGLE = 1; -static const int TBE_OUTPUT_BRANCH_MULTI = 2; -static const int TBE_PATTERN_GROUPID_INVALID = -1; - -enum SkipStatus { DISABLED = 0, AVAILABLE = 1, SKIPPED = 2 }; - -enum ShapeTypeRule { IGNORE_SHAPE_TYPE = 0, ONLY_SUPPORT_STATIC, ONLY_SUPPORT_DYNAMIC }; - -struct BufferFusionOpDesc { - std::string desc_name; // description name - std::vector types; // description type - std::vector inputs; // all input op - std::vector outputs; // all output op - int64_t out_branch_type; // out desc type, 1:single, 2: multi - int64_t repeate_min; // opdesc min repeat num - int64_t repeate_max; // opdesc max repeat num - int64_t repeate_curr; // opdesc current repeat num - bool match_status; - bool not_pattern; - int64_t group_id; // record desc groupid, need one desc matched at least in - // the same group - ShapeTypeRule shape_type_rule; - bool ignore_input_num; - bool ignore_output_num; - // used for two connected op, first opdesc has optional multiple nodes and - // ignore_output_num is true, second opdesc is same pattern type and - // out_branch_type is TBE_OUTPUT_BRANCH_MULTI - std::map multi_output_skip_status; -}; -using BufferFusionMapping = std::map>; -using BufferFusionMappings = std::vector; - -class BufferFusionPattern { - public: - explicit BufferFusionPattern(std::string name = "", int64_t op_max_count = TBE_FUSION_OP_NUM_MAX); - - virtual ~BufferFusionPattern(); - - BufferFusionPattern &AddOpDesc(const std::string &desc_name, const std::vector &patterns, - int64_t repeat_min = TBE_PATTERN_NUM_DEFAULT, - int64_t repeat_max = TBE_PATTERN_NUM_DEFAULT, - int64_t group_id = TBE_PATTERN_GROUPID_INVALID, - ShapeTypeRule shape_type_rule = ONLY_SUPPORT_STATIC, - bool not_pattern = false); - - BufferFusionPattern &SetOutputs(const std::string &desc_name, const std::vector &patterns, - int64_t relation = TBE_OUTPUT_BRANCH_SINGLE, bool ignore_input_num = false, - bool ignore_output_num = false); - - BufferFusionPattern &SetHead(const std::vector &op_patterns); - - std::string GetName(); - int64_t GetOpMaxCount(); - std::vector GetOpDescs(); - bool GetOutputs(BufferFusionOpDesc *op_desc, std::vector &outputs, bool ignore_repeat = false); - std::vector GetHead(); - int64_t GetErrorCnt(); - void InitRepeatCurr(const BufferFusionPattern &pattern); - - private: - BufferFusionOpDesc *GetOpDesc(const std::string &desc_name); - void UpdateSkipStatus(BufferFusionOpDesc *op_desc); - std::string name_; - int64_t op_max_count_; - std::vector ops_; - std::map op_map_; - std::vector head_; - int64_t error_count_; -}; -} // namespace fe -#endif // INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PATTERN_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/fusion_common/aicore_util_types.h b/inc/metadef/inc/register/graph_optimizer/fusion_common/aicore_util_types.h deleted file mode 100644 index 1a32fc7e1..000000000 --- a/inc/metadef/inc/register/graph_optimizer/fusion_common/aicore_util_types.h +++ /dev/null @@ 
-1,176 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FUSION_ENGINE_INC_COMMON_AICORE_UTIL_TYPES_H_ -#define FUSION_ENGINE_INC_COMMON_AICORE_UTIL_TYPES_H_ - -#include -#include -#include -#include "graph/anchor.h" -#include "graph/types.h" -#include "runtime/kernel.h" - -namespace fe { -const uint32_t L2_MAXDATANUM = 8; -struct FusionOpSrc { - uint32_t src_op_id; - ge::AnchorPtr src_anchor; - int32_t fusion_src_index; - int32_t fusion_dst_index; -}; - -struct FusionOpDst { - uint32_t dst_op_id; - ge::AnchorPtr dst_anchor; -}; - -struct FusionDataFlow { - std::pair edge; - std::pair node_dataindex_pair; -}; - -typedef struct tag_l2_fusion_data { - uint32_t l2Index; - uint64_t l2Addr; - uint64_t l2PageNum; -} L2FusionData_t; -typedef std::map L2FusionDataMap_t; - -typedef struct tag_fe_sm_desc { - rtL2Ctrl_t l2ctrl; - std::string node_name[L2_MAXDATANUM]; - uint8_t output_index[L2_MAXDATANUM]; -} fe_sm_desc_t; - -typedef struct TagTaskL2FusionInfo { - std::string node_name; - fe_sm_desc_t l2_info; - L2FusionDataMap_t input; - L2FusionDataMap_t output; - uint32_t is_used; -} TaskL2FusionInfo_t; - -using L2FusionInfoPtr = std::shared_ptr; - -typedef struct ToOpStruct { - int64_t op_l1_space = 0; - std::vector op_l1_fusion_type; - int64_t op_l1_workspace_flag = 0; // for workspace flag - int64_t op_l1_workspace_size = 0; - std::vector> slice_input_shape; - std::vector> slice_output_shape; - std::vector> - slice_input_offset; // conv & pooling & ReadSelect - std::vector> slice_output_offset; // WriteSelect - std::vector total_shape; - uint32_t split_index = 0; - ToOpStruct() { - // set invalid value for essential variable - op_l1_space = -1; - op_l1_workspace_size = -1; - } -} ToOpStruct_t; - -enum SlicePattern { - ELEMENT_WISE = 0, - ELEMENT_WISE_BROADCAST, - BROADCAST, - SLIDING_WINDOW, - SLIDING_WINDOW_DECONV, - CUBE_MATMUL, - SLICE_PATTERN_REDUCE, - SLICE_PATTERN_RESIZE, - SLICE_PATTERN_SCATTER, - SLICE_PATTERN_SEGMENT, - PATTERN_RESERVED -}; - -enum OpImplType { - EN_IMPL_CUSTOM_CONSTANT_CCE = 0, // custom constant op - EN_IMPL_CUSTOM_TIK, // custom tik op - EN_IMPL_CUSTOM_TBE, // custom tbe op - EN_IMPL_HW_CONSTANT_CCE, // Huawei built-in constant op - EN_IMPL_HW_GENERAL_CCE, // Huawei built-in cce op - EN_IMPL_HW_TIK, // Huawei built-in tik op - EN_IMPL_HW_TBE, // Huawei built-in tbe op - EN_IMPL_RL, // RL op - EN_IMPL_PLUGIN_TBE, // Huawei built-in tbe plugin op - EN_IMPL_VECTOR_CORE_HW_TBE, // Huawei built-in tbe op - EN_IMPL_VECTOR_CORE_CUSTOM_TBE, // custom tbe op - EN_IMPL_NON_PERSISTENT_CUSTOM_TBE, // custom tbe op - EN_RESERVED // reserved value -}; - -enum AOEOption { - AOE_OPT_USE_KB = 0, - AOE_OPT_NOT_USE_KB, - AOE_OPT_RESERVED -}; - -struct FEOpsStoreInfo { - int32_t priority; - std::string fe_ops_store_name; - OpImplType op_impl_type; - std::string cfg_file_path; - std::string op_impl_file_path; - bool need_pre_compile; - bool need_compile; - FEOpsStoreInfo() : priority(0), 
fe_ops_store_name(), op_impl_type(EN_RESERVED), cfg_file_path(), op_impl_file_path(), - need_pre_compile(false), need_compile(false) {} - FEOpsStoreInfo(int32_t priority_value, std::string ops_store_name_value, OpImplType op_impl_type_value, - std::string cfg_file_path_value, std::string op_impl_file_path_value, - bool need_pre_compile_value, bool need_compile_value) - : priority(priority_value), fe_ops_store_name(ops_store_name_value), op_impl_type(op_impl_type_value), - cfg_file_path(cfg_file_path_value), op_impl_file_path(op_impl_file_path_value), - need_pre_compile(need_pre_compile_value), need_compile(need_compile_value) {} - FEOpsStoreInfo(int32_t priority_value, std::string ops_store_name_value, OpImplType op_impl_type_value, - std::string cfg_file_path_value, std::string op_impl_file_path_value) - : priority(priority_value), fe_ops_store_name(ops_store_name_value), op_impl_type(op_impl_type_value), - cfg_file_path(cfg_file_path_value), op_impl_file_path(op_impl_file_path_value), - need_pre_compile(false), need_compile(false) {} -}; - -enum ISAArchVersion { EN_ISA_ARCH_V100 = 0, EN_ISA_ARCH_V200, EN_ISA_ARCH_V210 }; - -// Don't change the order, only add new mode in the end. -enum AppendArgsMode { NO_ARGS = 0, L2_BUFFER_ARGS = 1, L2_CACHE_ARGS = 999}; - -enum BufferFusionMode { EN_OPTIMIZE_DISABLE = 0, EN_L2_BUFFER, EN_L2_FUSION }; - -enum BufferOptimize { EN_UNKNOWN_OPTIMIZE = 0, EN_OFF_OPTIMIZE, EN_L1_OPTIMIZE, EN_L2_OPTIMIZE }; - -enum AutoTuneMode { TUNE_MODE_NO_TUNE = 0, TUNE_MODE_AUTO_TUNE, TUNE_MODE_RL_TUNE, TUNE_MODE_AUTO_AND_RL_TUNE }; - -enum PrecisionPolicy { WHITE = 0, BLACK = 1, GRAY = 2 }; - -enum OpPattern { - OP_PATTERN_OP_KERNEL = 0, - OP_PATTERN_OP_CUSTOMIZE, - OP_PATTERN_FORMAT_AGNOSTIC, - OP_PATTERN_BROADCAST, - OP_PATTERN_REDUCE -}; - -enum OpParamType { REQUIRED = 0, OPTIONAL, DYNAMIC, RESERVED }; - -enum OpConstValueDepend { CONST_IGNORE = 0, CONST_REQUIRED, CONST_OPTIONAL }; - -enum OpReduceType { REDUCE_MEAN = 0, REDUCE_ADD, REDUCE_MAX, REDUCE_MIN }; - -enum OpL1FusionType { L1FUSION_DISABLE = 0, L1FUSION_BASIC, L1FUSION_INPUT_CTR }; -} -#endif // FUSION_ENGINE_INC_COMMON_AICORE_UTIL_TYPES_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/fusion_common/fusion_statistic_recorder.h b/inc/metadef/inc/register/graph_optimizer/fusion_common/fusion_statistic_recorder.h deleted file mode 100644 index fbbc85261..000000000 --- a/inc/metadef/inc/register/graph_optimizer/fusion_common/fusion_statistic_recorder.h +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
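Most of the types above are plain aggregates consumed by other fusion-engine headers. As a brief, hedged illustration of the FEOpsStoreInfo constructors (the store name and paths are placeholders, and the include path is assumed from this file's location in the patch):

#include "register/graph_optimizer/fusion_common/aicore_util_types.h"

// Placeholder values for illustration; the five-argument constructor leaves
// need_pre_compile and need_compile at false.
static const fe::FEOpsStoreInfo kCustomTbeStore(
    2,                        // priority
    "tbe-custom",             // fe_ops_store_name
    fe::EN_IMPL_CUSTOM_TBE,   // op_impl_type
    "/path/to/ops/config",    // cfg_file_path
    "/path/to/ops/impl");     // op_impl_file_path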
- */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_FUSION_STATISTIC_RECORDER_H -#define INC_REGISTER_GRAPH_OPTIMIZER_FUSION_STATISTIC_RECORDER_H - -#include -#include -#include -#include -#include -#include - -namespace fe { - -class FusionInfo { - public: -#ifdef ONLY_COMPILE_OPEN_SRC - explicit FusionInfo(uint64_t session_id = 0, std::string graph_id = "", std::string pass_name = "", - int32_t match_times = 0, int32_t effect_times = 0); - - virtual ~FusionInfo(); - - void AddMatchTimes(int32_t match_times); - - void AddEffectTimes(int32_t effect_times); - - int32_t GetMatchTimes(); - - int32_t GetEffectTimes(); - - std::string GetGraphId(); - - std::string GetPassName(); - - uint64_t GetSessionId(); - - void SetMatchTimes(int32_t match_times); - - void SetEffectTimes(int32_t effect_times); -#else - explicit FusionInfo(const uint64_t session_id = 0, const std::string graph_id = "", - const std::string pass_name = "", const int32_t match_times = 0, const int32_t effect_times = 0); - - virtual ~FusionInfo(); - - void AddMatchTimes(const int32_t match_times); - - void AddEffectTimes(const int32_t effect_times); - - int32_t GetMatchTimes() const; - - int32_t GetEffectTimes() const; - - std::string GetGraphId() const; - - std::string GetPassName() const; - - uint64_t GetSessionId() const; - - void SetMatchTimes(const int32_t match_times); - - void SetEffectTimes(const int32_t effect_times); -#endif - - private: - uint64_t session_id_; - std::string graph_id_; - std::string pass_name_; - int32_t match_times_; - int32_t effect_times_; -}; - -using FusionStatisticMap = std::map>; - -class FusionStatisticRecorder { - public: - FusionStatisticRecorder(const FusionStatisticRecorder &) = delete; - - FusionStatisticRecorder &operator=(const FusionStatisticRecorder &) = delete; - - static FusionStatisticRecorder &Instance(); - -#ifdef ONLY_COMPILE_OPEN_SRC - void UpdateGraphFusionMatchTimes(FusionInfo &fusion_info); - - void UpdateGraphFusionEffectTimes(FusionInfo &fusion_info); - - void UpdateBufferFusionMatchTimes(FusionInfo &fusion_info); - - void UpdateBufferFusionEffectTimes(FusionInfo &fusion_info); -#else - void UpdateGraphFusionMatchTimes(const FusionInfo &fusion_info); - - void UpdateGraphFusionEffectTimes(const FusionInfo &fusion_info); - - void UpdateBufferFusionMatchTimes(const FusionInfo &fusion_info); - - void UpdateBufferFusionEffectTimes(const FusionInfo &fusion_info); -#endif - void GetAndClearFusionInfo(const std::string &session_graph_id, - std::map &graph_fusion_info_map, - std::map &buffer_fusion_info_map); - - void GetAllSessionAndGraphIdList(std::vector &session_graph_id_vec); - - private: - FusionStatisticRecorder(); - virtual ~FusionStatisticRecorder(); - FusionStatisticMap graph_fusion_info_map_; - FusionStatisticMap buffer_fusion_info_map_; - void GetFusionInfo(const std::string &session_graph_id, std::map &graph_fusion_info_map, - std::map &buffer_fusion_info_map); - -#ifdef ONLY_COMPILE_OPEN_SRC - void ClearFusionInfo(std::string session_graph_id); -#else - void ClearFusionInfo(const std::string& session_graph_id); -#endif - std::recursive_mutex mutex_; -}; -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_FUSION_STATISTIC_RECORDER_H diff --git a/inc/metadef/inc/register/graph_optimizer/fusion_common/graph_pass_util.h b/inc/metadef/inc/register/graph_optimizer/fusion_common/graph_pass_util.h deleted file mode 100644 index bad383bb3..000000000 --- a/inc/metadef/inc/register/graph_optimizer/fusion_common/graph_pass_util.h +++ /dev/null @@ -1,267 +0,0 @@ -/** - * 
Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_UTIL_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_UTIL_H_ -#include "graph/compute_graph.h" -#include "graph/debug/ge_attr_define.h" -#include "graph/utils/attr_utils.h" -#include "graph/utils/node_utils.h" -#include "graph/utils/type_utils.h" -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" - -#include -#include -#include -#include -#include - -namespace fe { -using NodeTypeMap = std::unordered_map>; -using NodeTypeMapPtr = std::shared_ptr; -struct NodeMapInfo { - int64_t run_count; - NodeTypeMapPtr node_type_map; -}; -using NodeMapInfoPtr = std::shared_ptr; - -/** @brief define graph pass, which provides two interface: 1. run pass; -* 2. record op names before fusion */ -class GraphPassUtil { - public: - /** set outputdesc attr for data dump - * - * @param origin_index,usually is origin node output index - * - * @param fusion_index,usually is fusion node output index - * - * @param origin_node, usually is origin node - * - * @param fusion_node, usually is fusion node - */ - static void SetOutputDescAttr(uint32_t origin_index, uint32_t fusion_index, ge::NodePtr origin_node, - ge::NodePtr fusion_node) { - if (fusion_node->GetOpDesc() == nullptr) { - return; - } - - auto fusion_node_output_desc = fusion_node->GetOpDesc()->MutableOutputDesc(fusion_index); - if (fusion_node_output_desc == nullptr) { - return; - } - if (origin_node->GetOpDesc() == nullptr) { - return; - } - auto origin_node_output_desc = origin_node->GetOpDesc()->MutableOutputDesc(origin_index); - if (origin_node_output_desc == nullptr) { - return; - } - - std::vector original_names; - if (ge::AttrUtils::GetListStr(origin_node->GetOpDesc(), ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, original_names) && - original_names.size() > 0) { - std::string original_name; - if (ge::AttrUtils::GetStr(origin_node_output_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_NAME, original_name)) { - (void)ge::AttrUtils::SetStr(fusion_node_output_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_NAME, original_name); - - std::int64_t origin_output_index = 0; - if (ge::AttrUtils::GetInt(origin_node_output_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OUTPUT_INDEX, - origin_output_index)) { - (void)ge::AttrUtils::SetInt(fusion_node_output_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OUTPUT_INDEX, - origin_output_index); - } - - ge::DataType origin_data_type = GetDataDumpOriginDataType(origin_node_output_desc); - if (origin_data_type != ge::DT_UNDEFINED) { - SetDataDumpOriginDataType(origin_data_type, fusion_node_output_desc); - } - ge::Format origin_format = GetDataDumpOriginFormat(origin_node_output_desc); - if (origin_format != ge::FORMAT_RESERVED) { - SetDataDumpOriginFormat(origin_format, fusion_node_output_desc); - } - } - } else { - (void)ge::AttrUtils::SetStr(fusion_node_output_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_NAME, origin_node->GetName()); - 
(void)ge::AttrUtils::SetInt(fusion_node_output_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OUTPUT_INDEX, origin_index); - SetDataDumpOriginDataType(origin_node_output_desc->GetOriginDataType(), fusion_node_output_desc); - SetDataDumpOriginFormat(origin_node_output_desc->GetOriginFormat(), fusion_node_output_desc); - } - } - - /** get origin format for data dump - * - * @param tensor_desc,usually is output_desc - * - * @return format of this tensor_desc - */ - static ge::Format GetDataDumpOriginFormat(ge::GeTensorDescPtr tensor_desc) { - std::string origin_format_str; - if (!ge::AttrUtils::GetStr(tensor_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_FORMAT, origin_format_str)) { - // Can not get the certificate and it's not set,return directly - return ge::FORMAT_RESERVED; - } - if (origin_format_str == "RESERVED") { - return ge::FORMAT_RESERVED; - } - return ge::TypeUtils::SerialStringToFormat(origin_format_str); - } - - /** set origin format for data dump - * - * @param origin format - * - * @param tensor_desc,usually is output_desc - */ - static void SetDataDumpOriginFormat(ge::Format origin_format, ge::GeTensorDescPtr tensor_desc) { - std::string origin_format_str = "RESERVED"; - if (origin_format != ge::FORMAT_RESERVED) { - origin_format_str = ge::TypeUtils::FormatToSerialString(origin_format); - } - (void)ge::AttrUtils::SetStr(tensor_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_FORMAT, origin_format_str); - } - - /** set origin datatype for data dump - * - * @param origin datatype - * - * @param tensor_desc,usually is output_desc - */ - static void SetDataDumpOriginDataType(ge::DataType origin_data_type, ge::GeTensorDescPtr tensor_desc) { - std::string origin_data_type_str = "RESERVED"; - if (origin_data_type != ge::DT_UNDEFINED) { - origin_data_type_str = ge::TypeUtils::DataTypeToSerialString(origin_data_type); - } - (void)ge::AttrUtils::SetStr(tensor_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_DATA_TYPE, origin_data_type_str); - } - - /** get origin datatype for data dump - * - * @param tensor_desc,usually is output_desc - * - * @return format of this tensor_desc - */ - static ge::DataType GetDataDumpOriginDataType(ge::GeTensorDescPtr tensor_desc) { - std::string origin_data_type_str; - if (!ge::AttrUtils::GetStr(tensor_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_DATA_TYPE, origin_data_type_str)) { - return ge::DT_UNDEFINED; - } - if (origin_data_type_str == "RESERVED") { - return ge::DT_UNDEFINED; - } - return ge::TypeUtils::SerialStringToDataType(origin_data_type_str); - } - - static void AddNodeFromOpTypeMap(NodeMapInfoPtr &node_map_info, ge::NodePtr &node_ptr) { - if (node_map_info == nullptr || node_ptr == nullptr) { - return; - } - NodeTypeMapPtr node_type_map = node_map_info->node_type_map; - std::string real_op_type = ge::NodeUtils::GetNodeType(*node_ptr); - auto iter = node_type_map->find(real_op_type); - if (iter != node_type_map->end()) { - iter->second.emplace(node_ptr->GetName(), node_ptr); - } else { - node_type_map->emplace(std::make_pair(real_op_type, - std::map{{node_ptr->GetName(), node_ptr}})); - } - } - - static Status GetOpTypeMapToGraph(NodeMapInfoPtr &node_map_info, const ge::ComputeGraph &graph) { - node_map_info = graph.TryGetExtAttr("NodeMapInfo", node_map_info); - if (node_map_info == nullptr) { - return FAILED; - } - return SUCCESS; - } - - static void RecordOriginalNames(std::vector original_nodes, ge::NodePtr node) { - // 1. 
get the original_names - std::vector original_names; - for (ge::NodePtr original_node : original_nodes) { - if (original_node == nullptr || original_node->GetOpDesc() == nullptr) { - return; - } - - ge::OpDescPtr origin_op_desc_ptr = original_node->GetOpDesc(); - std::vector names_tmp; - bool is_has_attr = ge::AttrUtils::GetListStr(origin_op_desc_ptr, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, - names_tmp); - if (is_has_attr) { - for (const auto &node_name : names_tmp) { - if (!node_name.empty()) { - original_names.push_back(node_name); - } - } - } else { - original_names.push_back(origin_op_desc_ptr->GetName()); - } - } - - // 2. set the dump attr - if (node == nullptr || node->GetOpDesc() == nullptr) { - return; - } - ge::OpDescPtr node_op_desc_ptr = node->GetOpDesc(); - (void)ge::AttrUtils::SetListStr(node_op_desc_ptr, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, original_names); - } - - static void AddNodeToNodeTypeMap(NodeTypeMapPtr &node_type_map, const std::string &op_type, ge::NodePtr &node_ptr) { - if (node_type_map == nullptr || node_ptr == nullptr) { - return; - } - auto iter = node_type_map->find(op_type); - if (iter == node_type_map->end()) { - node_type_map->emplace(std::make_pair(op_type, - std::map{{node_ptr->GetName(), node_ptr}})); - } else { - iter->second.emplace(node_ptr->GetName(), node_ptr); - } - } - - static void RemoveNodeFromNodeTypeMap(NodeTypeMapPtr &node_type_map, const std::string &op_type, - ge::NodePtr &node_ptr) { - if (node_type_map == nullptr || node_ptr == nullptr) { - return; - } - auto iter = node_type_map->find(op_type); - if (iter != node_type_map->end()) { - iter->second.erase(node_ptr->GetName()); - } - } - - static void GetNodesFromNodeTypeMap(NodeTypeMapPtr &node_type_map, const std::string &op_type, - std::vector &nodes) { - if (node_type_map == nullptr) { - return; - } - - auto iter = node_type_map->find(op_type); - if (iter == node_type_map->end()) { - return; - } - if (iter->second.empty()) { - return; - } - for (auto node_iter = iter->second.begin(); node_iter != iter->second.end(); node_iter++) { - nodes.push_back(node_iter->second); - } - } -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_UTIL_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/fusion_common/op_slice_info.h b/inc/metadef/inc/register/graph_optimizer/fusion_common/op_slice_info.h deleted file mode 100644 index 73bce790d..000000000 --- a/inc/metadef/inc/register/graph_optimizer/fusion_common/op_slice_info.h +++ /dev/null @@ -1,189 +0,0 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
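GraphPassUtil's helpers above are usually called together once a pass has created its fused node: RecordOriginalNames preserves the original-op-name chain, and SetOutputDescAttr copies the per-output dump attributes. A minimal sketch (the wrapper function name and the 0-to-0 index mapping are assumptions):

#include <vector>
#include "register/graph_optimizer/fusion_common/graph_pass_util.h"

// Assume original_nodes were matched by a pass and fused_node replaces them,
// with output 0 of original_nodes[0] becoming output 0 of fused_node.
void AttachDumpInfo(const std::vector<ge::NodePtr> &original_nodes, ge::NodePtr fused_node) {
  if (original_nodes.empty() || fused_node == nullptr) {
    return;
  }
  fe::GraphPassUtil::RecordOriginalNames(original_nodes, fused_node);
  fe::GraphPassUtil::SetOutputDescAttr(0U, 0U, original_nodes[0], fused_node);
}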
- */ - -#ifndef INC_COMMON_UTILS_AI_CORE_OP_SLICE_INFO_H -#define INC_COMMON_UTILS_AI_CORE_OP_SLICE_INFO_H - -#include -#include "graph/op_desc.h" -#include "aicore_util_types.h" - -namespace fe { -class InputSplitInfoImpl; -using InputSplitInfoImplPtr = std::shared_ptr; -class InputSplitInfo; -using InputSplitInfoPtr = std::shared_ptr; -class OutputSplitInfoImpl; -using OutputSplitInfoImplPtr = std::shared_ptr; -class OutputSplitInfo; -using OutputSplitInfoPtr = std::shared_ptr; -class InputReduceInfoImpl; -using InputReduceInfoImplPtr = std::shared_ptr; -class InputReduceInfo; -using InputReduceInfoPtr = std::shared_ptr; -class OutputReduceInfoImpl; -using OutputReduceInfoImplPtr = std::shared_ptr; -class OutputReduceInfo; -using OutputReduceInfoPtr = std::shared_ptr; -class AxisSplitMapImpl; -using AxisSplitMapImplPtr = std::shared_ptr; -class AxisSplitMap; -using AxisSplitMapPtr = std::shared_ptr; -class AxisReduceMapImpl; -using AxisReduceMapImplPtr = std::shared_ptr; -class AxisReduceMap; -using AxisReduceMapPtr = std::shared_ptr; -class OpCalcInfoImpl; -using OpCalcInfoImplPtr = std::shared_ptr; -class OpCalcInfo; -using OpCalcInfoPtr = std::shared_ptr; - -class InputSplitInfo { -public: - InputSplitInfo(); - InputSplitInfo &operator = (const InputSplitInfo &input_split_info); - ~InputSplitInfo(); - bool Initialize(); - size_t GetIndex() const; - std::vector GetAxis() const; - std::vector GetHeadOverLap() const; - std::vector GetTailOverLap() const; - void SetIndex(const size_t& idx); - void SetAxis(std::vector& axis); - void SetHeadOverLap(std::vector& head_over_lap); - void SetTailOverLap(std::vector& tail_over_lap); - bool IsPtrNull(); -private: - InputSplitInfoImplPtr split_impl_{nullptr}; -}; - -class OutputSplitInfo { -public: - OutputSplitInfo(); - OutputSplitInfo &operator = (const OutputSplitInfo &output_split_info); - ~OutputSplitInfo(); - bool Initialize(); - size_t GetIndex() const; - std::vector GetAxis() const; - void SetIndex(const size_t& idx); - void SetAxis(std::vector& axis); - bool IsPtrNull(); -private: - OutputSplitInfoImplPtr split_impl_{nullptr}; -}; - -class InputReduceInfo { -public: - InputReduceInfo(); - InputReduceInfo &operator = (const InputReduceInfo &input_reduce_info); - ~InputReduceInfo(); - bool Initialize(); - size_t GetIndex() const; - std::vector GetAxis() const; - void SetIndex(const size_t& idx); - void SetAxis(std::vector& axis); - bool IsPtrNull(); -private: - InputReduceInfoImplPtr reduce_impl_{nullptr}; -}; - -class OutputReduceInfo { -public: - OutputReduceInfo(); - OutputReduceInfo &operator = (const OutputReduceInfo &output_reduce_info); - ~OutputReduceInfo(); - bool Initialize(); - size_t GetIndex() const; - OpReduceType GetReduceType() const; - bool GetIsAtomic() const; - void SetIndex(const size_t& idx); - void SetReduceType(const OpReduceType& reduce_type); - void SetIsAtomic(const bool& is_atomic); - bool IsPtrNull(); -private: - OutputReduceInfoImplPtr reduce_impl_{nullptr}; -}; - -class AxisSplitMap { -public: - friend class AxisSplitMapImpl; - AxisSplitMap(); - AxisSplitMap &operator = (const AxisSplitMap &axis_split_map); - ~AxisSplitMap(); - bool Initialize(); - std::vector GetInputSplitInfos() const; - std::vector GetOutputSplitInfos() const; - std::vector GetInputSplitInfoVec() const; - std::vector GetOutputSplitInfoVec() const; - void AddInputSplitInfo(InputSplitInfo& input_split_info); - void SetInputSplitInfos(std::vector& input_split_vec); - void SetInputSplitInfos(std::vector& input_split_vec); - void 
AddOutputSplitInfo(OutputSplitInfo& output_split_info); - void SetOutputSplitInfos(std::vector& output_split_vec); - void SetOutputSplitInfos(std::vector& output_split_vec); - bool IsPtrNull(); -private: - AxisSplitMapImplPtr aixs_split_impl_{nullptr}; -}; - -class AxisReduceMap { -public: - AxisReduceMap(); - AxisReduceMap &operator = (const AxisReduceMap &axis_reduce_map); - ~AxisReduceMap(); - bool Initialize(); - friend class AxisReduceMapImpl; - std::vector GetInputReduceInfos() const; - std::vector GetOutputReduceInfos() const; - std::vector GetInputReduceInfoVec() const; - std::vector GetOutputReduceInfoVec() const; - void AddInputReduceInfo(InputReduceInfo& input_reduce_info); - void SetInputReduceInfos(std::vector& input_reduce_vec); - void SetInputReduceInfos(std::vector& input_reduce_vec); - void AddOutputReduceInfo(OutputReduceInfo& output_reduce_info); - void SetOutputReduceInfos(std::vector& output_reduce_vec); - void SetOutputReduceInfos(std::vector& output_reduce_vec); - bool IsPtrNull(); -private: - AxisReduceMapImplPtr aixs_reduce_impl_{nullptr}; -}; - -class OpCalcInfo { -public: - OpCalcInfo(); - ~OpCalcInfo(); - bool Initialize(); - std::vector GetAxisSplitMaps() const; - std::vector GetAxisReduceMaps() const; - std::vector GetAxisSplitMapVec() const; - std::vector GetAxisReduceMapVec() const; - OpL1FusionType GetL1FusionEnable() const; - int64_t GetMinTbeL1Space() const; - void AddAxisSplitMap(AxisSplitMap& axis_split_map); - void SetAxisSplitMaps(std::vector& axis_split_vec); - void SetAxisSplitMaps(std::vector& axis_split_vec); - void AddAxisReduceMap(AxisReduceMap& axis_reduce_map); - void SetAxisReduceMaps(std::vector& axis_reduce_vec); - void SetAxisReduceMaps(std::vector& axis_reduce_vec); - void SetL1FusionEnable(const OpL1FusionType& l1_fusion_enable); - void SetMinTbeL1Space(const int64_t& min_tbe_l1_space); - void DelAxisSplitMapBaseAxis(std::vector& axis); - bool IsPtrNull(); -private: - OpCalcInfoImplPtr op_calc_info_impl_{nullptr}; -}; -} // namespace fe -#endif // INC_COMMON_UTILS_AI_CORE_OP_SLICE_INFO_H diff --git a/inc/metadef/inc/register/graph_optimizer/fusion_common/pattern_fusion_base_pass.h b/inc/metadef/inc/register/graph_optimizer/fusion_common/pattern_fusion_base_pass.h deleted file mode 100644 index 9becbf3a1..000000000 --- a/inc/metadef/inc/register/graph_optimizer/fusion_common/pattern_fusion_base_pass.h +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
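These wrappers are what CalcFusionOpSliceInfo (declared in buffer_fusion_pass_base.h earlier in this patch) fills in. A hedged sketch describing a single split along axis 0 for an element-wise op; the int64_t element type of the axis and overlap vectors is an assumption, since the template arguments are not visible in this header:

#include <vector>
#include "register/graph_optimizer/fusion_common/op_slice_info.h"

bool BuildElemwiseSliceInfo(fe::OpCalcInfo &op_calc_info) {
  if (!op_calc_info.Initialize()) {
    return false;
  }

  fe::InputSplitInfo input_split;
  fe::OutputSplitInfo output_split;
  if (!input_split.Initialize() || !output_split.Initialize()) {
    return false;
  }
  std::vector<int64_t> axis = {0};      // split along the first axis
  std::vector<int64_t> overlap = {-1};  // -1 is used here to mean no head/tail overlap (assumption)
  input_split.SetIndex(0);
  input_split.SetAxis(axis);
  input_split.SetHeadOverLap(overlap);
  input_split.SetTailOverLap(overlap);
  output_split.SetIndex(0);
  output_split.SetAxis(axis);

  fe::AxisSplitMap split_map;
  if (!split_map.Initialize()) {
    return false;
  }
  split_map.AddInputSplitInfo(input_split);
  split_map.AddOutputSplitInfo(output_split);

  op_calc_info.AddAxisSplitMap(split_map);
  op_calc_info.SetL1FusionEnable(fe::L1FUSION_DISABLE);
  return true;
}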
- */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_PATTERN_FUSION_BASE_PASS_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_PATTERN_FUSION_BASE_PASS_H_ - -#include -#include -#include -#include -#include -#include "common/opskernel/ops_kernel_info_store.h" -#include "register/graph_optimizer/graph_fusion/fusion_pattern.h" -#include "register/graph_optimizer/graph_fusion/graph_pass.h" - -using std::initializer_list; -using std::map; -using std::string; -using std::vector; - -using namespace std; - -namespace fe { -using OpsKernelInfoStorePtr = std::shared_ptr; -class PatternFusionBasePassImpl; -using PatternFusionBasePassImplPtr = std::shared_ptr; - -/** Pass based on pattern - * @ingroup FUSION_PASS_GROUP - * @note New virtual methods should be append at the end of this class - */ -class PatternFusionBasePass : public GraphPass { - public: - using OpDesc = FusionPattern::OpDesc; - using Mapping = std::map, std::vector>; - using Mappings = std::vector; - - PatternFusionBasePass(); - virtual ~PatternFusionBasePass(); - - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - Status Run(ge::ComputeGraph &graph) override; - - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @param [ops_kernel_info_store_ptr, OP info kernel instance - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - virtual Status Run(ge::ComputeGraph &graph, OpsKernelInfoStorePtr ops_kernel_info_store_ptr); - - protected: - virtual std::vector DefinePatterns() = 0; - virtual Status Fusion(ge::ComputeGraph &graph, Mapping &mapping, std::vector &new_nodes) = 0; - - std::vector GetNodesFromMapping(const Mapping &mapping); - ge::NodePtr GetNodeFromMapping(const std::string &id, const Mapping &mapping); - - void RecordOutputAnchorMap(ge::NodePtr output_node); - void ClearOutputAnchorMap(); - - Status SetDataDumpAttr(std::vector &original_nodes, std::vector &fus_nodes); - - bool CheckOpSupported(const ge::OpDescPtr &op_desc_ptr); - - bool CheckOpSupported(const ge::NodePtr &node); - - /** check whether the input graph is Cyclic - * - * @param graph need to be checked - * @return false or true - */ - bool CheckGraphCycle(ge::ComputeGraph &graph); - - void EnableNetworkAnalysis(); - - void DumpMapping(const FusionPattern &pattern, const Mapping &mapping); - private: - /** match all nodes in graph according to pattern - * - * @param pattern fusion pattern defined - * @param mappings match result - * @return SUCCESS, successfully add edge - * @return FAILED, fail - */ - bool MatchAll(ge::ComputeGraph &graph, const FusionPattern &pattern, Mappings &mappings); - - Status RunOnePattern(ge::ComputeGraph &graph, const FusionPattern &pattern, bool &changed); // lint !e148 - - /** Internal implement class ptr */ - std::shared_ptr pattern_fusion_base_pass_impl_ptr_; - - std::unordered_map> origin_op_anchors_map_; - - bool enable_network_analysis_ = false; - -}; -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_PATTERN_FUSION_BASE_PASS_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/graph_fusion/fusion_pass_manager/fusion_pass_registry.h b/inc/metadef/inc/register/graph_optimizer/graph_fusion/fusion_pass_manager/fusion_pass_registry.h deleted file mode 100644 index 2592b54ef..000000000 
--- a/inc/metadef/inc/register/graph_optimizer/graph_fusion/fusion_pass_manager/fusion_pass_registry.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PASS_REGISTRY_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PASS_REGISTRY_H_ - -#include -#include -#include -#include -#include "register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h" - -namespace fe { -class FusionPassRegistry { - public: - using CreateFn = GraphPass *(*)(); - ~FusionPassRegistry(); - - static FusionPassRegistry &GetInstance(); - -#ifdef ONLY_COMPILE_OPEN_SRC - void RegisterPass(const GraphFusionPassType &pass_type, const std::string &pass_name, CreateFn create_fn); -#else - void RegisterPass(const GraphFusionPassType &pass_type, const std::string &pass_name, CreateFn create_fn) const; -#endif - - std::map GetCreateFnByType(const GraphFusionPassType &pass_type); - - private: - FusionPassRegistry(); - class FusionPassRegistryImpl; - std::unique_ptr impl_; -}; - -class FusionPassRegistrar { - public: - FusionPassRegistrar(const GraphFusionPassType &pass_type, const std::string &pass_name, GraphPass *(*create_fn)()); - ~FusionPassRegistrar() {} -}; - -#define REGISTER_PASS(pass_name, pass_type, pass_class) \ - REGISTER_PASS_UNIQ_HELPER(__COUNTER__, pass_name, pass_type, pass_class) - -#define REGISTER_PASS_UNIQ_HELPER(ctr, pass_name, pass_type, pass_class) \ - REGISTER_PASS_UNIQ(ctr, pass_name, pass_type, pass_class) - -#define REGISTER_PASS_UNIQ(ctr, pass_name, pass_type, pass_class) \ - static ::fe::FusionPassRegistrar register_fusion_pass##ctr __attribute__((unused)) = ::fe::FusionPassRegistrar( \ - pass_type, pass_name, []() -> ::fe::GraphPass * { return new (std::nothrow) pass_class(); }) - -} // namespace fe -#endif // INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PASS_REGISTRY_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/graph_fusion/fusion_pattern.h b/inc/metadef/inc/register/graph_optimizer/graph_fusion/fusion_pattern.h deleted file mode 100644 index 11de2d7d3..000000000 --- a/inc/metadef/inc/register/graph_optimizer/graph_fusion/fusion_pattern.h +++ /dev/null @@ -1,171 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
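PatternFusionBasePass (above) together with REGISTER_PASS is the usual way to hook a pattern-based graph fusion into the optimizer. A rough sketch; the pass name, the "Relu" op type, and the do-nothing Fusion body are placeholders rather than anything taken from these headers:

#include <new>
#include <string>
#include <vector>
#include "register/graph_optimizer/fusion_common/pattern_fusion_base_pass.h"
#include "register/graph_optimizer/graph_fusion/fusion_pass_manager/fusion_pass_registry.h"

namespace fe {
class MyReluFusionPass : public PatternFusionBasePass {
 protected:
  // Match a single node whose type is "Relu" (placeholder type string).
  std::vector<FusionPattern *> DefinePatterns() override {
    std::vector<FusionPattern *> patterns;
    auto *pattern = new (std::nothrow) FusionPattern("MyReluPattern");
    if (pattern == nullptr) {
      return patterns;
    }
    pattern->AddOpDesc("relu", {"Relu"}).SetOutput("relu");
    patterns.push_back(pattern);
    return patterns;
  }

  Status Fusion(ge::ComputeGraph &graph, Mapping &mapping,
                std::vector<ge::NodePtr> &new_nodes) override {
    ge::NodePtr relu_node = GetNodeFromMapping("relu", mapping);
    if (relu_node == nullptr) {
      return NOT_CHANGED;
    }
    // A real pass would rewrite the graph here and append any created nodes
    // to new_nodes; this sketch leaves the graph untouched.
    (void)graph;
    (void)new_nodes;
    return NOT_CHANGED;
  }
};

// Arguments: pass name, pass type, pass class.
REGISTER_PASS("MyReluFusionPass", CUSTOM_AI_CORE_GRAPH_PASS, MyReluFusionPass);
}  // namespace fe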
- */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PATTERN_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PATTERN_H_ -#include -#include -#include -#include -#include - -namespace fe { - -/** Fusion pattern - * @ingroup FUSION_PASS_GROUP - * Describe Pattern of Ops waiting for fusion(Op type, etc) - */ -class FusionPattern { - public: - struct OpDesc; - using OpDescPtr = std::shared_ptr; - /** - * @ingroup fe - * @brief description of Ops - */ - struct OpDesc { - std::string id; // Identifier - std::vector types; // the Op types of Ops - std::vector inputs; // all input Ops - bool repeatable; // flag to show if match multiple Ops or not - bool is_output; // flag to show if the op is output node - }; - - public: -#ifdef ONLY_COMPILE_OPEN_SRC - explicit FusionPattern(std::string name = ""); -#else - explicit FusionPattern(const std::string name = ""); -#endif - ~FusionPattern(); - - /** set pattern name - * - * @param name pattern name - * @return FusionPattern - */ - FusionPattern &SetName(const std::string &name); - - /** add Op description with unknown number of args - * - * @param id pattern id - * @param types op type list - * @return FusionPattern - */ - FusionPattern &AddOpDesc(const std::string &id, const std::initializer_list &types = {}); - - /** add Op description with vector - * - * @param id pattern id - * @param types op type list - * - * @return FusionPattern - */ - FusionPattern &AddOpDesc(const std::string &id, const std::vector &types); - - /** set input Ops with unknown number of args - * - * @param id pattern id - * - * @param input_ids inputs to id op - * - * @return FusionPattern - */ - FusionPattern &SetInputs(const std::string &id, const std::initializer_list &input_ids); - - /** set input Ops with unknown number of args - * - * @param id pattern id - * - * @param input_ids inputs to id op - * - * @return FusionPattern - */ - FusionPattern &SetInputs(const std::string &id, const std::vector &input_ids); - - /** set output Op - * - * @param id pattern id - * - * @return FusionPattern - */ - FusionPattern &SetOutput(const std::string &id); - - /** build pattern and check if error exists - * - * @return True or False - */ - bool Build(); - - /** get pattern name - * - * @param id pattern id - * - * @return fusion pattern name - */ - const std::string &GetName() const; - - /** get the OpDesc of input Ops (const) - * - * @param op_desc op_desc for getting inputs - * - * @return op_desc's iniput opdesc list - */ - static const std::vector> *GetInputs(std::shared_ptr op_desc); - - /** get the OpDesc of output Op - * - * @return pattern's output opdesc list - */ - const std::shared_ptr GetOutput() const; - - /** print pattern - * - */ - void Dump() const; - -#ifdef ONLY_COMPILE_OPEN_SRC - void GetOpDescList(std::vector> &op_desc_list); -#endif - /** get OpDesc based on ID, return nullptr if failed - * - * @param id pattern id - * - * @return pattern's output opdesc list - */ - std::shared_ptr GetOpDesc(const std::string &id) const; - - private: - FusionPattern(const FusionPattern &) = default; - FusionPattern &operator=(const FusionPattern &) = default; - - void SetError(); - - private: - std::string name_; - - std::vector> ops_; - - std::map> op_map_; - - std::shared_ptr output_; - - bool has_error_; -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PATTERN_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h b/inc/metadef/inc/register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h deleted file mode 
100644 index d9956f5ae..000000000 --- a/inc/metadef/inc/register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_FUSION_PASS_BASE_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_FUSION_PASS_BASE_H_ - -#include -#include -#include -#include -#include - -#include "register/graph_optimizer/graph_fusion/fusion_pattern.h" -#include "register/graph_optimizer/graph_fusion/graph_pass.h" - -using std::initializer_list; -using std::map; -using std::string; -using std::vector; - -using namespace std; - -namespace fe { -enum GraphFusionPassType { - BUILT_IN_GRAPH_PASS = 0, - BUILT_IN_VECTOR_CORE_GRAPH_PASS, - CUSTOM_AI_CORE_GRAPH_PASS, - CUSTOM_VECTOR_CORE_GRAPH_PASS, - SECOND_ROUND_BUILT_IN_GRAPH_PASS, - BUILT_IN_BEFORE_TRANSNODE_INSERTION_GRAPH_PASS, - GRAPH_FUSION_PASS_TYPE_RESERVED -}; -class PatternFusionBasePassImpl; -using PatternFusionBasePassImplPtr = std::shared_ptr; - -/** Pass based on pattern - * @ingroup FUSION_PASS_GROUP - * @note New virtual methods should be append at the end of this class - */ -class GraphFusionPassBase : public GraphPass { - public: - using OpDesc = FusionPattern::OpDesc; - using Mapping = std::map, std::vector>; - using Mappings = std::vector; - - GraphFusionPassBase(); - virtual ~GraphFusionPassBase(); - - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - Status Run(ge::ComputeGraph &graph) override; - - protected: - /** define pattern - * - * @return NA - */ - virtual std::vector DefinePatterns() = 0; - - /** do fusion according to nodes matched - * - * @param graph the graph waiting for pass level optimization - * @param new_nodes fusion result node(s) - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - virtual Status Fusion(ge::ComputeGraph &graph, Mapping &mapping, std::vector &new_nodes) = 0; - - /** get nodes from matched result - * - * @param mapping match result - * @return nodes result - */ - static ge::NodePtr GetNodeFromMapping(const std::string &id, const Mapping &mapping); - - private: - /** match all nodes in graph according to pattern - * - * @param pattern fusion pattern defined - * @param mappings match result - * @return SUCCESS, successfully add edge - * @return FAILED, fail - */ - bool MatchAll(ge::ComputeGraph &graph, const FusionPattern &pattern, Mappings &mappings); - - Status RunOnePattern(ge::ComputeGraph &graph, const FusionPattern &pattern, bool &changed); // lint !e148 - - /** Internal implement class ptr */ - std::shared_ptr pattern_fusion_base_pass_impl_ptr_; -}; - -} // namespace fe - -#endif // 
INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_FUSION_PASS_BASE_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/graph_fusion/graph_pass.h b/inc/metadef/inc/register/graph_optimizer/graph_fusion/graph_pass.h deleted file mode 100644 index dc4c6640b..000000000 --- a/inc/metadef/inc/register/graph_optimizer/graph_fusion/graph_pass.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_H_ - -#include -#include "register/graph_optimizer/graph_fusion/pass.h" - -namespace fe { - -/** graph pass - * @ingroup GRAPH_PASS_GROUP - * graph level pass - */ -class GraphPass : public Pass { - public: - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - virtual Status Run(ge::ComputeGraph &graph) = 0; -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/graph_fusion/pass.h b/inc/metadef/inc/register/graph_optimizer/graph_fusion/pass.h deleted file mode 100644 index 142981938..000000000 --- a/inc/metadef/inc/register/graph_optimizer/graph_fusion/pass.h +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** @defgroup FUSION_PASS_GROUP Fusion Pass Interface */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_PASS_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_PASS_H_ - -#include "graph/compute_graph.h" -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" - -namespace fe { - -/** fusion pass - * @ingroup GRAPH_PASS_GROUP - * network level pass - */ -template -class Pass { - public: - virtual ~Pass() {} - - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - virtual Status Run(ge::ComputeGraph &graph) = 0; - - void SetName(const std::string &name) { name_ = name; } - - std::string GetName() { return name_; } - - private: - std::string name_; -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_PASS_H_ diff --git a/inc/metadef/inc/register/graph_optimizer/graph_optimize_register_error_codes.h b/inc/metadef/inc/register/graph_optimizer/graph_optimize_register_error_codes.h deleted file mode 100644 index b8950158a..000000000 --- a/inc/metadef/inc/register/graph_optimizer/graph_optimize_register_error_codes.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZE_REGISTER_ERROR_CODES_H_ -#define INC_REGISTER_GRAPH_OPTIMIZE_REGISTER_ERROR_CODES_H_ - -#include -#include - -/** Assigned SYS ID */ -const uint8_t SYSID_FE = 3; - -/** Common module ID */ -const uint8_t FE_MODID_COMMON = 50; - -namespace fe { - -/** FE error code definiton Macro -* Build error code -*/ -#define FE_DEF_ERRORNO(sysid, modid, name, value, desc) \ - static constexpr fe::Status (name) = \ - ((((static_cast((0xFF) & (static_cast(sysid)))) << 24) | \ - ((static_cast((0xFF) & (static_cast(modid)))) << 16)) | \ - ((0xFFFF) & (static_cast(value)))); - -using Status = uint32_t; - -#define FE_DEF_ERRORNO_COMMON(name, value, desc) \ - FE_DEF_ERRORNO(SYSID_FE, FE_MODID_COMMON, (name), (value), (desc)) - -using Status = uint32_t; - -FE_DEF_ERRORNO(0, 0, SUCCESS, 0, "success"); -FE_DEF_ERRORNO(0xFF, 0xFF, FAILED, 0xFFFF, "failed"); -FE_DEF_ERRORNO_COMMON(NOT_CHANGED, 201, "The nodes of the graph not changed."); -FE_DEF_ERRORNO_COMMON(PARAM_INVALID, 1, "Parameter's invalid!"); -FE_DEF_ERRORNO_COMMON(GRAPH_FUSION_CYCLE, 301, "Graph is cycle after fusion!"); - -} // namespace fe -#endif // INC_REGISTER_GRAPH_OPTIMIZE_REGISTER_ERROR_CODES_H_ diff --git a/inc/metadef/inc/register/host_cpu_context.h b/inc/metadef/inc/register/host_cpu_context.h deleted file mode 100644 index f7d4f52f1..000000000 --- a/inc/metadef/inc/register/host_cpu_context.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
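FE_DEF_ERRORNO packs a system ID, a module ID and a 16-bit value into one 32-bit Status, so with SYSID_FE = 3 and FE_MODID_COMMON = 50 the common codes above resolve to fixed constants. A small usage sketch (the wrapper function is illustrative, and the include paths are assumed from the file locations in this patch):

#include "register/graph_optimizer/graph_fusion/graph_pass.h"
#include "register/graph_optimizer/graph_optimize_register_error_codes.h"

// Codes follow (sysid << 24) | (modid << 16) | value, e.g.
//   fe::SUCCESS     == 0x00000000
//   fe::NOT_CHANGED == 0x033200C9  (3 << 24 | 50 << 16 | 201)
//   fe::FAILED      == 0xFFFFFFFF
fe::Status RunAndReport(fe::GraphPass &pass, ge::ComputeGraph &graph) {
  const fe::Status ret = pass.Run(graph);
  if ((ret != fe::SUCCESS) && (ret != fe::NOT_CHANGED)) {
    return fe::FAILED;  // anything other than "changed" or "unchanged" is treated as failure
  }
  return ret;
}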
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_HOST_CPU_CONTEXT_H_ -#define INC_REGISTER_HOST_CPU_CONTEXT_H_ - -#include "external/ge/ge_api_error_codes.h" -#include "register/register_types.h" - -namespace ge { -class HostCpuContext { - public: - HostCpuContext() = default; - ~HostCpuContext() = default; - private: - class Impl; - Impl *impl_; -}; -} // namespace ge - -extern "C" { -// Unified definition for registering host_cpu_kernel_wrapper when so is opened -FMK_FUNC_HOST_VISIBILITY ge::Status Initialize(const ge::HostCpuContext &ctx); -} - -#endif //INC_REGISTER_HOST_CPU_CONTEXT_H_ diff --git a/inc/metadef/inc/register/infer_data_slice_registry.h b/inc/metadef/inc/register/infer_data_slice_registry.h deleted file mode 100644 index 6a6412ed8..000000000 --- a/inc/metadef/inc/register/infer_data_slice_registry.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_REGISTER_INFER_DATA_SLICE_REGISTRY_H_ -#define INC_REGISTER_INFER_DATA_SLICE_REGISTRY_H_ - -#include "external/graph/ge_error_codes.h" -#include "external/graph/operator.h" - -namespace ge { -using InferDataSliceFunc = std::function; - -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InferDataSliceFuncRegister { - public: - InferDataSliceFuncRegister(const char *operator_type, const InferDataSliceFunc &infer_data_slice_func); - ~InferDataSliceFuncRegister() = default; -}; - -// infer data slice func register -#define IMPLEMT_COMMON_INFER_DATA_SLICE(func_name) \ - GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY static graphStatus func_name(Operator &op) - -#define IMPLEMT_INFER_DATA_SLICE(op_name, func_name) \ - GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY static graphStatus func_name(op::op_name &op) - -#define INFER_DATA_SLICE_FUNC(op_name, x) [&](Operator &v) { return x((op::op_name &)v); } - -#define __INFER_DATA_SLICE_FUNC_REG_IMPL__(op_name, x, n) \ - static const InferDataSliceFuncRegister PASTE(ids_register, n)(#op_name, x) - -#define INFER_DATA_SLICE_FUNC_REG(op_name, x) \ - __INFER_DATA_SLICE_FUNC_REG_IMPL__(op_name, INFER_DATA_SLICE_FUNC(op_name, x), __COUNTER__) -} // namespace ge - -#endif // INC_REGISTER_INFER_DATA_SLICE_REGISTRY_H_ diff --git a/inc/metadef/inc/register/op_kernel_registry.h b/inc/metadef/inc/register/op_kernel_registry.h deleted file mode 100644 index 35fcc8573..000000000 --- a/inc/metadef/inc/register/op_kernel_registry.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_OP_KERNEL_REGISTRY_H_ -#define INC_REGISTER_OP_KERNEL_REGISTRY_H_ -#include -#include -#include "register/register_types.h" -#include "register.h" - -namespace ge { -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpKernelRegistry { - public: - using CreateFn = HostCpuOp* (*)(); - ~OpKernelRegistry(); - - static OpKernelRegistry& GetInstance(); - - bool IsRegistered(const std::string &op_type); - - void RegisterHostCpuOp(const std::string &op_type, CreateFn create_fn); - - std::unique_ptr CreateHostCpuOp(const std::string &op_type); - - private: - OpKernelRegistry(); - class OpKernelRegistryImpl; - /*lint -e148*/ - std::unique_ptr impl_; -}; -} // namespace ge - -#endif // INC_REGISTER_OP_KERNEL_REGISTRY_H_ diff --git a/inc/metadef/inc/register/op_registry.h b/inc/metadef/inc/register/op_registry.h deleted file mode 100644 index 8c209fa89..000000000 --- a/inc/metadef/inc/register/op_registry.h +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
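OpKernelRegistry above is a simple name-to-factory map for host CPU kernels (HostCpuOp itself comes from register.h). A hedged sketch of a lookup helper; the function name is an assumption:

#include <memory>
#include <string>
#include "register/op_kernel_registry.h"

// Returns a freshly created host-CPU kernel for op_type, or nullptr if none was registered.
std::unique_ptr<ge::HostCpuOp> MakeHostCpuOp(const std::string &op_type) {
  ge::OpKernelRegistry &registry = ge::OpKernelRegistry::GetInstance();
  if (!registry.IsRegistered(op_type)) {
    return nullptr;
  }
  return registry.CreateHostCpuOp(op_type);
}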
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_OP_REGISTRY_H_ -#define INC_REGISTER_OP_REGISTRY_H_ - -#include -#include -#include -#include -#include -#include - -#include "register/register.h" - -namespace domi { -enum RemoveInputType { - OMG_MOVE_TYPE_DTYPE = 0, - OMG_MOVE_TYPE_VALUE, - OMG_MOVE_TYPE_SHAPE, - OMG_MOVE_TYPE_FORMAT, - OMG_MOVE_TYPE_AXIS, - OMG_MOVE_TYPE_SCALAR_VALUE, - OMG_REMOVE_TYPE_WITH_COND = 1000, - OMG_REMOVE_INPUT_WITH_ORIGINAL_TYPE, - OMG_INPUT_REORDER, -}; - -struct RemoveInputConfigure { - int inputIdx = INT_MAX; - std::string attrName; - RemoveInputType moveType; - bool attrValue = false; - std::string originalType; - std::vector input_order; -}; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpRegistry { - public: - static OpRegistry *Instance(); - - std::vector registrationDatas; - - bool Register(const OpRegistrationData ®_data); - - domi::ImplyType GetImplyType(const std::string &op_type); - - void GetOpTypeByImplyType(std::vector &vec_op_type, const domi::ImplyType &imply_type); - - domi::ParseParamFunc GetParseParamFunc(const std::string &op_type, const std::string &ori_type); - - domi::ParseParamByOpFunc GetParseParamByOperatorFunc(const std::string &ori_type); - - domi::FusionParseParamFunc GetFusionParseParamFunc(const std::string &op_type, const std::string &ori_type); - - domi::FusionParseParamByOpFunc GetFusionParseParamByOpFunc(const std::string &op_type, - const std::string &ori_type); - - domi::ParseSubgraphFunc GetParseSubgraphPostFunc(const std::string &op_type); - - Status GetParseSubgraphPostFunc(const std::string &op_type, domi::ParseSubgraphFuncV2 &parse_subgraph_func); - - domi::ImplyType GetImplyTypeByOriOpType(const std::string &ori_optype); - - const std::vector &GetRemoveInputConfigure(const std::string &ori_optype) const; - - bool GetOmTypeByOriOpType(const std::string &ori_optype, std::string &om_type); - - ParseOpToGraphFunc GetParseOpToGraphFunc(const std::string &op_type, const std::string &ori_type); - - private: - std::unordered_map op_run_mode_map_; - std::unordered_map op_parse_params_fn_map_; - std::unordered_map parse_params_by_op_func_map_; - std::unordered_map fusion_op_parse_params_fn_map_; - std::unordered_map fusion_parse_params_by_op_fn_map_; - std::unordered_map op_types_to_parse_subgraph_post_func_; - std::unordered_map> remove_input_configure_map_; - std::map origin_type_to_om_type_; - std::unordered_map parse_op_to_graph_fn_map_; - std::unordered_map op_types_to_parse_subgraph_post_func_v2_; -}; -} // namespace domi -#endif // INC_REGISTER_OP_REGISTRY_H_ diff --git a/inc/metadef/inc/register/op_tiling.h b/inc/metadef/inc/register/op_tiling.h deleted file mode 100644 index 260b0533f..000000000 --- a/inc/metadef/inc/register/op_tiling.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2019-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
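OpRegistry above is keyed by the original framework op type. A small, hedged sketch of querying it (the helper name is an assumption):

#include <string>
#include "register/op_registry.h"

// Maps an original framework type to its OM type, if a plugin registered one.
bool LookupOmType(const std::string &ori_type, std::string &om_type) {
  domi::OpRegistry *const registry = domi::OpRegistry::Instance();
  if (registry == nullptr) {
    return false;
  }
  return registry->GetOmTypeByOriOpType(ori_type, om_type);
}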
diff --git a/inc/metadef/inc/register/op_tiling.h b/inc/metadef/inc/register/op_tiling.h
deleted file mode 100644
index 260b0533f..000000000
--- a/inc/metadef/inc/register/op_tiling.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Copyright 2019-2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef INC_REGISTER_OP_TILING_H_
-#define INC_REGISTER_OP_TILING_H_
-
-#include "graph/debug/ge_attr_define.h"
-#include "graph/node.h"
-#include "register/op_tiling_registry.h"
-
-namespace optiling {
-extern "C" ge::graphStatus OpParaCalculateV2(const ge::Node &node, OpRunInfoV2 &run_info);
-extern "C" ge::graphStatus OpAtomicCalculateV2(const ge::Node &node, OpRunInfoV2 &run_info);
-extern "C" ge::graphStatus OpFftsCalculateV2(const ge::Node &node, std::vector<OpRunInfoV2> &run_info);
-} // namespace optiling
-#endif // INC_REGISTER_OP_TILING_H_
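op_tiling.h above declares the C entry points through which the runtime triggers tiling for a single graph node. The sketch below shows only the intended call pattern; it assumes a valid ge::Node obtained from an already-built graph and that OpRunInfoV2 (declared in op_tiling_registry.h) can be default-constructed here.

// Hedged sketch: invoking the tiling entry points for one graph node.
// The caller is assumed to own a valid ge::Node from a built ComputeGraph.
// #include "register/op_tiling.h"  // header removed by this patch

ge::graphStatus RunTilingForNode(const ge::Node &node) {
  optiling::OpRunInfoV2 run_info;  // regular tiling output
  const ge::graphStatus ret = optiling::OpParaCalculateV2(node, run_info);
  if (ret != ge::GRAPH_SUCCESS) {
    return ret;
  }
  optiling::OpRunInfoV2 atomic_info;  // atomic-clean tiling output
  return optiling::OpAtomicCalculateV2(node, atomic_info);
}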
diff --git a/inc/metadef/inc/register/ops_kernel_builder_registry.h b/inc/metadef/inc/register/ops_kernel_builder_registry.h
deleted file mode 100644
index 8a8f3a189..000000000
--- a/inc/metadef/inc/register/ops_kernel_builder_registry.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef INC_REGISTER_OPS_KERNEL_BUILDER_REGISTRY_H_
-#define INC_REGISTER_OPS_KERNEL_BUILDER_REGISTRY_H_
-
-#include <memory>
-#include "register/register_types.h"
-#include "common/opskernel/ops_kernel_builder.h"
-
-namespace ge {
-using OpsKernelBuilderPtr = std::shared_ptr<OpsKernelBuilder>;
-
-class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpsKernelBuilderRegistry {
- public:
-  ~OpsKernelBuilderRegistry();
-  static OpsKernelBuilderRegistry &GetInstance();
-
-  void Register(const std::string &lib_name, const OpsKernelBuilderPtr &instance);
-
-  void Unregister(const std::string &lib_name);
-
-  void UnregisterAll();
-
-  const std::map<std::string, OpsKernelBuilderPtr> &GetAll() const;
-
- private:
-  OpsKernelBuilderRegistry() = default;
-  std::map<std::string, OpsKernelBuilderPtr> kernel_builders_;
-};
-
-class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpsKernelBuilderRegistrar {
- public:
-  using CreateFn = OpsKernelBuilder *(*)();
-  OpsKernelBuilderRegistrar(const std::string &kernel_lib_name, CreateFn fn);
-  ~OpsKernelBuilderRegistrar();
-
-private:
-  std::string kernel_lib_name_;
-};
-
-#define REGISTER_OPS_KERNEL_BUILDER(kernel_lib_name, builder) \
-  REGISTER_OPS_KERNEL_BUILDER_UNIQ_HELPER(__COUNTER__, kernel_lib_name, builder)
-
-#define REGISTER_OPS_KERNEL_BUILDER_UNIQ_HELPER(ctr, kernel_lib_name, builder) \
-  REGISTER_OPS_KERNEL_BUILDER_UNIQ(ctr, kernel_lib_name, builder)
-
-#define REGISTER_OPS_KERNEL_BUILDER_UNIQ(ctr, kernel_lib_name, builder) \
-  static ::ge::OpsKernelBuilderRegistrar register_op_kernel_builder_##ctr \
-      __attribute__((unused)) = \
-          ::ge::OpsKernelBuilderRegistrar(kernel_lib_name, []()->::ge::OpsKernelBuilder* { \
-            return new (std::nothrow) builder(); \
-          })
-} // namespace ge
-
-#endif // INC_REGISTER_OPS_KERNEL_BUILDER_REGISTRY_H_
diff --git a/inc/metadef/inc/register/proto/caffe/caffe.proto b/inc/metadef/inc/register/proto/caffe/caffe.proto
deleted file mode 100644
index 3f45aae25..000000000
--- a/inc/metadef/inc/register/proto/caffe/caffe.proto
+++ /dev/null
@@ -1,1821 +0,0 @@
-syntax = "proto2";
-
-package domi.caffe;
-
-// Specifies the shape (dimensions) of a Blob.
-message BlobShape {
-  repeated int64 dim = 1 [packed = true];
-}
-
-message BlobProto {
-  optional BlobShape shape = 7;
-  repeated float data = 5 [packed = true];
-  repeated float diff = 6 [packed = true];
-  repeated double double_data = 8 [packed = true];
-  repeated double double_diff = 9 [packed = true];
-  optional bytes int8_data = 10;
-  repeated int32 int32_data = 11 [packed = true];
-  repeated uint64 uint64_data = 12 [packed = true];
-  // 4D dimensions -- deprecated. Use "shape" instead.
-  optional int32 num = 1 [default = 0];
-  optional int32 channels = 2 [default = 0];
-  optional int32 height = 3 [default = 0];
-  optional int32 width = 4 [default = 0];
-}
-
-// The BlobProtoVector is simply a way to pass multiple blobproto instances
-// around.
-message BlobProtoVector {
-  repeated BlobProto blobs = 1;
-}
-
-message Datum {
-  optional int32 channels = 1;
-  optional int32 height = 2;
-  optional int32 width = 3;
-  // the actual image data, in bytes
-  optional bytes data = 4;
-  optional int32 label = 5;
-  // Optionally, the datum could also hold float data.
-  repeated float float_data = 6;
-  // If true data contains an encoded image that need to be decoded
-  optional bool encoded = 7 [default = false];
-}
-
-message FillerParameter {
-  // The filler type.
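The OpsKernelBuilderRegistry removed above is filled in by engine libraries through REGISTER_OPS_KERNEL_BUILDER. As a hedged sketch, the helper below merely enumerates what has been registered, using only methods declared in that header and assuming the engine libraries are already loaded.

// Hedged sketch: listing the kernel builders currently registered.
#include <iostream>
// #include "register/ops_kernel_builder_registry.h"  // header removed by this patch

void ListOpsKernelBuilders() {
  const auto &builders = ge::OpsKernelBuilderRegistry::GetInstance().GetAll();
  for (const auto &entry : builders) {
    // entry.first: kernel lib name; entry.second: OpsKernelBuilderPtr
    std::cout << "ops kernel builder: " << entry.first
              << (entry.second == nullptr ? " (null)" : "") << std::endl;
  }
}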
- optional string type = 1 [default = 'constant']; - optional float value = 2 [default = 0]; // the value in constant filler - optional float min = 3 [default = 0]; // the min value in uniform filler - optional float max = 4 [default = 1]; // the max value in uniform filler - optional float mean = 5 [default = 0]; // the mean value in Gaussian filler - optional float std = 6 [default = 1]; // the std value in Gaussian filler - // The expected number of non-zero output weights for a given input in - // Gaussian filler -- the default -1 means don't perform sparsification. - optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; -} - -message NetParameter { - optional string name = 1; // consider giving the network a name - // DEPRECATED. See InputParameter. The input blobs to the network. - repeated string input = 3; - // DEPRECATED. See InputParameter. The shape of the input blobs. - repeated BlobShape input_shape = 8; - - // 4D input dimensions -- deprecated. Use "input_shape" instead. - // If specified, for each input blob there should be four - // values specifying the num, channels, height and width of the input blob. - // Thus, there should be a total of (4 * #input) numbers. - repeated int32 input_dim = 4; - - // Whether the network will force every layer to carry out backward operation. - // If set False, then whether to carry out backward is determined - // automatically according to the net structure and learning rates. - optional bool force_backward = 5 [default = false]; - // The current "state" of the network, including the phase, level, and stage. - // Some layers may be included/excluded depending on this state and the states - // specified in the layers' include and exclude fields. - optional NetState state = 6; - - // Print debugging information about results while running Net::Forward, - // Net::Backward, and Net::Update. - optional bool debug_info = 7 [default = false]; - - // The layers that make up the net. Each of their configurations, including - // connectivity and behavior, is specified as a LayerParameter. - repeated LayerParameter layer = 100; // ID 100 so layers are printed last. - - // DEPRECATED: use 'layer' instead. - repeated V1LayerParameter layers = 2; -} - -// NOTE -// Update the next available ID when you add a new SolverParameter field. -// -// SolverParameter next available ID: 42 (last added: layer_wise_reduce) -message SolverParameter { - ////////////////////////////////////////////////////////////////////////////// - // Specifying the train and test networks - // - // Exactly one train net must be specified using one of the following fields: - // train_net_param, train_net, net_param, net - // One or more test nets may be specified using any of the following fields: - // test_net_param, test_net, net_param, net - // If more than one test net field is specified (e.g., both net and - // test_net are specified), they will be evaluated in the field order given - // above: (1) test_net_param, (2) test_net, (3) net_param/net. - // A test_iter must be specified for each test_net. - // A test_level and/or a test_stage may also be specified for each test_net. 
- ////////////////////////////////////////////////////////////////////////////// - - // Proto filename for the train net, possibly combined with one or more - // test nets. - optional string net = 24; - // Inline train net param, possibly combined with one or more test nets. - optional NetParameter net_param = 25; - - optional string train_net = 1; // Proto filename for the train net. - repeated string test_net = 2; // Proto filenames for the test nets. - optional NetParameter train_net_param = 21; // Inline train net params. - repeated NetParameter test_net_param = 22; // Inline test net params. - - // The states for the train/test nets. Must be unspecified or - // specified once per net. - // - // By default, all states will have solver = true; - // train_state will have phase = TRAIN, - // and all test_state's will have phase = TEST. - // Other defaults are set according to the NetState defaults. - optional NetState train_state = 26; - repeated NetState test_state = 27; - - // The number of iterations for each test net. - repeated int32 test_iter = 3; - - // The number of iterations between two testing phases. - optional int32 test_interval = 4 [default = 0]; - optional bool test_compute_loss = 19 [default = false]; - // If true, run an initial test pass before the first iteration, - // ensuring memory availability and printing the starting value of the loss. - optional bool test_initialization = 32 [default = true]; - optional float base_lr = 5; // The base learning rate - // the number of iterations between displaying info. If display = 0, no info - // will be displayed. - optional int32 display = 6; - // Display the loss averaged over the last average_loss iterations - optional int32 average_loss = 33 [default = 1]; - optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; - - // The learning rate decay policy. The currently implemented learning rate - // policies are as follows: - // - fixed: always return base_lr. - // - step: return base_lr * gamma ^ (floor(iter / step)) - // - exp: return base_lr * gamma ^ iter - // - inv: return base_lr * (1 + gamma * iter) ^ (- power) - // - multistep: similar to step but it allows non uniform steps defined by - // stepvalue - // - poly: the effective learning rate follows a polynomial decay, to be - // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) - // - sigmoid: the effective learning rate follows a sigmod decay - // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) - // - // where base_lr, max_iter, gamma, step, stepvalue and power are defined - // in the solver parameter protocol buffer, and iter is the current iteration. - optional string lr_policy = 8; - optional float gamma = 9; // The parameter to compute the learning rate. - optional float power = 10; // The parameter to compute the learning rate. - optional float momentum = 11; // The momentum value. - optional float weight_decay = 12; // The weight decay. - // regularization types supported: L1 and L2 - // controlled by weight_decay - optional string regularization_type = 29 [default = "L2"]; - // the stepsize for learning rate policy "step" - optional int32 stepsize = 13; - // the stepsize for learning rate policy "multistep" - repeated int32 stepvalue = 34; - - // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, - // whenever their actual L2 norm is larger. 
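The lr_policy comment block above describes the decay schedules only in prose. For illustration (these helpers are not part of the deleted sources), the "step" and "inv" policies work out to:

// Illustrative only: the "step" and "inv" learning-rate policies described
// in the SolverParameter comments above.
#include <cmath>

double StepPolicyLr(double base_lr, double gamma, int iter, int stepsize) {
  // lr = base_lr * gamma ^ floor(iter / stepsize)
  return base_lr * std::pow(gamma, std::floor(static_cast<double>(iter) / stepsize));
}

double InvPolicyLr(double base_lr, double gamma, double power, int iter) {
  // lr = base_lr * (1 + gamma * iter) ^ (-power)
  return base_lr * std::pow(1.0 + gamma * static_cast<double>(iter), -power);
}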
- optional float clip_gradients = 35 [default = -1]; - - optional int32 snapshot = 14 [default = 0]; // The snapshot interval - optional string snapshot_prefix = 15; // The prefix for the snapshot. - // whether to snapshot diff in the results or not. Snapshotting diff will help - // debugging but the final protocol buffer size will be much larger. - optional bool snapshot_diff = 16 [default = false]; - enum SnapshotFormat { - HDF5 = 0; - BINARYPROTO = 1; - } - optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; - // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. - enum SolverMode { - CPU = 0; - GPU = 1; - } - optional SolverMode solver_mode = 17 [default = GPU]; - // the device_id will that be used in GPU mode. Use device_id = 0 in default. - optional int32 device_id = 18 [default = 0]; - // If non-negative, the seed with which the Solver will initialize the Caffe - // random number generator -- useful for reproducible results. Otherwise, - // (and by default) initialize using a seed derived from the system clock. - optional int64 random_seed = 20 [default = -1]; - - // type of the solver - optional string type = 40 [default = "SGD"]; - - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam - optional float delta = 31 [default = 1e-8]; - // parameters for the Adam solver - optional float momentum2 = 39 [default = 0.999]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38 [default = 0.99]; - - // If true, print information about the state of the net that may help with - // debugging learning problems. - optional bool debug_info = 23 [default = false]; - - // If false, don't save a snapshot after training finishes. - optional bool snapshot_after_train = 28 [default = true]; - - // DEPRECATED: old solver enum types, use string instead - enum SolverType { - SGD = 0; - NESTEROV = 1; - ADAGRAD = 2; - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - // DEPRECATED: use type instead of solver_type - optional SolverType solver_type = 30 [default = SGD]; - - // Overlap compute and communication for data parallel training - optional bool layer_wise_reduce = 41 [default = true]; -} - -// A message that stores the solver snapshots -message SolverState { - optional int32 iter = 1; // The current iteration - optional string learned_net = 2; // The file that stores the learned net. - repeated BlobProto history = 3; // The history for sgd solvers - optional int32 current_step = 4 [default = 0]; // The current step for learning rate -} - -enum Phase { - TRAIN = 0; - TEST = 1; -} - -message NetState { - optional Phase phase = 1 [default = TEST]; - optional int32 level = 2 [default = 0]; - repeated string stage = 3; -} - -message NetStateRule { - // Set phase to require the NetState have a particular phase (TRAIN or TEST) - // to meet this rule. - optional Phase phase = 1; - - // Set the minimum and/or maximum levels in which the layer should be used. - // Leave undefined to meet the rule regardless of level. - optional int32 min_level = 2; - optional int32 max_level = 3; - - // Customizable sets of stages to include or exclude. - // The net must have ALL of the specified stages and NONE of the specified - // "not_stage"s to meet the rule. - // (Use multiple NetStateRules to specify conjunctions of stages.) 
- repeated string stage = 4; - repeated string not_stage = 5; -} - -// Specifies training parameters (multipliers on global learning constants, -// and the name and other settings used for weight sharing). -message ParamSpec { - // The names of the parameter blobs -- useful for sharing parameters among - // layers, but never required otherwise. To share a parameter between two - // layers, give it a (non-empty) name. - optional string name = 1; - - // Whether to require shared weights to have the same shape, or just the same - // count -- defaults to STRICT if unspecified. - optional DimCheckMode share_mode = 2; - enum DimCheckMode { - // STRICT (default) requires that num, channels, height, width each match. - STRICT = 0; - // PERMISSIVE requires only the count (num*channels*height*width) to match. - PERMISSIVE = 1; - } - - // The multiplier on the global learning rate for this parameter. - optional float lr_mult = 3 [default = 1.0]; - - // The multiplier on the global weight decay for this parameter. - optional float decay_mult = 4 [default = 1.0]; -} - -// NOTE -// Update the next available ID when you add a new LayerParameter field. -// -// LayerParameter next available layer-specific ID: 151 (last added: smooth_l1_loss_param) -message LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the layer type - repeated string bottom = 3; // the name of each bottom blob - repeated string top = 4; // the name of each top blob - - // The train / test phase for computation. - optional Phase phase = 10; - - // The amount of weight to assign each top blob in the objective. - // Each layer assigns a default value, usually of either 0 or 1, - // to each top blob. - repeated float loss_weight = 5; - - // Specifies training parameters (multipliers on global learning constants, - // and the name and other settings used for weight sharing). - repeated ParamSpec param = 6; - - // The blobs containing the numeric parameters of the layer. - repeated BlobProto blobs = 7; - - // Specifies whether to backpropagate to each bottom. If unspecified, - // Caffe will automatically infer whether each input needs backpropagation - // to compute parameter gradients. If set to true for some inputs, - // backpropagation to those inputs is forced; if set false for some inputs, - // backpropagation to those inputs is skipped. - // - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; - - // Rules controlling whether and when a layer is included in the network, - // based on the current NetState. You may specify a non-zero number of rules - // to include OR exclude, but not both. If no include or exclude rules are - // specified, the layer is always included. If the current NetState meets - // ANY (i.e., one or more) of the specified rules, the layer is - // included/excluded. - repeated NetStateRule include = 8; - repeated NetStateRule exclude = 9; - - // Parameters for data pre-processing. - optional TransformationParameter transform_param = 100; - - // Parameters shared by loss layers. - optional LossParameter loss_param = 101; - - // Layer type-specific parameters. - // - // Note: certain layers may have more than one computational engine - // for their implementation. These layers include an Engine type and - // engine parameter for selecting the implementation. - // The default for the engine is set by the ENGINE switch at compile-time. 
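LayerParameter above combines per-layer type parameters with NetStateRule include/exclude filtering. The hedged sketch below builds a train-only Dropout layer through the protobuf-generated C++ API; caffe.pb.h is assumed to be the protoc output for this caffe.proto, and all field accessors follow standard proto2 code generation.

// Hedged sketch: a Dropout layer that is included only in the TRAIN phase.
#include "caffe.pb.h"  // assumed protoc output for the caffe.proto deleted here

domi::caffe::LayerParameter MakeTrainOnlyDropout() {
  domi::caffe::LayerParameter layer;
  layer.set_name("drop1");
  layer.set_type("Dropout");
  layer.add_bottom("fc1");
  layer.add_top("fc1");
  layer.mutable_dropout_param()->set_dropout_ratio(0.5f);
  // Include this layer only when the NetState phase is TRAIN.
  layer.add_include()->set_phase(domi::caffe::TRAIN);
  return layer;
}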
- optional AccuracyParameter accuracy_param = 102; - optional ArgMaxParameter argmax_param = 103; - optional BatchNormParameter batch_norm_param = 139; - optional BiasParameter bias_param = 141; - optional ConcatParameter concat_param = 104; - optional ContrastiveLossParameter contrastive_loss_param = 105; - optional ConvolutionParameter convolution_param = 106; - optional CropParameter crop_param = 144; - optional DataParameter data_param = 107; - optional DetectionOutputParameter detection_output_param = 150; - optional DropoutParameter dropout_param = 108; - optional DummyDataParameter dummy_data_param = 109; - optional EltwiseParameter eltwise_param = 110; - optional ELUParameter elu_param = 140; - optional EmbedParameter embed_param = 137; - optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; - optional HDF5DataParameter hdf5_data_param = 112; - optional HDF5OutputParameter hdf5_output_param = 113; - optional HingeLossParameter hinge_loss_param = 114; - optional ImageDataParameter image_data_param = 115; - optional InfogainLossParameter infogain_loss_param = 116; - optional InnerProductParameter inner_product_param = 117; - optional InputParameter input_param = 143; - optional LogParameter log_param = 134; - optional LRNParameter lrn_param = 118; - optional MemoryDataParameter memory_data_param = 119; - optional MVNParameter mvn_param = 120; - optional ParameterParameter parameter_param = 145; - optional PoolingParameter pooling_param = 121; - optional PowerParameter power_param = 122; - optional PReLUParameter prelu_param = 131; - optional PythonParameter python_param = 130; - optional RecurrentParameter recurrent_param = 146; - optional ReductionParameter reduction_param = 136; - optional ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; - optional ScaleParameter scale_param = 142; - optional SigmoidParameter sigmoid_param = 124; - optional SmoothL1LossParameter smooth_l1_loss_param = 148; - optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; - optional SliceParameter slice_param = 126; - optional TanHParameter tanh_param = 127; - optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; - optional WindowDataParameter window_data_param = 129; - optional PermuteParameter permute_param = 202; - optional PriorBoxParameter prior_box_param = 203; - optional NormalizeParameter norm_param = 206; - optional PSROIPoolingParameter psroi_pooling_param = 207; - optional FreespaceExtractParameter freespace_extract_param = 151; - optional PostprocessParameter postprocess_param = 152; - optional SpatialTransformParameter spatial_transform_param = 153; - optional ROIAlignParameter roi_align_param = 154; - optional ReorgParameter reorg_param = 155; - optional RegionParameter region_param = 156; - optional ReverseParameter reverse_param = 157; - optional InterpParameter interp_param = 158; - optional ShuffleChannelParameter shuffle_channel_param = 159; - optional UpsampleParameter upsample_param = 160; - optional ROIPoolingParameter roi_pooling_param = 161; - optional YoloParameter yolo_param = 199; - optional YoloV3DetectionOutputParameter yolov3_detection_output_param = 200; - optional ProposalParameter proposal_param = 201; - optional FSRDetectionOutputParameter fsrdetectionoutput_param = 222; - optional SSDDetectionOutputParameter ssddetectionoutput_param = 232; - optional YoloV2DetectionOutputParameter yolov2_detection_output_param = 204; - optional 
QuantParameter quant_param = 208; - optional CondTakeParameter condtake_param = 233; - optional MatrixInverseParameter matrix_inverse_param = 210; - optional WarpPerspectiveParameter warp_perspective_param = 234; - optional BatchMatMulParameter batch_matmul_param = 235; - optional SpatialTransformerParameter st_param = 5000; - optional YoloV3DetectionOutputV2Parameter yolov3_detection_output_v2_param = 5001; -} - -// Message that stores parameters used to apply transformation -// to the data layer's data -message TransformationParameter { - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 1 [default = 1]; - // Specify if we want to randomly mirror data. - optional bool mirror = 2 [default = false]; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 3 [default = 0]; - // mean_file and mean_value cannot be specified at the same time - optional string mean_file = 4; - // if specified can be repeated once (would substract it from all the channels) - // or can be repeated the same number of times as channels - // (would subtract them from the corresponding channel) - repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [default = false]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [default = false]; -} - -// Message that stores parameters shared by loss layers -message LossParameter { - // If specified, ignore instances with the given label. - optional int32 ignore_label = 1; - // How to normalize the loss for loss layers that aggregate across batches, - // spatial dimensions, or other dimensions. Currently only implemented in - // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. - enum NormalizationMode { - // Divide by the number of examples in the batch times spatial dimensions. - // Outputs that receive the ignore label will NOT be ignored in computing - // the normalization factor. - FULL = 0; - // Divide by the total number of output locations that do not take the - // ignore_label. If ignore_label is not set, this behaves like FULL. - VALID = 1; - // Divide by the batch size. - BATCH_SIZE = 2; - // Do not normalize the loss. - NONE = 3; - } - // For historical reasons, the default normalization for - // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. - optional NormalizationMode normalization = 3 [default = VALID]; - // Deprecated. Ignored if normalization is specified. If normalization - // is not specified, then setting this to false will be equivalent to - // normalization = BATCH_SIZE to be consistent with previous behavior. - optional bool normalize = 2; -} - -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - -message AccuracyParameter { - // When computing accuracy, count as correct by comparing the true label to - // the top k scoring classes. By default, only compare to the top scoring - // class (i.e. argmax). - optional uint32 top_k = 1 [default = 1]; - - // The "label" axis of the prediction blob, whose argmax corresponds to the - // predicted label -- may be negative to index from the end (e.g., -1 for the - // last axis). For example, if axis == 1 and the predictions are - // (N x C x H x W), the label blob is expected to contain N*H*W ground truth - // labels with integer values in {0, 1, ..., C-1}. 
- optional int32 axis = 2 [default = 1]; - - // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; -} - -message ArgMaxParameter { - // If true produce pairs (argmax, maxval) - optional bool out_max_val = 1 [default = false]; - optional uint32 top_k = 2 [default = 1]; - // The axis along which to maximise -- may be negative to index from the - // end (e.g., -1 for the last axis). - // By default ArgMaxLayer maximizes over the flattened trailing dimensions - // for each index of the first / num dimension. - optional int32 axis = 3; -} - -message ConcatParameter { - // The axis along which to concatenate -- may be negative to index from the - // end (e.g., -1 for the last axis). Other axes must have the - // same dimension for all the bottom blobs. - // By default, ConcatLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 2 [default = 1]; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 concat_dim = 1 [default = 1]; -} - -message BatchNormParameter { - // If false, normalization is performed over the current mini-batch - // and global statistics are accumulated (but not yet used) by a moving - // average. - // If true, those accumulated mean and variance values are used for the - // normalization. - // By default, it is set to false when the network is in the training - // phase and true when the network is in the testing phase. - optional bool use_global_stats = 1; - // What fraction of the moving average remains each iteration? - // Smaller values make the moving average decay faster, giving more - // weight to the recent values. - // Each iteration updates the moving average @f$S_{t-1}@f$ with the - // current mean @f$ Y_t @f$ by - // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$ - // is the moving_average_fraction parameter. - optional float moving_average_fraction = 2 [default = .999]; - // Small value to add to the variance estimate so that we don't divide by - // zero. - optional float eps = 3 [default = 1e-5]; -} - -message BiasParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar bias. - optional int32 axis = 1 [default = 1]; - - // (num_axes is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the bias - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to add a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [default = 1]; - - // (filler is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer.) - // The initialization for the learned bias parameter. - // Default is the zero (0) initialization, resulting in the BiasLayer - // initially performing the identity operation. 
- optional FillerParameter filler = 3; - optional bool bias_from_blob = 4 [default = true]; -} - -message ContrastiveLossParameter { - // margin for dissimilar pair - optional float margin = 1 [default = 1.0]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; -} - -message ConvolutionParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in all spatial dimensions, or once per spatial dimension. - repeated uint32 pad = 3; // The padding size; defaults to 0 - repeated uint32 kernel_size = 4; // The kernel size - repeated uint32 stride = 6; // The stride; defaults to 1 - // Factor used to dilate the kernel, (implicitly) zero-filling the resulting - // holes. (Kernel dilation is sometimes referred to by its use in the - // algorithme à trous from Holschneider et al. 1987.) - repeated uint32 dilation = 18; // The dilation; defaults to 1 - - // For 2D convolution only, the *_h and *_w versions may also be used to - // specify both spatial dimensions. - optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only) - optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only) - optional uint32 kernel_h = 11; // The kernel height (2D only) - optional uint32 kernel_w = 12; // The kernel width (2D only) - optional uint32 stride_h = 13; // The stride height (2D only) - optional uint32 stride_w = 14; // The stride width (2D only) - - optional uint32 group = 5 [default = 1]; // The group size for group conv - - optional FillerParameter weight_filler = 7; // The filler for the weight - optional FillerParameter bias_filler = 8; // The filler for the bias - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [default = DEFAULT]; - - // The axis to interpret as "channels" when performing convolution. - // Preceding dimensions are treated as independent inputs; - // succeeding dimensions are treated as "spatial". - // With (N, C, H, W) inputs, and axis == 1 (the default), we perform - // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for - // groups g>1) filters across the spatial axes (H, W) of the input. - // With (N, C, D, H, W) inputs, and axis == 1, we perform - // N independent 3D convolutions, sliding (C/g)-channels - // filters across the spatial axes (D, H, W) of the input. - optional int32 axis = 16 [default = 1]; - - // Whether to force use of the general ND convolution, even if a specific - // implementation for blobs of the appropriate number of spatial dimensions - // is available. (Currently, there is only a 2D-specific convolution - // implementation; for input blobs with num_axes != 2, this option is - // ignored and the ND implementation will be used.) - optional bool force_nd_im2col = 17 [default = false]; -} - -message CropParameter { - // To crop, elements of the first bottom are selected to fit the dimensions - // of the second, reference bottom. 
The crop is configured by - // - the crop `axis` to pick the dimensions for cropping - // - the crop `offset` to set the shift for all/each dimension - // to align the cropped bottom with the reference bottom. - // All dimensions up to but excluding `axis` are preserved, while - // the dimensions including and trailing `axis` are cropped. - // If only one `offset` is set, then all dimensions are offset by this amount. - // Otherwise, the number of offsets must equal the number of cropped axes to - // shift the crop in each dimension accordingly. - // Note: standard dimensions are N,C,H,W so the default is a spatial crop, - // and `axis` may be negative to index from the end (e.g., -1 for the last - // axis). - optional int32 axis = 1 [default = 2]; - repeated uint32 offset = 2; -} - -message DataParameter { - enum DB { - LEVELDB = 0; - LMDB = 1; - } - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - // DEPRECATED. Each solver accesses a different subset of the database. - optional uint32 rand_skip = 7 [default = 0]; - optional DB backend = 8 [default = LEVELDB]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - // Force the encoded image to have 3 color channels - optional bool force_encoded_color = 9 [default = false]; - // Prefetch queue (Increase if data feeding bandwidth varies, within the - // limit of device memory for GPU training) - optional uint32 prefetch = 10 [default = 4]; -} - -message DropoutParameter { - optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio - optional bool scale_train = 2 [default = true]; // scale train or test phase -} - -// DummyDataLayer fills any number of arbitrarily shaped blobs with random -// (or constant) data generated by "Fillers" (see "message FillerParameter"). -message DummyDataParameter { - // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N - // shape fields, and 0, 1 or N data_fillers. - // - // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. - // If 1 data_filler is specified, it is applied to all top blobs. If N are - // specified, the ith is applied to the ith top blob. - repeated FillerParameter data_filler = 1; - repeated BlobShape shape = 6; - - // 4D dimensions -- deprecated. Use "shape" instead. 
- repeated uint32 num = 2; - repeated uint32 channels = 3; - repeated uint32 height = 4; - repeated uint32 width = 5; -} - -message EltwiseParameter { - enum EltwiseOp { - PROD = 0; - SUM = 1; - MAX = 2; - } - optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation - repeated float coeff = 2; // blob-wise coefficient for SUM operation - - // Whether to use an asymptotically slower (for >2 inputs) but stabler method - // of computing the gradient for the PROD operation. (No effect for SUM op.) - optional bool stable_prod_grad = 3 [default = true]; -} - -// Message that stores parameters used by ELULayer -message ELUParameter { - // Described in: - // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate - // Deep Network Learning by Exponential Linear Units (ELUs). arXiv - optional float alpha = 1 [default = 1]; -} - -// Message that stores parameters used by EmbedLayer -message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - // The input is given as integers to be interpreted as one-hot - // vector indices with dimension num_input. Hence num_input should be - // 1 greater than the maximum possible input value. - optional uint32 input_dim = 2; - - optional bool bias_term = 3 [default = true]; // Whether to use a bias term - optional FillerParameter weight_filler = 4; // The filler for the weight - optional FillerParameter bias_filler = 5; // The filler for the bias - -} - -// Message that stores parameters used by ExpLayer -message ExpParameter { - // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = exp(shift + scale * x). - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - -// Message that stores parameters used by HDF5DataLayer -message HDF5DataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 2; - - // Specify whether to shuffle the data. - // If shuffle == true, the ordering of the HDF5 files is shuffled, - // and the ordering of data within any given HDF5 file is shuffled, - // but data between different files are not interleaved; all of a file's - // data are output (in a random order) before moving onto another file. - optional bool shuffle = 3 [default = false]; -} - -message HDF5OutputParameter { - optional string file_name = 1; -} - -message HingeLossParameter { - enum Norm { - L1 = 1; - L2 = 2; - } - // Specify the Norm to use L1 or L2 - optional Norm norm = 1 [default = L1]; -} - -message ImageDataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). 
Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 7 [default = 0]; - // Whether or not ImageLayer should shuffle the list of files at every epoch. - optional bool shuffle = 8 [default = false]; - // It will also resize images if new_height or new_width are not zero. - optional uint32 new_height = 9 [default = 0]; - optional uint32 new_width = 10 [default = 0]; - // Specify if the images are color or gray - optional bool is_color = 11 [default = true]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - optional string root_folder = 12 [default = ""]; -} - -message InfogainLossParameter { - // Specify the infogain matrix source. - optional string source = 1; - optional int32 axis = 2 [default = 1]; // axis of prob -} - -message InnerProductParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 3; // The filler for the weight - optional FillerParameter bias_filler = 4; // The filler for the bias - - // The first axis to be lumped into a single inner product computation; - // all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [default = 1]; - // Specify whether to transpose the weight matrix or not. - // If transpose == true, any operations will be performed on the transpose - // of the weight matrix. The weight matrix itself is not going to be transposed - // but rather the transfer flag of operations will be toggled accordingly. - optional bool transpose = 6 [default = false]; -} - -message InputParameter { - // This layer produces N >= 1 top blob(s) to be assigned manually. - // Define N shapes to set a shape for each top. - // Define 1 shape to set the same shape for every top. - // Define no shape to defer to reshaping manually. - repeated BlobShape shape = 1; -} - -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
- // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -// Message that stores parameters used by LRNLayer -message LRNParameter { - optional uint32 local_size = 1 [default = 5]; - optional float alpha = 2 [default = 1.]; - optional float beta = 3 [default = 0.75]; - enum NormRegion { - ACROSS_CHANNELS = 0; - WITHIN_CHANNEL = 1; - } - optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; - optional float k = 5 [default = 1.]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -message MemoryDataParameter { - optional uint32 batch_size = 1; - optional uint32 channels = 2; - optional uint32 height = 3; - optional uint32 width = 4; -} - -message MVNParameter { - // This parameter can be set to false to normalize mean only - optional bool normalize_variance = 1 [default = true]; - - // This parameter can be set to true to perform DNN-like MVN - optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; -} - -message ParameterParameter { - optional BlobShape shape = 1; -} - -message PoolingParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 1 [default = MAX]; // The pooling method - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. - optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 2; // The kernel size (square) - optional uint32 kernel_h = 5; // The kernel height - optional uint32 kernel_w = 6; // The kernel width - optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) - optional uint32 stride_h = 7; // The stride height - optional uint32 stride_w = 8; // The stride width - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 11 [default = DEFAULT]; - // If global_pooling then it will pool over the size of the bottom by doing - // kernel_h = bottom->height and kernel_w = bottom->width - optional bool global_pooling = 12 [default = false]; - optional bool ceil_mode = 13 [default = true]; - // How to calculate the output size - using ceil (default) or floor rounding. - enum RoundMode { - CEIL = 0; - FLOOR = 1; - } - optional RoundMode round_mode = 14 [default = CEIL]; -} - -message PowerParameter { - // PowerLayer computes outputs y = (shift + scale * x) ^ power. - optional float power = 1 [default = 1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -message PythonParameter { - optional string module = 1; - optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. 
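PoolingParameter above chooses between ceil and floor rounding for the output size. For illustration only (not part of the deleted sources), the spatial output dimension implied by pad, kernel, stride and round_mode is:

// Illustrative only: pooled output size for one spatial dimension, following
// the pad/kernel/stride fields and CEIL/FLOOR round_mode described above.
#include <cmath>

int PooledDim(int input, int pad, int kernel, int stride, bool ceil_mode) {
  const double span = static_cast<double>(input + 2 * pad - kernel) / stride;
  return static_cast<int>(ceil_mode ? std::ceil(span) : std::floor(span)) + 1;
}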
- // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; -} - -// Message that stores parameters used by RecurrentLayer -message RecurrentParameter { - // The dimension of the output (and usually hidden state) representation -- - // must be explicitly set to non-zero. - optional uint32 num_output = 1 [default = 0]; - - optional FillerParameter weight_filler = 2; // The filler for the weight - optional FillerParameter bias_filler = 3; // The filler for the bias - - // Whether to enable displaying debug_info in the unrolled recurrent net. - optional bool debug_info = 4 [default = false]; - - // Whether to add as additional inputs (bottoms) the initial hidden state - // blobs, and add as additional outputs (tops) the final timestep hidden state - // blobs. The number of additional bottom/top blobs required depends on the - // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs. - optional bool expose_hidden = 5 [default = false]; -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - -// Message that stores parameters used by ReLULayer -message ReLUParameter { - // Allow non-zero slope for negative inputs to speed up optimization - // Described in: - // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities - // improve neural network acoustic models. In ICML Workshop on Deep Learning - // for Audio, Speech, and Language Processing. - optional float negative_slope = 1 [default = 0]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 2 [default = DEFAULT]; -} - -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). - // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... 
} - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - - -message ScaleParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar multiplier. - optional int32 axis = 1 [default = 1]; - - // (num_axes is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the scale - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. 
- optional int32 num_axes = 2 [default = 1]; - - // (filler is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer.) - // The initialization for the learned scale parameter. - // Default is the unit (1) initialization, resulting in the ScaleLayer - // initially performing the identity operation. - optional FillerParameter filler = 3; - - // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but - // may be more efficient). Initialized with bias_filler (defaults to 0). - optional bool bias_term = 4 [default = false]; - optional FillerParameter bias_filler = 5; - optional bool scale_from_blob = 6 [default = true]; -} - -message SigmoidParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -message SliceParameter { - // The axis along which to slice -- may be negative to index from the end - // (e.g., -1 for the last axis). - // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [default = 1]; - repeated uint32 slice_point = 2; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 slice_dim = 1 [default = 1]; -} - -message SmoothL1LossParameter { - // SmoothL1Loss(x) = - // 0.5 * (sigma * x) ** 2 -- if x < 1.0 / sigma / sigma - // |x| - 0.5 / sigma / sigma -- otherwise - optional float sigma = 1 [default = 1]; -} - -// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer -message SoftmaxParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; - - // The axis along which to perform the softmax -- may be negative to index - // from the end (e.g., -1 for the last axis). - // Any other axes will be evaluated as independent softmaxes. - optional int32 axis = 2 [default = 1]; -} - -message TanHParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; - - // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; -} - -// Message that stores parameters used by ThresholdLayer -message ThresholdParameter { - optional float threshold = 1 [default = 0]; // Strictly positive values -} - -message WindowDataParameter { - // Specify the data source. - optional string source = 1; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // Specify the batch size. - optional uint32 batch_size = 4; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 5 [default = 0]; - // Specify if we want to randomly mirror data. 
- optional bool mirror = 6 [default = false]; - // Foreground (object) overlap threshold - optional float fg_threshold = 7 [default = 0.5]; - // Background (non-object) overlap threshold - optional float bg_threshold = 8 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float fg_fraction = 9 [default = 0.25]; - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 context_pad = 10 [default = 0]; - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string crop_mode = 11 [default = "warp"]; - // cache_images: will load all images in memory for faster access - optional bool cache_images = 12 [default = false]; - // append root_folder to locate images - optional string root_folder = 13 [default = ""]; -} - -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -// DEPRECATED: use LayerParameter. -message V1LayerParameter { - repeated string bottom = 2; - repeated string top = 3; - optional string name = 4; - repeated NetStateRule include = 32; - repeated NetStateRule exclude = 33; - enum LayerType { - NONE = 0; - ABSVAL = 35; - ACCURACY = 1; - ARGMAX = 30; - BNLL = 2; - CONCAT = 3; - CONTRASTIVE_LOSS = 37; - CONVOLUTION = 4; - DATA = 5; - DECONVOLUTION = 39; - DROPOUT = 6; - DUMMY_DATA = 32; - EUCLIDEAN_LOSS = 7; - ELTWISE = 25; - EXP = 38; - FLATTEN = 8; - HDF5_DATA = 9; - HDF5_OUTPUT = 10; - HINGE_LOSS = 28; - IM2COL = 11; - IMAGE_DATA = 12; - INFOGAIN_LOSS = 13; - INNER_PRODUCT = 14; - LRN = 15; - MEMORY_DATA = 29; - MULTINOMIAL_LOGISTIC_LOSS = 16; - MVN = 34; - POOLING = 17; - POWER = 26; - RELU = 18; - SIGMOID = 19; - SIGMOID_CROSS_ENTROPY_LOSS = 27; - SILENCE = 36; - SOFTMAX = 20; - SOFTMAX_LOSS = 21; - SPLIT = 22; - SLICE = 33; - TANH = 23; - WINDOW_DATA = 24; - THRESHOLD = 31; - QUANT = 208; - DEQUANT = 209; - } - optional LayerType type = 5; - repeated BlobProto blobs = 6; - repeated string param = 1001; - repeated DimCheckMode blob_share_mode = 1002; - enum DimCheckMode { - STRICT = 0; - PERMISSIVE = 1; - } - repeated float blobs_lr = 7; - repeated float weight_decay = 8; - repeated float loss_weight = 35; - optional AccuracyParameter accuracy_param = 27; - optional ArgMaxParameter argmax_param = 23; - optional ConcatParameter concat_param = 9; - optional ContrastiveLossParameter contrastive_loss_param = 40; - optional ConvolutionParameter convolution_param = 10; - optional DataParameter data_param = 11; - optional DropoutParameter dropout_param = 12; - optional DummyDataParameter dummy_data_param = 26; - optional EltwiseParameter eltwise_param = 24; - optional ExpParameter exp_param = 41; - optional HDF5DataParameter hdf5_data_param = 13; - optional HDF5OutputParameter hdf5_output_param = 14; - optional HingeLossParameter hinge_loss_param = 29; - optional ImageDataParameter image_data_param = 15; - optional InfogainLossParameter infogain_loss_param = 16; - optional InnerProductParameter inner_product_param = 17; - optional LRNParameter lrn_param = 18; - optional MemoryDataParameter memory_data_param = 22; - optional MVNParameter mvn_param = 34; - optional PoolingParameter pooling_param = 19; - optional 
PowerParameter power_param = 21; - optional ReLUParameter relu_param = 30; - optional SigmoidParameter sigmoid_param = 38; - optional SoftmaxParameter softmax_param = 39; - optional SliceParameter slice_param = 31; - optional TanHParameter tanh_param = 37; - optional ThresholdParameter threshold_param = 25; - optional WindowDataParameter window_data_param = 20; - optional TransformationParameter transform_param = 36; - optional LossParameter loss_param = 42; - optional V0LayerParameter layer = 1; -} - -// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters -// in Caffe. We keep this message type around for legacy support. -message V0LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the string to specify the layer type - - // Parameters to specify layers with inner products. - optional uint32 num_output = 3; // The number of outputs for the layer - optional bool biasterm = 4 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 5; // The filler for the weight - optional FillerParameter bias_filler = 6; // The filler for the bias - - optional uint32 pad = 7 [default = 0]; // The padding size - optional uint32 kernelsize = 8; // The kernel size - optional uint32 group = 9 [default = 1]; // The group size for group conv - optional uint32 stride = 10 [default = 1]; // The stride - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 11 [default = MAX]; // The pooling method - optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio - - optional uint32 local_size = 13 [default = 5]; // for local response norm - optional float alpha = 14 [default = 1.]; // for local response norm - optional float beta = 15 [default = 0.75]; // for local response norm - optional float k = 22 [default = 1.]; - - // For data layers, specify the data source - optional string source = 16; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 17 [default = 1]; - optional string meanfile = 18; - // For data layers, specify the batch size. - optional uint32 batchsize = 19; - // For data layers, specify if we would like to randomly crop an image. - optional uint32 cropsize = 20 [default = 0]; - // For data layers, specify if we want to randomly mirror data. - optional bool mirror = 21 [default = false]; - - // The blobs containing the numeric parameters of the layer - repeated BlobProto blobs = 50; - // The ratio that is multiplied on the global learning rate. If you want to - // set the learning ratio for one blob, you need to set it for all blobs. - repeated float blobs_lr = 51; - // The weight decay that is multiplied on the global weight decay. - repeated float weight_decay = 52; - - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. 
- optional uint32 rand_skip = 53 [default = 0]; - - // Fields related to detection (det_*) - // foreground (object) overlap threshold - optional float det_fg_threshold = 54 [default = 0.5]; - // background (non-object) overlap threshold - optional float det_bg_threshold = 55 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float det_fg_fraction = 56 [default = 0.25]; - - // optional bool OBSOLETE_can_clobber = 57 [default = true]; - - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 det_context_pad = 58 [default = 0]; - - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string det_crop_mode = 59 [default = "warp"]; - - // For ReshapeLayer, one needs to specify the new dimensions. - optional int32 new_num = 60 [default = 0]; - optional int32 new_channels = 61 [default = 0]; - optional int32 new_height = 62 [default = 0]; - optional int32 new_width = 63 [default = 0]; - - // Whether or not ImageLayer should shuffle the list of files at every epoch. - // It will also resize images if new_height or new_width are not zero. - optional bool shuffle_images = 64 [default = false]; - - // For ConcatLayer, one needs to specify the dimension for concatenation, and - // the other dimensions must be the same for all the bottom blobs. - // By default it will concatenate blobs along the channels dimension. - optional uint32 concat_dim = 65 [default = 1]; - - optional HDF5OutputParameter hdf5_output_param = 1001; -} - -message PReLUParameter { - // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: - // Surpassing Human-Level Performance on ImageNet Classification, 2015. - - // Initial value of a_i. Default is a_i=0.25 for all i. - optional FillerParameter filler = 1; - // Whether or not slope parameters are shared across channels. - optional bool channel_shared = 2 [default = false]; -} - -// Message that stores parameters used by DetectionOutputLayer -//message DetectionOutputParameter { -// optional int32 num_classes = 1 [default = 21]; -// optional float nms_threshold = 2 [default = 0.3]; -// optional int32 top_k = 3; -// optional float confidence_threshold = 4 [default = 0.8]; -//} - -// Message that store parameters used by PriorBoxLayer -message PriorBoxParameter { - // Encode/decode type. - enum CodeType { - CORNER = 1; - CENTER_SIZE = 2; - CORNER_SIZE = 3; - } - // Minimum box size (in pixels). Required! - repeated float min_size = 1; - // Maximum box size (in pixels). Required! - repeated float max_size = 2; - // Various of aspect ratios. Duplicate ratios will be ignored. - // If none is provided, we use default ratio 1. - repeated float aspect_ratio = 3; - // If true, will flip each aspect ratio. - // For example, if there is aspect ratio "r", - // we will generate aspect ratio "1.0/r" as well. - optional bool flip = 4 [default = true]; - // If true, will clip the prior so that it is within [0, 1] - optional bool clip = 5 [default = false]; - // Variance for adjusting the prior bboxes. - repeated float variance = 6; - // By default, we calculate img_height, img_width, step_x, step_y based on - // bottom[0] (feat) and bottom[1] (img). Unless these values are explicitely - // provided. - // Explicitly provide the img_size. - optional uint32 img_size = 7; - // Either img_size or img_h/img_w should be specified; not both. 
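To illustrate how the PriorBoxParameter fields above fit together, and in particular the rule that either img_size or the img_h/img_w pair is provided but not both, a minimal sketch using the generated Python API follows; the caffe_pb2 module name and every value are assumptions for illustration only.

```python
import caffe_pb2  # hypothetical module produced by: protoc --python_out=. caffe.proto

prior = caffe_pb2.PriorBoxParameter()
prior.min_size.append(30.0)            # required: minimum box size in pixels
prior.max_size.append(60.0)            # required: maximum box size in pixels
prior.aspect_ratio.extend([2.0, 3.0])  # 1.0/r variants are generated too while flip = true (the default)
prior.variance.extend([0.1, 0.1, 0.2, 0.2])
prior.img_size = 300                   # set img_size OR img_h/img_w, never both
```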
- optional uint32 img_h = 8; - optional uint32 img_w = 9; - - // Explicitly provide the step size. - optional float step = 10; - // Either step or step_h/step_w should be specified; not both. - optional float step_h = 11; - optional float step_w = 12; - - // Offset to the top left corner of each cell. - optional float offset = 13 [default = 0.5]; -} - -// Message that stores parameters used by PermutetLayer -message PermuteParameter { - // The new orders of the axes of data. Notice it should be with - // in the same range as the input data, and it starts from 0. - // Do not provide repeated order. - repeated uint32 order = 1; -} - -message NormalizeParameter { - optional bool across_spatial = 1 [default = true]; - // Initial value of scale. Default is 1.0 for all - optional FillerParameter scale_filler = 2; - // Whether or not scale parameters are shared across channels. - optional bool channel_shared = 3 [default = true]; - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 4 [default = 1e-10]; -} - -// needed by ssd -message SaveOutputParameter { - // Output directory. If not empty, we will save the results. - optional string output_directory = 1; - // Output name prefix. - optional string output_name_prefix = 2; - // Output format. - // VOC - PASCAL VOC output format. - // COCO - MS COCO output format. - optional string output_format = 3; - // If you want to output results, must also provide the following two files. - // Otherwise, we will ignore saving results. - // label map file. - optional string label_map_file = 4; - // A file which contains a list of names and sizes with same order - // of the input DB. The file is in the following format: - // name height width - // ... - optional string name_size_file = 5; - // Number of test images. It can be less than the lines specified in - // name_size_file. For example, when we only want to evaluate on part - // of the test images. - optional uint32 num_test_image = 6; - // The resize parameter used in saving the data. - // optional ResizeParameter resize_param = 7; -} - -message NonMaximumSuppressionParameter { - // Threshold to be used in nms. - optional float nms_threshold = 1 [default = 0.3]; - // Maximum number of results to be kept. - optional int32 top_k = 2; - // Parameter for adaptive nms. 
- optional float eta = 3 [default = 1.0]; -} - -message GeneralNmsParameter { - optional int32 post_top_k = 1 ; - optional float nms_threshold = 2 [default = 0]; - optional float iou_threshold_decay = 3 [default = 1.0]; - optional float coor_scale_factor = 4 [default = 1.0]; -} - -// Message that store parameters used by DetectionOutputLayer, ssd/fasterRcnn -message DetectionOutputParameter { - optional int32 num_classes = 1; - optional bool share_location = 2 [default = true]; - optional int32 background_label_id = 3 [default = 0]; - optional NonMaximumSuppressionParameter nms_param = 4; - optional SaveOutputParameter save_output_param = 5; - optional PriorBoxParameter.CodeType code_type = 6 [default = CENTER_SIZE]; - optional bool variance_encoded_in_target = 8 [default = true]; - optional int32 keep_top_k = 7; - optional float confidence_threshold = 9; - optional float nms_threshold = 13; - optional int32 top_k = 14; - optional int32 boxes = 15 [default = 1]; - optional bool relative = 17 [default = true]; - optional float objectness_threshold = 18 [default = 0.5]; - optional float class_threshold = 19 [default = 0.5]; - repeated float biases = 20; - optional GeneralNmsParameter general_nms_param = 21; - optional float objectness_score = 22; -} -message PSROIPoolingParameter { - required float spatial_scale = 1; - required int32 output_dim = 2; // output channel number - required int32 group_size = 3; // number of groups to encode position-sensitive score maps -} -// Message that stores parameters used by FreespaceExtractLayer -message FreespaceExtractParameter { - optional float org_height = 1; -} - -// Message that stores parameters used by DetectpostprocessLayer -message PostprocessParameter { - optional float nms_thresh = 1 [default = 0.3]; - optional float conf_thresh = 2 [default = 0.5]; - optional uint32 post_nms_topn = 3 [default = 100]; - optional uint32 cls_num = 4 [default = 12]; - repeated float bbox_reg_weights = 5; -} - -// Message that stores parameters used by SpatialTransformLayer -message SpatialTransformParameter { - optional uint32 output_h = 1 [default = 0]; - optional uint32 output_w = 2 [default = 0]; - optional float border_value = 3 [default = 0]; - repeated float affine_transform = 4; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [default = DEFAULT]; -} -message ROIAlignParameter { - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. 
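The ssd/fasterRcnn DetectionOutputParameter above nests a NonMaximumSuppressionParameter and reuses PriorBoxParameter.CodeType. A hedged sketch of populating it through the generated Python bindings, again assuming a hypothetical caffe_pb2 module and placeholder values:

```python
import caffe_pb2  # hypothetical protoc output for this caffe-style proto

det = caffe_pb2.DetectionOutputParameter()
det.num_classes = 21
det.share_location = True
det.code_type = caffe_pb2.PriorBoxParameter.CENTER_SIZE  # matches the declared default
det.keep_top_k = 200
det.confidence_threshold = 0.01
det.nms_param.nms_threshold = 0.45   # nested NonMaximumSuppressionParameter
det.nms_param.top_k = 400
```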
- optional uint32 pooled_h = 1 [default = 0]; // The pooled output height - optional uint32 pooled_w = 2 [default = 0]; // The pooled output width - // Multiplicative spatial scale factor to translate ROI coords from their - // input scale to the scale used when pooling - optional float spatial_scale = 3 [default = 1]; - optional int32 sampling_ratio = 4 [default = -1]; - optional int32 roi_end_mode = 5 [default = 0]; -} - -message RegionParameter { - optional uint32 classes = 1 [default = 20]; // Category of classification - optional uint32 coords = 2 [default = 4]; // Coordinates of box - optional uint32 boxes = 3 [default = 1]; // Number of boxes predicted per grid - optional uint32 softmax = 4 [default = 0]; - optional string softmax_tree = 5 [default = ""]; - optional uint32 background = 6 [default = 0]; -} -message ReorgParameter{ - optional uint32 stride = 2 [default = 2]; - optional bool reverse = 1 [default = false]; -} -message ReverseParameter{ - repeated int32 axis = 1; -} -message InterpParameter{ - optional int32 height = 1 [default = 0];//Height of output - optional int32 width = 2 [default = 0];//Width of output - optional int32 zoom_factor = 3 [default = 1];//zoom factor - optional int32 shrink_factor = 4 [default = 1];//shrink factor - optional int32 pad_beg = 5 [default = 0];//padding at begin of input - optional int32 pad_end = 6 [default = 0];//padding at end of input -} -message ShuffleChannelParameter{ - optional uint32 group = 1[default = 1]; // The number of group -} -message UpsampleParameter{ - optional float scale = 1[default = 1]; - optional int32 stride = 2[default = 2]; - optional int32 stride_h = 3[default = 2]; - optional int32 stride_w = 4[default=2]; -} -message ROIPoolingParameter { - required int32 pooled_h = 1; - required int32 pooled_w = 2; - optional float spatial_scale = 3 [default=0.0625]; - optional float spatial_scale_h = 4; - optional float spatial_scale_w = 5; -} - -message YoloParameter { - optional int32 boxes = 1 [default = 3]; - optional int32 coords = 2 [default = 4]; - optional int32 classes = 3 [default = 80]; - optional string yolo_version = 4 [default = "V3"]; - optional bool softmax = 5 [default = false]; - optional bool background = 6 [default = false]; - optional bool softmaxtree = 7 [default = false]; -} - -message YoloV3DetectionOutputParameter { - optional int32 boxes = 1 [default = 3]; - optional int32 classes = 2 [default = 80]; - optional bool relative = 3 [default = true]; - optional float obj_threshold = 4 [default = 0.5]; - optional float score_threshold = 5 [default = 0.5]; - optional float iou_threshold = 6 [default = 0.45]; - optional int32 pre_nms_topn = 7 [default = 512]; - optional int32 post_nms_topn = 8 [default = 1024]; - repeated float biases_high = 9; - repeated float biases_mid = 10; - repeated float biases_low = 11; - optional int32 coords = 12 [default = 4]; - repeated float biases = 13; - optional bool resize_origin_img_to_net = 14 [default = false]; -} - -message YoloV3DetectionOutputV2Parameter { - optional int32 boxes = 1 [default = 3]; - optional int32 classes = 2 [default = 80]; - optional bool relative = 3 [default = true]; - optional float obj_threshold = 4 [default = 0.5]; - optional float score_threshold = 5 [default = 0.5]; - optional float iou_threshold = 6 [default = 0.45]; - optional int32 pre_nms_topn = 7 [default = 512]; - optional int32 post_nms_topn = 8 [default = 1024]; - repeated float biases_high = 9; - repeated float biases_mid = 10; - repeated float biases_low = 11; - optional int32 
coords = 12 [default = 4]; - repeated float biases = 13; - optional bool resize_origin_img_to_net = 14 [default = false]; - optional int32 out_box_dim = 15 [default = 3]; -} - -message ProposalParameter { - optional float feat_stride = 1 [default = 16]; - optional float base_size = 2 [default = 16]; - optional float min_size = 3 [default = 16]; - repeated float ratio = 4; - repeated float scale = 5; - optional int32 pre_nms_topn = 6 [default = 3000]; - optional int32 post_nms_topn = 7 [default = 304]; - optional float iou_threshold = 8 [default = 0.7]; - optional bool output_actual_rois_num = 9 [default = false]; -} - -message FSRDetectionOutputParameter { - required int32 num_classes = 1; - required float score_threshold = 2; - required float iou_threshold = 3; - optional int32 batch_rois = 4 [default = 1]; -} - -message SSDDetectionOutputParameter { - required int32 num_classes= 1 [default = 2]; - optional bool share_location = 2 [default = true]; - optional int32 background_label_id = 3 [default = 0]; - optional float iou_threshold = 4 [default = 0.3]; - optional int32 top_k = 5 [default = 200]; - optional float eta = 6 [default = 1.0]; - optional bool variance_encoded_in_target = 7 [default = false]; - optional int32 code_type = 8 [default = 1]; - optional int32 keep_top_k = 9 [default = -1]; - optional float confidence_threshold = 10 [default = 0.0]; -} -message YoloV2DetectionOutputParameter { - optional int32 boxes = 1 [default = 5]; - optional int32 classes = 2 [default = 80]; - optional bool relative = 3 [default = true]; - optional float obj_threshold = 4 [default = 0.5]; - optional float score_threshold = 5 [default = 0.5]; - optional float iou_threshold = 6 [default = 0.45]; - optional int32 pre_nms_topn = 7 [default = 512]; - optional int32 post_nms_topn = 8 [default = 1024]; - repeated float biases = 9; - optional int32 coords = 10 [default = 4]; - optional bool resize_origin_img_to_net = 11 [default = false]; -} - -message QuantParameter { - optional float scale = 2; - optional bytes offset = 3; -} - -message BatchMatMulParameter{ - optional bool adj_x1 = 1 [default = false]; - optional bool adj_x2 = 2 [default = false]; -} - -message CondTakeParameter { - required string mode = 1; - required float val = 2; - optional float eps = 3 [default = 1e-06]; -} - -message MatrixInverseParameter { - optional bool adjoint = 1 [default = false]; -} - -message WarpPerspectiveParameter { - required int32 out_height = 1; - required int32 out_width = 2; - optional float constant = 3; - optional string border_type = 4 [default = 'BORDER_CONSTANT']; -} - -message SpatialTransformerParameter { - // How to use the parameter passed by localisation network - optional string transform_type = 1 [default = "affine"]; - // What is the sampling technique - optional string sampler_type = 2 [default = "bilinear"]; - - // If not set,stay same with the input dimension H and W - optional int32 output_H = 3; - optional int32 output_W = 4; - // If false, only compute dTheta, DO NOT compute dU - optional bool to_compute_dU = 5 [default = true]; - - // The default value for some parameters - optional double theta_1_1 = 6; - optional double theta_1_2 = 7; - optional double theta_1_3 = 8; - optional double theta_2_1 = 9; - optional double theta_2_2 = 10; - optional double theta_2_3 = 11; -} diff --git a/inc/metadef/inc/register/proto/dump_task.proto b/inc/metadef/inc/register/proto/dump_task.proto deleted file mode 100644 index ee1c6f47f..000000000 --- a/inc/metadef/inc/register/proto/dump_task.proto +++ 
/dev/null @@ -1,113 +0,0 @@ -syntax = "proto3"; -package toolkit.dumpdata; - -enum OutputDataType { - DT_UNDEFINED = 0; - DT_FLOAT = 1; - DT_FLOAT16 = 2; - DT_INT8 = 3; - DT_UINT8 = 4; - DT_INT16 = 5; - DT_UINT16 = 6; - DT_INT32 = 7; - DT_INT64 = 8; - DT_UINT32 = 9; - DT_UINT64 = 10; - DT_BOOL = 11; - DT_DOUBLE = 12; - DT_STRING = 13; - DT_DUAL_SUB_INT8 = 14; - DT_DUAL_SUB_UINT8 = 15; - DT_COMPLEX64 = 16; - DT_COMPLEX128 = 17; - DT_QINT8 = 18; - DT_QINT16 = 19; - DT_QINT32 = 20; - DT_QUINT8 = 21; - DT_QUINT16 = 22; - DT_RESOURCE = 23; - DT_STRING_REF = 24; - DT_DUAL = 25; - DT_VARIANT = 26; -} - -enum OutputFormat { - FORMAT_NCHW = 0; - FORMAT_NHWC = 1; - FORMAT_ND = 2; - FORMAT_NC1HWC0 = 3; - FORMAT_FRACTAL_Z = 4; - FORMAT_NC1C0HWPAD = 5; - FORMAT_NHWC1C0 = 6; - FORMAT_FSR_NCHW = 7; - FORMAT_FRACTAL_DECONV = 8; - FORMAT_C1HWNC0 = 9; - FORMAT_FRACTAL_DECONV_TRANSPOSE = 10; - FORMAT_FRACTAL_DECONV_SP_STRIDE_TRANS = 11; - FORMAT_NC1HWC0_C04 = 12; - FORMAT_FRACTAL_Z_C04 = 13; - FORMAT_CHWN = 14; - FORMAT_FRACTAL_DECONV_SP_STRIDE8_TRANS = 15; - FORMAT_HWCN = 16; - FORMAT_NC1KHKWHWC0 = 17; - FORMAT_BN_WEIGHT = 18; - FORMAT_FILTER_HWCK = 19; - FORMAT_HASHTABLE_LOOKUP_LOOKUPS=20; - FORMAT_HASHTABLE_LOOKUP_KEYS = 21; - FORMAT_HASHTABLE_LOOKUP_VALUE = 22; - FORMAT_HASHTABLE_LOOKUP_OUTPUT = 23; - FORMAT_HASHTABLE_LOOKUP_HITS=24; - FORMAT_C1HWNCoC0 = 25; - FORMAT_MD = 26; - FORMAT_NDHWC = 27; - FORMAT_FRACTAL_ZZ = 28; - FORMAT_FRACTAL_NZ = 29; - FORMAT_RESERVED = 30; -} - -message OriginalOp { - string name = 1; - uint32 output_index = 2; - OutputDataType data_type = 3; - OutputFormat format = 4; -} - -message Shape { - repeated uint64 dim = 1; -} - -message OpOutput { - OutputDataType data_type = 1; - OutputFormat format = 2; - Shape shape = 3; - OriginalOp original_op = 4; // the original op corresponding to the output - bytes data = 5; - uint64 size = 6; -} - -message OpInput { - OutputDataType data_type = 1; - OutputFormat format = 2; - Shape shape = 3; - bytes data = 4; - uint64 size = 5; -} - -enum BufferType { - L1 = 0; -} - -message OpBuffer { - BufferType buffer_type = 1; - bytes data = 2; - uint64 size = 3; -} - -message DumpData{ - string version = 1; - uint64 dump_time = 2; - repeated OpOutput output = 3; - repeated OpInput input = 4; - repeated OpBuffer buffer = 5; - string op_name = 6; -} diff --git a/inc/metadef/inc/register/proto/fusion_model.proto b/inc/metadef/inc/register/proto/fusion_model.proto deleted file mode 100644 index c92c5581a..000000000 --- a/inc/metadef/inc/register/proto/fusion_model.proto +++ /dev/null @@ -1,21 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -import "om.proto"; - -package domi; - -message FusionModelDef { - string version = 1; - repeated OpDef fusion_op = 2; -} \ No newline at end of file diff --git a/inc/metadef/inc/register/proto/fwk_adapter.proto b/inc/metadef/inc/register/proto/fwk_adapter.proto deleted file mode 100644 index 9335c9263..000000000 --- a/inc/metadef/inc/register/proto/fwk_adapter.proto +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package aicpu.FWKAdapter; -option cc_enable_arenas = true; - - -// Defines an struct for input and output. -message TensorDataInfo { - - // value DataType - uint32 dtype = 1; - - // shape dim - repeated int64 dim = 2; - - // data point addr - int64 data_addr = 3; -} - -message KernelRunParam { - // input - repeated TensorDataInfo input = 1; - // output - repeated TensorDataInfo output = 2; -} - diff --git a/inc/metadef/inc/register/proto/ge_ir.proto b/inc/metadef/inc/register/proto/ge_ir.proto deleted file mode 100644 index 12989a548..000000000 --- a/inc/metadef/inc/register/proto/ge_ir.proto +++ /dev/null @@ -1,191 +0,0 @@ -syntax = "proto3"; - -package ge.proto; - -enum DataType -{ - DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set. 
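The fwk_adapter.proto messages above describe the tensors handed to an AI CPU kernel: a KernelRunParam is just two lists of TensorDataInfo (dtype code, shape dims, device address). A minimal sketch, assuming the file compiles to a hypothetical fwk_adapter_pb2 module and using placeholder addresses:

```python
import fwk_adapter_pb2  # hypothetical protoc output for fwk_adapter.proto

param = fwk_adapter_pb2.KernelRunParam()

x = param.input.add()            # TensorDataInfo for one input
x.dtype = 1                      # numeric data-type code
x.dim.extend([1, 3, 224, 224])   # shape dims
x.data_addr = 0x100000           # device address; placeholder value

y = param.output.add()           # one output tensor
y.dtype = 1
y.dim.extend([1, 1000])
y.data_addr = 0x200000

blob = param.SerializeToString() # wire format passed to the kernel
```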
- DT_FLOAT = 1; // float type - DT_FLOAT16 = 2; // fp16 type - DT_INT8 = 3; // int8 type - DT_UINT8 = 4; // uint8 type - DT_INT16 = 5; // int16 type - DT_UINT16 = 6; // uint16 type - DT_INT32 = 7; // - DT_INT64 = 8; // int64 type - DT_UINT32 = 9; // unsigned int32 - DT_UINT64 = 10; // unsigned int64 - DT_BOOL = 11; // bool type - DT_DOUBLE = 12; // double type - DT_STRING = 13; // string type - DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */ - DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */ - DT_COMPLEX64 = 16; // complex64 type - DT_COMPLEX128 = 17; // complex128 type - DT_QINT8 = 18; // qint8 type - DT_QINT16 = 19; // qint16 type - DT_QINT32 = 20; // qint32 type - DT_QUINT8 = 21; // quint8 type - DT_QUINT16 = 22; // quint16 type - DT_RESOURCE = 23; // resource type - DT_STRING_REF = 24; // string_ref type - DT_DUAL = 25; /**< dual output type */ - DT_VARIANT = 26; // variant type -} - -message AttrDef -{ - message ListValue - { - enum ListValueType{ - VT_LIST_NONE = 0; - VT_LIST_STRING = 1; - VT_LIST_INT = 2; - VT_LIST_FLOAT = 3; - VT_LIST_BOOL = 4; - VT_LIST_BYTES = 5; - VT_LIST_TENSOR_DESC = 6; - VT_LIST_TENSOR = 7; - VT_LIST_GRAPH = 8; - VT_LIST_NAMED_ATTRS = 9; - VT_LIST_DATA_TYPE = 10; - } - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3; // "list(int)" - repeated float f = 4; // "list(float)" - repeated bool b = 5; // "list(bool)" - repeated bytes bt = 7; - repeated TensorDescriptor td = 8; - repeated TensorDef t = 9; - repeated GraphDef g = 10; - repeated NamedAttrs na = 11; - repeated int64 dt = 12; // list ge::DataType - - ListValueType val_type = 20; - } - - message ListListInt{ - message ListInt{ - repeated int64 list_i = 1; // list int - } - repeated ListInt list_list_i = 1; // list list int - } - - oneof value - { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; // Used to support attr nesting - TensorDescriptor td = 11; // GeTensorDesc type - TensorDef t = 12; // GeTensor type - GraphDef g = 13; // Graph type - ListListInt list_list_int = 14; // List List Int type - int64 dt = 15; // ge::DataType - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. -message NamedAttrs -{ - string name = 1; - map attr = 2; -} - -// Shape / dimension description, using row-major order -message ShapeDef -{ - repeated int64 dim = 1; // Size of each dimension -} - -// Multidimensional data description -message TensorDescriptor -{ - string name = 1; // Optional parameter, tensor name - - DataType dtype = 2; // tensor datatype - ShapeDef shape = 3; // Shape / dimension - string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND" - - bool has_out_attr = 9; - int64 size = 10; - int64 weight_size = 11; - bool reuse_input = 12; - bool output_tensor = 13; - string device_type = 14; - bool input_tensor =15; - int64 real_dim_cnt = 16; - int64 reuse_input_index = 17; - int64 data_offset = 18; - int64 cmps_size = 19; - string cmps_tab = 20; - int64 cmps_tab_offset = 21; - - map attr = 5; // Set of extra parameter fields -} - -// GeTensor definition -message TensorDef -{ - TensorDescriptor desc = 1; // Tensor description - bytes data = 2; // Tensor data -} - - -// Operator description -message OpDef -{ - string name = 1; // name - string type = 2; // type - - repeated string input = 5; // input original op name + outgoing index. 
op_name:index
-
- map<string, AttrDef> attr = 10; // Set of operator parameter fields
-
- bool has_out_attr = 20;
- int64 id = 21;
- int64 stream_id = 22;
- repeated string input_name = 23;
- repeated string src_name = 24;
- repeated int64 src_index = 25;
- repeated string dst_name = 26;
- repeated int64 dst_index = 27;
- repeated int64 input_i = 28;
- repeated int64 output_i = 29;
- repeated int64 workspace = 30;
- repeated int64 workspace_bytes = 31;
- repeated bool is_input_const = 32;
- repeated TensorDescriptor input_desc = 33;
- repeated TensorDescriptor output_desc = 34;
- repeated string subgraph_name = 35;
-}
-
-// Graph definition
-message GraphDef
-{
- string name = 1; // name
-
- repeated string input = 4; // Graph input
- repeated string output = 5; // Graph output
-
- repeated OpDef op = 6; // List of operators
-
- map<string, AttrDef> attr = 11; // Extended field
-}
-
-// model definition
-message ModelDef
-{
- string name = 1; // name
- uint32 version = 2; // IR Proto version
- string custom_version = 3; // User model version number, passed in by user
-
- repeated GraphDef graph = 7; // Graph definition; graph[0] represents the main graph in the ModelDef
-
- map<string, AttrDef> attr = 11; // Extended field
-}
-
diff --git a/inc/metadef/inc/register/proto/insert_op.proto b/inc/metadef/inc/register/proto/insert_op.proto
deleted file mode 100644
index bf918b20a..000000000
--- a/inc/metadef/inc/register/proto/insert_op.proto
+++ /dev/null
@@ -1,139 +0,0 @@
-syntax = "proto3";
-
-package domi;
-
-message InsertNewOps {
- repeated AippOpParams aipp_op = 1;
- repeated MultiShapeOpParams multi_shape_op = 2;
-}
-
-message AippOpParams {
- enum InputFormat {
- UNDEFINED = 0;
- YUV420SP_U8 = 1;
- XRGB8888_U8 = 2;
- RGB888_U8 = 3;
- YUV400_U8 = 4;
- NC1HWC0DI_FP16 = 5;
- NC1HWC0DI_S8 = 6;
- ARGB8888_U8 = 7;
- YUYV_U8 = 8;
- YUV422SP_U8 = 9;
- AYUV444_U8 = 10;
- RAW10 = 11;
- RAW12 = 12;
- RAW16 = 13;
- RAW24 = 14;
- RGB16 = 15;
- RGB20 = 16;
- RGB24 = 17;
- RGB8_IR = 18;
- RGB16_IR = 19;
- RGB24_IR = 20;
- }
-
- enum AippMode {
- undefined = 0;
- static = 1;
- dynamic = 2;
- }
-
- // AIPP mode: distinguishes static AIPP from dynamic AIPP.
- AippMode aipp_mode = 1;
-
- // related_input_rank is required; integer; valid range >= 0 and <= the number of input Data operators; default 0.
- // It identifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP should process the second one, set related_input_rank to 1.
- uint32 related_input_rank = 2;
-
- // related_input_name is optional and the top name of data node which inserts aipp
- string related_input_name = 6;
-
- // input_edge_idx is optional; integer; valid range >= 0.
- // It allows different AIPP processing for different output edges of the Data operator. If it is not set, AIPP is applied to all outputs of the model input specified by related_input_rank.
- // The configured values must be <= the number of output edges of the Data operator.
- repeated uint32 input_edge_idx = 3;
-
- // [Begin] Dynamic AIPP parameters; ignored when static AIPP is configured.
- uint32 max_src_image_size = 4;
-
- // Whether rotation is supported. Not supported by default; enabling rotation support costs extra memory and performance.
- bool support_rotation = 5;
-
- // [End] Dynamic AIPP parameters
-
-
- // [Begin] Static AIPP parameters; ignored when dynamic AIPP is configured.
- InputFormat input_format = 51;
- bool csc_switch = 52;
- float cpadding_value = 53;
- bool rbuv_swap_switch = 54;
- bool ax_swap_switch = 55;
- bool single_line_mode = 56;
-
- int32 src_image_size_w = 57;
- int32 src_image_size_h = 58;
-
- bool crop = 59;
- int32 load_start_pos_w = 60;
- int32 load_start_pos_h = 61;
- int32 crop_size_w = 62;
- int32 crop_size_h = 63;
-
- bool resize = 64;
- int32 resize_output_w = 65;
- int32 resize_output_h = 66;
-
- bool padding = 67;
- int32 left_padding_size = 68;
- int32 right_padding_size = 69;
- int32 top_padding_size = 70;
- int32 bottom_padding_size = 71;
-
- int32 mean_chn_0 = 10;
- int32 mean_chn_1 = 11;
- int32 mean_chn_2 = 12;
- int32 mean_chn_3 = 19;
- float min_chn_0 = 13;
- float min_chn_1 = 14;
- float min_chn_2 = 15;
- float min_chn_3 = 20;
- repeated float var_reci_chn_0 = 16;
- repeated float var_reci_chn_1 = 17;
- repeated float var_reci_chn_2 = 18;
- repeated float var_reci_chn_3 = 21;
-
- repeated int32 matrix_r0c0 = 30;
- repeated int32 matrix_r0c1 = 31;
- repeated int32 matrix_r0c2 = 32;
- repeated int32 matrix_r1c0 = 33;
- repeated int32 matrix_r1c1 = 34;
- repeated int32 matrix_r1c2 = 35;
- repeated int32 matrix_r2c0 = 36;
- repeated int32 matrix_r2c1 = 37;
- repeated int32 matrix_r2c2 = 38;
- repeated int32 output_bias_0 = 39;
- repeated int32 output_bias_1 = 40;
- repeated int32 output_bias_2 = 41;
- repeated int32 input_bias_0 = 42;
- repeated int32 input_bias_1 = 43;
- repeated int32 input_bias_2 = 44;
-
- // [End] Static AIPP parameters
-
- // The n number that is used for raw/rgbir data into f16 transformation.
- // The transformation equation is x/(2^n). If set to 0, no transform is performed.
- uint32 raw_rgbir_to_f16_n = 45;
-}
-
-message MultiShapeOpParams {
- enum MultiShapeMode {
- batch = 0; // dynamic batch
- resolution = 1; // dynamic resolution (reserved for extension)
- }
-
- MultiShapeMode mode = 1; // operator mode
- uint32 related_input_rank = 2; // which model input the new operator is inserted on
-
-
- repeated uint32 batch_list = 11; // batch_list values; the number of batch_list entries must be between 2 and 8
-}
diff --git a/inc/metadef/inc/register/proto/om.proto b/inc/metadef/inc/register/proto/om.proto
deleted file mode 100644
index e15e5f808..000000000
--- a/inc/metadef/inc/register/proto/om.proto
+++ /dev/null
@@ -1,396 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0. You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Apache License for more details at
- * http://www.apache.org/licenses/LICENSE-2.0
- */
-syntax = "proto3";
-
-package domi;
-
-enum TargetType
-{
- MINI = 0;
- TINY = 1;
- LITE = 2;
-}
-
-// offline model
-message ModelDef {
- string name = 1;
- uint32 version = 2;
-
- uint64 memory_size = 10;
- uint32 stream_num = 11;
- uint32 event_num = 12;
- uint64 weight_size = 13;
- uint32 label_num = 15;
- repeated OpDef op = 20;
- TargetType target_type = 23;
-
- map<string, AttrDef> attr = 30;
-};
-
-// operator define
-message OpDef {
- string name = 1;
- string type = 2;
-
- uint32 id = 3;
- uint32 stream_id = 4;
-
- repeated string input_name = 5;
-
- repeated string src_name = 8;
- repeated int32 src_index = 9;
- repeated int64 input = 10;
- repeated int64 output = 11;
- repeated TensorDescriptor input_desc = 12;
- repeated TensorDescriptor output_desc = 13;
- repeated WeightDef weights = 14;
- repeated string dst_name = 15;
- repeated int32 dst_index = 16;
-
- repeated int64 workspace = 20;
- repeated uint32 workspace_bytes = 21;
-
- repeated string weight_name = 22;
- repeated bool is_input_const = 23;
-
- map<string, AttrDef> attr = 30;
-
- QuantizeFactorParams quantize_factor = 31;
-
- oneof op_params {
- // start at 100 here
- SendOpParams sender_param = 100;
- RecvOpParams receiver_param = 200;
- ConvolutionOpParams convolution_param = 300;
- PoolingOpParams pooling_param = 400;
- EltwiseOpParams eltwise_param = 500;
- BatchNormOpParams batchnorm_param = 600;
- ScaleOpParams scale_param = 700;
- FullConnectionOpParams full_connection_param = 800;
- SoftmaxOpParams softmax_param = 900;
- ActivationOpParams activation_param = 1000;
- ReshapeOpParams reshape_param = 1100;
- }
-};
-
-message SendOpParams {
- uint32 event_id = 1;
-};
-
-message RecvOpParams {
- uint32 event_id = 1;
-}; - -enum QuantizeScaleType -{ - VECTOR_SCALE = 0; - SCALAR_SCALE = 1; -} - -enum QuantizeScaleMode -{ - NORMAL_MODE = 0; - SQRT_MODE = 1; -} - -enum QuantizeAlgorithm -{ - NON_OFFSET_ALGO = 0; - HALF_OFFSET_ALGO = 1; - ALL_OFFSET_ALGO = 2; -} -message QuantizeFactor -{ - QuantizeScaleMode scale_mode = 1; - bytes scale_value = 2; - int64 scale_offset = 3; - bytes offset_data_value = 4; - int64 offset_data_offset = 5; - bytes offset_weight_value = 6; - int64 offset_weight_offset = 7; - bytes offset_pad_value = 8; - int64 offset_pad_offset = 9; -}; - -message QuantizeCalcFactor -{ - bytes offsetw = 1; - int64 offsetw_offset = 2; - bytes offsetd = 3; - int64 offsetd_offset = 4; - bytes scalereq = 5; - int64 scaledreq_offset = 6; - bytes offsetdnext = 7; - int64 offsetdnext_offset = 8; -} - -message QuantizeFactorParams -{ - QuantizeAlgorithm quantize_algo = 1; - QuantizeScaleType scale_type = 2; - QuantizeFactor quantize_param = 3; - QuantizeFactor dequantize_param = 4; - QuantizeFactor requantize_param = 5; - QuantizeCalcFactor quantizecalc_param = 6; -}; - -message ConvolutionOpParams { - int32 mode = 1; - int32 algo = 2; - int32 pad_mode = 3; - uint32 group = 4; - uint32 num_output = 5; - - repeated uint32 pad = 10; - repeated uint32 stride = 11; - repeated uint32 dilation = 12; - repeated uint32 kernel = 13; - - float alpha = 20; - float beta = 21; - - WeightDef filter = 40; - WeightDef bias = 41; - - bool relu_flag = 62; - repeated uint32 adj = 70; - repeated uint32 target_shape = 71; - repeated uint32 before_pad = 72; -}; - -message PoolingOpParams { - int32 mode = 1; - int32 nan_opt = 2; - int32 pad_mode = 3; - bool global_pooling = 4; - - repeated uint32 window = 10; - repeated uint32 pad = 11; - repeated uint32 stride = 12; - bool ceil_mode = 13; - int32 data_mode = 14; - - float alpha = 20; - float beta = 21; - repeated uint32 before_pad = 22; -}; - -message EltwiseOpParams { - int32 mode = 1; - repeated float coeff = 2; - float alpha = 3; - float beta = 4; - repeated WeightDef weight = 5; - bool relu_flag = 6; -}; - -message ActivationOpParams { - int32 mode = 1; - float coef = 2; - float alpha = 3; - float beta = 4; -}; - -message BatchNormOpParams { - int32 mode = 1; - - float alpha = 2; - float beta = 3; - double epsilon = 4;//optinal,[default = 1e-5] - bool use_global_stats = 5; //optinal,by default true,testing mode - float moving_average_fraction = 6; //optinal,[default = .999]; - - WeightDef estimated_mean = 7; - WeightDef estimated_variance = 8; - - WeightDef scale = 9; - WeightDef bias = 10; -}; - -message ScaleOpParams { - WeightDef scale = 1; - WeightDef bias = 2; -}; - -message ReshapeOpParams { - float alpha = 1; - float beta = 2; - ShapeDef shape = 3; - int32 axis = 4; - int32 num_axes = 5; - int32 format = 6; -}; - -message SoftmaxOpParams { - int32 algo = 1; - int32 mode = 2; - float alpha = 3; - float beta = 4; -}; - -message FullConnectionOpParams { - WeightDef filter = 1; - WeightDef bias = 2; - uint32 num_output = 3; - bool relu_flag = 12; -}; - -message FlattenOpParams { - float alpha = 1; - float beta = 2; - int32 start_axis = 3; - int32 end_axis = 4; -} - -message AddLimitedOpParams { - float alpha = 1; - float beta = 2; - int32 axis = 3; - bool broadcast = 4; - - repeated WeightDef weight = 10; -}; - -message MulLimitedOpParams { - float alpha = 1; - float beta = 2; - int32 axis = 3; - bool broadcast = 4; - - repeated WeightDef weight = 10; -}; - -message AddOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message 
MulOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message SubOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message BiasAddOpParams { - float alpha = 1; - float beta = 2; - - WeightDef bias = 10; -}; - -message MatMulOpParams { - float alpha = 1; - float beta = 2; - bool transposeX = 3; - bool transposeW = 4; - - WeightDef filter = 10; - WeightDef bias = 12; -}; - -message RsqrtOpParams { - float alpha = 1; - float beta = 2; -}; - - -message WeightDef { - int32 format = 1; - int32 data_type = 2; - ShapeDef shape = 3; - bytes data = 4; - int64 data_offset = 5; - uint32 cmps_size = 6; - bytes cmps_tab = 7; - int64 cmps_tab_offset = 10; - CompressInfo cmps_info = 8; - AllOffsetQuantizeInfo alloffset_quantize_info = 11; -} - -message ShapeDef { - repeated int64 dim = 1; -} - -enum DeviceType { - NPU = 0; // In default, we will use NPU. - CPU = 1; // CPU -} - -message AllOffsetQuantizeInfo { - float scale = 1; - int32 offset = 2; -} - -message TensorDescriptor { - int32 format = 1; - int32 data_type = 2; - repeated int64 dim = 3; - uint32 size = 4; - bool reuse_input = 5; - bool output_tensor = 7; - DeviceType device_type = 8; - bool input_tensor = 9; - uint32 real_dim_cnt = 10; - uint32 reuse_input_index = 11; - AllOffsetQuantizeInfo alloffset_quantize_info = 12; -} - -message CompressInfo { - int32 blockRow = 1; // block row - int32 blockCol = 2; // block col - int32 fractalK = 3; // fractal K - int32 fractalN = 4; // fractal N - int32 lastFractalK = 5; // K of last fractal - int32 lastFractalN = 6; // N of last fractal - int32 cubeSize = 7; // cube's length - int32 loadDir = 8; // data load directtiono 0:col load 1:row load -} - -message AttrDef { - message ListValue { - repeated string s = 2; // "list(string)" - repeated int64 i = 3 [packed = true]; // "list(int)" - repeated float f = 4 [packed = true]; // "list(float)" - repeated bool b = 5 [packed = true]; // "list(bool)" - repeated uint32 u = 6 [packed = true]; // "list(uint)" - repeated bytes bt = 7; - } - - oneof value { - string s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - uint32 u = 6; // "uint32" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. -message NamedAttrs { - string name = 1; - map attr = 2; -} - diff --git a/inc/metadef/inc/register/proto/onnx/ge_onnx.proto b/inc/metadef/inc/register/proto/onnx/ge_onnx.proto deleted file mode 100644 index 4cd77f3ae..000000000 --- a/inc/metadef/inc/register/proto/onnx/ge_onnx.proto +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright (c) ONNX Project Contributors. -// Licensed under the MIT license. - -syntax = "proto3"; - -package ge.onnx; - -// Overview -// -// ONNX is an open specification that is comprised of the following components: -// -// 1) A definition of an extensible computation graph model. -// 2) Definitions of standard data types. -// 3) Definitions of built-in operators. -// -// This document describes the syntax of models and their computation graphs, -// as well as the standard data types. Together, they are referred to as the ONNX -// Intermediate Representation, or 'IR' for short. -// -// The normative semantic specification of the ONNX IR is found in docs/IR.md. -// Definitions of the built-in neural network operators may be found in docs/Operators.md. 
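Before the ONNX comments continue, a short sketch of how the om.proto offline-model messages above compose: a domi.ModelDef owns OpDef entries, and per-op attributes ride in the AttrDef map. The om_pb2 module name and all values are assumptions for illustration.

```python
import om_pb2  # hypothetical protoc output for om.proto

model = om_pb2.ModelDef()
model.name = "demo_model"
model.version = 1
model.memory_size = 1 << 20
model.target_type = om_pb2.MINI

op = model.op.add()              # one OpDef
op.name = "conv1"
op.type = "Convolution"
op.input_name.append("data")
op.attr["relu_flag"].b = True    # AttrDef entry in the attr map

serialized = model.SerializeToString()
```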
- -// Notes -// -// Release -// -// We are still in the very early stage of defining ONNX. The current -// version of ONNX is a starting point. While we are actively working -// towards a complete spec, we would like to get the community involved -// by sharing our working version of ONNX. -// -// Protobuf compatibility -// -// To simplify framework compatibility, ONNX is defined using the subset of protobuf -// that is compatible with both protobuf v2 and v3. This means that we do not use any -// protobuf features that are only available in one of the two versions. -// -// Here are the most notable contortions we have to carry out to work around -// these limitations: -// -// - No 'map' (added protobuf 3.0). We instead represent mappings as lists -// of key-value pairs, where order does not matter and duplicates -// are not allowed. - - -// Versioning -// -// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md -// -// To be compatible with both proto2 and proto3, we will use a version number -// that is not defined by the default value but an explicit enum number. -enum Version { - // proto3 requires the first enum value to be zero. - // We add this just to appease the compiler. - _START_VERSION = 0; - // The version field is always serialized and we will use it to store the - // version that the graph is generated from. This helps us set up version - // control. - // For the IR, we are using simple numbers starting with with 0x00000001, - // which was the version we published on Oct 10, 2017. - IR_VERSION_2017_10_10 = 0x0000000000000001; - - // IR_VERSION 2 published on Oct 30, 2017 - // - Added type discriminator to AttributeProto to support proto3 users - IR_VERSION_2017_10_30 = 0x0000000000000002; - - // IR VERSION 3 published on Nov 3, 2017 - // - For operator versioning: - // - Added new message OperatorSetIdProto - // - Added opset_import in ModelProto - // - For vendor extensions, added domain in NodeProto - IR_VERSION_2017_11_3 = 0x0000000000000003; - - // IR VERSION 4 published on Jan 22, 2019 - // - Relax constraint that initializers should be a subset of graph inputs - // - Add type BFLOAT16 - IR_VERSION_2019_1_22 = 0x0000000000000004; - - // IR VERSION 5 published on March 18, 2019 - // - Add message TensorAnnotation. - // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters. - IR_VERSION_2019_3_18 = 0x0000000000000005; - - // IR VERSION 6 published on Sep 19, 2019 - // - Add support for sparse tensor constants stored in model. - // - Add message SparseTensorProto - // - Add sparse initializers - IR_VERSION = 0x0000000000000006; -} - -// Attributes -// -// A named attribute containing either singular float, integer, string, graph, -// and tensor values, or repeated float, integer, string, graph, and tensor values. -// An AttributeProto MUST contain the name field, and *only one* of the -// following content fields, effectively enforcing a C/C++ union equivalent. -message AttributeProto { - - // Note: this enum is structurally identical to the OpSchema::AttrType - // enum defined in schema.h. If you rev one, you likely need to rev the other. - enum AttributeType { - UNDEFINED = 0; - FLOAT = 1; - INT = 2; - STRING = 3; - TENSOR = 4; - GRAPH = 5; - SPARSE_TENSOR = 11; - - FLOATS = 6; - INTS = 7; - STRINGS = 8; - TENSORS = 9; - GRAPHS = 10; - SPARSE_TENSORS = 12; - } - - // The name field MUST be present for this version of the IR. 
- string name = 1; // namespace Attribute - - // if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function. - // In this case, this AttributeProto does not contain data, and it's a reference of attribute - // in parent scope. - // NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph. - string ref_attr_name = 21; - - // A human-readable documentation for this attribute. Markdown is allowed. - string doc_string = 13; - - // The type field MUST be present for this version of the IR. - // For 0.0.1 versions of the IR, this field was not defined, and - // implementations needed to use has_field hueristics to determine - // which value field was in use. For IR_VERSION 0.0.2 or later, this - // field MUST be set and match the f|i|s|t|... field in use. This - // change was made to accomodate proto3 implementations. - AttributeType type = 20; // discriminator that indicates which field below is in use - - // Exactly ONE of the following fields must be present for this version of the IR - float f = 2; // float - int64 i = 3; // int - bytes s = 4; // UTF-8 string - TensorProto t = 5; // tensor value - GraphProto g = 6; // graph - SparseTensorProto sparse_tensor = 22; // sparse tensor value - // Do not use field below, it's deprecated. - // optional ValueProto v = 12; // value - subsumes everything but graph - - repeated float floats = 7; // list of floats - repeated int64 ints = 8; // list of ints - repeated bytes strings = 9; // list of UTF-8 strings - repeated TensorProto tensors = 10; // list of tensors - repeated GraphProto graphs = 11; // list of graph - repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors -} - -// Defines information on value, including the name, the type, and -// the shape of the value. -message ValueInfoProto { - // This field MUST be present in this version of the IR. - string name = 1; // namespace Value - // This field MUST be present in this version of the IR for - // inputs and outputs of the top-level graph. - TypeProto type = 2; - // A human-readable documentation for this value. Markdown is allowed. - string doc_string = 3; -} - -// Nodes -// -// Computation graphs are made up of a DAG of nodes, which represent what is -// commonly called a "layer" or "pipeline stage" in machine learning frameworks. -// -// For example, it can be a node of type "Conv" that takes in an image, a filter -// tensor and a bias tensor, and produces the convolved output. -message NodeProto { - repeated string input = 1; // namespace Value - repeated string output = 2; // namespace Value - - // An optional identifier for this node in a graph. - // This field MAY be absent in ths version of the IR. - string name = 3; // namespace Node - - // The symbolic identifier of the Operator to execute. - string op_type = 4; // namespace Operator - // The domain of the OperatorSet that specifies the operator named by op_type. - string domain = 7; // namespace Domain - - // Additional named attributes. - repeated AttributeProto attribute = 5; - - // A human-readable documentation for this node. Markdown is allowed. - string doc_string = 6; -} - -// Models -// -// ModelProto is a top-level file/container format for bundling a ML model and -// associating its computation graph with metadata. -// -// The semantics of the model are described by the associated GraphProto. -message ModelProto { - // The version of the IR this model targets. See Version enum above. - // This field MUST be present. 
- int64 ir_version = 1; - - // The OperatorSets this model relies on. - // All ModelProtos MUST have at least one entry that - // specifies which version of the ONNX OperatorSet is - // being imported. - // - // All nodes in the ModelProto's graph will bind against the operator - // with the same-domain/same-op_type operator with the HIGHEST version - // in the referenced operator sets. - repeated OperatorSetIdProto opset_import = 8; - - // The name of the framework or tool used to generate this model. - // This field SHOULD be present to indicate which implementation/tool/framework - // emitted the model. - string producer_name = 2; - - // The version of the framework or tool used to generate this model. - // This field SHOULD be present to indicate which implementation/tool/framework - // emitted the model. - string producer_version = 3; - - // Domain name of the model. - // We use reverse domain names as name space indicators. For example: - // `com.facebook.fair` or `com.microsoft.cognitiveservices` - // - // Together with `model_version` and GraphProto.name, this forms the unique identity of - // the graph. - string domain = 4; - - // The version of the graph encoded. See Version enum below. - int64 model_version = 5; - - // A human-readable documentation for this model. Markdown is allowed. - string doc_string = 6; - - // The parameterized graph that is evaluated to execute the model. - GraphProto graph = 7; - - // Named metadata values; keys should be distinct. - repeated StringStringEntryProto metadata_props = 14; -}; - -// StringStringEntryProto follows the pattern for cross-proto-version maps. -// See https://developers.google.com/protocol-buffers/docs/proto3#maps -message StringStringEntryProto { - string key = 1; - string value= 2; -}; - -message TensorAnnotation { - string tensor_name = 1; - // pairs to annotate tensor specified by above. - // The keys used in the mapping below must be pre-defined in ONNX spec. - // For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as - // quantization parameter keys. - repeated StringStringEntryProto quant_parameter_tensor_names = 2; -} - - - -// Graphs -// -// A graph defines the computational logic of a model and is comprised of a parameterized -// list of nodes that form a directed acyclic graph based on their inputs and outputs. -// This is the equivalent of the "network" or "graph" in many deep learning -// frameworks. -message GraphProto { - // The nodes in the graph, sorted topologically. - repeated NodeProto node = 1; - - // The name of the graph. - string name = 2; // namespace Graph - - // A list of named tensor values, used to specify constant inputs of the graph. - // Each TensorProto entry must have a distinct name (within the list) that - // MAY also appear in the input list. - repeated TensorProto initializer = 5; - - // Initializers (see above) stored in sparse format. - repeated SparseTensorProto sparse_initializer = 15; - - // A human-readable documentation for this graph. Markdown is allowed. - string doc_string = 10; - - // The inputs and outputs of the graph. - repeated ValueInfoProto input = 11; - repeated ValueInfoProto output = 12; - - // Information for the values in the graph. The ValueInfoProto.name's - // must be distinct. It is optional for a value to appear in value_info list. - repeated ValueInfoProto value_info = 13; - - // This field carries information to indicate the mapping among a tensor and its - // quantization parameter tensors. 
For example: - // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated, - // which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model. - repeated TensorAnnotation quantization_annotation = 14; - - // DO NOT USE the following fields, they were deprecated from earlier versions. - // repeated string input = 3; - // repeated string output = 4; - // optional int64 ir_version = 6; - // optional int64 producer_version = 7; - // optional string producer_tag = 8; - // optional string domain = 9; -} - -// Tensors -// -// A serialized tensor value. -message TensorProto { - enum DataType { - UNDEFINED = 0; - // Basic types. - FLOAT = 1; // float - UINT8 = 2; // uint8_t - INT8 = 3; // int8_t - UINT16 = 4; // uint16_t - INT16 = 5; // int16_t - INT32 = 6; // int32_t - INT64 = 7; // int64_t - STRING = 8; // string - BOOL = 9; // bool - - // IEEE754 half-precision floating-point format (16 bits wide). - // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits. - FLOAT16 = 10; - - DOUBLE = 11; - UINT32 = 12; - UINT64 = 13; - COMPLEX64 = 14; // complex with float32 real and imaginary components - COMPLEX128 = 15; // complex with float64 real and imaginary components - - // Non-IEEE floating-point format based on IEEE754 single-precision - // floating-point number truncated to 16 bits. - // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits. - BFLOAT16 = 16; - - // Future extensions go here. - } - - // The shape of the tensor. - repeated int64 dims = 1; - - // The data type of the tensor. - // This field MUST have a valid TensorProto.DataType value - int32 data_type = 2; - - // For very large tensors, we may want to store them in chunks, in which - // case the following fields will specify the segment that is stored in - // the current TensorProto. - message Segment { - int64 begin = 1; - int64 end = 2; - } - Segment segment = 3; - - // Tensor content must be organized in row-major order. - // - // Depending on the data_type field, exactly one of the fields below with - // name ending in _data is used to store the elements of the tensor. - - // For float and complex64 values - // Complex64 tensors are encoded as a single array of floats, - // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the - // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] - // is encoded as [1.0, 2.0 ,3.0 ,4.0] - // When this field is present, the data_type field MUST be FLOAT or COMPLEX64. - repeated float float_data = 4 [packed = true]; - - // For int32, uint8, int8, uint16, int16, bool, and float16 values - // float16 values must be bit-wise converted to an uint16_t prior - // to writing to the buffer. - // When this field is present, the data_type field MUST be - // INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16 - repeated int32 int32_data = 5 [packed = true]; - - // For strings. - // Each element of string_data is a UTF-8 encoded Unicode - // string. No trailing null, no leading BOM. The protobuf "string" - // scalar type is not used to match ML community conventions. - // When this field is present, the data_type field MUST be STRING - repeated bytes string_data = 6; - - // For int64. - // When this field is present, the data_type field MUST be INT64 - repeated int64 int64_data = 7 [packed = true]; - - // Optionally, a name for the tensor. 
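A minimal sketch of how these ge.onnx messages nest (a ModelProto carrying opset_import, a GraphProto, and NodeProto entries); the ge_onnx_pb2 module name is an assumption, and type/shape annotations are omitted for brevity.

```python
import ge_onnx_pb2  # hypothetical protoc output for ge_onnx.proto

model = ge_onnx_pb2.ModelProto()
model.ir_version = ge_onnx_pb2.IR_VERSION   # latest Version enum value above
opset = model.opset_import.add()            # at least one entry is required
opset.domain = ""                           # "" selects the default ONNX operator set
opset.version = 11

graph = model.graph
graph.name = "single_relu"

node = graph.node.add()                     # one NodeProto
node.op_type = "Relu"
node.input.append("x")
node.output.append("y")

graph.input.add().name = "x"                # ValueInfoProto; type left unset in this sketch
graph.output.add().name = "y"
```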
- string name = 8; // namespace Value - - // A human-readable documentation for this tensor. Markdown is allowed. - string doc_string = 12; - - // Serializations can either use one of the fields above, or use this - // raw bytes field. The only exception is the string case, where one is - // required to store the content in the repeated bytes string_data field. - // - // When this raw_data field is used to store tensor value, elements MUST - // be stored in as fixed-width, little-endian order. - // Floating-point data types MUST be stored in IEEE 754 format. - // Complex64 elements must be written as two consecutive FLOAT values, real component first. - // Complex128 elements must be written as two consecutive DOUBLE values, real component first. - // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false). - // - // Note: the advantage of specific field rather than the raw_data field is - // that in some cases (e.g. int data), protobuf does a better packing via - // variable length storage, and may lead to smaller binary footprint. - // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED - bytes raw_data = 9; - - // Data can be stored inside the protobuf file using type-specific fields or raw_data. - // Alternatively, raw bytes data can be stored in an external file, using the external_data field. - // external_data stores key-value pairs describing data location. Recognized keys are: - // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX - // protobuf model was stored - // - "offset" (optional) - position of byte at which stored data begins. Integer stored as string. - // Offset values SHOULD be multiples 4096 (page size) to enable mmap support. - // - "length" (optional) - number of bytes containing data. Integer stored as string. - // - "checksum" (optional) - SHA1 digest of file specified in under 'location' key. - repeated StringStringEntryProto external_data = 13; - - // Location of the data for this tensor. MUST be one of: - // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field. - // - EXTERNAL - data stored in an external location as described by external_data field. - enum DataLocation { - DEFAULT = 0; - EXTERNAL = 1; - } - - // If value not set, data is stored in raw_data (if set) otherwise in type-specified field. - DataLocation data_location = 14; - - // For double - // Complex128 tensors are encoded as a single array of doubles, - // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the - // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] - // is encoded as [1.0, 2.0 ,3.0 ,4.0] - // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 - repeated double double_data = 10 [packed = true]; - - // For uint64 and uint32 values - // When this field is present, the data_type field MUST be - // UINT32 or UINT64 - repeated uint64 uint64_data = 11 [packed = true]; -} - -// A serialized sparse-tensor value -message SparseTensorProto { - // The sequence of non-default values are encoded as a tensor of shape [NNZ]. - // The default-value is zero for numeric tensors, and empty-string for string tensors. - TensorProto values = 1; - - // The indices of the non-default values, which may be stored in one of two formats. 
- // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value - // corresponding to the j-th index of the i-th value (in the values tensor). - // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value - // must be the linearized-index of the i-th value (in the values tensor). - // The linearized-index can be converted into an index tuple (k_1,...,k_rank) - // using the shape provided below. - // The indices must appear in ascending order without duplication. - // In the first format, the ordering is lexicographic-ordering: - // e.g., index-value [1,4] must appear before [2,1] - TensorProto indices = 2; - - // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank] - repeated int64 dims = 3; -} - -// Defines a tensor shape. A dimension can be either an integer value -// or a symbolic variable. A symbolic variable represents an unknown -// dimension. -message TensorShapeProto { - message Dimension { - oneof value { - int64 dim_value = 1; - string dim_param = 2; // namespace Shape - }; - // Standard denotation can optionally be used to denote tensor - // dimensions with standard semantic descriptions to ensure - // that operations are applied to the correct axis of a tensor. - // Refer to https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition - // for pre-defined dimension denotations. - string denotation = 3; - }; - repeated Dimension dim = 1; -} - -// Types -// -// The standard ONNX data types. -message TypeProto { - - message Tensor { - // This field MUST NOT have the value of UNDEFINED - // This field MUST have a valid TensorProto.DataType value - // This field MUST be present for this version of the IR. - int32 elem_type = 1; - TensorShapeProto shape = 2; - } - - // repeated T - message Sequence { - // The type and optional shape of each element of the sequence. - // This field MUST be present for this version of the IR. - TypeProto elem_type = 1; - }; - - // map - message Map { - // This field MUST have a valid TensorProto.DataType value - // This field MUST be present for this version of the IR. - // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING - int32 key_type = 1; - // This field MUST be present for this version of the IR. - TypeProto value_type = 2; - }; - - oneof value { - // The type of a tensor. - Tensor tensor_type = 1; - - // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values - // as input and output to graphs and nodes. These types are needed to naturally - // support classical ML operators. DNN operators SHOULD restrict their input - // and output types to tensors. - - // The type of a sequence. - Sequence sequence_type = 4; - - // The type of a map. - Map map_type = 5; - - } - - // An optional denotation can be used to denote the whole - // type with a standard semantic description as to what is - // stored inside. Refer to https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition - // for pre-defined type denotations. - string denotation = 6; -} - -// Operator Sets -// -// OperatorSets are uniquely identified by a (domain, opset_version) pair. -message OperatorSetIdProto { - // The domain of the operator set being identified. - // The empty string ("") or absence of this field implies the operator - // set that is defined as part of the ONNX specification. - // This field MUST be present in this version of the IR when referring to any other operator set. 
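The SparseTensorProto comments above allow indices to be stored either as an [NNZ, rank] tensor of coordinate tuples or as an [NNZ] tensor of row-major linearized indices. Below is a minimal, self-contained sketch of recovering a coordinate tuple from a linearized index, assuming the dense shape carried in the dims field; the function and variable names are illustrative only.

#include <cstdint>
#include <iostream>
#include <vector>

// Convert a row-major linearized index into an index tuple (k_1, ..., k_rank)
// for the dense shape carried in SparseTensorProto.dims.
std::vector<int64_t> LinearToTuple(int64_t linear, const std::vector<int64_t> &dims) {
  std::vector<int64_t> coord(dims.size(), 0);
  for (size_t i = dims.size(); i > 0; --i) {
    coord[i - 1] = linear % dims[i - 1];
    linear /= dims[i - 1];
  }
  return coord;
}

int main() {
  const std::vector<int64_t> dims = {3, 4};  // dense shape [3, 4]
  for (int64_t k : LinearToTuple(9, dims)) {
    std::cout << k << ' ';                   // prints "2 1"
  }
  std::cout << '\n';
  return 0;
}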
- string domain = 1; - - // The version of the operator set being identified. - // This field MUST be present in this version of the IR. - int64 version = 2; -} diff --git a/inc/metadef/inc/register/proto/op_mapping_info.proto b/inc/metadef/inc/register/proto/op_mapping_info.proto deleted file mode 100644 index 7fb6f84b1..000000000 --- a/inc/metadef/inc/register/proto/op_mapping_info.proto +++ /dev/null @@ -1,75 +0,0 @@ -syntax = "proto3"; -package aicpu.dump; - -message Shape { - repeated uint64 dim = 1; -} - -message Output { - int32 data_type = 1; - int32 format = 2; - Shape shape = 3; - uint64 address = 4; - string original_name = 5; - int32 original_output_index = 6; - int32 original_output_data_type = 7; - int32 original_output_format = 8; - uint64 size = 9; - Shape origin_shape = 10; -} - -message Input { - int32 data_type =1; - int32 format = 2; - Shape shape = 3; - uint64 address = 4; - uint64 size = 5; - Shape origin_shape = 6; -} - -enum BufferType { - L1 = 0; -} - -message OpBuffer { - BufferType buffer_type = 1; - uint64 address = 2; - uint64 size = 3; -} - -message Op { - string op_name = 1; - string op_type = 2; -} - -message Task { - uint32 task_id = 1; - uint32 stream_id = 2; - Op op = 3; - repeated Output output = 4; - bool end_graph = 5; - repeated Input input = 6; - repeated OpBuffer buffer = 7; -} - -message OpMappingInfo { - string dump_path = 1; - oneof model_name_param { - string model_name = 2; - } - oneof model_id_param { - uint32 model_id = 3; - } - oneof step_id { - uint64 step_id_addr = 4; - } - oneof iterations_per_loop { - uint64 iterations_per_loop_addr = 5; - } - oneof loop_cond { - uint64 loop_cond_addr = 6; - } - uint32 flag = 7; // 0x01 load, 0x00 unload - repeated Task task = 8; - string dump_step = 9; -} \ No newline at end of file diff --git a/inc/metadef/inc/register/proto/proto_inner/ge_onnx.proto b/inc/metadef/inc/register/proto/proto_inner/ge_onnx.proto deleted file mode 100644 index 4cd77f3ae..000000000 --- a/inc/metadef/inc/register/proto/proto_inner/ge_onnx.proto +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright (c) ONNX Project Contributors. -// Licensed under the MIT license. - -syntax = "proto3"; - -package ge.onnx; - -// Overview -// -// ONNX is an open specification that is comprised of the following components: -// -// 1) A definition of an extensible computation graph model. -// 2) Definitions of standard data types. -// 3) Definitions of built-in operators. -// -// This document describes the syntax of models and their computation graphs, -// as well as the standard data types. Together, they are referred to as the ONNX -// Intermediate Representation, or 'IR' for short. -// -// The normative semantic specification of the ONNX IR is found in docs/IR.md. -// Definitions of the built-in neural network operators may be found in docs/Operators.md. - -// Notes -// -// Release -// -// We are still in the very early stage of defining ONNX. The current -// version of ONNX is a starting point. While we are actively working -// towards a complete spec, we would like to get the community involved -// by sharing our working version of ONNX. -// -// Protobuf compatibility -// -// To simplify framework compatibility, ONNX is defined using the subset of protobuf -// that is compatible with both protobuf v2 and v3. This means that we do not use any -// protobuf features that are only available in one of the two versions. 
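The aicpu.dump messages above describe how per-op dump tasks are attached to an OpMappingInfo. The sketch below populates one with protoc-generated C++ classes; the generated header name op_mapping_info.pb.h is an assumption derived from the file name, and all field values are purely illustrative.

#include <string>

#include "op_mapping_info.pb.h"  // assumed name of the protoc-generated header

int main() {
  aicpu::dump::OpMappingInfo info;
  info.set_dump_path("/tmp/dump");       // illustrative path
  info.set_model_name("example_model");  // fills the model_name_param oneof
  info.set_model_id(1);                  // fills the model_id_param oneof
  info.set_flag(0x01);                   // 0x01 load, 0x00 unload (see the flag comment above)

  // One dump task identified by its op, with a single output descriptor.
  aicpu::dump::Task *task = info.add_task();
  task->set_task_id(0);
  task->set_stream_id(0);
  task->mutable_op()->set_op_name("conv1");
  task->mutable_op()->set_op_type("Conv2D");
  aicpu::dump::Output *output = task->add_output();
  output->set_data_type(1);
  output->mutable_shape()->add_dim(16);

  std::string serialized;
  info.SerializeToString(&serialized);   // standard protobuf serialization
  return 0;
}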
-// -// Here are the most notable contortions we have to carry out to work around -// these limitations: -// -// - No 'map' (added protobuf 3.0). We instead represent mappings as lists -// of key-value pairs, where order does not matter and duplicates -// are not allowed. - - -// Versioning -// -// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md -// -// To be compatible with both proto2 and proto3, we will use a version number -// that is not defined by the default value but an explicit enum number. -enum Version { - // proto3 requires the first enum value to be zero. - // We add this just to appease the compiler. - _START_VERSION = 0; - // The version field is always serialized and we will use it to store the - // version that the graph is generated from. This helps us set up version - // control. - // For the IR, we are using simple numbers starting with with 0x00000001, - // which was the version we published on Oct 10, 2017. - IR_VERSION_2017_10_10 = 0x0000000000000001; - - // IR_VERSION 2 published on Oct 30, 2017 - // - Added type discriminator to AttributeProto to support proto3 users - IR_VERSION_2017_10_30 = 0x0000000000000002; - - // IR VERSION 3 published on Nov 3, 2017 - // - For operator versioning: - // - Added new message OperatorSetIdProto - // - Added opset_import in ModelProto - // - For vendor extensions, added domain in NodeProto - IR_VERSION_2017_11_3 = 0x0000000000000003; - - // IR VERSION 4 published on Jan 22, 2019 - // - Relax constraint that initializers should be a subset of graph inputs - // - Add type BFLOAT16 - IR_VERSION_2019_1_22 = 0x0000000000000004; - - // IR VERSION 5 published on March 18, 2019 - // - Add message TensorAnnotation. - // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters. - IR_VERSION_2019_3_18 = 0x0000000000000005; - - // IR VERSION 6 published on Sep 19, 2019 - // - Add support for sparse tensor constants stored in model. - // - Add message SparseTensorProto - // - Add sparse initializers - IR_VERSION = 0x0000000000000006; -} - -// Attributes -// -// A named attribute containing either singular float, integer, string, graph, -// and tensor values, or repeated float, integer, string, graph, and tensor values. -// An AttributeProto MUST contain the name field, and *only one* of the -// following content fields, effectively enforcing a C/C++ union equivalent. -message AttributeProto { - - // Note: this enum is structurally identical to the OpSchema::AttrType - // enum defined in schema.h. If you rev one, you likely need to rev the other. - enum AttributeType { - UNDEFINED = 0; - FLOAT = 1; - INT = 2; - STRING = 3; - TENSOR = 4; - GRAPH = 5; - SPARSE_TENSOR = 11; - - FLOATS = 6; - INTS = 7; - STRINGS = 8; - TENSORS = 9; - GRAPHS = 10; - SPARSE_TENSORS = 12; - } - - // The name field MUST be present for this version of the IR. - string name = 1; // namespace Attribute - - // if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function. - // In this case, this AttributeProto does not contain data, and it's a reference of attribute - // in parent scope. - // NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph. - string ref_attr_name = 21; - - // A human-readable documentation for this attribute. Markdown is allowed. - string doc_string = 13; - - // The type field MUST be present for this version of the IR. 
- // For 0.0.1 versions of the IR, this field was not defined, and - // implementations needed to use has_field hueristics to determine - // which value field was in use. For IR_VERSION 0.0.2 or later, this - // field MUST be set and match the f|i|s|t|... field in use. This - // change was made to accomodate proto3 implementations. - AttributeType type = 20; // discriminator that indicates which field below is in use - - // Exactly ONE of the following fields must be present for this version of the IR - float f = 2; // float - int64 i = 3; // int - bytes s = 4; // UTF-8 string - TensorProto t = 5; // tensor value - GraphProto g = 6; // graph - SparseTensorProto sparse_tensor = 22; // sparse tensor value - // Do not use field below, it's deprecated. - // optional ValueProto v = 12; // value - subsumes everything but graph - - repeated float floats = 7; // list of floats - repeated int64 ints = 8; // list of ints - repeated bytes strings = 9; // list of UTF-8 strings - repeated TensorProto tensors = 10; // list of tensors - repeated GraphProto graphs = 11; // list of graph - repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors -} - -// Defines information on value, including the name, the type, and -// the shape of the value. -message ValueInfoProto { - // This field MUST be present in this version of the IR. - string name = 1; // namespace Value - // This field MUST be present in this version of the IR for - // inputs and outputs of the top-level graph. - TypeProto type = 2; - // A human-readable documentation for this value. Markdown is allowed. - string doc_string = 3; -} - -// Nodes -// -// Computation graphs are made up of a DAG of nodes, which represent what is -// commonly called a "layer" or "pipeline stage" in machine learning frameworks. -// -// For example, it can be a node of type "Conv" that takes in an image, a filter -// tensor and a bias tensor, and produces the convolved output. -message NodeProto { - repeated string input = 1; // namespace Value - repeated string output = 2; // namespace Value - - // An optional identifier for this node in a graph. - // This field MAY be absent in ths version of the IR. - string name = 3; // namespace Node - - // The symbolic identifier of the Operator to execute. - string op_type = 4; // namespace Operator - // The domain of the OperatorSet that specifies the operator named by op_type. - string domain = 7; // namespace Domain - - // Additional named attributes. - repeated AttributeProto attribute = 5; - - // A human-readable documentation for this node. Markdown is allowed. - string doc_string = 6; -} - -// Models -// -// ModelProto is a top-level file/container format for bundling a ML model and -// associating its computation graph with metadata. -// -// The semantics of the model are described by the associated GraphProto. -message ModelProto { - // The version of the IR this model targets. See Version enum above. - // This field MUST be present. - int64 ir_version = 1; - - // The OperatorSets this model relies on. - // All ModelProtos MUST have at least one entry that - // specifies which version of the ONNX OperatorSet is - // being imported. - // - // All nodes in the ModelProto's graph will bind against the operator - // with the same-domain/same-op_type operator with the HIGHEST version - // in the referenced operator sets. - repeated OperatorSetIdProto opset_import = 8; - - // The name of the framework or tool used to generate this model. 
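AttributeProto above is effectively a tagged union: exactly one of the value fields may be populated, and the type field must name it. A hedged sketch of attaching an INT attribute to a NodeProto with protoc-generated classes follows; the header name and the ge::onnx namespace follow the usual protoc mapping of package ge.onnx, and the attribute itself is illustrative.

#include "ge_onnx.pb.h"  // assumed name of the protoc-generated header

int main() {
  ge::onnx::NodeProto node;
  node.set_op_type("Concat");
  node.add_input("x");
  node.add_input("y");
  node.add_output("z");

  // Exactly one value field is set, and `type` acts as the discriminator.
  ge::onnx::AttributeProto *attr = node.add_attribute();
  attr->set_name("axis");
  attr->set_type(ge::onnx::AttributeProto::INT);
  attr->set_i(0);
  return 0;
}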
- // This field SHOULD be present to indicate which implementation/tool/framework - // emitted the model. - string producer_name = 2; - - // The version of the framework or tool used to generate this model. - // This field SHOULD be present to indicate which implementation/tool/framework - // emitted the model. - string producer_version = 3; - - // Domain name of the model. - // We use reverse domain names as name space indicators. For example: - // `com.facebook.fair` or `com.microsoft.cognitiveservices` - // - // Together with `model_version` and GraphProto.name, this forms the unique identity of - // the graph. - string domain = 4; - - // The version of the graph encoded. See Version enum below. - int64 model_version = 5; - - // A human-readable documentation for this model. Markdown is allowed. - string doc_string = 6; - - // The parameterized graph that is evaluated to execute the model. - GraphProto graph = 7; - - // Named metadata values; keys should be distinct. - repeated StringStringEntryProto metadata_props = 14; -}; - -// StringStringEntryProto follows the pattern for cross-proto-version maps. -// See https://developers.google.com/protocol-buffers/docs/proto3#maps -message StringStringEntryProto { - string key = 1; - string value= 2; -}; - -message TensorAnnotation { - string tensor_name = 1; - // pairs to annotate tensor specified by above. - // The keys used in the mapping below must be pre-defined in ONNX spec. - // For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as - // quantization parameter keys. - repeated StringStringEntryProto quant_parameter_tensor_names = 2; -} - - - -// Graphs -// -// A graph defines the computational logic of a model and is comprised of a parameterized -// list of nodes that form a directed acyclic graph based on their inputs and outputs. -// This is the equivalent of the "network" or "graph" in many deep learning -// frameworks. -message GraphProto { - // The nodes in the graph, sorted topologically. - repeated NodeProto node = 1; - - // The name of the graph. - string name = 2; // namespace Graph - - // A list of named tensor values, used to specify constant inputs of the graph. - // Each TensorProto entry must have a distinct name (within the list) that - // MAY also appear in the input list. - repeated TensorProto initializer = 5; - - // Initializers (see above) stored in sparse format. - repeated SparseTensorProto sparse_initializer = 15; - - // A human-readable documentation for this graph. Markdown is allowed. - string doc_string = 10; - - // The inputs and outputs of the graph. - repeated ValueInfoProto input = 11; - repeated ValueInfoProto output = 12; - - // Information for the values in the graph. The ValueInfoProto.name's - // must be distinct. It is optional for a value to appear in value_info list. - repeated ValueInfoProto value_info = 13; - - // This field carries information to indicate the mapping among a tensor and its - // quantization parameter tensors. For example: - // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated, - // which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model. - repeated TensorAnnotation quantization_annotation = 14; - - // DO NOT USE the following fields, they were deprecated from earlier versions. 
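The quantization_annotation example above, where tensor 'a' is paired with 'a_scale' and 'a_zero_point', maps onto TensorAnnotation and StringStringEntryProto as sketched below; the same protoc-generated-header assumption applies, and the key strings are the pre-defined names quoted in the comment.

#include "ge_onnx.pb.h"  // assumed name of the protoc-generated header

// Annotate tensor 'a' with its scale and zero-point tensors, mirroring the
// example in the GraphProto comment above.
void AnnotateQuantParams(ge::onnx::GraphProto &graph) {
  ge::onnx::TensorAnnotation *annotation = graph.add_quantization_annotation();
  annotation->set_tensor_name("a");

  ge::onnx::StringStringEntryProto *scale = annotation->add_quant_parameter_tensor_names();
  scale->set_key("SCALE_TENSOR");
  scale->set_value("a_scale");

  ge::onnx::StringStringEntryProto *zero_point = annotation->add_quant_parameter_tensor_names();
  zero_point->set_key("ZERO_POINT_TENSOR");
  zero_point->set_value("a_zero_point");
}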
- // repeated string input = 3; - // repeated string output = 4; - // optional int64 ir_version = 6; - // optional int64 producer_version = 7; - // optional string producer_tag = 8; - // optional string domain = 9; -} - -// Tensors -// -// A serialized tensor value. -message TensorProto { - enum DataType { - UNDEFINED = 0; - // Basic types. - FLOAT = 1; // float - UINT8 = 2; // uint8_t - INT8 = 3; // int8_t - UINT16 = 4; // uint16_t - INT16 = 5; // int16_t - INT32 = 6; // int32_t - INT64 = 7; // int64_t - STRING = 8; // string - BOOL = 9; // bool - - // IEEE754 half-precision floating-point format (16 bits wide). - // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits. - FLOAT16 = 10; - - DOUBLE = 11; - UINT32 = 12; - UINT64 = 13; - COMPLEX64 = 14; // complex with float32 real and imaginary components - COMPLEX128 = 15; // complex with float64 real and imaginary components - - // Non-IEEE floating-point format based on IEEE754 single-precision - // floating-point number truncated to 16 bits. - // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits. - BFLOAT16 = 16; - - // Future extensions go here. - } - - // The shape of the tensor. - repeated int64 dims = 1; - - // The data type of the tensor. - // This field MUST have a valid TensorProto.DataType value - int32 data_type = 2; - - // For very large tensors, we may want to store them in chunks, in which - // case the following fields will specify the segment that is stored in - // the current TensorProto. - message Segment { - int64 begin = 1; - int64 end = 2; - } - Segment segment = 3; - - // Tensor content must be organized in row-major order. - // - // Depending on the data_type field, exactly one of the fields below with - // name ending in _data is used to store the elements of the tensor. - - // For float and complex64 values - // Complex64 tensors are encoded as a single array of floats, - // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the - // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] - // is encoded as [1.0, 2.0 ,3.0 ,4.0] - // When this field is present, the data_type field MUST be FLOAT or COMPLEX64. - repeated float float_data = 4 [packed = true]; - - // For int32, uint8, int8, uint16, int16, bool, and float16 values - // float16 values must be bit-wise converted to an uint16_t prior - // to writing to the buffer. - // When this field is present, the data_type field MUST be - // INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16 - repeated int32 int32_data = 5 [packed = true]; - - // For strings. - // Each element of string_data is a UTF-8 encoded Unicode - // string. No trailing null, no leading BOM. The protobuf "string" - // scalar type is not used to match ML community conventions. - // When this field is present, the data_type field MUST be STRING - repeated bytes string_data = 6; - - // For int64. - // When this field is present, the data_type field MUST be INT64 - repeated int64 int64_data = 7 [packed = true]; - - // Optionally, a name for the tensor. - string name = 8; // namespace Value - - // A human-readable documentation for this tensor. Markdown is allowed. - string doc_string = 12; - - // Serializations can either use one of the fields above, or use this - // raw bytes field. The only exception is the string case, where one is - // required to store the content in the repeated bytes string_data field. 
- // - // When this raw_data field is used to store tensor value, elements MUST - // be stored in as fixed-width, little-endian order. - // Floating-point data types MUST be stored in IEEE 754 format. - // Complex64 elements must be written as two consecutive FLOAT values, real component first. - // Complex128 elements must be written as two consecutive DOUBLE values, real component first. - // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false). - // - // Note: the advantage of specific field rather than the raw_data field is - // that in some cases (e.g. int data), protobuf does a better packing via - // variable length storage, and may lead to smaller binary footprint. - // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED - bytes raw_data = 9; - - // Data can be stored inside the protobuf file using type-specific fields or raw_data. - // Alternatively, raw bytes data can be stored in an external file, using the external_data field. - // external_data stores key-value pairs describing data location. Recognized keys are: - // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX - // protobuf model was stored - // - "offset" (optional) - position of byte at which stored data begins. Integer stored as string. - // Offset values SHOULD be multiples 4096 (page size) to enable mmap support. - // - "length" (optional) - number of bytes containing data. Integer stored as string. - // - "checksum" (optional) - SHA1 digest of file specified in under 'location' key. - repeated StringStringEntryProto external_data = 13; - - // Location of the data for this tensor. MUST be one of: - // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field. - // - EXTERNAL - data stored in an external location as described by external_data field. - enum DataLocation { - DEFAULT = 0; - EXTERNAL = 1; - } - - // If value not set, data is stored in raw_data (if set) otherwise in type-specified field. - DataLocation data_location = 14; - - // For double - // Complex128 tensors are encoded as a single array of doubles, - // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the - // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] - // is encoded as [1.0, 2.0 ,3.0 ,4.0] - // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 - repeated double double_data = 10 [packed = true]; - - // For uint64 and uint32 values - // When this field is present, the data_type field MUST be - // UINT32 or UINT64 - repeated uint64 uint64_data = 11 [packed = true]; -} - -// A serialized sparse-tensor value -message SparseTensorProto { - // The sequence of non-default values are encoded as a tensor of shape [NNZ]. - // The default-value is zero for numeric tensors, and empty-string for string tensors. - TensorProto values = 1; - - // The indices of the non-default values, which may be stored in one of two formats. - // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value - // corresponding to the j-th index of the i-th value (in the values tensor). - // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value - // must be the linearized-index of the i-th value (in the values tensor). - // The linearized-index can be converted into an index tuple (k_1,...,k_rank) - // using the shape provided below. 
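TensorProto above lets the same FLOAT tensor be stored either through the typed float_data field or as fixed-width little-endian bytes in raw_data. Below is a hedged sketch of the raw_data route, assuming a little-endian host so the in-memory float layout already matches the required byte order; the protoc-generated header name is again an assumption.

#include <vector>

#include "ge_onnx.pb.h"  // assumed name of the protoc-generated header

int main() {
  const std::vector<float> values = {1.0f, 2.0f, 3.0f, 4.0f};

  ge::onnx::TensorProto tensor;
  tensor.set_name("weights");
  tensor.set_data_type(ge::onnx::TensorProto::FLOAT);
  tensor.add_dims(2);
  tensor.add_dims(2);

  // raw_data holds fixed-width little-endian elements; on a little-endian host
  // the bytes of the float vector can be copied as-is.
  tensor.set_raw_data(values.data(), values.size() * sizeof(float));

  // The typed alternative would be: for (float v : values) tensor.add_float_data(v);
  return 0;
}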
- // The indices must appear in ascending order without duplication. - // In the first format, the ordering is lexicographic-ordering: - // e.g., index-value [1,4] must appear before [2,1] - TensorProto indices = 2; - - // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank] - repeated int64 dims = 3; -} - -// Defines a tensor shape. A dimension can be either an integer value -// or a symbolic variable. A symbolic variable represents an unknown -// dimension. -message TensorShapeProto { - message Dimension { - oneof value { - int64 dim_value = 1; - string dim_param = 2; // namespace Shape - }; - // Standard denotation can optionally be used to denote tensor - // dimensions with standard semantic descriptions to ensure - // that operations are applied to the correct axis of a tensor. - // Refer to https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition - // for pre-defined dimension denotations. - string denotation = 3; - }; - repeated Dimension dim = 1; -} - -// Types -// -// The standard ONNX data types. -message TypeProto { - - message Tensor { - // This field MUST NOT have the value of UNDEFINED - // This field MUST have a valid TensorProto.DataType value - // This field MUST be present for this version of the IR. - int32 elem_type = 1; - TensorShapeProto shape = 2; - } - - // repeated T - message Sequence { - // The type and optional shape of each element of the sequence. - // This field MUST be present for this version of the IR. - TypeProto elem_type = 1; - }; - - // map - message Map { - // This field MUST have a valid TensorProto.DataType value - // This field MUST be present for this version of the IR. - // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING - int32 key_type = 1; - // This field MUST be present for this version of the IR. - TypeProto value_type = 2; - }; - - oneof value { - // The type of a tensor. - Tensor tensor_type = 1; - - // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values - // as input and output to graphs and nodes. These types are needed to naturally - // support classical ML operators. DNN operators SHOULD restrict their input - // and output types to tensors. - - // The type of a sequence. - Sequence sequence_type = 4; - - // The type of a map. - Map map_type = 5; - - } - - // An optional denotation can be used to denote the whole - // type with a standard semantic description as to what is - // stored inside. Refer to https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition - // for pre-defined type denotations. - string denotation = 6; -} - -// Operator Sets -// -// OperatorSets are uniquely identified by a (domain, opset_version) pair. -message OperatorSetIdProto { - // The domain of the operator set being identified. - // The empty string ("") or absence of this field implies the operator - // set that is defined as part of the ONNX specification. - // This field MUST be present in this version of the IR when referring to any other operator set. - string domain = 1; - - // The version of the operator set being identified. - // This field MUST be present in this version of the IR. - int64 version = 2; -} diff --git a/inc/metadef/inc/register/proto/task.proto b/inc/metadef/inc/register/proto/task.proto deleted file mode 100644 index 0da5631ea..000000000 --- a/inc/metadef/inc/register/proto/task.proto +++ /dev/null @@ -1,179 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -message ModelTaskDef { - string version = 1; - - map attr = 9; // Extended field - repeated TaskDef task = 10; - - uint64 memory_size = 11; - uint32 stream_num = 12; - uint32 event_num = 13; - uint64 weight_size = 14; - - repeated bytes op = 15; // input/output opdef in bytes - - uint64 base_addr = 16; // base addr - uint64 weight_addr = 17; // weight addr - uint32 batch_num = 18; -} - - -message TaskDef { - uint32 id = 1; - uint32 type = 2; - - uint32 stream_id = 10; - uint32 event_id = 11; - - KernelDef kernel = 20; - KernelExDef kernel_ex = 21; - KernelHcclDef kernel_hccl = 25; - EventExDef event_ex = 26; - LogTimeStampDef log_timestamp = 28; - - uint32 label_id = 30; - - MemcpyAsyncDef memcpy_async = 31; - StreamSwitchDef stream_switch = 32; - StreamActiveDef stream_active = 33; - bytes private_def = 34; - uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future - StreamSwitchNDef stream_switch_n = 36; - - LabelSetDef label_set = 37; - LabelGotoExDef label_goto_ex = 38; - LabelSwitchByIndexDef label_switch_by_index = 39; - KernelDefWithHandle kernel_with_handle = 40; -} - -message KernelDef { - KernelContext context = 1; - - string stub_func = 10; - uint32 block_dim = 11; - uint32 args_size = 12; - bytes args = 13; - bytes sm_desc = 14; - bytes flowtable = 15; - string so_name = 16; - string kernel_name = 17; - bytes kernel_ext_info = 18; - uint32 kernel_ext_info_size = 19; -} - -message KernelDefWithHandle { - KernelContext context = 1; - - uint64 handle = 10; - string dev_func = 11; - uint32 block_dim = 12; - uint32 args_size = 13; - bytes args = 14; - bytes sm_desc = 15; - string original_kernel_key = 16; - string node_info = 17; -} - -message KernelContext { - uint32 kernel_type = 1; - uint32 op_id = 2; // OP type in CCE - uint32 kernel_func_id = 3; - uint32 op_index = 4; // TE/Custom operator - bool is_flowtable = 5; // Identify whether args is a flowtable structure - bytes args_offset = 6; // args offset information - uint32 args_count = 7; // args count - repeated uint32 origin_op_index = 8; -} - - -message KernelExDef { - uint32 flags = 1; - - uint32 op_index = 4; - uint32 args_size = 12; - bytes args = 13; - bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput - uint32 task_info_size = 15; - bytes kernel_ext_info = 16; - uint32 kernel_ext_info_size = 17; -} - - -message KernelHcclDef { - uint32 op_index = 8; - string hccl_type = 9; -} - - -message EventExDef { - uint32 op_index = 1; - uint32 event_type = 2; -} - -message LogTimeStampDef { - uint64 logid = 1; - bool notify = 2; - uint32 flat = 3; -} - -message MemcpyAsyncDef { - uint64 dst = 1; - uint64 dst_max = 2; - uint64 src = 3; - uint64 count = 4; - uint32 kind = 5; - uint32 op_index = 6; -} - -message StreamSwitchDef { - uint32 op_index = 1; - uint32 true_stream_id = 2; - int64 value = 3; - uint64 value_ptr = 4; - uint32 data_type = 5; -} - -message StreamActiveDef { - uint32 op_index = 1; - uint32 active_stream_id = 2; -} - -message StreamSwitchNDef { - uint32 
op_index = 1; - uint32 size = 2; - repeated int64 target_value = 3; - repeated uint32 true_stream_id = 4; - uint32 element_size = 5; - uint32 data_type = 6; -} - -message LabelSetDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelGotoExDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelSwitchByIndexDef { - uint32 op_index = 1; - uint32 label_max = 2; -} diff --git a/inc/metadef/inc/register/proto/tensorflow/attr_value.proto b/inc/metadef/inc/register/proto/tensorflow/attr_value.proto deleted file mode 100644 index 1cc67d627..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/attr_value.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "AttrValueProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "tensor.proto"; -import "tensor_shape.proto"; -import "types.proto"; - -// Protocol buffer representing the value for an attr used to configure an Op. -// Comment indicates the corresponding attr type. Only the field matching the -// attr type may be filled. -message AttrValue { - // LINT.IfChange - message ListValue { - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3 [packed = true]; // "list(int)" - repeated float f = 4 [packed = true]; // "list(float)" - repeated bool b = 5 [packed = true]; // "list(bool)" - repeated DataType type = 6 [packed = true]; // "list(type)" - repeated TensorShapeProto shape = 7; // "list(shape)" - repeated TensorProto tensor = 8; // "list(tensor)" - repeated NameAttrList func = 9; // "list(attr)" - } - // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc) - - oneof value { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - DataType type = 6; // "type" - TensorShapeProto shape = 7; // "shape" - TensorProto tensor = 8; // "tensor" - ListValue list = 1; // any "list(...)" - - // "func" represents a function. func.name is a function's name or - // a primitive op's name. func.attr.first is the name of an attr - // defined for that function. func.attr.second is the value for - // that attr in the instantiation. - NameAttrList func = 10; - - // This is a placeholder only used in nodes defined inside a - // function. It indicates the attr value will be supplied when - // the function is instantiated. For example, let us suppose a - // node "N" in function "FN". "N" has an attr "A" with value - // placeholder = "foo". When FN is instantiated with attr "foo" - // set to "bar", the instantiated node N's attr A will have been - // given the value "bar". - string placeholder = 9; - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. 
-message NameAttrList { - string name = 1; - map attr = 2; -} diff --git a/inc/metadef/inc/register/proto/tensorflow/function.proto b/inc/metadef/inc/register/proto/tensorflow/function.proto deleted file mode 100644 index 075897c68..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/function.proto +++ /dev/null @@ -1,100 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "FunctionProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; -import "node_def.proto"; -import "op_def.proto"; - -// A library is a set of named functions. -message FunctionDefLibrary { - repeated FunctionDef function = 1; - repeated GradientDef gradient = 2; -} - -// A function can be instantiated when the runtime can bind every attr -// with a value. When a GraphDef has a call to a function, it must -// have binding for every attr defined in the signature. -// * device spec, etc. -message FunctionDef { - // The definition of the function's name, arguments, return values, - // attrs etc. - OpDef signature = 1; - - // Attributes specific to this function definition. - map attr = 5; - - // NOTE: field id 2 deleted on Jan 11, 2017, GraphDef version 21. - reserved 2; - - // In both of the following fields, there is the need to specify an - // output that is used as either the input to another node (in - // `node_def`) or as a return value of the function (in `ret`). - // Unlike the NodeDefs in GraphDef, we need to be able to specify a - // list in some cases (instead of just single outputs). Also, we - // need to be able to deal with lists of unknown length (so the - // output index may not be known at function definition time). So - // we use the following format instead: - // * "fun_in" where "fun_in" is the name of a function input arg in - // the `signature` field above. This represents that input, whether - // it is a single tensor or a list. - // * "fun_in:0" gives the first element of a function input arg (a - // non-list input is considered a list of length 1 for these - // purposes). - // * "node:out" where "node" is the name of a node in `node_def` and - // "out" is the name one of its op's output arguments (the name - // comes from the OpDef of the node's op). This represents that - // node's output, whether it is a single tensor or a list. - // Note: We enforce that an op's output arguments are never - // renamed in the backwards-compatibility test. - // * "node:out:0" gives the first element of a node output arg (a - // non-list output is considered a list of length 1 for these - // purposes). - // - // NOT CURRENTLY SUPPORTED (but may be in the future): - // * "node:out:-1" gives last element in a node output list - // * "node:out:1:" gives a list with all but the first element in a - // node output list - // * "node:out::-1" gives a list with all but the last element in a - // node output list - - // The body of the function. Unlike the NodeDefs in a GraphDef, attrs - // may have values of type `placeholder` and the `input` field uses - // the "output" format above. - - // By convention, "op" in node_def is resolved by consulting with a - // user-defined library first. If not resolved, "func" is assumed to - // be a builtin op. - repeated NodeDef node_def = 3; - - // A mapping from the output arg names from `signature` to the - // outputs from `node_def` that should be returned by the function. 
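The AttrValue message above mirrors TensorFlow's attribute encoding: a scalar attr sets exactly one member of the value oneof, while list-typed attrs go through ListValue. A short sketch with protoc-generated classes; the header name is an assumption, and the attr contents are illustrative.

#include "attr_value.pb.h"  // assumed name of the protoc-generated header

int main() {
  // An "int" attr: exactly one oneof member is set.
  domi::tensorflow::AttrValue axis;
  axis.set_i(3);

  // A "list(int)" attr: the list member carries the repeated values.
  domi::tensorflow::AttrValue strides;
  strides.mutable_list()->add_i(1);
  strides.mutable_list()->add_i(2);
  return 0;
}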
- map ret = 4; -} - -// GradientDef defines the gradient function of a function defined in -// a function library. -// -// A gradient function g (specified by gradient_func) for a function f -// (specified by function_name) must follow the following: -// -// The function 'f' must be a numerical function which takes N inputs -// and produces M outputs. Its gradient function 'g', which is a -// function taking N + M inputs and produces N outputs. -// -// I.e. if we have -// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), -// then, g is -// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, -// dL/dy1, dL/dy2, ..., dL/dy_M), -// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the -// loss function). dL/dx_i is the partial derivative of L with respect -// to x_i. -message GradientDef { - string function_name = 1; // The function name. - string gradient_func = 2; // The gradient function's name. -} diff --git a/inc/metadef/inc/register/proto/tensorflow/graph.proto b/inc/metadef/inc/register/proto/tensorflow/graph.proto deleted file mode 100644 index d639a7d6c..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/graph.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "GraphProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "node_def.proto"; -import "function.proto"; -import "versions.proto"; - -// Represents the graph of operations -message GraphDef { - repeated NodeDef node = 1; - - // Compatibility versions of the graph. See core/public/version.h for version - // history. The GraphDef version is distinct from the TensorFlow version, and - // each release of TensorFlow will support a range of GraphDef versions. - VersionDef versions = 4; - - // Deprecated single version field; use versions above instead. Since all - // GraphDef changes before "versions" was introduced were forward - // compatible, this field is entirely ignored. - int32 version = 3 [deprecated = true]; - - // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET. - // - // "library" provides user-defined functions. - // - // Naming: - // * library.function.name are in a flat namespace. - // NOTE: We may need to change it to be hierarchical to support - // different orgs. E.g., - // { "/google/nn", { ... }}, - // { "/google/vision", { ... }} - // { "/org_foo/module_bar", { ... }} - // map named_lib; - // * If node[i].op is the name of one function in "library", - // node[i] is deemed as a function call. Otherwise, node[i].op - // must be a primitive operation supported by the runtime. - // - // - // Function call semantics: - // - // * The callee may start execution as soon as some of its inputs - // are ready. The caller may want to use Tuple() mechanism to - // ensure all inputs are ready in the same time. - // - // * The consumer of return values may start executing as soon as - // the return values the consumer depends on are ready. The - // consumer may want to use Tuple() mechanism to ensure the - // consumer does not start until all return values of the callee - // function are ready. 
- FunctionDefLibrary library = 2; -}; diff --git a/inc/metadef/inc/register/proto/tensorflow/graph_library.proto b/inc/metadef/inc/register/proto/tensorflow/graph_library.proto deleted file mode 100644 index e393d38dd..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/graph_library.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; - -import "graph.proto"; - -message GeGraphDef { - string name = 1; - GraphDef graph = 2; -} - -message GraphDefLibrary { - repeated GeGraphDef graph_def = 1; -}; \ No newline at end of file diff --git a/inc/metadef/inc/register/proto/tensorflow/node_def.proto b/inc/metadef/inc/register/proto/tensorflow/node_def.proto deleted file mode 100644 index b9bc97ee6..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/node_def.proto +++ /dev/null @@ -1,63 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; - -message NodeDef { - // The name given to this operator. Used for naming inputs, - // logging, visualization, etc. Unique within a single GraphDef. - // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_./]*". - string name = 1; - - // The operation name. There may be custom parameters in attrs. - // Op names starting with an underscore are reserved for internal use. - string op = 2; - - // Each input is "node:src_output" with "node" being a string name and - // "src_output" indicating which output tensor to use from "node". If - // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs - // may optionally be followed by control inputs that have the format - // "^node". - repeated string input = 3; - - // A (possibly partial) specification for the device on which this - // node should be placed. - // The expected syntax for this string is as follows: - // - // DEVICE_SPEC ::= PARTIAL_SPEC - // - // PARTIAL_SPEC ::= ("/" CONSTRAINT) * - // CONSTRAINT ::= ("job:" JOB_NAME) - // | ("replica:" [1-9][0-9]*) - // | ("task:" [1-9][0-9]*) - // | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") ) - // - // Valid values for this string include: - // * "/job:worker/replica:0/task:1/device:GPU:3" (full specification) - // * "/job:worker/device:GPU:3" (partial specification) - // * "" (no specification) - // - // If the constraints do not resolve to a single device (or if this - // field is empty or not present), the runtime will attempt to - // choose a device automatically. - string device = 4; - - // Operation-specific graph-construction-time configuration. - // Note that this should include all attrs defined in the - // corresponding OpDef, including those with a value matching - // the default -- this allows the default to change and makes - // NodeDefs easier to interpret on their own. However, if - // an attr with a default is not specified in this list, the - // default will be used. - // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and - // one of the names from the corresponding OpDef's attr field). - // The values must have a type matching the corresponding OpDef - // attr's type field. - // Add some examples here showing best practices. 
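As a concrete illustration of the NodeDef fields above (name, op, inputs in the "node:src_output" and "^control" forms, the device constraint, and the attr map, which in the upstream TensorFlow definition maps attr names to AttrValue), a hedged protoc-generated-classes sketch follows; the header names and all values are illustrative.

#include "attr_value.pb.h"  // assumed names of the protoc-generated headers
#include "node_def.pb.h"

int main() {
  domi::tensorflow::NodeDef node;
  node.set_name("conv1");                        // unique within a GraphDef
  node.set_op("Conv2D");
  node.add_input("images");                      // "node:src_output", ":0" omitted
  node.add_input("^init");                       // control input
  node.set_device("/job:worker/device:GPU:3");   // partial or full device spec

  // Graph-construction-time configuration keyed by attr name.
  domi::tensorflow::AttrValue dtype;
  dtype.set_type(domi::tensorflow::DT_FLOAT);
  (*node.mutable_attr())["T"] = dtype;
  return 0;
}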
- map attr = 5; -}; diff --git a/inc/metadef/inc/register/proto/tensorflow/op_def.proto b/inc/metadef/inc/register/proto/tensorflow/op_def.proto deleted file mode 100644 index 3485d0453..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/op_def.proto +++ /dev/null @@ -1,164 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "OpDefProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; -import "types.proto"; - -// Defines an operation. A NodeDef in a GraphDef specifies an Op by -// using the "op" field which should match the name of a OpDef. -// LINT.IfChange -message OpDef { - // Op names starting with an underscore are reserved for internal use. - // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9_]*". - string name = 1; - - // For describing inputs and outputs. - message ArgDef { - // Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*". - string name = 1; - - // Human readable description. - string description = 2; - - // Describes the type of one or more tensors that are accepted/produced - // by this input/output arg. The only legal combinations are: - // * For a single tensor: either the "type" field is set or the - // "type_attr" field is set to the name of an attr with type "type". - // * For a sequence of tensors with the same type: the "number_attr" - // field will be set to the name of an attr with type "int", and - // either the "type" or "type_attr" field will be set as for - // single tensors. - // * For a sequence of tensors, the "type_list_attr" field will be set - // to the name of an attr with type "list(type)". - DataType type = 3; - string type_attr = 4; // if specified, attr must have type "type" - string number_attr = 5; // if specified, attr must have type "int" - // If specified, attr must have type "list(type)", and none of - // type, type_attr, and number_attr may be specified. - string type_list_attr = 6; - - // For inputs: if true, the inputs are required to be refs. - // By default, inputs can be either refs or non-refs. - // For outputs: if true, outputs are refs, otherwise they are not. - bool is_ref = 16; - }; - - // Description of the input(s). - repeated ArgDef input_arg = 2; - - // Description of the output(s). - repeated ArgDef output_arg = 3; - - // Description of the graph-construction-time configuration of this - // Op. That is to say, this describes the attr fields that will - // be specified in the NodeDef. - message AttrDef { - // A descriptive name for the argument. May be used, e.g. by the - // Python client, as a keyword argument name, and so should match - // the regexp "[a-z][a-z0-9_]+". - string name = 1; - - // One of the type names from attr_value.proto ("string", "list(string)", - // "int", etc.). - string type = 2; - - // A reasonable default for this attribute if the user does not supply - // a value. If not specified, the user must supply a value. - AttrValue default_value = 3; - - // Human-readable description. - string description = 4; - - - // --- Constraints --- - // These constraints are only in effect if specified. Default is no - // constraints. - - // For type == "int", this is a minimum value. For "list(___)" - // types, this is the minimum length. - bool has_minimum = 5; - int64 minimum = 6; - - // The set of allowed values. Has type that is the "list" version - // of the "type" field above (uses the "list" field of AttrValue). 
- // If type == "type" or "list(type)" above, then the "type" field - // of "allowed_values.list" has the set of allowed DataTypes. - // If type == "string" or "list(string)", then the "s" field of - // "allowed_values.list" has the set of allowed strings. - AttrValue allowed_values = 7; - } - repeated AttrDef attr = 4; - - // Optional deprecation based on GraphDef versions. - OpDeprecation deprecation = 8; - - // One-line human-readable description of what the Op does. - string summary = 5; - - // Additional, longer human-readable description of what the Op does. - string description = 6; - - // ------------------------------------------------------------------------- - // Which optimizations this operation can participate in. - - // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs) - bool is_commutative = 18; - - // If is_aggregate is true, then this operation accepts N >= 2 - // inputs and produces 1 output all of the same type. Should be - // associative and commutative, and produce output with the same - // shape as the input. The optimizer may replace an aggregate op - // taking input from multiple devices with a tree of aggregate ops - // that aggregate locally within each device (and possibly within - // groups of nearby devices) before communicating. - bool is_aggregate = 16; // for things like add - - // Other optimizations go here, like - // can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc. - - // ------------------------------------------------------------------------- - // Optimization constraints. - - // Ops are marked as stateful if their behavior depends on some state beyond - // their input tensors (e.g. variable reading op) or if they have - // a side-effect (e.g. printing or asserting ops). Equivalently, stateless ops - // must always produce the same output for the same input and have - // no side-effects. - // - // By default Ops may be moved between devices. Stateful ops should - // either not be moved, or should only be moved if that state can also - // be moved (e.g. via some sort of save / restore). - // Stateful ops are guaranteed to never be optimized away by Common - // Subexpression Elimination (CSE). - bool is_stateful = 17; // for things like variables, queue - - // ------------------------------------------------------------------------- - // Non-standard options. - - // By default, all inputs to an Op must be initialized Tensors. Ops - // that may initialize tensors for the first time should set this - // field to true, to allow the Op to take an uninitialized Tensor as - // input. - bool allows_uninitialized_input = 19; // for Assign, etc. -}; -// LINT.ThenChange( -// https://www.tensorflow.org/code/tensorflow/core/framework/op_def_util.cc) - -// Information about version-dependent deprecation of an op -message OpDeprecation { - // First GraphDef version at which the op is disallowed. - int32 version = 1; - - // Explanation of why it was deprecated and what to use instead. 
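OpDef above captures an op's full signature: typed input/output args, the attrs that configure them, and optimizer hints such as is_commutative and is_aggregate. A hedged sketch of a hypothetical "AddN"-style definition built with protoc-generated classes follows; the header name and every value are illustrative.

#include "op_def.pb.h"  // assumed name of the protoc-generated header

int main() {
  domi::tensorflow::OpDef op;
  op.set_name("AddN");
  op.set_summary("Adds all input tensors element-wise.");
  op.set_is_commutative(true);   // op(a, b) == op(b, a)
  op.set_is_aggregate(true);     // N >= 2 inputs, one output of the same type

  domi::tensorflow::OpDef::ArgDef *inputs = op.add_input_arg();
  inputs->set_name("inputs");
  inputs->set_type_attr("T");    // element type taken from the "T" attr
  inputs->set_number_attr("N");  // sequence length taken from the "N" attr

  domi::tensorflow::OpDef::ArgDef *sum = op.add_output_arg();
  sum->set_name("sum");
  sum->set_type_attr("T");

  domi::tensorflow::OpDef::AttrDef *n = op.add_attr();
  n->set_name("N");
  n->set_type("int");
  n->set_has_minimum(true);
  n->set_minimum(2);             // an aggregate op needs at least two inputs
  return 0;
}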
- string explanation = 2; -}; - -// A collection of OpDefs -message OpList { - repeated OpDef op = 1; -}; diff --git a/inc/metadef/inc/register/proto/tensorflow/resource_handle.proto b/inc/metadef/inc/register/proto/tensorflow/resource_handle.proto deleted file mode 100644 index a34523512..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/resource_handle.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "ResourceHandle"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// Protocol buffer representing a handle to a tensorflow resource. Handles are -// not valid across executions, but can be serialized back and forth from within -// a single run. -message ResourceHandleProto { - // Unique name for the device containing the resource. - string device = 1; - - // Container in which this resource is placed. - string container = 2; - - // Unique name of this resource. - string name = 3; - - // Hash code for the type of the resource. Is only valid in the same device - // and in the same execution. - uint64 hash_code = 4; - - // For debug-only, the name of the type pointed to by this handle, if - // available. - string maybe_type_name = 5; -}; diff --git a/inc/metadef/inc/register/proto/tensorflow/tensor.proto b/inc/metadef/inc/register/proto/tensorflow/tensor.proto deleted file mode 100644 index d0a4d024c..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/tensor.proto +++ /dev/null @@ -1,94 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "TensorProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "resource_handle.proto"; -import "tensor_shape.proto"; -import "types.proto"; - -// Protocol buffer representing a tensor. -message TensorProto { - DataType dtype = 1; - - // Shape of the tensor. - TensorShapeProto tensor_shape = 2; - - // Only one of the representations below is set, one of "tensor_contents" and - // the "xxx_val" attributes. We are not using oneof because as oneofs cannot - // contain repeated fields it would require another extra set of messages. - - // Version number. - // - // In version 0, if the "repeated xxx" representations contain only one - // element, that element is repeated to fill the shape. This makes it easy - // to represent a constant Tensor with a single value. - int32 version_number = 3; - - // Serialized raw tensor content from either Tensor::AsProtoTensorContent or - // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation - // can be used for all tensor types. The purpose of this representation is to - // reduce serialization overhead during RPC call by avoiding serialization of - // many repeated small items. - bytes tensor_content = 4; - - // Type specific representations that make it easy to create tensor protos in - // all languages. Only the representation corresponding to "dtype" can - // be set. The values hold the flattened representation of the tensor in - // row major order. - - // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll - // have some pointless zero padding for each value here. - repeated int32 half_val = 13 [packed = true]; - - // DT_FLOAT. - repeated float float_val = 5 [packed = true]; - - // DT_DOUBLE. - repeated double double_val = 6 [packed = true]; - - // DT_INT32, DT_INT16, DT_INT8, DT_UINT8. 
- repeated int32 int_val = 7 [packed = true]; - - // DT_STRING - repeated bytes string_val = 8; - - // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real - // and imaginary parts of i-th single precision complex. - repeated float scomplex_val = 9 [packed = true]; - - // DT_INT64 - repeated int64 int64_val = 10 [packed = true]; - - // DT_BOOL - repeated bool bool_val = 11 [packed = true]; - - // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real - // and imaginary parts of i-th double precision complex. - repeated double dcomplex_val = 12 [packed = true]; - - // DT_RESOURCE - repeated ResourceHandleProto resource_handle_val = 14; - - // DT_VARIANT - repeated VariantTensorDataProto variant_val = 15; - - // DT_UINT32 - repeated uint32 uint32_val = 16 [packed = true]; - - // DT_UINT64 - repeated uint64 uint64_val = 17 [packed = true]; -}; - -// Protocol buffer representing the serialization format of DT_VARIANT tensors. -message VariantTensorDataProto { - // Name of the type of objects being serialized. - string type_name = 1; - // Portions of the object that are not Tensors. - bytes metadata = 2; - // Tensors contained within objects being serialized. - repeated TensorProto tensors = 3; -} diff --git a/inc/metadef/inc/register/proto/tensorflow/tensor_shape.proto b/inc/metadef/inc/register/proto/tensorflow/tensor_shape.proto deleted file mode 100644 index 4225a2e37..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/tensor_shape.proto +++ /dev/null @@ -1,45 +0,0 @@ -// Protocol buffer representing the shape of tensors. - -syntax = "proto3"; -option cc_enable_arenas = true; -option java_outer_classname = "TensorShapeProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -package domi.tensorflow; - -// Dimensions of a tensor. -message TensorShapeProto { - // One dimension of the tensor. - message Dim { - // Size of the tensor in that dimension. - // This value must be >= -1, but values of -1 are reserved for "unknown" - // shapes (values of -1 mean "unknown" dimension). Certain wrappers - // that work with TensorShapeProto may fail at runtime when deserializing - // a TensorShapeProto containing a dim value of -1. - int64 size = 1; - - // Optional name of the tensor dimension. - string name = 2; - }; - - // Dimensions of the tensor, such as {"input", 30}, {"output", 40} - // for a 30 x 40 2D tensor. If an entry has size -1, this - // corresponds to a dimension of unknown size. The names are - // optional. - // - // The order of entries in "dim" matters: It indicates the layout of the - // values in the tensor in-memory representation. - // - // The first entry in "dim" is the outermost dimension used to layout the - // values, the last entry is the innermost dimension. This matches the - // in-memory layout of RowMajor Eigen tensors. - // - // If "dim.size()" > 0, "unknown_rank" must be false. - repeated Dim dim = 2; - - // If true, the number of dimensions in the shape is unknown. - // - // If true, "dim.size()" must be 0. 
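TensorShapeProto above orders dimensions from outermost to innermost and reserves the unknown_rank flag (defined just below) for shapes with no known rank at all. A short protoc-generated-classes sketch; the header name is an assumption.

#include "tensor_shape.pb.h"  // assumed name of the protoc-generated header

int main() {
  // A 30 x 40 tensor: the first Dim is the outermost dimension.
  domi::tensorflow::TensorShapeProto shape;
  shape.add_dim()->set_size(30);
  shape.add_dim()->set_size(40);

  // A shape of completely unknown rank carries no Dim entries at all.
  domi::tensorflow::TensorShapeProto unknown;
  unknown.set_unknown_rank(true);
  return 0;
}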
- bool unknown_rank = 3; -}; diff --git a/inc/metadef/inc/register/proto/tensorflow/types.proto b/inc/metadef/inc/register/proto/tensorflow/types.proto deleted file mode 100644 index ba7a72b30..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/types.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "TypesProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// LINT.IfChange -enum DataType { - // Not a legal value for DataType. Used to indicate a DataType field - // has not been set. - DT_INVALID = 0; - - // Data types that all computation devices are expected to be - // capable to support. - DT_FLOAT = 1; - DT_DOUBLE = 2; - DT_INT32 = 3; - DT_UINT8 = 4; - DT_INT16 = 5; - DT_INT8 = 6; - DT_STRING = 7; - DT_COMPLEX64 = 8; // Single-precision complex - DT_INT64 = 9; - DT_BOOL = 10; - DT_QINT8 = 11; // Quantized int8 - DT_QUINT8 = 12; // Quantized uint8 - DT_QINT32 = 13; // Quantized int32 - DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops. - DT_QINT16 = 15; // Quantized int16 - DT_QUINT16 = 16; // Quantized uint16 - DT_UINT16 = 17; - DT_COMPLEX128 = 18; // Double-precision complex - DT_HALF = 19; - DT_RESOURCE = 20; - DT_VARIANT = 21; // Arbitrary C++ data types - DT_UINT32 = 22; - DT_UINT64 = 23; - - // Do not use! These are only for parameters. Every enum above - // should have a corresponding value below (verified by types_test). - DT_FLOAT_REF = 101; - DT_DOUBLE_REF = 102; - DT_INT32_REF = 103; - DT_UINT8_REF = 104; - DT_INT16_REF = 105; - DT_INT8_REF = 106; - DT_STRING_REF = 107; - DT_COMPLEX64_REF = 108; - DT_INT64_REF = 109; - DT_BOOL_REF = 110; - DT_QINT8_REF = 111; - DT_QUINT8_REF = 112; - DT_QINT32_REF = 113; - DT_BFLOAT16_REF = 114; - DT_QINT16_REF = 115; - DT_QUINT16_REF = 116; - DT_UINT16_REF = 117; - DT_COMPLEX128_REF = 118; - DT_HALF_REF = 119; - DT_RESOURCE_REF = 120; - DT_VARIANT_REF = 121; - DT_UINT32_REF = 122; - DT_UINT64_REF = 123; -} -// LINT.ThenChange( -// https://www.tensorflow.org/code/tensorflow/c/c_api.h, -// https://www.tensorflow.org/code/tensorflow/go/tensor.go, -// https://www.tensorflow.org/code/tensorflow/core/framework/tensor.cc, -// https://www.tensorflow.org/code/tensorflow/core/framework/types.h, -// https://www.tensorflow.org/code/tensorflow/core/framework/types.cc, -// https://www.tensorflow.org/code/tensorflow/python/framework/dtypes.py, -// https://www.tensorflow.org/code/tensorflow/python/framework/function.py) diff --git a/inc/metadef/inc/register/proto/tensorflow/versions.proto b/inc/metadef/inc/register/proto/tensorflow/versions.proto deleted file mode 100644 index 48061218a..000000000 --- a/inc/metadef/inc/register/proto/tensorflow/versions.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "VersionsProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// Version information for a piece of serialized data -// -// There are different types of versions for each type of data -// (GraphDef, etc.), but they all have the same common shape -// described here. -// -// Each consumer has "consumer" and "min_producer" versions (specified -// elsewhere). 
A consumer is allowed to consume this data if -// -// producer >= min_producer -// consumer >= min_consumer -// consumer not in bad_consumers -// -message VersionDef { - // The version of the code that produced this data. - int32 producer = 1; - - // Any consumer below this version is not allowed to consume this data. - int32 min_consumer = 2; - - // Specific consumer versions which are disallowed (e.g. due to bugs). - repeated int32 bad_consumers = 3; -}; diff --git a/inc/metadef/inc/register/prototype_pass_registry.h b/inc/metadef/inc/register/prototype_pass_registry.h deleted file mode 100644 index 0494daa38..000000000 --- a/inc/metadef/inc/register/prototype_pass_registry.h +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef METADEF_PROTOTYPE_PASS_REGISTRY_H -#define METADEF_PROTOTYPE_PASS_REGISTRY_H - -#include - -#include - -#include "external/ge/ge_api_error_codes.h" -#include "register/register_error_codes.h" -#include "register/register_fmk_types.h" - -namespace ge { -class ProtoTypeBasePass { -public: - virtual Status Run(google::protobuf::Message *message) = 0; - virtual ~ProtoTypeBasePass() {} -}; - -class ProtoTypePassRegistry { -public: - using CreateFn = std::function; - ~ProtoTypePassRegistry(); - - static ProtoTypePassRegistry &GetInstance(); - - void RegisterProtoTypePass(const char *pass_name, CreateFn create_fn, domi::FrameworkType fmk_type); - - std::vector> GetCreateFnByType(domi::FrameworkType fmk_type) const; - -private: - ProtoTypePassRegistry(); - class ProtoTypePassRegistryImpl; - std::unique_ptr impl_; -}; - -class ProtoTypePassRegistrar { -public: - ProtoTypePassRegistrar(const char *pass_name, ProtoTypeBasePass *(*create_fn)(), domi::FrameworkType fmk_type); - ~ProtoTypePassRegistrar() {} -}; - -#define REGISTER_PROTOTYPE_PASS(pass_name, pass, fmk_type) \ - REGISTER_PROTOTYPE_PASS_UNIQ_HELPER(__COUNTER__, pass_name, pass, fmk_type) - -#define REGISTER_PROTOTYPE_PASS_UNIQ_HELPER(ctr, pass_name, pass, fmk_type) \ - REGISTER_PROTOTYPE_PASS_UNIQ(ctr, pass_name, pass, fmk_type) - -#define REGISTER_PROTOTYPE_PASS_UNIQ(ctr, pass_name, pass, fmk_type) \ - static ::ge::ProtoTypePassRegistrar register_prototype_pass##ctr __attribute__((unused)) = \ - ::ge::ProtoTypePassRegistrar( \ - pass_name, []()->::ge::ProtoTypeBasePass * { return new (std::nothrow) pass(); }, fmk_type) -} // namespace ge -#endif // METADEF_PROTOTYPE_PASS_REGISTRY_H diff --git a/inc/metadef/inc/register/register.h b/inc/metadef/inc/register/register.h deleted file mode 100644 index 72e9924d8..000000000 --- a/inc/metadef/inc/register/register.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_REGISTRY_H_ -#define INC_REGISTER_REGISTRY_H_ - -#include "external/register/register.h" -#include "external/ge/ge_api_error_codes.h" - -namespace ge { -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY HostCpuOp { - public: - HostCpuOp() = default; - virtual ~HostCpuOp() = default; - - virtual graphStatus Compute(Operator &op, - const std::map &inputs, - std::map &outputs) = 0; -}; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY HostCpuOpRegistrar { - public: - HostCpuOpRegistrar(const char *op_type, HostCpuOp *(*create_fn)()); - ~HostCpuOpRegistrar() = default; -}; - -#define REGISTER_HOST_CPU_OP_BUILDER(name, op) \ - REGISTER_HOST_CPU_OP_BUILDER_UNIQ_HELPER(__COUNTER__, name, op) - -#define REGISTER_HOST_CPU_OP_BUILDER_UNIQ_HELPER(ctr, name, op) \ - REGISTER_HOST_CPU_OP_BUILDER_UNIQ(ctr, name, op) - -#define REGISTER_HOST_CPU_OP_BUILDER_UNIQ(ctr, name, op) \ - static ::ge::HostCpuOpRegistrar register_host_cpu_op##ctr \ - __attribute__((unused)) = \ - ::ge::HostCpuOpRegistrar(name, []()->::ge::HostCpuOp* { \ - return new (std::nothrow) op(); \ - }) -} // namespace ge - -#endif //INC_REGISTER_REGISTRY_H_ diff --git a/inc/metadef/inc/register/register_format_transfer.h b/inc/metadef/inc/register/register_format_transfer.h deleted file mode 100644 index 5cbf4ab42..000000000 --- a/inc/metadef/inc/register/register_format_transfer.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_REGISTER_FORMAT_TRANSFER_H_ -#define INC_REGISTER_REGISTER_FORMAT_TRANSFER_H_ - -#include -#include -#include - -#include "external/graph/types.h" -#include "ge/ge_api_error_codes.h" - -namespace ge { -namespace formats { -struct TransArgs { - const uint8_t *data; - Format src_format; - Format dst_format; - // For scenes that need to supplement the shape, for example, 5D to 4D - // It is not possible to convert the format normally if you only get the src_shape, - // and must get the shape before you mend the shape. 
- // So the parameters here need to be passed in both src_shape and dst_shape - std::vector src_shape; - std::vector dst_shape; - DataType src_data_type; -}; - -struct TransResult { - std::shared_ptr data; - // data length in bytes - size_t length; -}; - -class FormatTransfer { - public: - virtual ~FormatTransfer() = default; - virtual Status TransFormat(const TransArgs &args, TransResult &result) = 0; - virtual Status TransShape(Format src_format, const std::vector &src_shape, DataType data_type, - Format dst_format, std::vector &dst_shape) = 0; -}; - -using FormatTransferBuilder = std::function()>; - -class FormatTransferRegister { - public: - FormatTransferRegister(FormatTransferBuilder builder, Format src, Format dst); - ~FormatTransferRegister() = default; -}; - -#define REGISTER_FORMAT_TRANSFER(TransferClass, format1, format2) \ - namespace { \ - FormatTransferRegister format_transfer_register_##TransferClass##format1##format2( \ - []() { return std::make_shared(); }, format1, format2); \ - } - -/// Build a formattransfer according to 'args' -/// @param args -/// @param result -/// @return -std::shared_ptr BuildFormatTransfer(const TransArgs &args); - -bool FormatTransferExists(const TransArgs &args); -} // namespace formats -} // namespace ge -#endif // INC_REGISTER_REGISTER_FORMAT_TRANSFER_H_ \ No newline at end of file diff --git a/inc/metadef/inc/register/scope/scope_graph_impl.h b/inc/metadef/inc/register/scope/scope_graph_impl.h deleted file mode 100644 index c28f9a163..000000000 --- a/inc/metadef/inc/register/scope/scope_graph_impl.h +++ /dev/null @@ -1,198 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef REGISTER_SCOPE_SCOPE_GRAPH_IMPL_H_ -#define REGISTER_SCOPE_SCOPE_GRAPH_IMPL_H_ - -#include "external/register/scope/scope_fusion_pass_register.h" -#include "graph/operator_factory.h" -#include "proto/tensorflow/graph.pb.h" -#include "proto/tensorflow/node_def.pb.h" -#include "graph/utils/type_utils.h" - -namespace ge { -using FusionInnerNodesInfo = std::vector>, // inputs - std::vector>, // outputs - const ge::Operator *>>; // operator - -class Scope::ScopeImpl { - public: - ScopeImpl() : father_scope_(nullptr) {} - Status Init(const std::string &name, const std::string &sub_type = "", Scope *father_scope = nullptr); - ~ScopeImpl(); - - const std::string &Name() const { return name_; } - const std::string &SubType() const { return sub_type_; } - void SetSubType(const std::string &sub_type) { sub_type_ = sub_type; } - void ClearTypeAndSubType(); - void AddNode(ge::OperatorPtr &node_def); - const std::vector &Nodes() const { return nodes_; } - const std::unordered_map &AllNodesMap(); - const std::map &AllNodesMapNew(); - void AddSubScope(Scope *scope) { sub_scopes_[scope->Name()] = scope; } - Scope *GetSubScope(const std::string &scope_name) const; - const std::unordered_map &GetSubScopes() const { return sub_scopes_; } - const std::vector &GetAllSubScopes(); - int32_t GetOpTypeNum(const std::string &op_type) const; - void OpsNumInc(const std::string &op_type); - const std::string LastName() const; - const Scope *GetFatherScope() const { return father_scope_; } - // trim scope_index - static std::string TrimScopeIndex(const std::string &scope_name); - - private: - std::string name_; - std::string sub_type_; - Scope *father_scope_; - std::map op_nums_; - std::unordered_map sub_scopes_; - std::vector nodes_; - std::unordered_map all_nodes_map_; - std::map all_nodes_map_new_; - std::vector all_sub_scopes_; -}; - -class FusionScopesResult::InnerNodeInfo::InnerNodeInfoImpl { - public: - explicit InnerNodeInfoImpl(const std::string &fusion_node_name) : fusion_node_name_(fusion_node_name) {} - InnerNodeInfoImpl(const std::string &fusion_node_name, const std::string &name, const std::string &type) - : fusion_node_name_(fusion_node_name), name_(name), type_(type) { - SetName(name); - } - ~InnerNodeInfoImpl(); - std::string GetFullNodeName(const std::string &relative_name); - void SetName(const std::string &name) { name_ = GetFullNodeName(name); } - void SetType(const std::string &type) { type_ = type; } - void InsertInput(const std::string &input_node, int32_t peer_out_idx); - void InsertOutput(const std::string &output_node, int32_t peer_in_idx); - ge::graphStatus BuildOperator(); - ge::graphStatus SetInputFormat(const std::string &input_name, const std::string &format) ; - ge::graphStatus SetOutputFormat(const std::string &output_name, const std::string &format); - ge::graphStatus SetDynamicInputFormat(const std::string &input_name, uint32_t index, const std::string &format); - ge::graphStatus SetDynamicOutputFormat(const std::string &output_name, uint32_t index, const std::string &format); - std::string GetName() const { return name_; } - std::string GetType() const { return type_; } - std::vector> GetInputs() const { return inner_node_inputs_; } - std::vector> GetOutputs() const { return inner_node_outputs_; } - ge::Operator *MutableOperator() { return &operator_; } - - public: - ge::Operator operator_; - private: - std::string fusion_node_name_; - std::string name_; - std::string type_; - std::vector> inner_node_inputs_; - std::vector> inner_node_outputs_; -}; - -class 
FusionScopesResult::FusionScopesResultImpl { - public: - FusionScopesResultImpl() {} - ~FusionScopesResultImpl(){}; - void SetName(const std::string &name) { name_ = name; } - void SetType(const std::string &type) { type_ = type; } - void SetDescription(const std::string &description) { description_ = description; } - const std::string &Name() const { return name_; } - const std::string &Type() const { return type_; } - const std::string &Description() const { return description_; } - void AddNodes(std::vector nodes); - const std::vector &Nodes() const { return nodes_; } - void AddScopes(const std::vector &scopes) { scopes_.insert(scopes_.end(), scopes.begin(), scopes.end()); } - const std::vector &Scopes() const { return scopes_; } - const std::map> &GetInputs() const { return inputs_; } - const std::map> &GetOutputs() const { return outputs_; } - void InsertInputs(const std::string &inner_op_name, const std::vector &index_map); - void InsertOutputs(const std::string &inner_op_name, const std::vector &index_map); - bool FindNodes(const std::string &node_name) const; - bool FindScopes(const std::string &scope_name) const; - - InnerNodeInfo *AddInnerNode(const std::string &name, const std::string &type); - InnerNodeInfo *MutableRecentInnerNode(); - InnerNodeInfo *MutableInnerNode(uint32_t index); - FusionInnerNodesInfo GetInnerNodesInfo(); - ge::graphStatus CheckInnerNodesInfo(); - - private: - std::string name_; - std::string type_; - std::string description_; - std::vector scopes_; - std::vector nodes_; - std::map> inputs_; - std::map> outputs_; - std::vector inner_node_infos_; -}; - -class ScopeTree::ScopeTreeImpl { - public: - ScopeTreeImpl() : root_(nullptr) {} - ScopeTreeImpl(const ScopeTreeImpl &) = delete; - ScopeTreeImpl &operator=(const ScopeTreeImpl &) = delete; - Status Init(); - ~ScopeTreeImpl(); - - void AddNodeToScope(ge::OperatorPtr &node_def); - const std::vector &GetAllScopes() const { return scopes_; } - const Scope *Root() const { return root_; } - - private: - std::vector SplitNodeName(const std::string &node_name, char delim) const; - Scope *root_; - std::vector scopes_; -}; - -struct ScopeFusionOpInfo { - std::string node_name; - std::string fusion_node_name; - std::string fusion_op_type; - std::string description; - bool scope_pass = true; -}; - -class ScopeGraph::ScopeGraphImpl { - public: - ScopeGraphImpl() : scope_tree_(nullptr) {} - ScopeGraphImpl(const ScopeGraphImpl &) = delete; - ScopeGraphImpl &operator=(const ScopeGraphImpl &) = delete; - Status Init(); - ~ScopeGraphImpl(); - - const ScopeTree *GetScopeTree() const { return scope_tree_; } - void BuildScopeGraph(domi::tensorflow::GraphDef *graph_def); - void AddFusionScopesResult(FusionScopesResult *result); - const std::unordered_map &FusionScopesResults() const { return fusion_results_; } - FusionScopesResult *GetFusionScopesResults(const domi::tensorflow::NodeDef *node_def) const; - FusionScopesResult *GetFusionScopesResults(const std::string &node_name) const; - const std::unordered_map &GetNodesMap() const { return nodes_map_; } - const std::map &GetNodesMapNew() const { return nodes_map_new_; } - bool IsFusionOpChild(const std::string &node_name, std::vector &info_list); - bool FusionOpChildIgnore(const ScopeFusionOpInfo &info); - bool IsFusionOp(const domi::tensorflow::NodeDef *node_def); - Status GetInputOrOutputIndex(const ScopeFusionOpInfo &info, int32_t old_index, bool input, int32_t &new_index); - - private: - std::vector GetFusionResultInputOrOutput(const ScopeFusionOpInfo &info, - bool input); 
// input:true,output:false - void CheckScopesResult(FusionScopesResult *fusion_node); - std::unordered_map fusion_results_; - std::unordered_map nodes_map_; - std::map nodes_map_new_; - ScopeTree *scope_tree_; -}; -} // namespace ge -#endif // REGISTER_SCOPE_SCOPE_GRAPH_IMPL_H_ diff --git a/inc/metadef/inc/register/scope/scope_pass_impl.h b/inc/metadef/inc/register/scope/scope_pass_impl.h deleted file mode 100644 index ef2d97c64..000000000 --- a/inc/metadef/inc/register/scope/scope_pass_impl.h +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef REGISTER_SCOPE_SCOPE_PASS_IMPL_H_ -#define REGISTER_SCOPE_SCOPE_PASS_IMPL_H_ - -#include "external/register/scope/scope_fusion_pass_register.h" - -namespace ge { -class ScopesResult::ScopesResultImpl { - public: - void SetScopes(const std::vector &scopes) { scopes_ = scopes; } - const std::vector &GetScopes() const { return scopes_; } - void SetNodes(const std::vector &nodes) { nodes_ = nodes; } - const std::vector &GetNodes() const { return nodes_; } - - private: - std::vector scopes_; // multiple scopes - std::vector nodes_; // op outside of scope -}; - -class ScopeBasePass::ScopeBasePassImpl { - public: - ScopeBasePassImpl(ScopeBasePass *parent) : parent_(parent) {} - virtual ~ScopeBasePassImpl(); - - Status Run(std::shared_ptr &scope_graph); - - private: - Status AddFusionScopesResultToScopeGraph(std::shared_ptr &scope_graph, - std::vector &scope_results); - // Match rules one by one, support multiple sets of matching rules, and finally output a single scope - // Note: This function does not have to be rewritten. - // In order to match the fusion rules designed by you better, - // you can implement your specific versions separately. - bool MatchAllBatches(const ScopeTree *scope_tree, std::vector &results); - - bool MatchOneBatch(const ScopeTree *scope_tree, const std::vector &patternlist, - std::vector &results); - bool MatchOneScope(const ScopePattern *pattern, Scope *scope, std::vector &results); - Status PrintFusionScopeInfo(std::shared_ptr &scope_graph); - - private: - std::vector patterns_; - ScopeBasePass *parent_; -}; -} // namespace ge -#endif // REGISTER_SCOPE_SCOPE_PASS_IMPL_H_ \ No newline at end of file diff --git a/inc/metadef/inc/register/scope/scope_pass_registry_impl.h b/inc/metadef/inc/register/scope/scope_pass_registry_impl.h deleted file mode 100644 index 9e68dba06..000000000 --- a/inc/metadef/inc/register/scope/scope_pass_registry_impl.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef REGISTER_SCOPE_SCOPE_REGISTRY_IMPL_H_ -#define REGISTER_SCOPE_SCOPE_REGISTRY_IMPL_H_ - -#include "external/register/scope/scope_fusion_pass_register.h" -#include - -namespace ge { -struct CreatePassFnPack; -class ScopeFusionPassRegistry::ScopeFusionPassRegistryImpl { - public: - void RegisterScopeFusionPass(const std::string &pass_name, ScopeFusionPassRegistry::CreateFn create_fn, - bool is_general); - ScopeFusionPassRegistry::CreateFn GetCreateFn(const std::string &pass_name); - std::unique_ptr CreateScopeFusionPass(const std::string &pass_name); - std::vector GetAllRegisteredPasses(); - bool SetPassEnableFlag(const std::string pass_name, const bool flag); - - private: - std::mutex mu_; - std::vector pass_names_; // In the order of user registration - std::map create_fn_packs_; -}; -} // namespace ge -#endif // REGISTER_SCOPE_SCOPE_REGISTRY_IMPL_H_ \ No newline at end of file diff --git a/inc/metadef/inc/register/scope/scope_pattern_impl.h b/inc/metadef/inc/register/scope/scope_pattern_impl.h deleted file mode 100644 index 7f0445ef3..000000000 --- a/inc/metadef/inc/register/scope/scope_pattern_impl.h +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef REGISTER_SCOPE_SCOPE_PATTERN_IMPL_H_ -#define REGISTER_SCOPE_SCOPE_PATTERN_IMPL_H_ - -#include "external/register/scope/scope_fusion_pass_register.h" - -namespace ge { -class ScopeAttrValue::ScopeAttrValueImpl { - public: - ScopeAttrValueImpl() : int_value_(0), float_value_(0.0), string_value_(""), bool_value_(false) {} - ~ScopeAttrValueImpl() {} - - void SetIntValue(const int64_t &value) { int_value_ = value; } - void SetFloatValue(const float &value) { float_value_ = value; } - void SetStringValue(const std::string &value) { string_value_ = value; } - void SetBoolValue(const bool &value) { bool_value_ = value; } - const int64_t &GetIntValue() const { return int_value_; } - const float &GetFloatValue() const { return float_value_; } - const std::string &GetStrValue() const { return string_value_; } - const bool &GetBoolValue() const { return bool_value_; } - - private: - int64_t int_value_; - float float_value_; - std::string string_value_; - bool bool_value_; -}; - -class NodeOpTypeFeature::NodeOpTypeFeatureImpl : ScopeBaseFeature { - public: - NodeOpTypeFeatureImpl(std::string nodeType, int num, int step = 0) - : node_type_(nodeType), num_(num), step_(step) {} - ~NodeOpTypeFeatureImpl() {} - bool Match(const Scope *scope) override; - - public: - std::string node_type_; // Node type - int num_; // Node number - int step_; // step -}; - -class NodeAttrFeature::NodeAttrFeatureImpl : ScopeBaseFeature { - public: - NodeAttrFeatureImpl(std::string nodeType, std::string attr_name, ge::DataType datatype, ScopeAttrValue &attr_value) - : node_type_(nodeType), attr_name_(attr_name), datatype_(datatype), attr_value_(attr_value) {} - ~NodeAttrFeatureImpl() {} - bool Match(const Scope *scope) override; - - public: - std::string node_type_; // Node type - std::string attr_name_; // attribute name - ge::DataType datatype_; // datatype - ScopeAttrValue attr_value_; // AttrValue -}; - -class ScopeFeature::ScopeFeatureImpl : ScopeBaseFeature { - public: - ScopeFeatureImpl(std::string sub_type, int32_t num, std::string suffix = "", - std::string sub_scope_mask = "", int step = 0) - : sub_type_(sub_type), num_(num), suffix_(suffix), sub_scope_mask_(sub_scope_mask), step_(step) {} - ~ScopeFeatureImpl() {} - bool Match(const Scope *scope) override; - bool SubScopesMatch(const std::vector &scopes); - - public: - std::string sub_type_; - int32_t num_; - std::string suffix_; - std::string sub_scope_mask_; - int step_; -}; - -class ScopePattern::ScopePatternImpl { - public: - ScopePatternImpl() {} - ~ScopePatternImpl() {} - bool Match(const Scope *scope) const; - void SetSubType(const std::string &sub_type); - const std::string &SubType() const { return sub_type_; } - void AddNodeOpTypeFeature(NodeOpTypeFeature &feature); - void AddNodeAttrFeature(NodeAttrFeature &feature); - void AddScopeFeature(ScopeFeature &feature); - - private: - std::string sub_type_; // get Scope sub type - std::vector node_optype_features_; - std::vector node_attr_features_; - std::vector scopes_features_; -}; -} // namespace ge -#endif // REGISTER_SCOPE_SCOPE_PATTERN_IMPL_H_ \ No newline at end of file diff --git a/inc/metadef/inc/register/tensor_assign.h b/inc/metadef/inc/register/tensor_assign.h deleted file mode 100644 index 57a37f6c2..000000000 --- a/inc/metadef/inc/register/tensor_assign.h +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TENSOR_ASSIGN_H_ -#define TENSOR_ASSIGN_H_ - -#include "graph/ge_tensor.h" -#include "proto/tensorflow/tensor.pb.h" - -namespace domi { -using GeTensorPtr = std::shared_ptr; -using Status = uint32_t; -using domi::tensorflow::TensorProto; -using google::protobuf::int32; -using google::protobuf::int64; - -class TensorAssign { - public: - static Status SetGeTensor(const TensorProto &tensor, GeTensorPtr &weight); - - static Status SetGeTensorDataType(int64_t dataType, GeTensorPtr &weight); - - static ge::DataType ConvertTensorflowDataType(uint32_t tf_data_type); - - private: - static bool CheckBoolVal(tensorflow::DataType data_type); - - static bool CheckHalfVal(tensorflow::DataType data_type); - - static bool CheckFloatVal(tensorflow::DataType data_type); - - static bool CheckDoubleVal(tensorflow::DataType data_type); - - static bool CheckComplex64Val(tensorflow::DataType data_type); - - static bool CheckComplex128Val(tensorflow::DataType data_type); - - static bool CheckStringVal(tensorflow::DataType data_type); - - static bool CheckByte(tensorflow::DataType data_type); - - static bool CheckDoubleByte(tensorflow::DataType data_type); - - static bool CheckSignedFourByte(tensorflow::DataType data_type); - - static bool CheckUnsignedFourByte(tensorflow::DataType data_type); - - static bool CheckSignedEightByte(tensorflow::DataType data_type); - - static bool CheckUnsignedEightByte(tensorflow::DataType data_type); - - static Status GetDoubleByteVal(int32_t val_size, const google::protobuf::RepeatedField &val_vector, int count, - GeTensorPtr &weight); - static Status GetByteVal(int32_t val_size, const google::protobuf::RepeatedField &val_vector, int count, - GeTensorPtr &weight); - - static Status GetStringVal(int32_t val_size, const google::protobuf::RepeatedPtrField &val_vector, - int count, GeTensorPtr &weight); - - static void SetGeTensorWeightData(const TensorProto &tensor, int32_t val_size, int count, GeTensorPtr &weight); - - static void SetWeightData(tensorflow::DataType data_type, int count, const std::string &tensor_content, - GeTensorPtr &weight); - - template - static Status GetVal(int32_t val_size, const google::protobuf::RepeatedField &val_vector, int count, - GeTensorPtr &weight) { - bool zerosLike = (count != val_size && val_size == 1); - T *addr = new (std::nothrow) T[count](); - GE_CHECK_NOTNULL(addr); - int minCount = (count > val_size) ? 
val_size : count; - if (!zerosLike) { - for (int32_t i = 0; i < minCount; i++) { - *(addr + i) = val_vector.Get(i); - } - for (int32_t i = minCount; i < count; i++) { - *(addr + i) = val_vector.Get(minCount - 1); - } - } else { - for (int32_t i = 0; i < count; i++) { - *(addr + i) = val_vector.Get(0); - } - } - (void)weight->SetData(reinterpret_cast(addr), count * sizeof(T)); - GE_DELETE_NEW_ARRAY(addr); - return SUCCESS; - } -}; -} // namespace domi -#endif // TENSOR_ASSIGN_H_ diff --git a/inc/mmpa/mmpa_api.h b/inc/mmpa/mmpa_api.h deleted file mode 100644 index ef7af4153..000000000 --- a/inc/mmpa/mmpa_api.h +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef _MMPA_API_H_ -#define _MMPA_API_H_ - -#define LINUX 0 -#define WIN 1 - -#if(OS_TYPE == LINUX) //lint !e553 - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - -#ifdef FUNC_VISIBILITY -#define MMPA_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define MMPA_FUNC_VISIBILITY -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "securec.h" - -#include "./sub_inc/mmpa_typedef_linux.h" -#include "./sub_inc/mmpa_linux.h" - -#endif - - -#if(OS_TYPE == WIN) // lint !e553 - -#ifdef FUNC_VISIBILITY -#define MMPA_FUNC_VISIBILITY _declspec(dllexport) -#else -#define MMPA_FUNC_VISIBILITY -#endif - -#include -#include -#include "Windows.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "shlwapi.h" -#include -#include -#include -#include -#include -#include -#include -#include - -#include "securec.h" - -#include "sub_inc/mmpa_typedef_win.h" -#include "sub_inc/mmpa_win.h" - -#pragma comment(lib, "ws2_32.lib") -#pragma comment(lib, "mswsock.lib") -#pragma comment(lib, "Kernel32.lib") -#pragma comment(lib, "shlwapi.lib") -#pragma comment(lib, "wbemuuid.lib") -#pragma comment(lib, "Iphlpapi.lib") -#endif - -#endif // MMPA_API_H_ - diff --git a/inc/mmpa/sub_inc/mmpa_linux.h b/inc/mmpa/sub_inc/mmpa_linux.h deleted file mode 100644 index 993f36ba9..000000000 --- a/inc/mmpa/sub_inc/mmpa_linux.h +++ /dev/null @@ -1,561 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MMPA_LINUX_MMPA_LINUX_H -#define MMPA_LINUX_MMPA_LINUX_H - -#ifdef __cplusplus -#if __cplusplus -extern "C" { -#endif // __cpluscplus -#endif // __cpluscplus - -#define MMPA_MACINFO_DEFAULT_SIZE 18 -#define MMPA_CPUDESC_DEFAULT_SIZE 64 - -typedef pthread_t mmThread; -typedef pthread_mutex_t mmMutex_t; -typedef pthread_cond_t mmCond; -typedef pthread_mutex_t mmMutexFC; -typedef pthread_rwlock_t mmRWLock_t; -typedef signed int mmProcess; -typedef int mmPollHandle; -typedef int mmPipeHandle; -typedef int mmFileHandle; -typedef int mmComPletionKey; -typedef int mmCompletionHandle; -typedef int mmErrorMsg; -typedef int mmFd_t; - -typedef VOID *mmExitCode; -typedef key_t mmKey_t; -typedef int mmMsgid; -typedef struct dirent mmDirent; -typedef struct dirent mmDirent2; -typedef struct shmid_ds mmshmId_ds; -typedef int (*mmFilter)(const mmDirent *entry); -typedef int (*mmFilter2)(const mmDirent2 *entry); -typedef int (*mmSort)(const mmDirent **a, const mmDirent **b); -typedef int (*mmSort2)(const mmDirent2 **a, const mmDirent2 **b); -typedef size_t mmSize_t; //lint !e410 !e1051 -typedef off_t mmOfft_t; -typedef pid_t mmPid_t; -typedef long MM_LONG; - -typedef VOID *(*userProcFunc)(VOID *pulArg); - -typedef struct { - userProcFunc procFunc; // Callback function pointer - VOID *pulArg; // Callback function parameters -} mmUserBlock_t; - -typedef struct { - const char *dli_fname; - void *dli_fbase; - const char *dli_sname; - void *dli_saddr; - size_t dli_size; /* ELF only */ - int dli_bind; /* ELF only */ - int dli_type; -} mmDlInfo; - -typedef struct { - int wSecond; // Seconds. [0-60] (1 leap second) - int wMinute; // Minutes. [0-59] - int wHour; // Hours. [0-23] - int wDay; // Day. [1-31] - int wMonth; // Month. [1-12] - int wYear; // Year - int wDayOfWeek; // Day of week. [0-6] - int tm_yday; // Days in year.[0-365] - int tm_isdst; // DST. 
[-1/0/1] - long int wMilliseconds; // milliseconds -} mmSystemTime_t; - -typedef sem_t mmSem_t; -typedef struct sockaddr mmSockAddr; -typedef socklen_t mmSocklen_t; -typedef int mmSockHandle; -typedef timer_t mmTimer; -typedef pthread_key_t mmThreadKey; - -typedef int mmOverLap; - -typedef ssize_t mmSsize_t; -typedef size_t mmSize; // size - -typedef struct { - UINT32 createFlag; - INT32 oaFlag; -} mmCreateFlag; - -typedef struct { - VOID *sendBuf; - INT32 sendLen; -} mmIovSegment; -typedef struct in_addr mmInAddr; - -typedef struct { - VOID *inbuf; - INT32 inbufLen; - VOID *outbuf; - INT32 outbufLen; - mmOverLap *oa; -} mmIoctlBuf; - -typedef int mmAtomicType; -typedef int mmAtomicType64; - -typedef enum { - pollTypeRead = 1, // pipe read - pollTypeRecv, // socket recv - pollTypeIoctl, // ioctl -} mmPollType; - -typedef struct { - mmPollHandle handle; // The file descriptor or handle of poll is required - mmPollType pollType; // Operation type requiring poll - // read or recv or ioctl - INT32 ioctlCode; // IOCTL operation code, dedicated to IOCTL - mmComPletionKey completionKey; // The default value is blank, which is used in windows - // The data used to receive the difference between which handle is readable -} mmPollfd; - -typedef struct { - VOID *priv; // User defined private content - mmPollHandle bufHandle; // Value of handle corresponding to buf - mmPollType bufType; // Data types polled to - VOID *buf; // Data used in poll - UINT32 bufLen; // Data length used in poll - UINT32 bufRes; // Actual return length -} mmPollData, *pmmPollData; - -typedef VOID (*mmPollBack)(pmmPollData); - -typedef struct { - INT32 tz_minuteswest; // How many minutes is it different from Greenwich - INT32 tz_dsttime; // type of DST correction -} mmTimezone; - -typedef struct { - LONG tv_sec; - LONG tv_usec; -} mmTimeval; - -typedef struct { - MM_LONG tv_sec; - MM_LONG tv_nsec; -} mmTimespec; - -typedef struct { - ULONGLONG totalSize; - ULONGLONG freeSize; - ULONGLONG availSize; -} mmDiskSize; - -#define mmTLS __thread -typedef struct stat mmStat_t; -typedef struct stat64 mmStat64_t; -typedef mode_t mmMode_t; - -typedef struct option mmStructOption; - -typedef struct { - char addr[MMPA_MACINFO_DEFAULT_SIZE]; // ex:aa-bb-cc-dd-ee-ff\0 -} mmMacInfo; - -typedef struct { - char **argv; - INT32 argvCount; - char **envp; - INT32 envpCount; -} mmArgvEnv; - -typedef struct { - char arch[MMPA_CPUDESC_DEFAULT_SIZE]; - char manufacturer[MMPA_CPUDESC_DEFAULT_SIZE]; // vendor - char version[MMPA_CPUDESC_DEFAULT_SIZE]; // modelname - INT32 frequency; // cpu frequency - INT32 maxFrequency; // max speed - INT32 ncores; // cpu cores - INT32 nthreads; // cpu thread count - INT32 ncounts; // logical cpu nums -} mmCpuDesc; - -typedef mode_t MODE; - -typedef struct { - INT32 detachFlag; // Determine whether to set separation property 0, not to separate 1 - INT32 priorityFlag; // Determine whether to set priority 0 and not set 1 - INT32 priority; // Priority value range to be set 1-99 - INT32 policyFlag; // Set scheduling policy or not 0 do not set 1 setting - INT32 policy; // Scheduling policy value value - // MMPA_THREAD_SCHED_RR - // MMPA_THREAD_SCHED_OTHER - // MMPA_THREAD_SCHED_FIFO - INT32 stackFlag; // Set stack size or not: 0 does not set 1 setting - UINT32 stackSize; // The stack size unit bytes to be set cannot be less than MMPA_THREAD_STACK_MIN -} mmThreadAttr; - -#ifdef __ANDROID__ -#define S_IREAD S_IRUSR -#define S_IWRITE S_IWUSR -#endif - -#define mm_no_argument no_argument -#define mm_required_argument 
required_argument -#define mm_optional_argument optional_argument - -#define M_FILE_RDONLY O_RDONLY -#define M_FILE_WRONLY O_WRONLY -#define M_FILE_RDWR O_RDWR -#define M_FILE_CREAT O_CREAT - -#define M_RDONLY O_RDONLY -#define M_WRONLY O_WRONLY -#define M_RDWR O_RDWR -#define M_CREAT O_CREAT -#define M_BINARY O_RDONLY -#define M_TRUNC O_TRUNC -#define M_IRWXU S_IRWXU -#define M_APPEND O_APPEND - -#define M_IN_CREATE IN_CREATE -#define M_IN_CLOSE_WRITE IN_CLOSE_WRITE -#define M_IN_IGNORED IN_IGNORED - -#define M_OUT_CREATE IN_CREATE -#define M_OUT_CLOSE_WRITE IN_CLOSE_WRITE -#define M_OUT_IGNORED IN_IGNORED -#define M_OUT_ISDIR IN_ISDIR - -#define M_IREAD S_IREAD -#define M_IRUSR S_IRUSR -#define M_IWRITE S_IWRITE -#define M_IWUSR S_IWUSR -#define M_IXUSR S_IXUSR -#define FDSIZE 64 -#define M_MSG_CREAT IPC_CREAT -#define M_MSG_EXCL (IPC_CREAT | IPC_EXCL) -#define M_MSG_NOWAIT IPC_NOWAIT - -#define M_WAIT_NOHANG WNOHANG // Non blocking waiting -#define M_WAIT_UNTRACED \ - WUNTRACED // If the subprocess enters the suspended state, it will return immediately - // But the end state of the subprocess is ignored -#define M_UMASK_USRREAD S_IRUSR -#define M_UMASK_GRPREAD S_IRGRP -#define M_UMASK_OTHREAD S_IROTH - -#define M_UMASK_USRWRITE S_IWUSR -#define M_UMASK_GRPWRITE S_IWGRP -#define M_UMASK_OTHWRITE S_IWOTH - -#define M_UMASK_USREXEC S_IXUSR -#define M_UMASK_GRPEXEC S_IXGRP -#define M_UMASK_OTHEXEC S_IXOTH - -#define mmConstructor(x) __attribute__((constructor)) VOID x() -#define mmDestructor(x) __attribute__((destructor)) VOID x() - -#define MMPA_NO_ARGUMENT 0 -#define MMPA_REQUIRED_ARGUMENT 1 -#define MMPA_OPTIONAL_ARGUMENT 2 - -#define MMPA_MAX_PATH PATH_MAX -#define M_NAME_MAX MAX_FNAME - -#define M_F_OK F_OK -#define M_X_OK X_OK -#define M_W_OK W_OK -#define M_R_OK R_OK - - -#define MM_DT_DIR DT_DIR -#define MM_DT_REG DT_REG - -#define MMPA_STDIN STDIN_FILENO -#define MMPA_STDOUT STDOUT_FILENO -#define MMPA_STDERR STDERR_FILENO - -#define MMPA_RTLD_NOW RTLD_NOW -#define MMPA_RTLD_GLOBAL RTLD_GLOBAL -#define MMPA_RTLD_LAZY RTLD_LAZY -#define MMPA_RTLD_NODELETE RTLD_NODELETE - -#define MMPA_DL_EXT_NAME ".so" - -MMPA_FUNC_VISIBILITY INT32 mmCreateTask(mmThread *threadHandle, mmUserBlock_t *funcBlock); -MMPA_FUNC_VISIBILITY INT32 mmJoinTask(mmThread *threadHandle); -MMPA_FUNC_VISIBILITY INT32 mmMutexInit(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmMutexLock(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmMutexTryLock(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmMutexUnLock(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmMutexDestroy(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondInit(mmCond *cond); -MMPA_FUNC_VISIBILITY INT32 mmCondLockInit(mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondLock(mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondUnLock(mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondLockDestroy(mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmRWLockInit(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockRDLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockTryRDLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockWRLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockTryWRLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRDLockUnLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmWRLockUnLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockDestroy(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmCondWait(mmCond *cond, mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondTimedWait(mmCond *cond, 
mmMutexFC *mutex, UINT32 milliSecond); -MMPA_FUNC_VISIBILITY INT32 mmCondNotify(mmCond *cond); -MMPA_FUNC_VISIBILITY INT32 mmCondNotifyAll(mmCond *cond); -MMPA_FUNC_VISIBILITY INT32 mmCondDestroy(mmCond *cond); -MMPA_FUNC_VISIBILITY INT32 mmGetPid(); -MMPA_FUNC_VISIBILITY INT32 mmGetTid(); -MMPA_FUNC_VISIBILITY INT32 mmGetPidHandle(mmProcess *processHandle); -MMPA_FUNC_VISIBILITY INT32 mmGetLocalTime(mmSystemTime_t *sysTime); -MMPA_FUNC_VISIBILITY INT32 mmGetSystemTime(mmSystemTime_t *sysTime); - -MMPA_FUNC_VISIBILITY INT32 mmSemInit(mmSem_t *sem, UINT32 value); -MMPA_FUNC_VISIBILITY INT32 mmSemWait(mmSem_t *sem); -MMPA_FUNC_VISIBILITY INT32 mmSemPost(mmSem_t *sem); -MMPA_FUNC_VISIBILITY INT32 mmSemDestroy(mmSem_t *sem); -MMPA_FUNC_VISIBILITY INT32 mmOpen(const CHAR *pathName, INT32 flags); -MMPA_FUNC_VISIBILITY INT32 mmOpen2(const CHAR *pathName, INT32 flags, MODE mode); -MMPA_FUNC_VISIBILITY FILE *mmPopen(CHAR *command, CHAR *type); -MMPA_FUNC_VISIBILITY INT32 mmClose(INT32 fd); -MMPA_FUNC_VISIBILITY INT32 mmPclose(FILE *stream); -MMPA_FUNC_VISIBILITY mmSsize_t mmWrite(INT32 fd, VOID *buf, UINT32 bufLen); -MMPA_FUNC_VISIBILITY mmSsize_t mmRead(INT32 fd, VOID *buf, UINT32 bufLen); -MMPA_FUNC_VISIBILITY mmSockHandle mmSocket(INT32 sockFamily, INT32 type, INT32 protocol); -MMPA_FUNC_VISIBILITY INT32 mmBind(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t addrLen); -MMPA_FUNC_VISIBILITY INT32 mmListen(mmSockHandle sockFd, INT32 backLog); -MMPA_FUNC_VISIBILITY mmSockHandle mmAccept(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t *addrLen); -MMPA_FUNC_VISIBILITY INT32 mmConnect(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t addrLen); -MMPA_FUNC_VISIBILITY INT32 mmCloseSocket(mmSockHandle sockFd); -MMPA_FUNC_VISIBILITY mmSsize_t mmSocketSend(mmSockHandle sockFd, VOID *sendBuf, INT32 sendLen, INT32 sendFlag); -MMPA_FUNC_VISIBILITY mmSsize_t mmSocketRecv(mmSockHandle sockFd, VOID *recvBuf, INT32 recvLen, INT32 recvFlag); -MMPA_FUNC_VISIBILITY INT32 mmSocketSendTo(mmSockHandle sockFd, - VOID *sendMsg, - INT32 sendLen, - UINT32 sendFlag, - const mmSockAddr* addr, - INT32 tolen); -MMPA_FUNC_VISIBILITY mmSsize_t mmSocketRecvFrom(mmSockHandle sockFd, - VOID *recvBuf, - mmSize recvLen, - UINT32 recvFlag, - mmSockAddr* addr, - mmSocklen_t *FromLen); -MMPA_FUNC_VISIBILITY INT32 mmSAStartup(); -MMPA_FUNC_VISIBILITY INT32 mmSACleanup(); -MMPA_FUNC_VISIBILITY VOID *mmDlopen(const CHAR *fileName, INT32 mode); -MMPA_FUNC_VISIBILITY INT32 mmDladdr(VOID *addr, mmDlInfo *info); -MMPA_FUNC_VISIBILITY VOID *mmDlsym(VOID *handle, const CHAR *funcName); -MMPA_FUNC_VISIBILITY INT32 mmDlclose(VOID *handle); -MMPA_FUNC_VISIBILITY CHAR *mmDlerror(); -MMPA_FUNC_VISIBILITY INT32 mmCreateAndSetTimer(mmTimer *timerHandle, - mmUserBlock_t *timerBlock, - UINT milliSecond, - UINT period); -MMPA_FUNC_VISIBILITY INT32 mmDeleteTimer(mmTimer timerHandle); -MMPA_FUNC_VISIBILITY INT32 mmStatGet(const CHAR *path, mmStat_t *buffer); -MMPA_FUNC_VISIBILITY INT32 mmStat64Get(const CHAR *path, mmStat64_t *buffer); -MMPA_FUNC_VISIBILITY INT32 mmFStatGet(INT32 fd, mmStat_t *buffer); -MMPA_FUNC_VISIBILITY INT32 mmMkdir(const CHAR *pathName, mmMode_t mode); -MMPA_FUNC_VISIBILITY INT32 mmSleep(UINT32 milliSecond); - -MMPA_FUNC_VISIBILITY INT32 mmCreateTaskWithAttr(mmThread *threadHandle, mmUserBlock_t *funcBlock); -MMPA_FUNC_VISIBILITY INT32 mmGetProcessPrio(mmProcess pid); -MMPA_FUNC_VISIBILITY INT32 mmSetProcessPrio(mmProcess pid, INT32 processPrio); -MMPA_FUNC_VISIBILITY INT32 mmGetThreadPrio(mmThread *threadHandle); 
-MMPA_FUNC_VISIBILITY INT32 mmSetThreadPrio(mmThread *threadHandle, INT32 threadPrio); -MMPA_FUNC_VISIBILITY INT32 mmAccess(const CHAR *pathName); -MMPA_FUNC_VISIBILITY INT32 mmAccess2(const CHAR *pathName, INT32 mode); -MMPA_FUNC_VISIBILITY INT32 mmRmdir(const CHAR *pathName); - -MMPA_FUNC_VISIBILITY INT32 mmIoctl(mmProcess fd, INT32 ioctlCode, mmIoctlBuf *bufPtr); -MMPA_FUNC_VISIBILITY INT32 mmSemTimedWait(mmSem_t *sem, INT32 timeout); -MMPA_FUNC_VISIBILITY mmSsize_t mmWritev(mmProcess fd, mmIovSegment *iov, INT32 iovcnt); -MMPA_FUNC_VISIBILITY VOID mmMb(); -MMPA_FUNC_VISIBILITY INT32 mmInetAton(const CHAR *addrStr, mmInAddr *addr); - -MMPA_FUNC_VISIBILITY mmProcess mmOpenFile(const CHAR *fileName, UINT32 access, mmCreateFlag fileFlag); -MMPA_FUNC_VISIBILITY mmSsize_t mmReadFile(mmProcess fileId, VOID *buffer, INT32 len); -MMPA_FUNC_VISIBILITY mmSsize_t mmWriteFile(mmProcess fileId, VOID *buffer, INT32 len); -MMPA_FUNC_VISIBILITY INT32 mmCloseFile(mmProcess fileId); - -MMPA_FUNC_VISIBILITY mmAtomicType mmSetData(mmAtomicType *ptr, mmAtomicType value); -MMPA_FUNC_VISIBILITY mmAtomicType mmValueInc(mmAtomicType *ptr, mmAtomicType value); -MMPA_FUNC_VISIBILITY mmAtomicType mmValueSub(mmAtomicType *ptr, mmAtomicType value); -MMPA_FUNC_VISIBILITY mmAtomicType64 mmSetData64(mmAtomicType64 *ptr, mmAtomicType64 value); -MMPA_FUNC_VISIBILITY mmAtomicType64 mmValueInc64(mmAtomicType64 *ptr, mmAtomicType64 value); -MMPA_FUNC_VISIBILITY mmAtomicType64 mmValueSub64(mmAtomicType64 *ptr, mmAtomicType64 value); -MMPA_FUNC_VISIBILITY INT32 mmCreateTaskWithDetach(mmThread *threadHandle, mmUserBlock_t *funcBlock); - -// The following 3 interfaces are to be deleted -MMPA_FUNC_VISIBILITY INT32 mmCreateNamedPipe(mmPipeHandle pipe[], CHAR *pipeName[], INT32 waitMode); -MMPA_FUNC_VISIBILITY INT32 mmOpenNamePipe(mmPipeHandle pipe[], CHAR *pipeName[], INT32 waitMode); -MMPA_FUNC_VISIBILITY VOID mmCloseNamedPipe(mmPipeHandle namedPipe[]); - -MMPA_FUNC_VISIBILITY INT32 mmCreatePipe(mmPipeHandle pipe[], CHAR *pipeName[], UINT32 pipeCount, INT32 waitMode); -MMPA_FUNC_VISIBILITY INT32 mmOpenPipe(mmPipeHandle pipe[], CHAR *pipeName[], UINT32 pipeCount, INT32 waitMode); -MMPA_FUNC_VISIBILITY VOID mmClosePipe(mmPipeHandle pipe[], UINT32 pipeCount); - -// Poll related interface -MMPA_FUNC_VISIBILITY mmCompletionHandle mmCreateCompletionPort(); -MMPA_FUNC_VISIBILITY VOID mmCloseCompletionPort(mmCompletionHandle handle); -MMPA_FUNC_VISIBILITY INT32 mmPoll(mmPollfd *fds, - INT32 fdCount, - INT32 timeout, - mmCompletionHandle handleIOCP, - pmmPollData polledData, - mmPollBack pollBack); -MMPA_FUNC_VISIBILITY INT32 mmGetErrorCode(); -MMPA_FUNC_VISIBILITY CHAR *mmGetErrorFormatMessage(mmErrorMsg errnum, CHAR *buf, mmSize size); -MMPA_FUNC_VISIBILITY INT32 mmGetTimeOfDay(mmTimeval *timeVal, mmTimezone *timeZone); -MMPA_FUNC_VISIBILITY mmTimespec mmGetTickCount(); -MMPA_FUNC_VISIBILITY INT32 mmGetRealPath(CHAR *path, CHAR *realPath); -MMPA_FUNC_VISIBILITY INT32 mmRealPath(const CHAR *path, CHAR *realPath, INT32 realPathLen); - -MMPA_FUNC_VISIBILITY INT32 mmDup2(INT32 oldFd, INT32 newFd); - -MMPA_FUNC_VISIBILITY INT32 mmDup(INT32 fd); - -MMPA_FUNC_VISIBILITY INT32 mmUnlink(const CHAR *filename); - -MMPA_FUNC_VISIBILITY INT32 mmChmod(const CHAR *filename, INT32 mode); - -MMPA_FUNC_VISIBILITY INT32 mmFileno(FILE *stream); - -MMPA_FUNC_VISIBILITY INT32 mmScandir(const CHAR *path, mmDirent ***entryList, mmFilter filterFunc, mmSort sort); -MMPA_FUNC_VISIBILITY INT32 mmScandir2(const CHAR *path, mmDirent2 ***entryList, mmFilter2 
filterFunc, mmSort2 sort); - -MMPA_FUNC_VISIBILITY VOID mmScandirFree(mmDirent **entryList, INT32 count); -MMPA_FUNC_VISIBILITY VOID mmScandirFree2(mmDirent2 **entryList, INT32 count); - -MMPA_FUNC_VISIBILITY mmMsgid mmMsgCreate(mmKey_t key, INT32 msgFlag); - -MMPA_FUNC_VISIBILITY mmMsgid mmMsgOpen(mmKey_t key, INT32 msgFlag); - -MMPA_FUNC_VISIBILITY INT32 mmMsgSnd(mmMsgid msqid, VOID *buf, INT32 bufLen, INT32 msgFlag); - -MMPA_FUNC_VISIBILITY INT32 mmMsgRcv(mmMsgid msqid, VOID *buf, INT32 bufLen, INT32 msgFlag); - -MMPA_FUNC_VISIBILITY INT32 mmMsgClose(mmMsgid msqid); - -MMPA_FUNC_VISIBILITY INT32 mmLocalTimeR(const time_t *timep, struct tm *result); - -MMPA_FUNC_VISIBILITY INT32 mmGetOptErr(); -MMPA_FUNC_VISIBILITY VOID mmSetOptErr(INT32 mmOptErr); -MMPA_FUNC_VISIBILITY INT32 mmGetOptInd(); -MMPA_FUNC_VISIBILITY VOID mmSetOptInd(INT32 mmOptInd); -MMPA_FUNC_VISIBILITY INT32 mmGetOptOpt(); -MMPA_FUNC_VISIBILITY VOID mmSetOpOpt(INT32 mmOptOpt); -MMPA_FUNC_VISIBILITY CHAR *mmGetOptArg(); -MMPA_FUNC_VISIBILITY VOID mmSetOptArg(CHAR *mmOptArg); -MMPA_FUNC_VISIBILITY INT32 mmGetOpt(INT32 argc, char *const *argv, const char *opts); -MMPA_FUNC_VISIBILITY INT32 mmGetOptLong(INT32 argc, - char *const *argv, - const char *opts, - const mmStructOption *longOpts, - INT32 *longIndex); - -MMPA_FUNC_VISIBILITY LONG mmLseek(INT32 fd, INT64 offset, INT32 seekFlag); -MMPA_FUNC_VISIBILITY INT32 mmFtruncate(mmProcess fd, UINT32 length); - -MMPA_FUNC_VISIBILITY INT32 mmTlsCreate(mmThreadKey *key, VOID (*destructor)(VOID *)); -MMPA_FUNC_VISIBILITY INT32 mmTlsSet(mmThreadKey key, const VOID *value); -MMPA_FUNC_VISIBILITY VOID *mmTlsGet(mmThreadKey key); -MMPA_FUNC_VISIBILITY INT32 mmTlsDelete(mmThreadKey key); -MMPA_FUNC_VISIBILITY INT32 mmGetOsType(); - -MMPA_FUNC_VISIBILITY INT32 mmFsync(mmProcess fd); -MMPA_FUNC_VISIBILITY INT32 mmFsync2(INT32 fd); -MMPA_FUNC_VISIBILITY INT32 mmChdir(const CHAR *path); -MMPA_FUNC_VISIBILITY INT32 mmUmask(INT32 pmode); -MMPA_FUNC_VISIBILITY INT32 mmThreadKill(mmThread id); -MMPA_FUNC_VISIBILITY INT32 mmWaitPid(mmProcess pid, INT32 *status, INT32 options); - -MMPA_FUNC_VISIBILITY INT32 mmGetCwd(CHAR *buffer, INT32 maxLen); -MMPA_FUNC_VISIBILITY INT32 mmGetEnv(const CHAR *name, CHAR *value, UINT32 len); -MMPA_FUNC_VISIBILITY INT32 mmSetEnv(const CHAR *name, const CHAR *value, INT32 overwrite); -MMPA_FUNC_VISIBILITY CHAR *mmStrTokR(CHAR *str, const CHAR *delim, CHAR **saveptr); -MMPA_FUNC_VISIBILITY CHAR *mmDirName(CHAR *path); -MMPA_FUNC_VISIBILITY CHAR *mmBaseName(CHAR *path); -MMPA_FUNC_VISIBILITY INT32 mmGetDiskFreeSpace(const char *path, mmDiskSize *diskSize); - -/* - * Function: set the thread name created by mmcreatetask - * Input: pstThreadHandle: thread ID - * name: thread name, the actual length of name must be < MMPA_THREADNAME_SIZE - * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the - * execution failure returns EN_ERROR - */ -MMPA_FUNC_VISIBILITY INT32 mmSetThreadName(mmThread *threadHandle, const CHAR *name); - -/* - * Function: get thread name - * Input: pstThreadHandle: thread ID - * size: Cache length of thread name - * name:User allocated cache for thread name, Cache length must be >= MMPA_THREADNAME_SIZE - * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the - * execution failure returns EN_ERROR - */ -MMPA_FUNC_VISIBILITY INT32 mmGetThreadName(mmThread *threadHandle, CHAR *name, INT32 size); -/* - * Function:Set the thread name of the currently executing thread - 
call inside the thread body - * Input:name:Thread name to be set - * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the - * execution failure returns EN_ERROR - */ -MMPA_FUNC_VISIBILITY INT32 mmSetCurrentThreadName(const CHAR *name); -/* - * Function:Get the thread name of the currently executing thread - in body call - * Input:name:The name of the thread to get, and the cache is allocated by the user,size>=MMPA_THREADNAME_SIZE - * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the - * execution failure returns EN_ERROR - */ -MMPA_FUNC_VISIBILITY INT32 mmGetCurrentThreadName(CHAR *name, INT32 size); -MMPA_FUNC_VISIBILITY INT32 mmGetFileSize(const CHAR *fileName, ULONGLONG *length); -MMPA_FUNC_VISIBILITY INT32 mmIsDir(const CHAR *fileName); -MMPA_FUNC_VISIBILITY INT32 mmGetOsName(CHAR *name, INT32 nameSize); -MMPA_FUNC_VISIBILITY INT32 mmGetOsVersion(CHAR *versionInfo, INT32 versionLength); -MMPA_FUNC_VISIBILITY INT32 mmGetMac(mmMacInfo **list, INT32 *count); -MMPA_FUNC_VISIBILITY INT32 mmGetMacFree(mmMacInfo *list, INT32 count); -MMPA_FUNC_VISIBILITY INT32 mmGetCpuInfo(mmCpuDesc **cpuInfo, INT32 *count); -MMPA_FUNC_VISIBILITY INT32 mmCpuInfoFree(mmCpuDesc *cpuInfo, INT32 count); -MMPA_FUNC_VISIBILITY INT32 mmCreateProcess(const CHAR *fileName, - const mmArgvEnv *env, - const char *stdoutRedirectFile, - mmProcess *id); - -MMPA_FUNC_VISIBILITY INT32 mmCreateTaskWithThreadAttr(mmThread *threadHandle, - const mmUserBlock_t *funcBlock, - const mmThreadAttr *threadAttr); -MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMode_t mode); -MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name); -MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags); -MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra); -#define MMPA_DLL_API - -#ifdef __cplusplus -#if __cplusplus -} -#endif /* __cpluscplus */ -#endif // __cpluscplus - -#endif // MMPA_LINUX_MMPA_LINUX_H_ diff --git a/inc/mmpa/sub_inc/mmpa_typedef_linux.h b/inc/mmpa/sub_inc/mmpa_typedef_linux.h deleted file mode 100644 index 9df5b9cec..000000000 --- a/inc/mmpa/sub_inc/mmpa_typedef_linux.h +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MMPA_TYPEDEF_LINUX_H -#define MMPA_TYPEDEF_LINUX_H - -#ifdef __cplusplus -#if __cplusplus -extern "C" { -#endif // __cpluscplus -#endif // __cpluscplus - -#ifndef FALSE -#define FALSE 0 -#endif - -#ifndef TRUE -#define TRUE 1 -#endif - -typedef unsigned char UINT8; -typedef signed char INT8; -typedef unsigned short UINT16; -typedef signed short INT16; -typedef unsigned int UINT32; -typedef signed int INT32; -typedef unsigned long long UINT64; -typedef signed long long INT64; -typedef float FLOAT; -typedef double DOUBLE; -typedef void VOID; -typedef unsigned char UCHAR; -typedef char CHAR; -typedef unsigned short USHORT; -typedef short SHORT; -typedef unsigned int UINT; -typedef int INT; -typedef unsigned long ULONG; -typedef unsigned long long ULONGLONG; - -typedef long LONG; - -#define HANDLE_INVALID_VALUE (-1) -#define MMPA_MEM_MAX_LEN (0x7fffffff) -#define MMPA_PROCESS_ERROR (0x7fffffff) -#define PATH_SIZE 256 -#define MAX_IOVEC_SIZE 32 -#define MMPA_MAX_SLEEP_MILLSECOND 4294967 -#define MAX_PIPE_COUNT 2 -#define MMPA_PIPE_COUNT 2 -#define MMPA_THREADNAME_SIZE 16 -#define MMPA_MIN_OS_NAME_SIZE 64 -#define MMPA_MIN_OS_VERSION_SIZE 128 - -#define MMPA_ONE_THOUSAND 1000 -#define MMPA_ONE_BILLION 1000000000 -#define MMPA_COMPUTER_BEGIN_YEAR 1900 -#define MMPA_ZERO 0 -#define MMPA_MAX_THREAD_PIO 99 -#define MMPA_MIN_THREAD_PIO 1 -#define MMPA_DEFAULT_PIPE_PERMISSION 0777 -#define MMPA_DEFAULT_MSG_TYPE 1 - -#define MMPA_THREAD_SCHED_RR SCHED_RR -#define MMPA_THREAD_SCHED_FIFO SCHED_FIFO -#define MMPA_THREAD_SCHED_OTHER SCHED_OTHER -#define MMPA_THREAD_MIN_STACK_SIZE PTHREAD_STACK_MIN - -#define MM_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER - -#define MMPA_MAX_NI 19 -#define MMPA_MIN_NI (-20) - -#define EN_OK 0 -#define EN_ERR 1 -#define EN_ERROR (-1) -#define EN_INVALID_PARAM (-2) -#define EN_TIMEOUT (-3) - -#ifdef __cplusplus -#if __cplusplus -} -#endif // __cpluscplus -#endif // __cpluscplus -#endif // MMPA_TYPEDEF_LINUX_H_ diff --git a/inc/mmpa/sub_inc/mmpa_typedef_win.h b/inc/mmpa/sub_inc/mmpa_typedef_win.h deleted file mode 100644 index 8200bea6a..000000000 --- a/inc/mmpa/sub_inc/mmpa_typedef_win.h +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
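The EN_* status macros above are the single return convention used across the mmpa API (EN_OK for success, EN_INVALID_PARAM for bad arguments, and so on). A tiny helper of the following kind, shown only as an illustration and not present in the header, can make diagnostics easier to read:

    static const CHAR *MmpaStatusText(INT32 status) {
        switch (status) {
            case EN_OK:            return "ok";
            case EN_ERR:           return "generic error";
            case EN_ERROR:         return "execution failure";
            case EN_INVALID_PARAM: return "invalid parameter";
            case EN_TIMEOUT:       return "timeout";
            default:               return "unknown status";
        }
    }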
- */ - -#ifndef MMPA_TYPEDEF_WIN_H -#define MMPA_TYPEDEF_WIN_H - -#ifdef __cplusplus -#if __cplusplus -extern "C" { -#endif // __cpluscplus -#endif // __cpluscplus - -#ifndef FALSE -#define FALSE 0 -#endif - -#ifndef TRUE -#define TRUE 1 -#endif - -#define EN_OK 0 -#define EN_ERR 1 -#define EN_ERROR (-1) -#define EN_INVALID_PARAM (-2) -#define EN_TIMEOUT (-3) - -#define HANDLE_INVALID_VALUE (-1) -#define INVALID_SOCKET_HANDLE INVALID_SOCKET -#define MMPA_MEM_MAX_LEN (0x7fffffff) -#define MMPA_PROCESS_ERROR (0x7fffffff) - -#define MMPA_ONE_THOUSAND 1000 -#define MMPA_COMPUTER_BEGIN_YEAR 1900 -#define SUMMER_TIME_OR_NOT (-1) -#define MMPA_ZERO 0 -#define MMPA_VALUE_ONE 1 -#define MMPA_SOCKET_MAIN_EDITION 2 -#define MMPA_SOCKET_SECOND_EDITION 0 -#define MMPA_PIPE_BUF_SIZE 1024 -#define MMPA_MAX_SCANDIR_COUNT 1024 -#define MAX_IOVEC_SIZE 32 -#define MMPA_PIPE_COUNT 2 -#define MMPA_THREADNAME_SIZE 16 -#define MMPA_MIN_OS_NAME_SIZE (MAX_COMPUTERNAME_LENGTH + 1) -#define MMPA_MIN_OS_VERSION_SIZE 64 - -#define MMPA_MAX_NI 19 -#define MMPA_MIDDLE_NI 5 -#define MMPA_LOW_NI (-5) -#define MMPA_MIN_NI (-20) -#define MMPA_MAX_FILE 128 - -#define MMPA_MAX_THREAD_PIO 99 -#define MMPA_MIDDLE_THREAD_PIO 66 -#define MMPA_LOW_THREAD_PIO 33 -#define MMPA_MIN_THREAD_PIO 1 - -#define MMPA_THREAD_SCHED_RR 0 -#define MMPA_THREAD_SCHED_FIFO 0 -#define MMPA_THREAD_SCHED_OTHER 0 -#define MMPA_THREAD_MIN_STACK_SIZE 0 - -#define MM_MUTEX_INITIALIZER NULL - -#ifdef __cplusplus -#if __cplusplus -} -#endif // __cpluscplus -#endif // __cpluscplus -#endif // _MMPA_TYPEDEF_WIN_H_ diff --git a/inc/mmpa/sub_inc/mmpa_win.h b/inc/mmpa/sub_inc/mmpa_win.h deleted file mode 100644 index 49e97a5d8..000000000 --- a/inc/mmpa/sub_inc/mmpa_win.h +++ /dev/null @@ -1,566 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MMPA_WIN_MMPA_WIN_H -#define MMPA_WIN_MMPA_WIN_H -#ifdef __cplusplus -#if __cplusplus -extern "C" { -#endif // __cpluscplus -#endif // __cpluscplus -#ifdef MMPA_DLL -#define MMPA_DLL_API __declspec(dllexport) -#else -#define MMPA_DLL_API __declspec(dllimport) -#endif - -#define MMPA_MACINFO_DEFAULT_SIZE 18 -#define MMPA_CPUDESC_DEFAULT_SIZE 64 - -#pragma section(".CRT$XCU", long, read) -#pragma section(".CRT$XPU", long, read) - -typedef HANDLE mmMutex_t; -typedef HANDLE mmThread; -typedef HANDLE mmProcess; -typedef HANDLE mmPollHandle; -typedef HANDLE mmPipeHandle; -typedef HANDLE mmFileHandle; -typedef HANDLE mmCompletionHandle; -typedef HANDLE mmFd_t; -typedef CRITICAL_SECTION mmMutexFC; -typedef CONDITION_VARIABLE mmCond; - -typedef VOID *(*userProcFunc)(VOID *pulArg); -typedef struct { - userProcFunc procFunc; - VOID *pulArg; -} mmUserBlock_t; - -typedef DWORD mmThreadKey; -typedef SYSTEMTIME mmSystemTime_t; - -typedef HANDLE mmSem_t; -typedef SOCKET mmSockHandle; -typedef SRWLOCK mmRWLock_t; -typedef struct sockaddr mmSockAddr; -typedef int mmSocklen_t; -typedef int mmSemTimeout_t; -typedef long mmAtomicType; -typedef long long mmAtomicType64; -typedef DWORD mmExitCode; -typedef DWORD mmErrorMsg; -typedef int mmKey_t; -typedef HANDLE mmMsgid; -typedef long int mmOfft_t; -typedef int mmPid_t; - -typedef INT32 mmSsize_t; -typedef int mmSize; // size -typedef size_t mmSize_t; -typedef VOID mmshmId_ds; -typedef long long MM_LONG; - -typedef enum { - DT_DIR = FILE_ATTRIBUTE_DIRECTORY, -} mmDtype; - -typedef struct { - unsigned char d_type; - char d_name[MAX_PATH]; // file name -} mmDirent; - -typedef struct { - unsigned long d_type; - char d_name[MAX_PATH]; // file name -} mmDirent2; - -typedef int (*mmFilter)(const mmDirent *entry); -typedef int (*mmFilter2)(const mmDirent2 *entry); -typedef int (*mmSort)(const mmDirent **a, const mmDirent **b); -typedef int (*mmSort2)(const mmDirent2 **a, const mmDirent2 **b); - -typedef struct { - VOID *sendBuf; - INT32 sendLen; -} mmIovSegment; -typedef PVOID mmInAddr; - -typedef enum { - pollTypeRead = 1, // pipeline reading - pollTypeRecv, // socket receive - pollTypeIoctl, // ioctl read -} mmPollType; - -typedef struct { - HANDLE completionHandle; - mmPollType overlapType; - OVERLAPPED oa; -} mmComPletionKey, *pmmComPletionKey; - -typedef struct { - VOID *priv; // User defined private content - mmPollHandle bufHandle; // Value of handle corresponding to buf - mmPollType bufType; // Data types polled to - VOID *buf; - UINT32 bufLen; - UINT32 bufRes; -} mmPollData, *pmmPollData; - -typedef VOID (*mmPollBack)(pmmPollData); -typedef struct { - mmPollHandle handle; // The file descriptor or handle of poll is required - mmPollType pollType; // Operation type requiring poll,read or recv or ioctl - INT32 ioctlCode; // IOCTL operation code, dedicated to IOCTL - mmComPletionKey completionKey; // The default value is blank, which will be used in windows to receive the data with - // different handle -} mmPollfd; - -typedef struct { - OVERLAPPED oa; - HANDLE completionHandle; - WSABUF DataBuf; -} PRE_IO_DATA, *PPRE_IO_DATA; - -typedef OVERLAPPED mmOverLap; - -typedef struct { - UINT32 createFlag; - INT32 oaFlag; // Overlap operation is supported if it is not 0 -} mmCreateFlag; - -typedef struct { - VOID *inbuf; - INT32 inbufLen; - VOID *outbuf; - INT32 outbufLen; - mmOverLap *oa; -} mmIoctlBuf; - -typedef struct { - HANDLE timerQueue; - HANDLE timerHandle; -} mmTimerHandle; - -typedef struct { - LONG tv_sec; - LONG tv_usec; -} mmTimeval; - 
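The mmDirent, mmFilter and mmSort typedefs above mirror the POSIX scandir callback contract and are consumed by mmScandir, declared further down in this header. A hypothetical filter/sort pair could look as follows; this is a sketch only, assuming <string.h> is available and that a non-zero filter return keeps the entry, as with POSIX scandir:

    static int KeepDirectories(const mmDirent *entry) {
        return (entry->d_type == DT_DIR) ? 1 : 0;   // keep directory entries only
    }

    static int ByName(const mmDirent **a, const mmDirent **b) {
        return strcmp((*a)->d_name, (*b)->d_name);  // ascending by file name
    }

    // Usage sketch:
    //   mmDirent **entries = NULL;
    //   INT32 n = mmScandir("C:\\logs", &entries, KeepDirectories, ByName);
    //   if (n >= 0) { /* use entries[0..n-1] */ mmScandirFree(entries, n); }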
-typedef struct { - INT32 tz_minuteswest; // How many minutes is it different from Greenwich - INT32 tz_dsttime; // DST correction type -} mmTimezone; - -typedef struct { - MM_LONG tv_sec; - MM_LONG tv_nsec; -} mmTimespec; - -typedef mmTimerHandle mmTimer; - -#define mmTLS __declspec(thread) - -typedef struct stat mmStat_t; -typedef struct _stat64 mmStat64_t; -typedef int mmMode_t; - -typedef int MODE; - -typedef struct { - const char *name; - int has_arg; - int *flag; - int val; -} mmStructOption; - -typedef struct { - ULONGLONG totalSize; - ULONGLONG freeSize; - ULONGLONG availSize; -} mmDiskSize; - -typedef struct { - const char *dli_fname; - void *dli_fbase; - const char *dli_sname; - void *dli_saddr; - size_t dli_size; /* ELF only */ - int dli_bind; /* ELF only */ - int dli_type; -} mmDlInfo; - -typedef struct { - char addr[MMPA_MACINFO_DEFAULT_SIZE]; // ex:aa-bb-cc-dd-ee-ff\0 -} mmMacInfo; - -typedef struct { - char arch[MMPA_CPUDESC_DEFAULT_SIZE]; - char manufacturer[MMPA_CPUDESC_DEFAULT_SIZE]; // vendor - char version[MMPA_CPUDESC_DEFAULT_SIZE]; // modelname - INT32 frequency; // cpu frequency - INT32 maxFrequency; // max speed - INT32 ncores; // cpu cores - INT32 nthreads; // cpu thread count - INT32 ncounts; // logical cpu nums -} mmCpuDesc; - -typedef struct { - char **argv; - INT32 argvCount; - char **envp; - INT32 envpCount; -} mmArgvEnv; - -// Windows currently does not support properties other than thread separation properties -typedef struct { - INT32 detachFlag; // Thread detach property: 0 do not detach 1 detach - INT32 priorityFlag; - INT32 priority; - INT32 policyFlag; - INT32 policy; - INT32 stackFlag; - UINT32 stackSize; -} mmThreadAttr; - -typedef VOID (*mmPf)(VOID); - -#define mm_no_argument 0 -#define mm_required_argument 1 -#define mm_optional_argument 2 - -#define M_FILE_RDONLY GENERIC_READ -#define M_FILE_WRONLY GENERIC_WRITE -#define M_FILE_RDWR (GENERIC_READ | GENERIC_WRITE) -#define M_FILE_CREAT OPEN_ALWAYS - -#define M_RDONLY _O_RDONLY -#define M_WRONLY _O_WRONLY -#define M_RDWR _O_RDWR -#define M_IRWXU _O_RDWR -#define M_CREAT _O_CREAT -#define M_BINARY _O_BINARY -#define M_TRUNC _O_TRUNC -#define M_APPEND _O_APPEND - -#define M_IREAD _S_IREAD -#define M_IRUSR _S_IREAD -#define M_IWRITE _S_IWRITE -#define M_IWUSR _S_IWRITE -#define M_IXUSR 0 - -#define M_IN_CREATE FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME -#define M_IN_CLOSE_WRITE FILE_NOTIFY_CHANGE_LAST_WRITE -#define M_IN_IGNORED FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME - -#define M_OUT_CREATE 0x00000100 -#define M_OUT_CLOSE_WRITE 0x00000008 -#define M_OUT_IGNORED 0x00008000 -#define M_OUT_ISDIR 0x40000000 - -#define M_MSG_CREAT 1 -#define M_MSG_EXCL 2 -#define M_MSG_NOWAIT 3 - -#define M_WAIT_NOHANG 1 -#define M_WAIT_UNTRACED 2 - -#define M_UMASK_USRREAD _S_IREAD -#define M_UMASK_GRPREAD _S_IREAD -#define M_UMASK_OTHREAD _S_IREAD - -#define M_UMASK_USRWRITE _S_IWRITE -#define M_UMASK_GRPWRITE _S_IWRITE -#define M_UMASK_OTHWRITE _S_IWRITE - -#define M_UMASK_USREXEC 0 -#define M_UMASK_GRPEXEC 0 -#define M_UMASK_OTHEXEC 0 - -#define DT_UNKNOWN 0 -#define DT_FIFO 1 -#define DT_CHR 2 -#define DT_BLK 6 -#define DT_REG 8 -#define DT_LNK 10 -#define DT_SOCK 12 -#define DT_WHT 14 -#define MM_DT_DIR 16 -#define MM_DT_REG 32 - -#define mmConstructor(x) __declspec(allocate(".CRT$XCU")) mmPf con = x -#define mmDestructor(x) __declspec(allocate(".CRT$XPU")) mmPf de = x - -#define MMPA_PRINT_ERROR ((opterr) && (*options != ':')) -#define MMPA_FLAG_PERMUTE 0x01 // permute non-options 
to the end of argv -#define MMPA_FLAG_ALLARGS 0x02 // treat non-options as args to option "-1" -#define MMPA_FLAG_LONGONLY 0x04 // operate as getopt_long_only -// return values -#define MMPA_BADCH (INT32)'?' -#define MMPA_BADARG ((*options == ':') ? (INT32)':' : (INT32)'?') -#define MMPA_INORDER (INT32)1 - -#define MMPA_NO_ARGUMENT 0 -#define MMPA_REQUIRED_ARGUMENT 1 -#define MMPA_OPTIONAL_ARGUMENT 2 - -#define MMPA_EMSG "" -#define MMPA_MAX_PATH MAX_PATH -#define M_NAME_MAX _MAX_FNAME - -#define M_F_OK 0 -#define M_X_OK 1 -#define M_W_OK 2 -#define M_R_OK 4 - -#define MMPA_STDIN stdin -#define MMPA_STDOUT stdout -#define MMPA_STDERR stderr - -#define MMPA_RTLD_NOW 0 -#define MMPA_RTLD_GLOBAL 0 -#define MMPA_RTLD_LAZY 0 -#define MMPA_RTLD_NODELETE 0 - -#define MMPA_DL_EXT_NAME ".dll" - -#define __attribute__(v) - -MMPA_FUNC_VISIBILITY INT32 mmCreateTask(mmThread *threadHandle, mmUserBlock_t *funcBlock); -MMPA_FUNC_VISIBILITY INT32 mmJoinTask(mmThread *threadHandle); -MMPA_FUNC_VISIBILITY INT32 mmMutexInit(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmMutexLock(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmMutexTryLock(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmMutexUnLock(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmMutexDestroy(mmMutex_t *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondInit(mmCond *cond); -MMPA_FUNC_VISIBILITY INT32 mmCondLockInit(mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondLock(mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondUnLock(mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondLockDestroy(mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmRWLockInit(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockRDLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockTryRDLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockWRLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockTryWRLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRDLockUnLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmWRLockUnLock(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmRWLockDestroy(mmRWLock_t *rwLock); -MMPA_FUNC_VISIBILITY INT32 mmCondWait(mmCond *cond, mmMutexFC *mutex); -MMPA_FUNC_VISIBILITY INT32 mmCondTimedWait(mmCond *cond, mmMutexFC *mutex, UINT32 milliSecond); - -MMPA_FUNC_VISIBILITY INT32 mmCondNotify(mmCond *cond); -MMPA_FUNC_VISIBILITY INT32 mmCondNotifyAll(mmCond *cond); -MMPA_FUNC_VISIBILITY INT32 mmCondDestroy(mmCond *cond); -MMPA_FUNC_VISIBILITY INT32 mmGetPid(VOID); -MMPA_FUNC_VISIBILITY INT32 mmGetTid(VOID); -MMPA_FUNC_VISIBILITY INT32 mmGetPidHandle(mmProcess *processHandle); -MMPA_FUNC_VISIBILITY INT32 mmGetLocalTime(mmSystemTime_t *sysTime); -MMPA_FUNC_VISIBILITY INT32 mmGetSystemTime(mmSystemTime_t *sysTime); -MMPA_FUNC_VISIBILITY INT32 mmSemInit(mmSem_t *sem, UINT32 value); -MMPA_FUNC_VISIBILITY INT32 mmSemWait(mmSem_t *sem); -MMPA_FUNC_VISIBILITY INT32 mmSemPost(mmSem_t *sem); -MMPA_FUNC_VISIBILITY INT32 mmSemDestroy(mmSem_t *sem); -MMPA_FUNC_VISIBILITY INT32 mmOpen(const CHAR *pathName, INT32 flags); -MMPA_FUNC_VISIBILITY INT32 mmOpen2(const CHAR *pathName, INT32 flags, MODE mode); -MMPA_FUNC_VISIBILITY FILE *mmPopen(CHAR *command, CHAR *type); -MMPA_FUNC_VISIBILITY INT32 mmClose(INT32 fd); -MMPA_FUNC_VISIBILITY INT32 mmPclose(FILE *stream); -MMPA_FUNC_VISIBILITY mmSsize_t mmWrite(INT32 fd, VOID *buf, UINT32 bufLen); -MMPA_FUNC_VISIBILITY mmSsize_t mmRead(INT32 fd, VOID *buf, UINT32 bufLen); -MMPA_FUNC_VISIBILITY mmSockHandle mmSocket(INT32 sockFamily, INT32 type, INT32 protocol); -MMPA_FUNC_VISIBILITY 
INT32 mmBind(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t addrLen); -MMPA_FUNC_VISIBILITY INT32 mmListen(mmSockHandle sockFd, INT32 backLog); -MMPA_FUNC_VISIBILITY mmSockHandle mmAccept(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t *addrLen); -MMPA_FUNC_VISIBILITY INT32 mmConnect(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t addrLen); -MMPA_FUNC_VISIBILITY INT32 mmCloseSocket(mmSockHandle sockFd); -MMPA_FUNC_VISIBILITY mmSsize_t mmSocketRecv(mmSockHandle sockFd, VOID *recvBuf, INT32 recvLen, INT32 recvFlag); -MMPA_FUNC_VISIBILITY mmSsize_t mmSocketSend(mmSockHandle sockFd, VOID *sendBuf, INT32 sendLen, INT32 sendFlag); -MMPA_FUNC_VISIBILITY INT32 mmSocketSendTo(mmSockHandle sockFd, - VOID *sendMsg, - INT32 sendLen, - UINT32 sendFlag, - const mmSockAddr* addr, - INT32 tolen); -MMPA_FUNC_VISIBILITY mmSsize_t mmSocketRecvFrom(mmSockHandle sockFd, - VOID *recvBuf, - mmSize recvLen, - UINT32 recvFlag, - mmSockAddr* addr, - mmSocklen_t *FromLen); -MMPA_FUNC_VISIBILITY INT32 mmSAStartup(VOID); -MMPA_FUNC_VISIBILITY INT32 mmSACleanup(VOID); -MMPA_FUNC_VISIBILITY VOID *mmDlopen(const CHAR *fileName, INT mode); -MMPA_FUNC_VISIBILITY INT32 mmDladdr(VOID *addr, mmDlInfo *info); -MMPA_FUNC_VISIBILITY VOID *mmDlsym(VOID *handle, const CHAR *fileName); -MMPA_FUNC_VISIBILITY INT32 mmDlclose(VOID *handle); -MMPA_FUNC_VISIBILITY CHAR *mmDlerror(VOID); -MMPA_FUNC_VISIBILITY INT32 - mmCreateAndSetTimer(mmTimer *timerHandle, mmUserBlock_t *timerBlock, UINT milliSecond, UINT period); -MMPA_FUNC_VISIBILITY INT32 mmDeleteTimer(mmTimer timerHandle); -MMPA_FUNC_VISIBILITY INT32 mmStatGet(const CHAR *path, mmStat_t *buffer); -MMPA_FUNC_VISIBILITY INT32 mmStat64Get(const CHAR *path, mmStat64_t *buffer); -MMPA_FUNC_VISIBILITY INT32 mmFStatGet(INT32 fd, mmStat_t *buffer); -MMPA_FUNC_VISIBILITY INT32 mmMkdir(const CHAR *pathName, mmMode_t mode); -MMPA_FUNC_VISIBILITY INT32 mmSleep(UINT32 milliSecond); -MMPA_FUNC_VISIBILITY INT32 mmCreateTaskWithAttr(mmThread *threadHandle, mmUserBlock_t *funcBlock); -MMPA_FUNC_VISIBILITY INT32 mmGetProcessPrio(mmProcess pid); -MMPA_FUNC_VISIBILITY INT32 mmSetProcessPrio(mmProcess pid, INT32 processPrio); -MMPA_FUNC_VISIBILITY INT32 mmGetThreadPrio(mmThread *threadHandle); -MMPA_FUNC_VISIBILITY INT32 mmSetThreadPrio(mmThread *threadHandle, INT32 threadPrio); -MMPA_FUNC_VISIBILITY INT32 mmAccess(const CHAR *pathName); -MMPA_FUNC_VISIBILITY INT32 mmAccess2(const CHAR *pathName, INT32 mode); -MMPA_FUNC_VISIBILITY INT32 mmRmdir(const CHAR *pathName); - -MMPA_FUNC_VISIBILITY INT32 mmIoctl(mmProcess fd, INT32 ioctlCode, mmIoctlBuf *bufPtr); -MMPA_FUNC_VISIBILITY INT32 mmSemTimedWait(mmSem_t *sem, INT32 timeout); -MMPA_FUNC_VISIBILITY mmSsize_t mmWritev(mmSockHandle fd, mmIovSegment *iov, INT32 iovcnt); -MMPA_FUNC_VISIBILITY VOID mmMb(); -MMPA_FUNC_VISIBILITY INT32 mmInetAton(const CHAR *addrStr, mmInAddr *addr); - -MMPA_FUNC_VISIBILITY mmProcess mmOpenFile(const CHAR *fileName, UINT32 access, mmCreateFlag fileFlag); -MMPA_FUNC_VISIBILITY mmSsize_t mmReadFile(mmProcess fileId, VOID *buffer, INT32 len); -MMPA_FUNC_VISIBILITY mmSsize_t mmWriteFile(mmProcess fileId, VOID *buffer, INT32 len); -MMPA_FUNC_VISIBILITY INT32 mmCloseFile(mmProcess fileId); - -MMPA_FUNC_VISIBILITY mmAtomicType mmSetData(mmAtomicType *ptr, mmAtomicType value); -MMPA_FUNC_VISIBILITY mmAtomicType mmValueInc(mmAtomicType *ptr, mmAtomicType value); -MMPA_FUNC_VISIBILITY mmAtomicType mmValueSub(mmAtomicType *ptr, mmAtomicType value); -MMPA_FUNC_VISIBILITY mmAtomicType64 mmSetData64(mmAtomicType64 *ptr, 
mmAtomicType64 value); -MMPA_FUNC_VISIBILITY mmAtomicType64 mmValueInc64(mmAtomicType64 *ptr, mmAtomicType64 value); -MMPA_FUNC_VISIBILITY mmAtomicType64 mmValueSub64(mmAtomicType64 *ptr, mmAtomicType64 value); -MMPA_FUNC_VISIBILITY INT32 mmCreateTaskWithDetach(mmThread *threadHandle, mmUserBlock_t *funcBlock); - -MMPA_FUNC_VISIBILITY INT32 mmCreateNamedPipe(mmPipeHandle pipe[], CHAR *pipeName[], INT32 waitMode); -MMPA_FUNC_VISIBILITY INT32 mmOpenNamePipe(mmPipeHandle pipe[], CHAR *pipeName[], INT32 waitMode); -MMPA_FUNC_VISIBILITY VOID mmCloseNamedPipe(mmPipeHandle namedPipe[]); - -MMPA_FUNC_VISIBILITY INT32 mmCreatePipe(mmPipeHandle pipe[], CHAR *pipeName[], UINT32 pipeCount, INT32 waitMode); -MMPA_FUNC_VISIBILITY INT32 mmOpenPipe(mmPipeHandle pipe[], CHAR *pipeName[], UINT32 pipeCount, INT32 waitMode); -MMPA_FUNC_VISIBILITY VOID mmClosePipe(mmPipeHandle pipe[], UINT32 pipeCount); - -MMPA_FUNC_VISIBILITY mmCompletionHandle mmCreateCompletionPort(); -MMPA_FUNC_VISIBILITY VOID mmCloseCompletionPort(mmCompletionHandle handle); -MMPA_FUNC_VISIBILITY INT32 mmPoll(mmPollfd *fds, INT32 fdCount, INT32 timeout, mmCompletionHandle handleIOCP, - pmmPollData polledData, mmPollBack pollBack); - -MMPA_FUNC_VISIBILITY INT32 mmGetErrorCode(); -MMPA_FUNC_VISIBILITY CHAR *mmGetErrorFormatMessage(mmErrorMsg errnum, CHAR *buf, mmSize size); -MMPA_FUNC_VISIBILITY INT32 mmGetTimeOfDay(mmTimeval *timeVal, mmTimezone *timeZone); -MMPA_FUNC_VISIBILITY mmTimespec mmGetTickCount(); -MMPA_FUNC_VISIBILITY INT32 mmGetRealPath(CHAR *path, CHAR *realPath); - -MMPA_FUNC_VISIBILITY INT32 mmRealPath(const CHAR *path, CHAR *realPath, INT32 realPathLen); - -MMPA_FUNC_VISIBILITY INT32 mmDup2(INT32 oldFd, INT32 newFd); -MMPA_FUNC_VISIBILITY INT32 mmDup(INT32 fd); -MMPA_FUNC_VISIBILITY INT32 mmUnlink(const CHAR *filename); -MMPA_FUNC_VISIBILITY INT32 mmChmod(const CHAR *filename, INT32 mode); -MMPA_FUNC_VISIBILITY INT32 mmFileno(FILE *stream); -MMPA_FUNC_VISIBILITY INT32 mmScandir(const CHAR *path, mmDirent ***entryList, mmFilter filterFunc, mmSort sort); -MMPA_FUNC_VISIBILITY INT32 mmScandir2(const CHAR *path, mmDirent2 ***entryList, mmFilter2 filterFunc, mmSort2 sort); -MMPA_FUNC_VISIBILITY VOID mmScandirFree(mmDirent **entryList, INT32 count); -MMPA_FUNC_VISIBILITY VOID mmScandirFree2(mmDirent2 **entryList, INT32 count); - -MMPA_FUNC_VISIBILITY mmMsgid mmMsgCreate(mmKey_t key, INT32 msgFlag); -MMPA_FUNC_VISIBILITY mmMsgid mmMsgOpen(mmKey_t key, INT32 msgFlag); -MMPA_FUNC_VISIBILITY INT32 mmMsgRcv(mmMsgid msqid, VOID *buf, INT32 bufLen, INT32 msgFlag); -MMPA_FUNC_VISIBILITY INT32 mmMsgSnd(mmMsgid msqid, VOID *buf, INT32 bufLen, INT32 msgFlag); - -MMPA_FUNC_VISIBILITY INT32 mmMsgClose(mmMsgid msqid); - -MMPA_FUNC_VISIBILITY INT32 mmLocalTimeR(const time_t *timep, struct tm *result); -MMPA_FUNC_VISIBILITY INT32 mmGetOptErr(); -MMPA_FUNC_VISIBILITY VOID mmSetOptErr(INT32 mmOptErr); -MMPA_FUNC_VISIBILITY INT32 mmGetOptInd(); -MMPA_FUNC_VISIBILITY VOID mmSetOptInd(INT32 mmOptInd); -MMPA_FUNC_VISIBILITY INT32 mmGetOptOpt(); -MMPA_FUNC_VISIBILITY VOID mmSetOpOpt(INT32 mmOptOpt); -MMPA_FUNC_VISIBILITY CHAR *mmGetOptArg(); -MMPA_FUNC_VISIBILITY VOID mmSetOptArg(CHAR *mmOptArg); -MMPA_FUNC_VISIBILITY INT32 mmGetOpt(INT32 argc, char *const *argv, const char *opts); -MMPA_FUNC_VISIBILITY INT32 - mmGetOptLong(INT32 argc, CHAR *const *argv, const CHAR *opts, const mmStructOption *longopts, INT32 *longindex); - -MMPA_FUNC_VISIBILITY LONG mmLseek(INT32 fd, INT64 offset, INT32 seekFlag); -MMPA_FUNC_VISIBILITY INT32 mmFtruncate(mmProcess fd, 
UINT32 length); - -MMPA_FUNC_VISIBILITY INT32 mmTlsCreate(mmThreadKey *key, VOID (*destructor)(VOID *)); -MMPA_FUNC_VISIBILITY INT32 mmTlsSet(mmThreadKey key, const VOID *value); -MMPA_FUNC_VISIBILITY VOID *mmTlsGet(mmThreadKey key); -MMPA_FUNC_VISIBILITY INT32 mmTlsDelete(mmThreadKey key); -MMPA_FUNC_VISIBILITY INT32 mmGetOsType(); - -MMPA_FUNC_VISIBILITY INT32 mmFsync(mmProcess fd); -MMPA_FUNC_VISIBILITY INT32 mmFsync2(INT32 fd); -MMPA_FUNC_VISIBILITY INT32 mmChdir(const CHAR *path); -MMPA_FUNC_VISIBILITY INT32 mmUmask(INT32 pmode); -MMPA_FUNC_VISIBILITY INT32 mmWaitPid(mmProcess pid, INT32 *status, INT32 options); - -MMPA_FUNC_VISIBILITY INT32 mmGetCwd(CHAR *buffer, INT32 maxLen); -MMPA_FUNC_VISIBILITY CHAR *mmStrTokR(CHAR *str, const CHAR *delim, CHAR **saveptr); - -MMPA_FUNC_VISIBILITY INT32 mmGetEnv(const CHAR *name, CHAR *value, UINT32 len); -MMPA_FUNC_VISIBILITY INT32 mmSetEnv(const CHAR *name, const CHAR *value, INT32 overwrite); -MMPA_FUNC_VISIBILITY CHAR *mmDirName(CHAR *path); -MMPA_FUNC_VISIBILITY CHAR *mmBaseName(CHAR *path); -MMPA_FUNC_VISIBILITY INT32 mmGetDiskFreeSpace(const char *path, mmDiskSize *diskSize); - -MMPA_FUNC_VISIBILITY INT32 mmSetThreadName(mmThread *threadHandle, const CHAR *name); -MMPA_FUNC_VISIBILITY INT32 mmGetThreadName(mmThread *threadHandle, CHAR *name, INT32 size); - -/* - * Function: set the thread name of the currently executing thread - internal call of thread, which is not supported - * under Windows temporarily, and is null. - * Input: name: the thread name to be set - * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the - * execution failure returns EN_ERROR - */ -MMPA_FUNC_VISIBILITY INT32 mmSetCurrentThreadName(const CHAR *name); - -/* - * Function: Get the thread name of the currently executing thread - thread body call, not supported under windows, null - * implementation. - * Input:name:The name of the thread to get, and the cache is allocated by the user,size>=MMPA_THREADNAME_SIZE. 
- * The input parameter error returns EN_INVALID_PARAM, the execution success returns - * EN_OK, and the execution failure returns EN_ERROR - */ -MMPA_FUNC_VISIBILITY INT32 mmGetCurrentThreadName(CHAR *name, INT32 size); - -MMPA_FUNC_VISIBILITY INT32 mmGetFileSize(const CHAR *fileName, ULONGLONG *length); -MMPA_FUNC_VISIBILITY INT32 mmIsDir(const CHAR *fileName); -MMPA_FUNC_VISIBILITY INT32 mmGetOsName(CHAR *name, INT32 nameSize); -MMPA_FUNC_VISIBILITY INT32 mmGetOsVersion(CHAR *versionInfo, INT32 versionLength); -MMPA_FUNC_VISIBILITY INT32 mmGetMac(mmMacInfo **list, INT32 *count); -MMPA_FUNC_VISIBILITY INT32 mmGetMacFree(mmMacInfo *list, INT32 count); -MMPA_FUNC_VISIBILITY INT32 mmGetCpuInfo(mmCpuDesc **cpuInfo, INT32 *count); -MMPA_FUNC_VISIBILITY INT32 mmCpuInfoFree(mmCpuDesc *cpuInfo, INT32 count); -MMPA_FUNC_VISIBILITY INT32 - mmCreateProcess(const CHAR *fileName, const mmArgvEnv *env, const char *stdoutRedirectFile, mmProcess *id); - -MMPA_FUNC_VISIBILITY INT32 - mmCreateTaskWithThreadAttr(mmThread *threadHandle, const mmUserBlock_t *funcBlock, const mmThreadAttr *threadAttr); -MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMode_t mode); -MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name); -MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags); -MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra); -#ifdef __cplusplus -#if __cplusplus -} -#endif /* __cpluscplus */ -#endif // __cpluscplus - -#endif // MMPA_WIN_MMPA_WIN_H_ diff --git a/inc/parser/inc/external/parser/onnx_parser.h b/inc/parser/inc/external/parser/onnx_parser.h deleted file mode 100644 index 92877c633..000000000 --- a/inc/parser/inc/external/parser/onnx_parser.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
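mmpa also wraps getopt-style option parsing on both platforms (mmStructOption together with the mmGetOpt* accessors declared above). Below is a minimal option table, illustrative only, driven from a main(argc, argv); it assumes the wrapper follows getopt_long semantics, including the all-zero terminator entry and a return value of -1 once the arguments are exhausted.

    static const mmStructOption kOpts[] = {
        {"model",   mm_required_argument, NULL, 'm'},
        {"verbose", mm_no_argument,       NULL, 'v'},
        {NULL, 0, NULL, 0}                               // terminator, as with getopt_long
    };

    INT32 longIndex = 0;
    INT32 opt;
    while ((opt = mmGetOptLong(argc, argv, "m:v", kOpts, &longIndex)) != -1) {
        if (opt == 'm') {
            const CHAR *modelPath = mmGetOptArg();       // argument of --model / -m
            (void)modelPath;
        } else if (opt == 'v') {
            // enable verbose output
        }
    }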
- */ - -#ifndef INC_EXTERNAL_PARSER_ONNX_PARSER_H_ -#define INC_EXTERNAL_PARSER_ONNX_PARSER_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define PARSER_FUNC_VISIBILITY _declspec(dllexport) -#else -#define PARSER_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define PARSER_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define PARSER_FUNC_VISIBILITY -#endif -#endif - -#include "graph/ascend_string.h" -#include "graph/ge_error_codes.h" -#include "graph/graph.h" -#include "graph/types.h" - -namespace ge { -PARSER_FUNC_VISIBILITY graphStatus aclgrphParseONNX(const char *model_file, - const std::map &parser_params, ge::Graph &graph); - -PARSER_FUNC_VISIBILITY graphStatus aclgrphParseONNXFromMem(const char *buffer, size_t size, - const std::map &parser_params, ge::Graph &graph); -} // namespace ge - -#endif // INC_EXTERNAL_PARSER_ONNX_PARSER_H_ diff --git a/inc/runtime/base.h b/inc/runtime/base.h deleted file mode 100644 index 5b246eedd..000000000 --- a/inc/runtime/base.h +++ /dev/null @@ -1,358 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_BASE_H__ -#define __CCE_RUNTIME_BASE_H__ - -#include -#include "toolchain/prof_callback.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -// If you need export the function of this library in Win32 dll, use __declspec(dllexport) -#ifndef RTS_API -#ifdef RTS_DLL_EXPORT -#define RTS_API __declspec(dllexport) -#else -#define RTS_API -#endif -#endif - -typedef int32_t rtError_t; -static const int32_t RT_ERROR_NONE = 0; // success - -/** - * @ingroup dvrt_base - * @brief runtime exception numbers. - */ -typedef enum tagRtExceptionType { - RT_EXCEPTION_NONE = 0, - RT_EXCEPTION_TS_DOWN = 1, - RT_EXCEPTION_TASK_TIMEOUT = 2, - RT_EXCEPTION_TASK_FAILURE = 3, - RT_EXCEPTION_DEV_RUNNING_DOWN = 4, - RT_EXCEPTION_STREAM_ID_FREE_FAILED = 5 -} rtExceptionType; - -/** - * @ingroup dvrt_base - * @brief Switch type. - */ -typedef enum tagRtCondition { - RT_EQUAL = 0, - RT_NOT_EQUAL, - RT_GREATER, - RT_GREATER_OR_EQUAL, - RT_LESS, - RT_LESS_OR_EQUAL -} rtCondition_t; - -/** - * @ingroup dvrt_base - * @brief Data Type of Extensible Switch Task. - */ -typedef enum tagRtSwitchDataType { - RT_SWITCH_INT32 = 0, - RT_SWITCH_INT64 = 1, -} rtSwitchDataType_t; - -typedef enum tagRtStreamFlagType { - RT_HEAD_STREAM = 0, // first stream - RT_INVALID_FLAG = 0xFFFFFFFF, -} rtStreamFlagType_t; - -typedef enum tagRtLimitType { - RT_LIMIT_TYPE_LOW_POWER_TIMEOUT = 0, // timeout for power down , ms -} rtLimitType_t; - -typedef struct rtExceptionInfo { - uint32_t taskid; - uint32_t streamid; - uint32_t tid; - uint32_t deviceid; - uint32_t retcode; -} rtExceptionInfo; - -typedef void (*rtErrorCallback)(rtExceptionType); - -typedef void (*rtTaskFailCallback)(rtExceptionInfo *exceptionInfo); - -typedef void (*rtDeviceStateCallback)(uint32_t devId, bool isOpen); - -/** - * @ingroup dvrt_base - * @brief stream handle. 
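The two parser entry points above (aclgrphParseONNX and aclgrphParseONNXFromMem) are the usual way to turn an ONNX model into a ge::Graph. A minimal call sketch, not part of the header: the map's key/value template arguments are elided in the hunk above and are assumed here to be ge::AscendString, an empty map simply means no extra parser options, and <map> plus the parser header are assumed to be included.

    std::map<ge::AscendString, ge::AscendString> parserParams;  // assumed key/value types
    ge::Graph onnxGraph("onnx_graph");
    ge::graphStatus ret = ge::aclgrphParseONNX("./model.onnx", parserParams, onnxGraph);
    if (ret != ge::GRAPH_SUCCESS) {
        // parsing failed; the status codes come from graph/ge_error_codes.h
    }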
- */ -typedef void *rtStream_t; - -/** - * @ingroup dvrt_base - * @brief runtime event handle. - */ -typedef void *rtEvent_t; - -/** - * @ingroup dvrt_base - * @brief label handle. - */ -typedef void *rtLabel_t; - -/** - * @ingroup dvrt_base - * @brief model handle. - */ -typedef void *rtModel_t; - -/** - * @ingroup profiling_base - * @brief runtime handle. - */ -RTS_API rtError_t rtSetProfDirEx(const char *profDir, const char *address, const char *jobCtx); - -/** - * @ingroup profiling_base - * @brief init profiler object. - */ -RTS_API rtError_t rtProfilerInit(const char *profDir, const char *address, const char *jobCtx); - -/** - * @ingroup profiling_base - * @brief config rts profiler. - */ -RTS_API rtError_t rtProfilerConfig(uint16_t type); - -/** - * @ingroup profiling_base - * @brief start rts profiler. - */ -RTS_API rtError_t rtProfilerStart(uint64_t profConfig, int32_t numsDev, uint32_t *deviceList); - -/** - * @ingroup profiling_base - * @brief stop rts profiler. - */ -RTS_API rtError_t rtProfilerStop(uint64_t profConfig, int32_t numsDev, uint32_t *deviceList); - -/** - * @ingroup profiling_base - * @brief ts send keypoint profiler log. - */ -RTS_API rtError_t rtProfilerTrace(uint64_t id, bool notify, uint32_t flags, rtStream_t stream); - -/** - * @ingroup profiling_base - * @brief ts set profiling reporter callback. - */ -RTS_API rtError_t rtSetMsprofReporterCallback(MsprofReporterCallback callback); - -/** - * @ingroup dvrt_base - * @brief Returns the last error from a runtime call. - */ -RTS_API rtError_t rtGetLastError(); - -/** - * @ingroup dvrt_base - * @brief Returns the last error from a runtime call. - */ -RTS_API rtError_t rtPeekAtLastError(); - -/** - * @ingroup dvrt_base - * @brief register callback for error code - * @param [out] NA - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtSetExceptCallback(rtErrorCallback callback); - -/** - * @ingroup dvrt_base - * @brief register callback for task fail - * @param [out] NA - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtSetTaskFailCallback(rtTaskFailCallback callback); - -/** - * @ingroup dvrt_base - * @brief register callback for deviceid - * @param [in] uniName unique register name, can't be null - * @param [in] callback Device state callback function - * @param [out] NA - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtRegDeviceStateCallback(const char *regName, rtDeviceStateCallback callback); - -/** - * @ingroup dvrt_base - * @brief register callback for fail task - * @param [in] uniName unique register name, can't be null - * @param [in] callback fail task callback function - * @param [out] NA - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtRegTaskFailCallbackByModule(const char *moduleName, rtTaskFailCallback callback); - -/** - * @ingroup dvrt_base - * @brief notify handle. 
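The callback registration functions above are normally wired up once during initialisation. An illustrative handler, not part of the header, that only records which task failed; it assumes <cstdio> for printf and that the handler returns quickly:

    static void OnTaskFail(rtExceptionInfo *info) {
        // invoked by the runtime when a task fails
        printf("task %u on stream %u (device %u) failed, retcode %u\n",
               info->taskid, info->streamid, info->deviceid, info->retcode);
    }

    // during initialisation:
    if (rtSetTaskFailCallback(OnTaskFail) != RT_ERROR_NONE) {
        // registration failed
    }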
- */ -typedef void *rtNotify_t; - -/** - * @ingroup dvrt_base - * @brief create label instance - * @param [out] label created label - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelCreate(rtLabel_t *label); - -/** - * @ingroup dvrt_base - * @brief create label instance - * @param [out] label created label - * @param [in] model label set model - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelCreateV2(rtLabel_t *label, rtModel_t model); - -/** - * @ingroup dvrt_base - * @brief set label and stream instance - * @param [in] label set label - * @param [in] stream set stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelSet(rtLabel_t label, rtStream_t stream); - -/** - * @ingroup dvrt_base - * @brief destroy label instance - * @param [in] label label to destroy - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelDestroy(rtLabel_t label); - -/** - * @ingroup dvrt_base - * @brief label switch instance - * @param [in] ptr address to get value compared - * @param [in] condition - * @param [in] value to compare - * @param [in] true_label goto label - * @param [in] stream to submit label_switch task - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelSwitch(void *ptr, rtCondition_t condition, uint32_t value, rtLabel_t trueLabel, - rtStream_t stream); - -/** - * @ingroup dvrt_base - * @brief goto label instance - * @param [in] label goto label - * @param [in] stream to submit label_goto task - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelGoto(rtLabel_t label, rtStream_t stream); - -/** - * @ingroup dvrt_base - * @brief name label instance - * @param [in] label instance - * @param [in] name label name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtNameLabel(rtLabel_t label, const char *name); - -/** - * @ingroup dvrt_base - * @brief label switch by index - * @param [in] ptr index value ptr - * @param [in] max index max value - * @param [in] labelInfoPtr label content info ptr - * @param [in] stream set stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelSwitchByIndex(void *ptr, uint32_t max, void *labelInfoPtr, rtStream_t stream); - -/** - * @ingroup dvrt_base - * @brief stream goto label - * @param [in] label goto label - * @param [in] stream stream to submit label_goto task - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelGotoEx(rtLabel_t label, rtStream_t stream); - -/** - * @ingroup dvrt_base - * @brief labels to dev info - * @param [in] label model label list - * @param [in] labelNumber label number - * @param [in] dst device ptr - * @param [in] dstMax dst size - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLabelListCpy(rtLabel_t *label, uint32_t labelNumber, void *dst, uint32_t dstMax); - -/** - * @ingroup dvrt_base - * @brief labels to dev info - * @param [out] label created label handle - * @param [in] stream label bind stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t 
rtLabelCreateEx(rtLabel_t *label, rtStream_t stream); - -/** - * @ingroup dvrt_base - * @brief labels to dev info - * @param [out] label created label handle - * @param [in] model label bind model - * @param [in] stream label bind stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -rtError_t rtLabelCreateExV2(rtLabel_t *label, rtModel_t model, rtStream_t stream); - -/** - * @ingroup dvrt_base - * @brief get current thread last stream id and task id - * @param [out] stream id and task id - * @param [in] null - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for input null ptr - */ -RTS_API rtError_t rtGetTaskIdAndStreamID(uint32_t *taskId, uint32_t *streamId); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_BASE_H__ diff --git a/inc/runtime/config.h b/inc/runtime/config.h deleted file mode 100644 index db93443d0..000000000 --- a/inc/runtime/config.h +++ /dev/null @@ -1,255 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_CONFIG_H__ -#define __CCE_RUNTIME_CONFIG_H__ - -#include "base.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -#define PLAT_COMBINE(arch, chip, ver) ((arch << 16) | (chip << 8) | (ver)) -#define PLAT_GET_ARCH(type) ((type >> 16) & 0xffff) -#define PLAT_GET_CHIP(type) ((type >> 8) & 0xff) -#define PLAT_GET_VER(type) (type & 0xff) - -typedef enum tagRtArchType { - ARCH_BEGIN = 0, - ARCH_V100 = ARCH_BEGIN, - ARCH_V200, - ARCH_END, -} rtArchType_t; - -typedef enum tagRtChipType { - CHIP_BEGIN = 0, - CHIP_MINI = CHIP_BEGIN, - CHIP_CLOUD, - CHIP_MDC, - CHIP_LHISI, - CHIP_DC, - CHIP_CLOUD_V2, - CHIP_END, -} rtChipType_t; - -typedef enum tagRtAicpuScheType { - SCHEDULE_SOFTWARE = 0, /* Software Schedule */ - SCHEDULE_SOFTWARE_OPT, - SCHEDULE_HARDWARE, /* HWTS Schedule */ -} rtAicpuScheType; - -typedef enum tagRtDeviceCapabilityType { - RT_SCHEDULE_SOFTWARE = 0, // SoftWare Schedule - RT_SCHEDULE_SOFTWARE_OPT, - RT_SCHEDULE_HARDWARE, // HWTS Schedule - RT_AICPU_BLOCKING_OP_NOT_SUPPORT, - RT_AICPU_BLOCKING_OP_SUPPORT, // 1910/1980/1951 ts support AICPU blocking operation - RT_MODE_NO_FFTS, - RT_MODE_FFTS, - RT_MODE_FFTS_PLUS -} rtDeviceCapabilityType; - -typedef enum tagRtVersion { - VER_BEGIN = 0, - VER_NA = VER_BEGIN, - VER_ES, - VER_CS, - VER_SD3403, - VER_END, -} rtVersion_t; - -/* match rtChipType_t */ -typedef enum tagRtPlatformType { - PLATFORM_BEGIN = 0, - PLATFORM_MINI_V1 = PLATFORM_BEGIN, - PLATFORM_CLOUD_V1, - PLATFORM_MINI_V2, - PLATFORM_LHISI_ES, - PLATFORM_LHISI_CS, - PLATFORM_DC, - PLATFORM_CLOUD_V2, - PLATFORM_LHISI_SD3403, - PLATFORM_END, -} rtPlatformType_t; - -typedef enum tagRtCubeFracMKNFp16 { - RT_CUBE_MKN_FP16_2_16_16 = 0, - RT_CUBE_MKN_FP16_4_16_16, - RT_CUBE_MKN_FP16_16_16_16, - RT_CUBE_MKN_FP16_Default, -} rtCubeFracMKNFp16_t; - -typedef enum tagRtCubeFracMKNInt8 { - RT_CUBE_MKN_INT8_2_32_16 = 0, - 
RT_CUBE_MKN_INT8_4_32_4, - RT_CUBE_MKN_INT8_4_32_16, - RT_CUBE_MKN_INT8_16_32_16, - RT_CUBE_MKN_INT8_Default, -} rtCubeFracMKNInt8_t; - -typedef enum tagRtVecFracVmulMKNFp16 { - RT_VEC_VMUL_MKN_FP16_1_16_16 = 0, - RT_VEC_VMUL_MKN_FP16_Default, -} rtVecFracVmulMKNFp16_t; - -typedef enum tagRtVecFracVmulMKNInt8 { - RT_VEC_VMUL_MKN_INT8_1_32_16 = 0, - RT_VEC_VMUL_MKN_INT8_Default, -} rtVecFracVmulMKNInt8_t; - -typedef struct tagRtAiCoreSpec { - uint32_t cubeFreq; - uint32_t cubeMSize; - uint32_t cubeKSize; - uint32_t cubeNSize; - rtCubeFracMKNFp16_t cubeFracMKNFp16; - rtCubeFracMKNInt8_t cubeFracMKNInt8; - rtVecFracVmulMKNFp16_t vecFracVmulMKNFp16; - rtVecFracVmulMKNInt8_t vecFracVmulMKNInt8; -} rtAiCoreSpec_t; - -typedef struct tagRtAiCoreRatesPara { - uint32_t ddrRate; - uint32_t l2Rate; - uint32_t l2ReadRate; - uint32_t l2WriteRate; - uint32_t l1ToL0ARate; - uint32_t l1ToL0BRate; - uint32_t l0CToUBRate; - uint32_t ubToL2; - uint32_t ubToDDR; - uint32_t ubToL1; -} rtAiCoreMemoryRates_t; - -typedef struct tagRtMemoryConfig { - uint32_t flowtableSize; - uint32_t compilerSize; -} rtMemoryConfig_t; - -typedef struct tagRtPlatformConfig { - uint32_t platformConfig; -} rtPlatformConfig_t; - -typedef enum tagRTTaskTimeoutType { - RT_TIMEOUT_TYPE_OP_WAIT = 0, - RT_TIMEOUT_TYPE_OP_EXECUTE, -} rtTaskTimeoutType_t; - -/** - * @ingroup - * @brief get AI core count - * @param [in] aiCoreCnt - * @return aiCoreCnt - */ -RTS_API rtError_t rtGetAiCoreCount(uint32_t *aiCoreCnt); - -/** - * @ingroup - * @brief get AI cpu count - * @param [in] aiCpuCnt - * @return aiCpuCnt - */ -RTS_API rtError_t rtGetAiCpuCount(uint32_t *aiCpuCnt); - -/** - * @ingroup - * @brief get AI core frequency - * @param [in] aiCoreSpec - * @return aiCoreSpec - */ -RTS_API rtError_t rtGetAiCoreSpec(rtAiCoreSpec_t *aiCoreSpec); - -/** - * @ingroup - * @brief AI get core band Info - * @param [in] aiCoreMemoryRates - * @return aiCoreMemoryRates - */ -RTS_API rtError_t rtGetAiCoreMemoryRates(rtAiCoreMemoryRates_t *aiCoreMemoryRates); - -/** - * @ingroup - * @brief AI get core buffer Info,FlowTable Size,Compiler Size - * @param [in] memoryConfig - * @return memoryConfig - */ -RTS_API rtError_t rtGetMemoryConfig(rtMemoryConfig_t *memoryConfig); - -/** - * @ingroup - * @brief get l2 buffer Info,virtual baseaddr,Size - * @param [in] stream - * @return RT_ERROR_NONE for ok, errno for failed - */ -RTS_API rtError_t rtMemGetL2Info(rtStream_t stream, void **ptr, uint32_t *size); - -/** - * @ingroup - * @brief get runtime version. The version is returned as (1000 major + 10 minor). For example, RUNTIME 9.2 would be - * represented by 9020. - * @param [out] runtimeVersion - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetRuntimeVersion(uint32_t *runtimeVersion); - - -/** - * @ingroup - * @brief get device feature ability by device id, such as task schedule ability. - * @param [in] deviceId - * @param [in] moduleType - * @param [in] featureType - * @param [out] value - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetDeviceCapability(int32_t deviceId, int32_t moduleType, int32_t featureType, int32_t *value); - -/** - * @ingroup - * @brief set event wait task timeout time. - * @param [in] timeout - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetOpWaitTimeOut(uint32_t timeout); - -/** - * @ingroup - * @brief set op execute task timeout time. 
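The version encoding documented above for rtGetRuntimeVersion (1000 * major + 10 * minor, e.g. 9020 for runtime 9.2) can be unpacked as in the following sketch:

    uint32_t encoded = 0U;
    if (rtGetRuntimeVersion(&encoded) == RT_ERROR_NONE) {
        uint32_t major = encoded / 1000U;          // 9020 -> 9
        uint32_t minor = (encoded % 1000U) / 10U;  // 9020 -> 2
        (void)major;
        (void)minor;
    }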
- * @param [in] timeout - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetOpExecuteTimeOut(uint32_t timeout); - -/** - * @ingroup - * @brief get is Heterogeneous - * @param [out] heterogeneous=1 Heterogeneous Mode: read isHeterogeneous=1 in ini file. - * @param [out] heterogeneous=0 NOT Heterogeneous Mode: - * 1: not found init file, 2: error when reading ini, 3:Heterogeneous value is not 1 - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtGetIsHeterogenous(int32_t *heterogeneous); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_STREAM_H__ diff --git a/inc/runtime/context.h b/inc/runtime/context.h deleted file mode 100644 index e95d4c894..000000000 --- a/inc/runtime/context.h +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_CONTEXT_H__ -#define __CCE_RUNTIME_CONTEXT_H__ - -#include "base.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -/** - * @ingroup rt_context - * @brief runtime context handle. - */ -typedef void *rtContext_t; - -typedef enum tagDryRunFlag { - RT_DRYRUN_FLAG_FALSE = 0, - RT_DRYRUN_FLAG_TRUE = 1, -} rtDryRunFlag_t; - -typedef enum tagCtxMode { - RT_CTX_NORMAL_MODE = 0, - RT_CTX_GEN_MODE = 1, -} rtCtxMode_t; - -typedef struct tagRtGroupInfo { - int32_t groupId; - uint32_t flag; - uint32_t aicoreNum; - uint32_t aicpuNum; - uint32_t aivectorNum; - uint32_t sdmaNum; - uint32_t activeStreamNum; - void *extrPtr; -} rtGroupInfo_t; - -/** - * @ingroup rt_context - * @brief create context and associates it with the calling thread - * @param [out] ctx created context - * @param [in] flags context creation flag. set to 0. - * @param [in] device device to create context on - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtCtxCreate(rtContext_t *ctx, uint32_t flags, int32_t device); - -/** - * @ingroup rt_context - * @brief create context and associates it with the calling thread - * @param [out] ctx created context - * @param [in] flags context creation flag. set to 0. - * @param [in] device device to create context on - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtCtxCreateEx(rtContext_t *ctx, uint32_t flags, int32_t device); - -/** - * @ingroup rt_context - * @brief destroy context instance - * @param [in] ctx context to destroy - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtCtxDestroy(rtContext_t ctx); - -/** - * @ingroup rt_context - * @brief destroy context instance - * @param [in] ctx context to destroy - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtCtxDestroyEx(rtContext_t ctx); - -/** - * @ingroup rt_context - * @brief binds context to the calling CPU thread. - * @param [in] ctx context to bind. if NULL, unbind current context. 
- * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtCtxSetCurrent(rtContext_t ctx); - -/** - * @ingroup rt_context - * @brief block for a context's tasks to complete - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtCtxSynchronize(void); - -/** - * @ingroup rt_context - * @brief returns the context bound to the calling CPU thread. - * @param [out] ctx returned context - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtCtxGetCurrent(rtContext_t *ctx); - -/** - * @ingroup rt_context - * @brief returns the primary context of device. - * @param [out] ctx returned context - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtGetPriCtxByDeviceId(int32_t device, rtContext_t *ctx); - -/** - * @ingroup rt_context - * @brief returns the device ID for the current context - * @param [out] device returned device id - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtCtxGetDevice(int32_t *device); - -/** - * @ingroup - * @brief set group id - * @param [in] groupid - * @return RT_ERROR_NONE for ok, errno for failed - */ -RTS_API rtError_t rtSetGroup(int32_t groupId); - -/** - * @ingroup - * @brief get group info - * @param [in] groupid count - * @return RT_ERROR_NONE for ok, errno for failed - */ -RTS_API rtError_t rtGetGroupInfo(int32_t groupId, rtGroupInfo_t *groupInfo, uint32_t count); - -/** - * @ingroup - * @brief get group count - * @param [in] groupid count - * @return RT_ERROR_NONE for ok, errno for failed - */ -RTS_API rtError_t rtGetGroupCount(uint32_t *count); - -/** - * @ingroup rt_context - * @brief set context INF mode - * @param [in] mode - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtSetCtxINFMode(bool mode); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - - -#endif // __CCE_RUNTIME_CONTEXT_H__ diff --git a/inc/runtime/dev.h b/inc/runtime/dev.h deleted file mode 100644 index 49f6a3f66..000000000 --- a/inc/runtime/dev.h +++ /dev/null @@ -1,364 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
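Taken together, the context calls above follow a create / bind / use / destroy pattern per CPU thread. A compressed, illustrative sketch; the flags argument must be 0 as noted above, device id 0 is an assumption, and error handling is abbreviated:

    static void RunWithOwnContext(void) {
        rtContext_t ctx = NULL;
        if (rtCtxCreate(&ctx, 0U, 0) != RT_ERROR_NONE) {  // flags = 0, device 0
            return;
        }
        // rtCtxCreate binds ctx to the calling thread; a different thread would call rtCtxSetCurrent(ctx).
        // ... submit work owned by this context ...
        (void)rtCtxSynchronize();                         // wait for the context's outstanding tasks
        (void)rtCtxDestroy(ctx);
    }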
-*/ - -#ifndef __CCE_RUNTIME_DEVICE_H__ -#define __CCE_RUNTIME_DEVICE_H__ - -#include "base.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -#define RT_CAPABILITY_SUPPORT (0x1) -#define RT_CAPABILITY_NOT_SUPPORT (0x0) - -typedef struct tagRTDeviceInfo { - uint8_t env_type; // 0: FPGA 1: EMU 2: ESL - uint32_t ctrl_cpu_ip; - uint32_t ctrl_cpu_id; - uint32_t ctrl_cpu_core_num; - uint32_t ctrl_cpu_endian_little; - uint32_t ts_cpu_core_num; - uint32_t ai_cpu_core_num; - uint32_t ai_core_num; - uint32_t ai_core_freq; - uint32_t ai_cpu_core_id; - uint32_t ai_core_id; - uint32_t aicpu_occupy_bitmap; - uint32_t hardware_version; - uint32_t ts_num; -} rtDeviceInfo_t; - -typedef enum tagRtRunMode { - RT_RUN_MODE_OFFLINE = 0, - RT_RUN_MODE_ONLINE = 1, - RT_RUN_MODE_AICPU_SCHED = 2, - RT_RUN_MODE_RESERVED -} rtRunMode; - -typedef enum tagRtAicpuDeployType { - AICPU_DEPLOY_CROSS_OS = 0x0, - AICPU_DEPLOY_CROSS_PROCESS = 0x1, - AICPU_DEPLOY_CROSS_THREAD = 0x2, - AICPU_DEPLOY_RESERVED -} rtAicpuDeployType_t; - -typedef enum tagRtFeatureType { - FEATURE_TYPE_MEMCPY = 0, - FEATURE_TYPE_RSV -} rtFeatureType_t; - -typedef enum tagMemcpyInfo { - MEMCPY_INFO_SUPPORT_ZEROCOPY = 0, - MEMCPY_INFO_RSV -} rtMemcpyInfo_t; - -/** - * @ingroup dvrt_dev - * @brief get total device number. - * @param [in|out] count the device number - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetDeviceCount(int32_t *count); -/** - * @ingroup dvrt_dev - * @brief get device ids - * @param [in|out] get details of device ids - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_DRV_ERR for error - */ -RTS_API rtError_t rtGetDeviceIDs(uint32_t *devices, uint32_t len); - -/** - * @ingroup dvrt_dev - * @brief get device infomation. - * @param [in] device the device id - * @param [in] moduleType module type - typedef enum { - MODULE_TYPE_SYSTEM = 0, system info - MODULE_TYPE_AICPU, aicpu info - MODULE_TYPE_CCPU, ccpu_info - MODULE_TYPE_DCPU, dcpu info - MODULE_TYPE_AICORE, AI CORE info - MODULE_TYPE_TSCPU, tscpu info - MODULE_TYPE_PCIE, PCIE info - } DEV_MODULE_TYPE; - * @param [in] infoType info type - typedef enum { - INFO_TYPE_ENV = 0, - INFO_TYPE_VERSION, - INFO_TYPE_MASTERID, - INFO_TYPE_CORE_NUM, - INFO_TYPE_OS_SCHED, - INFO_TYPE_IN_USED, - INFO_TYPE_ERROR_MAP, - INFO_TYPE_OCCUPY, - INFO_TYPE_ID, - INFO_TYPE_IP, - INFO_TYPE_ENDIAN, - } DEV_INFO_TYPE; - * @param [out] value the device info - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_DRV_ERR for error - */ -RTS_API rtError_t rtGetDeviceInfo(uint32_t deviceId, int32_t moduleType, int32_t infoType, int64_t *value); - -/** - * @ingroup dvrt_dev - * @brief set target device for current thread - * @param [int] device the device id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetDevice(int32_t device); - -/** - * @ingroup dvrt_dev - * @brief set target device for current thread - * @param [int] device the device id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetDeviceEx(int32_t device); - -/** - * @ingroup dvrt_dev - * @brief get Index by phyId. 
- * @param [in] phyId the physical device id - * @param [out] devIndex the logic device id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetDeviceIndexByPhyId(uint32_t phyId, uint32_t *devIndex); - -/** - * @ingroup dvrt_dev - * @brief get phyId by Index. - * @param [in] devIndex the logic device id - * @param [out] phyId the physical device id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetDevicePhyIdByIndex(uint32_t devIndex, uint32_t *phyId); - -/** - * @ingroup dvrt_dev - * @brief enable direction:devIdDes---->phyIdSrc. - * @param [in] devIdDes the logical device id - * @param [in] phyIdSrc the physical device id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtEnableP2P(uint32_t devIdDes, uint32_t phyIdSrc, uint32_t flag); - -/** - * @ingroup dvrt_dev - * @brief disable direction:devIdDes---->phyIdSrc. - * @param [in] devIdDes the logical device id - * @param [in] phyIdSrc the physical device id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDisableP2P(uint32_t devIdDes, uint32_t phyIdSrc); - -/** - * @ingroup dvrt_dev - * @brief get cability of P2P omemry copy betwen device and peeredevic. - * @param [in] device the logical device id - * @param [in] peerDevice the physical device id - * @param [outv] *canAccessPeer 1:enable 0:disable - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDeviceCanAccessPeer(int32_t *canAccessPeer, uint32_t device, uint32_t peerDevice); - -/** - * @ingroup dvrt_dev - * @brief get status - * @param [in] devIdDes the logical device id - * @param [in] phyIdSrc the physical device id - * @param [in|out] status status value - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetP2PStatus(uint32_t devIdDes, uint32_t phyIdSrc, uint32_t *status); - -/** - * @ingroup dvrt_dev - * @brief get value of current thread - * @param [in|out] pid value of pid - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtDeviceGetBareTgid(uint32_t *pid); - -/** - * @ingroup dvrt_dev - * @brief get target device of current thread - * @param [in|out] device the device id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetDevice(int32_t *device); - -/** - * @ingroup dvrt_dev - * @brief reset all opened device - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDeviceReset(int32_t device); - -/** - * @ingroup dvrt_dev - * @brief reset opened device - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDeviceResetEx(int32_t device); - -/** - * @ingroup dvrt_dev - * @brief get total device infomation. 
- * @param [in] device the device id - * @param [in] type limit type RT_LIMIT_TYPE_LOW_POWER_TIMEOUT=0 - * @param [in] value limit value - * @param [out] info the device info - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDeviceSetLimit(int32_t device, rtLimitType_t type, uint32_t value); - -/** - * @ingroup dvrt_dev - * @brief Wait for compute device to finish - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDeviceSynchronize(void); - -/** - * @ingroup dvrt_dev - * @brief get priority range of current device - * @param [in|out] leastPriority least priority - * @param [in|out] greatestPriority greatest priority - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDeviceGetStreamPriorityRange(int32_t *leastPriority, int32_t *greatestPriority); - -/** - * @ingroup dvrt_dev - * @brief Set exception handling callback function - * @param [in] callback rtExceptiontype - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetExceptCallback(rtErrorCallback callback); - -/** - * @ingroup dvrt_dev - * @brief Setting Scheduling Type of Graph - * @param [in] tsId the ts id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetTSDevice(uint32_t tsId); - -/** - * @ingroup dvrt_dev - * @brief init aicpu executor - * @param [out] runtime run mode - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_DRV_ERR for can not get run mode - */ -RTS_API rtError_t rtGetRunMode(rtRunMode *mode); - -/** - * @ingroup dvrt_dev - * @brief get aicpu deploy - * @param [out] aicpu deploy - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_DRV_ERR for can not get aicpu deploy - */ -RTS_API rtError_t rtGetAicpuDeploy(rtAicpuDeployType_t *deployType); - -/** - * @ingroup dvrt_dev - * @brief set chipType - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtSetSocVersion(const char *version); - -/** - * @ingroup dvrt_dev - * @brief get chipType - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtGetSocVersion(char *version, const uint32_t maxLen); - -/** - * @ingroup dvrt_dev - * @brief get status - * @param [in] devId the logical device id - * @param [in] otherDevId the other logical device id - * @param [in] infoType info type - * @param [in|out] value pair info - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtGetPairDevicesInfo(uint32_t devId, uint32_t otherDevId, int32_t infoType, int64_t *value); - -/** - * @ingroup dvrt_dev - * @brief get capability infomation. 
- * @param [in] featureType feature type - typedef enum tagRtFeatureType { - FEATURE_TYPE_MEMCPY = 0, - FEATURE_TYPE_RSV, - } rtFeatureType_t; - * @param [in] featureInfo info type - typedef enum tagMemcpyInfo { - MEMCPY_INFO_SUPPORT_ZEROCOPY = 0, - MEMCPY_INFO _RSV, - } rtMemcpyInfo_t; - * @param [out] value the capability info RT_CAPABILITY_SUPPORT or RT_CAPABILITY_NOT_SUPPORT - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtGetRtCapability(rtFeatureType_t featureType, int32_t featureInfo, int64_t *value); - -/** - * @ingroup dvrt_dev - * @brief set target device for current thread - * @param [int] device the device id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetDeviceWithoutTsd(int32_t device); - -/** - * @ingroup dvrt_dev - * @brief reset all opened device - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDeviceResetWithoutTsd(int32_t device); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_DEVICE_H__ diff --git a/inc/runtime/dvfsprofile.h b/inc/runtime/dvfsprofile.h deleted file mode 100644 index 6e4516955..000000000 --- a/inc/runtime/dvfsprofile.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_DVFSPROFILE_H__ -#define __CCE_RUNTIME_DVFSPROFILE_H__ - -#include "base.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -typedef enum dvfsProfileMode { - DVFS_PROFILE_PERFORMANCE_PRIORITY, - DVFS_PROFILE_BALANCE_PRIORITY, - DVFS_PROFILE_POWER_PRIORITY, - DVFS_PROFILE_PRIORITY_MAX -} DvfsProfileMode; - -/** - * @ingroup dvrt_dvfsprofile - * @brief Set the performance mode of the device - * @param [in] mode dvfsProfileMode - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetDvfsProfile(DvfsProfileMode mode); - -/** - * @ingroup dvrt_dvfsprofile - * @brief Set the performance mode of the device - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for invalid value - */ -RTS_API rtError_t rtUnsetDvfsProfile(); - -/** - * @ingroup dvrt_dvfsprofile - * @brief Get the current performance mode of the device - * @param [in|out] pmode dvfsProfileMode type pointer - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetDvfsProfile(DvfsProfileMode *pmode); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_PROFILE_H__ diff --git a/inc/runtime/event.h b/inc/runtime/event.h deleted file mode 100644 index 41e611ea7..000000000 --- a/inc/runtime/event.h +++ /dev/null @@ -1,246 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_EVENT_H__ -#define __CCE_RUNTIME_EVENT_H__ - -#include "base.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -/** - * @ingroup event_flags - * @brief event op bit flags - */ -#define RT_EVENT_DEFAULT (0x00) -#define RT_EVENT_WITH_FLAG (0x01) - -/** - * @ingroup dvrt_event - * @brief create event instance - * @param [in|out] event created event - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtEventCreate(rtEvent_t *event); - -/** - * @ingroup dvrt_event - * @brief create event instance with flag - * @param [in|out] event created event flag event op flag - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtEventCreateWithFlag(rtEvent_t *event, uint32_t flag); - -/** - * @ingroup dvrt_event - * @brief destroy event instance - * @param [in] event event to destroy - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtEventDestroy(rtEvent_t event); - -/** - * @ingroup dvrt_event - * @brief get event id - * @param [in] event_ event to be get - * @param [in|out] event_id event_id id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetEventID(rtEvent_t event, uint32_t *eventId); - -/** - * @ingroup dvrt_event - * @brief event record - * @param [int] event event to record - * @param [int] stream stream handle - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtEventRecord(rtEvent_t event, rtStream_t stream); - -/** - * @ingroup dvrt_event - * @brief event reset - * @param [int] event event to reset - * @param [int] stream stream handle - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtEventReset(rtEvent_t event, rtStream_t stream); - -/** - * @ingroup dvrt_event - * @brief wait event to be complete - * @param [in] event event to wait - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtEventSynchronize(rtEvent_t event); - -/** - * @ingroup dvrt_event - * @brief Queries an event's status - * @param [in] event event to query - * @return RT_ERROR_NONE for complete - * @return RT_ERROR_EVENT_NOT_COMPLETE for not complete - */ -RTS_API rtError_t rtEventQuery(rtEvent_t event); - -/** - * @ingroup dvrt_event - * @brief computes the elapsed time between events. - * @param [in] time time between start and end in ms - * @param [in] start starting event - * @param [in] end ending event - * @return RT_ERROR_NONE for ok, errno for failed - */ -RTS_API rtError_t rtEventElapsedTime(float *time, rtEvent_t start, rtEvent_t end); - -/** - * @ingroup dvrt_event - * @brief get the elapsed time from a event after event recorded. 
- * @param [in] time time in ms - * @param [in] event event handle - * @return RT_ERROR_NONE for ok, errno for failed - */ -RTS_API rtError_t rtEventGetTimeStamp(uint64_t *time, rtEvent_t event); - -/** - * @ingroup dvrt_event - * @brief name an event - * @param [in] event event to be named - * @param [in] name identification name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input of event, name - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtNameEvent(rtEvent_t event, const char *name); - -/** - * @ingroup dvrt_event - * @brief Create a notify - * @param [in] device_id device id - * @param [in|out] notify_ notify to be created - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtNotifyCreate(int32_t deviceId, rtNotify_t *notify); - -/** - * @ingroup dvrt_event - * @brief Destroy a notify - * @param [in] notify_ notify to be destroyed - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtNotifyDestroy(rtNotify_t notify); - -/** - * @ingroup dvrt_event - * @brief Record a notify - * @param [in] notify_ notify to be recorded - * @param [in] stream_ input stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_STREAM_CONTEXT for stream is not in current ctx - */ -RTS_API rtError_t rtNotifyRecord(rtNotify_t notify, rtStream_t stream); - -/** - * @ingroup dvrt_event - * @brief Wait for a notify - * @param [in] notify_ notify to be wait - * @param [in] stream_ input stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_STREAM_CONTEXT for stream is not in current ctx - */ -RTS_API rtError_t rtNotifyWait(rtNotify_t notify, rtStream_t stream); - -/** - * @ingroup dvrt_event - * @brief Name a notify - * @param [in] notify_ notify to be named - * @param [in|out] name identification name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtNameNotify(rtNotify_t notify, const char *name); - -/** - * @ingroup dvrt_event - * @brief get notify id - * @param [in] notify_ notify to be get - * @param [in|out] notify_id notify id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetNotifyID(rtNotify_t notify, uint32_t *notifyId); - -/** - * @ingroup dvrt_event - * @brief Set a notify to IPC notify - * @param [in] notify_ notify to be set to IPC notify - * @param [in] name identification name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input of - */ -RTS_API rtError_t rtIpcSetNotifyName(rtNotify_t notify, char *name, uint32_t len); - -/** - * @ingroup dvrt_event - * @brief Open IPC notify - * @param [out] notify the opened notify - * @param [in] name identification name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtIpcOpenNotify(rtNotify_t *notify, const char *name); - -/** - * @ingroup dvrt_event - * @brief Get the physical address corresponding to notify - * @param [in] notify notify to be queried - * @param [in] devAddrOffset device physical address offset - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtNotifyGetAddrOffset(rtNotify_t notify, uint64_t *devAddrOffset); 
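For orientation, a minimal usage sketch of the event declarations deleted above follows. It is not part of the original header: it assumes the removed headers are reachable as "runtime/event.h", that a valid rtStream_t has already been created elsewhere, and it relies only on the signatures shown above (rtEventCreate, rtEventRecord, rtEventSynchronize, rtEventElapsedTime, rtEventDestroy).

/*
 * Illustrative sketch only (not shipped code): timing work submitted to a
 * stream with the deleted event APIs. Error handling is reduced to early
 * returns; the include path for the removed header is an assumption.
 */
#include "runtime/event.h"

static rtError_t TimeStreamSection(rtStream_t stream, float *elapsedMs)
{
    rtEvent_t start;
    rtEvent_t stop;
    rtError_t ret = rtEventCreate(&start);
    if (ret != RT_ERROR_NONE) {
        return ret;
    }
    ret = rtEventCreate(&stop);
    if (ret != RT_ERROR_NONE) {
        (void)rtEventDestroy(start);
        return ret;
    }

    (void)rtEventRecord(start, stream);   /* mark the beginning on the stream */
    /* ... enqueue kernels or async memcpys on `stream` here ... */
    (void)rtEventRecord(stop, stream);    /* mark the end on the same stream */

    ret = rtEventSynchronize(stop);       /* block until the stop event completes */
    if (ret == RT_ERROR_NONE) {
        ret = rtEventElapsedTime(elapsedMs, start, stop);  /* elapsed time in ms */
    }
    (void)rtEventDestroy(start);
    (void)rtEventDestroy(stop);
    return ret;
}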
- -/** - * @ingroup dvrt_event - * @brief Ipc set notify pid - * @param [in] name name to be queried - * @param [in] pid process id - * @param [in] num length of pid[] - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtSetIpcNotifyPid(const char *name, int32_t pid[], int num); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_EVENT_H__ diff --git a/inc/runtime/kernel.h b/inc/runtime/kernel.h deleted file mode 100644 index dc16ca585..000000000 --- a/inc/runtime/kernel.h +++ /dev/null @@ -1,566 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_KERNEL_H__ -#define __CCE_RUNTIME_KERNEL_H__ - -#include "base.h" -#include "stream.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -/** - * @ingroup rt_kernel - * @brief shared memory data control - */ -typedef struct tagRtSmData { - uint64_t L2_mirror_addr; // preload or swap source address - uint32_t L2_data_section_size; // every data size - uint8_t L2_preload; // 1 - preload from mirrorAddr, 0 - no preload - uint8_t modified; // 1 - data will be modified by kernel, 0 - no modified - uint8_t priority; // data priority - int8_t prev_L2_page_offset_base; // remap source section offset - uint8_t L2_page_offset_base; // remap destination section offset - uint8_t L2_load_to_ddr; // 1 - need load out, 0 - no need - uint8_t reserved[2]; // reserved -} rtSmData_t; - -/** - * @ingroup rt_kernel - * @brief shared memory description - */ -typedef struct tagRtSmCtrl { - rtSmData_t data[8]; // data description - uint64_t size; // max page Num - uint8_t remap[64]; /* just using for static remap mode, default:0xFF - array index: virtual l2 page id, array value: physic l2 page id */ - uint8_t l2_in_main; // 0-DDR, 1-L2, default:0xFF - uint8_t reserved[3]; -} rtSmDesc_t; - -typedef rtSmDesc_t rtL2Ctrl_t; - -/** - * @ingroup rt_kernel - * @brief device binary type - */ -typedef struct tagRtDevBinary { - uint32_t magic; // magic number - uint32_t version; // version of binary - const void *data; // binary data - uint64_t length; // binary length -} rtDevBinary_t; - -/** - * @ingroup rt_kernel - * @brief function mode type - */ -#define ONLINE_PROF_MAX_PMU_NUM (8) - -typedef struct ProfilefDataInfo { - const void *stubFunc; - uint32_t blockDim; - const void *args; - uint32_t argsSize; - rtSmDesc_t *smDesc; - rtStream_t stream; - uint64_t totalcycle; - uint64_t ovcycle; - uint64_t pmu_cnt[ONLINE_PROF_MAX_PMU_NUM]; -} rtProfDataInfo_t; - -/** - * @ingroup rt_kernel - * @brief function mode type - */ -typedef enum { - FUNC_MODE_NORMAL = 0, - FUNC_MODE_PCTRACE_USERPROFILE_RECORDLOOP, - FUNC_MODE_PCTRACE_USERPROFILE_SKIPLOOP, - FUNC_MODE_PCTRACE_CYCLECNT_RECORDLOOP, - FUNC_MODE_PCTRACE_CYCLECNT_SKIPLOOP, - FUNC_MODE_BUTT -} rtFuncModeType_t; - -/** - * @ingroup rt_kernel - * @brief kernel info 
- */ -typedef struct rtKernelInfo { - uint64_t task_offset; // kernel offset in module - /* flowtable */ - void *arg; // launch kernel arg - uint32_t arg_size; - /* module */ - void *module_addr; // module::baseaddr_ - uint32_t module_size; -} *rtKernelInfo_t; - -/** - * @ingroup rt_KernelConfigDump - * @brief device dump type - */ -typedef enum tagRtDumpKind { - RT_DATA_DUMP_KIND_INVALID = -1, - RT_DATA_DUMP_KIND_DUMP = 0, - RT_DATA_DUMP_KIND_RESERVED -} rtDumpKind_t; - -/** - * @ingroup rt_kernel - * @brief report callback - */ -typedef rtError_t (*rtKernelReportCallback)(rtStream_t stream, rtKernelInfo_t kernelInfo); - -/** - * @ingroup rt_kernel - * @brief stream report callback - */ -typedef void (*rtCallback_t)(void *fnData); - -/** - * @ingroup rt_kernel - * @brief magic number of plain binary for aicore - */ -#define RT_DEV_BINARY_MAGIC_PLAIN 0xabceed50 - -/** - * @ingroup rt_kernel - * @brief magic number of plain binary for aicpu - */ -#define RT_DEV_BINARY_MAGIC_PLAIN_AICPU 0xabceed51 - -/** - * @ingroup rt_kernel - * @brief magic number of plain binary for aivector - */ -#define RT_DEV_BINARY_MAGIC_PLAIN_AIVEC 0xabceed52 - -/** - * @ingroup rt_kernel - * @brief magic number of elf binary for aicore - */ -#define RT_DEV_BINARY_MAGIC_ELF 0x43554245 - -/** - * @ingroup rt_kernel - * @brief magic number of elf binary for aicpu - */ -#define RT_DEV_BINARY_MAGIC_ELF_AICPU 0x41415243 - -/** - * @ingroup rt_kernel - * @brief magic number of elf binary for aivector - */ -#define RT_DEV_BINARY_MAGIC_ELF_AIVEC 0x41415246 - -/** - * @ingroup rt_kernel - * @brief magic number of elf binary for aicube - */ -#define RT_DEV_BINARY_MAGIC_ELF_AICUBE 0x41415247 - -/** - * @ingroup rt_kernel - * @brief magic number of elf binary for aivector - */ -#define RT_DEV_BINARY_MAGIC_ELF_AIVECTOR 0x41415248 - -/** - * @ingroup rt_kernel_flags - * @brief kernel op bit flags - */ -#define RT_KERNEL_DEFAULT (0x00) -#define RT_KERNEL_CONVERT (0x01) -#define RT_KERNEL_DUMPFLAG (0x02) -#define RT_FUSION_KERNEL_DUMPFLAG (0x04) -#define RT_KERNEL_CUSTOM_AICPU (0x08) - -/** - * @ingroup rt_kernel - * @brief kernel L1 Fusion Dump bit flags - */ -#define RT_DDR_ADDR (0x0) - -/** - * @ingroup rt_kernel - * @brief register device binary - * @param [in] bin device binary description - * @param [out] handle device binary handle - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDevBinaryRegister(const rtDevBinary_t *bin, void **handle); - -/** - * @ingroup rt_kernel - * @brief register fast memeory device binary - * @param [in] handle device binary handle - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtBinaryRegisterToFastMemory(void *handle); - -/** - * @ingroup rt_kernel - * @brief unregister device binary - * @param [in] handle device binary handle - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDevBinaryUnRegister(void *handle); - -/** - * @ingroup rt_kernel - * @brief register device binary metadata - * @param [in] handle device binary description - * @param [in] metadata device binary metadata - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtMetadataRegister(void *handle, const char *metadata); - -/** - * @ingroup rt_kernel - * @brief register device binary dependency - * @param [in] mHandle master device binary description - * @param [in] sHandle slave device binary 
description - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDependencyRegister(void *mHandle, void *sHandle); - -/** - * @ingroup rt_kernel - * @brief register device function - * @param [in] binHandle device binary handle - * @param [in] stubFunc stub function - * @param [in] stubName stub function name - * @param [in] devFunc device function description. symbol name or address - * offset, depending binary type. - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtFunctionRegister(void *binHandle, const void *stubFunc, const char *stubName, const void *devFunc, - uint32_t funcMode); - -/** - * @ingroup rt_kernel - * @brief find stub function by name - * @param [in] stubName stub function name - * @param [out] stubFunc stub function - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetFunctionByName(const char *stubName, void **stubFunc); - -/** - * @ingroup rt_kernel - * @brief find addr by stub func - * @param [in] stubFunc stub function - * @param [out] addr - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetAddrByFun(const void *stubFunc, void **addr); -/** - * @ingroup rt_kernel - * @brief query registered or not by stubName - * @param [in] stubName stub function name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtQueryFunctionRegistered(const char *stubName); - -/** - * @ingroup rt_kernel - * @brief config data dump - * @param [in] dumpSizePerBlock dump size - * @param [in] blockDim block dimentions - * @param [in] dumpBaseAddr dump base address - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtKernelConfigDump(uint32_t kind, uint32_t dumpSizePerBlock, uint32_t blockDim, void **dumpBaseAddr, - rtStream_t stream); - -/** - * @ingroup rt_kernel - * @brief launch kernel to device - * @param [in] stubFunc stub function - * @param [in] blockDim block dimentions - * @param [in] args argments address for kernel function - * @param [in] argsSize argements size - * @param [in] smDesc shared memory description - * @param [in] stream associated stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtKernelLaunch(const void *stubFunc, uint32_t blockDim, void *args, uint32_t argsSize, - rtSmDesc_t *smDesc, rtStream_t stream); - -/** - * @ingroup rt_kernel - * @brief launch kernel to device - * @param [in] stubFunc stub function - * @param [in] blockDim block dimentions - * @param [in] args argments address for kernel function - * @param [in] argsSize argements size - * @param [in] smDesc shared memory description - * @param [in] stream associated stream - * @param [in] flag dump flag - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtKernelLaunchWithFlag(const void *stubFunc, uint32_t blockDim, void *args, uint32_t argsSize, - rtSmDesc_t *smDesc, rtStream_t stream, uint32_t flags); - -/** - * @ingroup rt_kernel - * @brief launch kernel to device - * @param [in] args argments address for kernel function - * @param [in] argsSize argements size - * @param [in] flags launch flags - * @param [in] stream associated stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t 
rtKernelLaunchEx(void *args, uint32_t argsSize, uint32_t flags, rtStream_t stream); - -/** - * @ingroup rt_kernel - * @brief launch cpu kernel to device - * @param [in] soName so name - * @param [in] kernelName kernel name - * @param [in] blockDim block dimentions - * @param [in] args argments address for kernel function - * @param [in] argsSize argments size - * @param [in] smDesc shared memory description - * @param [in] stream associated stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtCpuKernelLaunch(const void *soName, const void *kernelName, uint32_t blockDim, const void *args, - uint32_t argsSize, rtSmDesc_t *smDesc, rtStream_t stream); - -/** - * @ingroup rt_kernel - * @brief launch cpu kernel to device with dump identifier - * @param [in] soName so name - * @param [in] kernelName kernel name - * @param [in] blockDim block dimentions - * @param [in] args argments address for kernel function - * @param [in] argsSize argments size - * @param [in] smDesc shared memory description - * @param [in] stream associated stream - * @param [in] flag dump flag or others function flag - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtCpuKernelLaunchWithFlag(const void *soName, const void *kernelName, uint32_t blockDim, - const void *args, uint32_t argsSize, rtSmDesc_t *smDesc, rtStream_t stream, - uint32_t flags); - -/** - * @ingroup rt_kernel - * @brief L1 fusion dump addr transfered to device - * @param [in] model handle info - * @param [in] addr ddr address of L1 Fusion Dump - * @param [in] dumpSize memory size - * @param [in] flag memory flag - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDumpAddrSet(rtModel_t model, void *addr, uint32_t dumpSize, uint32_t flag); - -/** - * @ingroup rt_kernel - * @brief load dump info to aicpu - * @param [in] dumpInfo dump info - * @param [in] length length of dump info - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDatadumpInfoLoad(const void *dumpInfo, uint32_t length); - -#ifndef __CLANG_CCE_RUNTIME_H__ -#define __CLANG_CCE_RUNTIME_H__ -/** - * @ingroup rt_kernel - * @brief configure call argment for next rtLaunch in current thread - * @param [in] numBlocks block dimentions - * @param [in] smDesc shared memory description - * @param [in] stream associated stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -#ifdef __cplusplus -RTS_API rtError_t rtConfigureCall(uint32_t numBlocks, rtSmDesc_t *smDesc = nullptr, rtStream_t stream = nullptr); -#else -RTS_API rtError_t rtConfigureCall(uint32_t numBlocks, rtSmDesc_t *smDesc, rtStream_t stream); - -#endif -#endif // __CLANG_CCE_RUNTIME_H__ - -/** - * @ingroup rt_kernel - * @brief setup argment for next rtLaunch in current thread - * @param [in] arg argment address for kernel function - * @param [in] size argment size - * @param [in] offset argment table offset - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetupArgument(const void *arg, uint32_t size, uint32_t offset); - -/** - * @ingroup rt_kernel - * @brief launch kernel to device with previous setting kernel argment - * and call argment - * @param [in] stubFunc stub function - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtLaunch(const void 
*stubFunc); - -/** - * @ingroup rt_kernel - * @brief implicitly transfered data to device. - * lifecycle end after next kernel task finish - * @param [in] ptr host memory - * @param [in] size host memory size - * @param [in] flag reserved. set to 0 - * @param [out] arg returned arg. used for next kernel's arg. - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtKernelConfigTransArg(const void *ptr, uint64_t size, uint32_t flag, void **arg); - -/** - * @ingroup rt_kernel - * @brief start fusion kernels. - * @param [in] stream stream for fusion kernels - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtKernelFusionStart(rtStream_t stream); - -/** - * @ingroup rt_kernel - * @brief end fusion kernels. - * @param [in] stream stream for fusion kernels - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtKernelFusionEnd(rtStream_t stream); - -/** - * @ingroup rt_kernel - * @brief set kernelinfo callback - * @param [in] callback - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetKernelReportCallback(rtKernelReportCallback callBack); - -/** - * @ingroup rt_kernel - * @brief subscribe stream callback report. - * @param [in] threadId thread id for stream - * @param [in] stream stream for subscribe - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSubscribeReport(uint64_t threadId, rtStream_t stream); - -/** - * @ingroup rt_kernel - * @brief add callback launch task in stream. - * @param [in] callBackFunc app callback function - * @param [in] fnData user data - * @param [in] stream subscribed stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtCallbackLaunch(rtCallback_t callBackFunc, void *fnData, rtStream_t stream, bool isBlock); - -/** - * @ingroup rt_kernel - * @brief process callback report. - * @param [in] timeout if timeout=-1, while(1); else timeout - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtProcessReport(int32_t timeout); - -/** - * @ingroup rt_kernel - * @brief unsubscribe callback report. - * @param [in] threadId thread id for stream - * @param [in] stream stream for subscribe - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtUnSubscribeReport(uint64_t threadId, rtStream_t stream); - -/** - * @ingroup profiling_base - * @brief start online prof. - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStartOnlineProf(rtStream_t stream, uint32_t sampleNum); - -/** - * @ingroup profiling_base - * @brief stop online prof. - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStopOnlineProf(rtStream_t stream); - -/** - * @ingroup profiling_base - * @brief get online prof. - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetOnlineProfData(rtStream_t stream, rtProfDataInfo_t *pProfData, uint32_t profDataNum); - -/** - * @ingroup profiling_base - * @brief start mdc profiler. 
- * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStartMDCProfiler(void **addr, uint32_t length); - -/** - * @ingroup profiling_base - * @brief stop mdc profiler. - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStopMDCProfiler(void *addr); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_KERNEL_H__ - diff --git a/inc/runtime/mem.h b/inc/runtime/mem.h deleted file mode 100644 index 051e3de13..000000000 --- a/inc/runtime/mem.h +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_MEM_H__ -#define __CCE_RUNTIME_MEM_H__ - -/*lint -e7*/ -#include -/*lint +e7*/ -#include "base.h" -#include "config.h" -#include "stream.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -/** - * @ingroup dvrt_mem - * @brief memory type - */ -#define RT_MEMORY_DEFAULT ((uint32_t)0x0) // default memory on device -#define RT_MEMORY_HBM ((uint32_t)0x2) // HBM memory on device -#define RT_MEMORY_RDMA_HBM ((uint32_t)0x3) // RDMA-HBM memory on device -#define RT_MEMORY_DDR ((uint32_t)0x4) // DDR memory on device -#define RT_MEMORY_SPM ((uint32_t)0x8) // shared physical memory on device -#define RT_MEMORY_P2P_HBM ((uint32_t)0x10) // HBM memory on other 4P device -#define RT_MEMORY_P2P_DDR ((uint32_t)0x11) // DDR memory on other device -#define RT_MEMORY_DDR_NC ((uint32_t)0x20) // DDR memory of non-cache -#define RT_MEMORY_TS_4G ((uint32_t)0x40) -#define RT_MEMORY_TS ((uint32_t)0x80) -#define RT_MEMORY_RESERVED ((uint32_t)0x100) - -#define RT_MEMORY_L1 ((uint32_t)0x1<<16) -#define RT_MEMORY_L2 ((uint32_t)0x1<<17) - -/** - * @ingroup dvrt_mem - * @brief memory info type - */ -#define RT_MEM_INFO_TYPE_DDR_SIZE ((uint32_t)0x1) -#define RT_MEM_INFO_TYPE_HBM_SIZE ((uint32_t)0x2) -#define RT_MEM_INFO_TYPE_DDR_P2P_SIZE ((uint32_t)0x3) -#define RT_MEM_INFO_TYPE_HBM_P2P_SIZE ((uint32_t)0x4) - -/** - * @ingroup dvrt_mem - * @brief memory Policy - */ -#define RT_MEMORY_POLICY_NONE ((uint32_t)0x0) // Malloc mem prior hage page, then default page -#define RT_MEMORY_POLICY_HUGE_PAGE_FIRST ((uint32_t)0x1 << 10) // Malloc mem prior hage page, then default page -#define RT_MEMORY_POLICY_HUGE_PAGE_ONLY ((uint32_t)0x1 << 11) // Malloc mem only use hage page -#define RT_MEMORY_POLICY_DEFAULT_PAGE_ONLY ((uint32_t)0x1 << 12) // Malloc mem only use default page -#define RT_MEMORY_POLICY_HUGE_PAGE_FIRST_P2P ((uint32_t)0x1 << 13) // Malloc mem prior hage page, then default page, use for p2p -#define RT_MEMORY_POLICY_HUGE_PAGE_ONLY_P2P ((uint32_t)0x1 << 14) // Malloc mem only use hage page, use for p2p -#define RT_MEMORY_POLICY_DEFAULT_PAGE_ONLY_P2P ((uint32_t)0x1 << 15) // Malloc mem only use default page, use for p2p - -#define MEM_ALLOC_TYPE_BIT ((uint32_t)0x3FF) // mem type bit in <0, 9> - -/** - * @ingroup dvrt_mem - * @brief 
memory type | memory Policy - */ -typedef uint32_t rtMemType_t; - -/** - * @ingroup dvrt_mem - * @brief memory advise type - */ -#define RT_MEMORY_ADVISE_EXE (0x02) -#define RT_MEMORY_ADVISE_THP (0x04) -#define RT_MEMORY_ADVISE_PLE (0x08) -#define RT_MEMORY_ADVISE_PIN (0x16) - -/** - * @ingroup dvrt_mem - * @brief memory copy type - */ -typedef enum tagRtMemcpyKind { - RT_MEMCPY_HOST_TO_HOST = 0, // host to host - RT_MEMCPY_HOST_TO_DEVICE, // host to device - RT_MEMCPY_DEVICE_TO_HOST, // device to host - RT_MEMCPY_DEVICE_TO_DEVICE, // device to device, 1P && P2P - RT_MEMCPY_MANAGED, // managed memory - RT_MEMCPY_ADDR_DEVICE_TO_DEVICE, - RT_MEMCPY_HOST_TO_DEVICE_EX, // host to device ex (only used for 8 bytes) - RT_MEMCPY_DEVICE_TO_HOST_EX, // device to host ex - RT_MEMCPY_RESERVED, -} rtMemcpyKind_t; - -typedef enum tagRtMemInfoType { - RT_MEMORYINFO_DDR, - RT_MEMORYINFO_HBM, - RT_MEMORYINFO_DDR_HUGE, // Hugepage memory of DDR - RT_MEMORYINFO_DDR_NORMAL, // Normal memory of DDR - RT_MEMORYINFO_HBM_HUGE, // Hugepage memory of HBM - RT_MEMORYINFO_HBM_NORMAL, // Normal memory of HBM - RT_MEMORYINFO_DDR_P2P_HUGE, // Hugepage memory of DDR - RT_MEMORYINFO_DDR_P2P_NORMAL, // Normal memory of DDR - RT_MEMORYINFO_HBM_P2P_HUGE, // Hugepage memory of HBM - RT_MEMORYINFO_HBM_P2P_NORMAL, // Normal memory of HBM -} rtMemInfoType_t; - -typedef enum tagRtRecudeKind { - RT_MEMCPY_SDMA_AUTOMATIC_ADD = 10, // D2D, SDMA inline reduce, include 1P, and P2P - RT_RECUDE_KIND_END -} rtRecudeKind_t; - -typedef enum tagRtDataType { - RT_DATA_TYPE_FP32 = 0, // fp32 - RT_DATA_TYPE_FP16 = 1, // fp16 - RT_DATA_TYPE_INT16 = 2, // int16 - RT_DATA_TYPE_END -} rtDataType_t; - -/** - * @ingroup dvrt_mem - * @brief memory copy channel type - */ -typedef enum tagRtMemcpyChannelType { - RT_MEMCPY_CHANNEL_TYPE_INNER = 0, // 1P - RT_MEMCPY_CHANNEL_TYPE_PCIe, - RT_MEMCPY_CHANNEL_TYPE_HCCs, // not support now - RT_MEMCPY_CHANNEL_TYPE_RESERVED, -} rtMemcpyChannelType_t; - -/** - * @ingroup rt_kernel - * @brief ai core memory size - */ -typedef struct rtAiCoreMemorySize { - uint32_t l0ASize; - uint32_t l0BSize; - uint32_t l0CSize; - uint32_t l1Size; - uint32_t ubSize; - uint32_t l2Size; - uint32_t l2PageNum; - uint32_t blockSize; - uint64_t bankSize; - uint64_t bankNum; - uint64_t burstInOneBlock; - uint64_t bankGroupNum; -} rtAiCoreMemorySize_t; - -/** - * @ingroup dvrt_mem - * @brief memory type - */ -typedef enum tagRtMemoryType { - RT_MEMORY_TYPE_HOST = 1, - RT_MEMORY_TYPE_DEVICE = 2, - RT_MEMORY_TYPE_SVM = 3, - RT_MEMORY_TYPE_DVPP = 4 -} rtMemoryType_t; - -/** - * @ingroup dvrt_mem - * @brief memory attribute - */ -typedef struct tagRtPointerAttributes { - rtMemoryType_t memoryType; // host memory or device memory - rtMemoryType_t locationType; - uint32_t deviceID; // device ID - uint32_t pageSize; -} rtPointerAttributes_t; - - -typedef struct rtMallocHostSharedMemoryIn { - const char *name; - const uint64_t size; - uint32_t flag; -} rtMallocHostSharedMemoryIn; - -typedef struct rtMallocHostSharedMemoryOut { - int fd; - void *ptr; - void *devPtr; -} rtMallocHostSharedMemoryOut; - -typedef struct rtFreeHostSharedMemoryIn { - const char *name; - const uint64_t size; - int fd; - void *ptr; - void *devPtr; -} rtFreeHostSharedMemoryIn; - - -/** - * @ingroup dvrt_mem - * @brief alloc device memory - * @param [in|out] devPtr memory pointer - * @param [in] size memory size - * @param [in] type memory type - * @param [in] moduleId alloc memory module id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input 
- */ -RTS_API rtError_t rtMalloc(void **devPtr, uint64_t size, rtMemType_t type, const uint16_t moduleId); - -/** - * @ingroup dvrt_mem - * @brief free device memory - * @param [in|out] devPtr memory pointer - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtFree(void *devPtr); - -/** - * @ingroup dvrt_mem - * @brief alloc device memory for dvpp - * @param [in|out] devPtr memory pointer - * @param [in] size memory size - * @param [in] moduleId alloc memory module id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDvppMalloc(void **devPtr, uint64_t size, uint16_t moduleId); - -/** - * @ingroup dvrt_mem - * @brief free device memory for dvpp - * @param [in|out] devPtr memory pointer - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDvppFree(void *devPtr); - -/** - * @ingroup dvrt_mem - * @brief alloc host memory - * @param [in|out] hostPtr memory pointer - * @param [in] size memory size - * @param [in] moduleId alloc memory module id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtMallocHost(void **hostPtr, uint64_t size, uint16_t moduleId); - -/** - * @ingroup dvrt_mem - * @brief free host memory - * @param [in] hostPtr memory pointer - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtFreeHost(void *hostPtr); - -/** - * @ingroup dvrt_mem - * @brief alloc host shared memory - * @param [in] in alloc host shared memory inputPara pointer - * @param [in] out alloc host shared memory outputInfo pointer - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ - -RTS_API rtError_t rtMallocHostSharedMemory(rtMallocHostSharedMemoryIn *in, - rtMallocHostSharedMemoryOut *out); - -/** - * @ingroup dvrt_mem - * @brief free host memory - * @param [in] in free host shared memory inputPara pointer - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ - -RTS_API rtError_t rtFreeHostSharedMemory(rtFreeHostSharedMemoryIn *in); - -/** - * @ingroup dvrt_mem - * @brief alloc managed memory - * @param [in|out] ptr memory pointer - * @param [in] size memory size - * @param [in] flag reserved, set to 0. 
- * @param [in] moduleId alloc memory module id
- * @return RT_ERROR_NONE for ok
- * @return RT_ERROR_INVALID_VALUE for error input
- */
-RTS_API rtError_t rtMemAllocManaged(void **ptr, uint64_t size, uint32_t flag, uint16_t moduleId);
-
-/**
- * @ingroup dvrt_mem
- * @brief free managed memory
- * @param [in] ptr memory pointer
- * @return RT_ERROR_NONE for ok
- * @return RT_ERROR_INVALID_VALUE for error input
- */
-RTS_API rtError_t rtMemFreeManaged(void *ptr);
-/**
- * @ingroup dvrt_mem
- * @brief alloc cached device memory
- * @param [in|out] devPtr memory pointer
- * @param [in] size memory size
- * @param [in] type memory type
- * @param [in] moduleId alloc memory module id
- * @return RT_ERROR_NONE for ok
- */
-RTS_API rtError_t rtMallocCached(void **devPtr, uint64_t size, rtMemType_t type, uint16_t moduleId);
-
-/**
- * @ingroup dvrt_mem
- * @brief flush device memory
- * @param [in] base virtual base address
- * @param [in] len memory size
- * @return RT_ERROR_NONE for ok, errno for failed
- */
-RTS_API rtError_t rtFlushCache(void *base, size_t len);
-
-/**
- * @ingroup dvrt_mem
- * @brief invalidate device memory
- * @param [in] base virtual base address
- * @param [in] len memory size
- * @return RT_ERROR_NONE for ok, errno for failed
- */
-RTS_API rtError_t rtInvalidCache(void *base, size_t len);
-
-/**
- * @ingroup dvrt_mem
- * @brief synchronous memcpy
- * @param [in] dst destination address pointer
- * @param [in] destMax max length of destination address memory
- * @param [in] src source address pointer
- * @param [in] count the number of bytes to copy
- * @param [in] kind memcpy type
- * @return RT_ERROR_NONE for ok
- * @return RT_ERROR_INVALID_VALUE for error input
- */
-RTS_API rtError_t rtMemcpy(void *dst, uint64_t destMax, const void *src, uint64_t count, rtMemcpyKind_t kind);
-
-/**
- * @ingroup dvrt_mem
- * @brief asynchronous memcpy
- * @param [in] dst destination address pointer
- * @param [in] destMax max length of destination address memory
- * @param [in] src source address pointer
- * @param [in] count the number of bytes to copy
- * @param [in] kind memcpy type
- * @param [in] stream asynchronous task stream
- * @return RT_ERROR_NONE for ok
- * @return RT_ERROR_INVALID_VALUE for error input
- */
-RTS_API rtError_t rtMemcpyAsync(void *dst, uint64_t destMax, const void *src, uint64_t count, rtMemcpyKind_t kind,
-                                rtStream_t stream);
-
-/**
- * @ingroup dvrt_mem
- * @brief asynchronous reduce memcpy
- * @param [in] dst destination address pointer
- * @param [in] destMax max length of destination address memory
- * @param [in] src source address pointer
- * @param [in] count the number of bytes to copy
- * @param [in] kind memcpy type
- * @param [in] type data type
- * @param [in] stream asynchronous task stream
- * @return RT_ERROR_NONE for ok
- * @return RT_ERROR_INVALID_VALUE for error input
- */
-RTS_API rtError_t rtReduceAsync(void *dst, uint64_t destMax, const void *src, uint64_t count, rtRecudeKind_t kind,
-                                rtDataType_t type, rtStream_t stream);
-
-/**
- * @ingroup dvrt_mem
- * @brief query memory size
- * @param [in] aiCoreMemorySize
- * @return RT_ERROR_NONE for ok, errno for failed
- * @return RT_ERROR_INVALID_VALUE for error input
- */
-RTS_API rtError_t rtAiCoreMemorySizes(rtAiCoreMemorySize_t *aiCoreMemorySize);
-
-/**
- * @ingroup dvrt_mem
- * @brief set memory size; must be set before model inference so the model does not fail to integrate the full
-    network due to memory limitations. Requirement from JiaMinHu; only used for Tiny.
- * @param [in] aiCoreMemorySize - * @return RT_ERROR_NONE for ok, errno for failed - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetAiCoreMemorySizes(rtAiCoreMemorySize_t *aiCoreMemorySize); - -/** - * @ingroup dvrt_mem - * @brief set memory with uint32_t value - * @param [in] devPtr - * @param [in] Max length of destination address memory - * @param [in] value - * @param [in] count byte num - * @return RT_ERROR_NONE for ok, errno for failed - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtMemset(void *devPtr, uint64_t destMax, uint32_t value, uint64_t count); - -/** - * @ingroup dvrt_mem - * @brief set memory with uint32_t value async - * @param [in] devPtr - * @param [in] Max length of destination address memory - * @param [in] value - * @param [in] count byte num - * @param [in] stream - * @return RT_ERROR_NONE for ok, errno for failed - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtMemsetAsync(void *ptr, uint64_t destMax, uint32_t value, uint64_t count, rtStream_t stream); - -/** - * @ingroup dvrt_mem - * @brief get current device memory total and free - * @param [out] free - * @param [out] total - * @return RT_ERROR_NONE for ok, errno for failed - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtMemGetInfo(size_t *free, size_t *total); - -/** - * @ingroup dvrt_mem - * @brief get current device memory total and free - * @param [in] memInfoType - * @param [out] free - * @param [out] total - * @return RT_ERROR_NONE for ok, errno for failed - */ -RTS_API rtError_t rtMemGetInfoEx(rtMemInfoType_t memInfoType, size_t *free, size_t *total); - -/** - * @ingroup dvrt_mem - * @brief set memory with uint32_t value - * @param [in] devPtr - * @param [in] len - * @param [in] device - * @return RT_ERROR_NONE for ok, errno for failed - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtMemPrefetchToDevice(void *devPtr, uint64_t len, int32_t device); - -/** - * @ingroup dvrt_mem - * @brief get memory attribute:Host or Device - * @param [in] ptr - * @param [out] attributes - * @return RT_ERROR_NONE for ok, errno for failed - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtPointerGetAttributes(rtPointerAttributes_t *attributes, const void *ptr); - -/** - * @ingroup dvrt_mem - * @brief make memory shared interprocess and assigned a name - * @param [in] ptr device memory address pointer - * @param [in] name identification name - * @param [in] byteCount identification byteCount - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtIpcSetMemoryName(const void *ptr, uint64_t byteCount, char *name, uint32_t len); - -/** - * @ingroup dvrt_mem - * @brief destroy a interprocess shared memory - * @param [in] name identification name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtIpcDestroyMemoryName(const char *name); - -/** - * @ingroup dvrt_mem - * @brief open a interprocess shared memory - * @param [in|out] ptr device memory address pointer - * @param [in] name identification name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtIpcOpenMemory(void **ptr, const char *name); - -/** - * @ingroup dvrt_mem - * @brief close a interprocess 
shared memory - * @param [in] ptr device memory address pointer - * @param [in] name identification name - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtIpcCloseMemory(const void *ptr); - -/** - * @ingroup dvrt_mem - * @brief HCCL Async memory cpy - * @param [in] index sq index - * @param [in] wqeIndex moudle index - * @param [in] stream asynchronized task stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtRDMASend(uint32_t index, uint32_t wqeIndex, rtStream_t stream); - -/** - * @ingroup dvrt_mem - * @brief Ipc set mem pid - * @param [in] name name to be queried - * @param [in] pid process id - * @param [in] num length of pid[] - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtSetIpcMemPid(const char *name, int32_t pid[], int num); - -/** - * @ingroup dvrt_mem - * @brief HCCL Async memory cpy - * @param [in] dbindex single device 0 - * @param [in] dbinfo doorbell info - * @param [in] stream asynchronized task stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - * @return RT_ERROR_DRV_ERR for driver error - */ -RTS_API rtError_t rtRDMADBSend(uint32_t dbIndex, uint64_t dbInfo, rtStream_t stream); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_MEM_H__ diff --git a/inc/runtime/rt.h b/inc/runtime/rt.h deleted file mode 100644 index 83cafa3c5..000000000 --- a/inc/runtime/rt.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_RT_H__ -#define __CCE_RUNTIME_RT_H__ - -#include "base.h" -#include "config.h" -#include "context.h" -#include "dev.h" -#include "dvfsprofile.h" -#include "event.h" -#include "kernel.h" -#include "mem.h" -#include "rt_model.h" -#include "stream.h" - -#endif // __CCE_RUNTIME_RT_H__ diff --git a/inc/runtime/rt_mem_queue.h b/inc/runtime/rt_mem_queue.h deleted file mode 100644 index 502777657..000000000 --- a/inc/runtime/rt_mem_queue.h +++ /dev/null @@ -1,260 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
-*/ - -#ifndef CCE_RUNTIME_MEM_QUEUE_H -#define CCE_RUNTIME_MEM_QUEUE_H -#include -#include "base.h" - - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -#define WEAKFUC __attribute__((weak)) -#define RT_MQ_MAX_NAME_LEN 128 -#define RT_MQ_DEPTH_MIN 2 -#define RT_MQ_MODE_PUSH 1 -#define RT_MQ_MODE_PULL 2 -#define RT_MQ_MODE_DEFAULT RT_MQ_MODE_PUSH - -typedef struct tagMemQueueInfo -{ - int32_t id; - int32_t size; - uint32_t depth; - int32_t status; -} rtMemQueueInfo_t; - -typedef struct tagMemQueueAttr { - char name[RT_MQ_MAX_NAME_LEN]; - uint32_t depth; - uint32_t workMode; - bool flowCtrlFlag; - uint32_t flowCtrlDropTime; - bool overWriteFlag; -} rtMemQueueAttr_t; - -typedef struct tagMemQueueShareAttr -{ - int manage:1; - int read:1; - int write:1; - int rsv:29; -}rtMemQueueShareAttr_t; - -typedef struct tagMemQueueBuffInfo { - void *addr; - size_t len; -} rtMemQueueBuffInfo; - -typedef struct tagMemQueueBuff { - void *contextAddr; - size_t contextLen; - uint32_t buffCount; - rtMemQueueBuffInfo *buffInfo; -} rtMemQueueBuff_t; - -typedef enum tagMemQueueQueryCmd { - RT_MQ_QUERY_QUE_ATTR_OF_CUR_PROC = 0, - RT_MQ_QUERY_QUES_OF_CUR_PROC =1, - RT_MQ_QUERY_CMD_MAX = 2 -} rtMemQueueQueryCmd_t; - -#define RT_MQ_EVENT_QS_MSG 27 - -#define RT_MQ_SCHED_PRIORITY_LEVEL0 0 -#define RT_MQ_SCHED_PRIORITY_LEVEL1 1 -#define RT_MQ_SCHED_PRIORITY_LEVEL2 2 -#define RT_MQ_SCHED_PRIORITY_LEVEL3 3 -#define RT_MQ_SCHED_PRIORITY_LEVEL4 4 -#define RT_MQ_SCHED_PRIORITY_LEVEL5 5 -#define RT_MQ_SCHED_PRIORITY_LEVEL6 6 -#define RT_MQ_SCHED_PRIORITY_LEVEL7 7 - -#define RT_MQ_DST_ENGINE_ACPU_DEVICE 0 -#define RT_MQ_DST_ENGINE_ACPU_HOST 1 -#define RT_MQ_DST_ENGINE_CCPU_DEVICE 2 -#define RT_MQ_DST_ENGINE_CCPU_HOST 3 -#define RT_MQ_DST_ENGINE_DCPU_DEVICE 4 - -#define RT_MQ_SCHED_EVENT_QS_MSG 25 - -typedef struct tagEschedEventSummary { - int32_t pid; - uint32_t grpId; - int32_t eventId; - uint32_t subeventId; - uint32_t msgLen; - char *msg; - uint32_t dstEngine; - int32_t policy; -} rtEschedEventSummary_t; - -typedef struct tagEschedEventReply { - char *buf; - uint32_t bufLen; - uint32_t replyLen; -} rtEschedEventReply_t; - -#define RT_DEV_PROCESS_CP1 0 -#define RT_DEV_PROCESS_CP2 1 -#define RT_DEV_PROCESS_DEV_ONLY 2 -#define RT_DEV_PROCESS_QS 3 -#define RT_DEV_PROCESS_SIGN_LENGTH 49 - -typedef struct tagBindHostpidInfo { - int32_t hostPid; - uint32_t vfid; - uint32_t chipId; - int32_t mode; - int32_t cpType; - uint32_t len; - char sign[RT_DEV_PROCESS_SIGN_LENGTH]; -} rtBindHostpidInfo_t; - -#define RT_MEM_BUFF_MAX_CFG_NUM 64 - -typedef struct { - uint32_t cfgId = 0; - uint32_t totalSize = 0; - uint32_t blkSize = 0; - uint32_t maxBufSize = 0; - uint32_t pageType = 0; - int32_t elasticEnable = 0; - int32_t elasticRate = 0; - int32_t elasticRateMax = 0; - int32_t elasticHighLevel = 0; - int32_t elasticLowLevel = 0; -} rtMemZoneCfg_t; - -typedef struct { - rtMemZoneCfg_t cfg[RT_MEM_BUFF_MAX_CFG_NUM]; -} rtMemBuffCfg_t; - -typedef void *rtMbufPtr_t; - -RTS_API rtError_t rtMemQueueInitQS(int32_t devId) WEAKFUC; - -RTS_API rtError_t rtMemQueueCreate(int32_t devId, const rtMemQueueAttr_t *queAttr, uint32_t *qid) WEAKFUC; - -RTS_API rtError_t rtMemQueueDestroy(int32_t devId, uint32_t qid) WEAKFUC; - -RTS_API rtError_t rtMemQueueInit(int32_t devId) WEAKFUC; - -RTS_API rtError_t rtMemQueueEnQueue(int32_t devId, uint32_t qid, void *mbuf) WEAKFUC; - -RTS_API rtError_t rtMemQueueDeQueue(int32_t devId, uint32_t qid, void **mbuf) WEAKFUC; - -RTS_API rtError_t rtMemQueuePeek(int32_t devId, uint32_t qid, 
size_t *bufLen, int32_t timeout) WEAKFUC; - -RTS_API rtError_t rtMemQueueEnQueueBuff(int32_t devId, uint32_t qid, rtMemQueueBuff_t *inBuf, int32_t timeout) WEAKFUC; - -RTS_API rtError_t rtMemQueueDeQueueBuff(int32_t devId, uint32_t qid, rtMemQueueBuff_t *outBuf, int32_t timeout) WEAKFUC; - -RTS_API rtError_t rtMemQueueQuery(int32_t devId, rtMemQueueQueryCmd_t cmd, void *inBuff, uint32_t inLen, - void *outBuff, uint32_t *outLen) WEAKFUC; - -RTS_API rtError_t rtMemQueueQueryInfo(int32_t device, uint32_t qid, rtMemQueueInfo_t *queueInfo) WEAKFUC; - -RTS_API rtError_t rtMemQueueGrant(int32_t devId, uint32_t qid, int32_t pid, rtMemQueueShareAttr_t *attr) WEAKFUC; - -RTS_API rtError_t rtMemQueueAttach(int32_t devId, uint32_t qid, int32_t timeout) WEAKFUC; - -RTS_API rtError_t rtEschedSubmitEventSync(int32_t devId, rtEschedEventSummary_t *event, rtEschedEventReply_t *ack) WEAKFUC; - -RTS_API rtError_t rtQueryDevPid(rtBindHostpidInfo_t *info, int32_t *devPid) WEAKFUC; - -RTS_API rtError_t rtMbufInit(rtMemBuffCfg_t *cfg) WEAKFUC; - -RTS_API rtError_t rtMbufAlloc(rtMbufPtr_t *mbuf, uint64_t size) WEAKFUC; - -RTS_API rtError_t rtMbufFree(rtMbufPtr_t mbuf) WEAKFUC; - -RTS_API rtError_t rtMbufGetBuffAddr(rtMbufPtr_t mbuf, void **databuf) WEAKFUC; - -RTS_API rtError_t rtMbufGetBuffSize(rtMbufPtr_t mbuf, uint64_t *size) WEAKFUC; - -RTS_API rtError_t rtMbufGetPrivInfo(rtMbufPtr_t mbuf, void **priv, uint64_t *size) WEAKFUC; - -#define RT_MEM_GRP_NAME_LEN 32 // it must be same as driver define BUFF_GRP_NAME_LEN -// mem group -typedef struct { - uint64_t maxMemSize; - uint32_t cacheAllocFlag; - int32_t rsv[RT_MEM_GRP_NAME_LEN - 1]; -} rtMemGrpConfig_t; - -typedef struct { - uint32_t admin : 1; // admin permission, can add other proc to grp - uint32_t read : 1; // read only permission - uint32_t write : 1; // read and write permission - uint32_t alloc : 1; // alloc permission (have read and write permission) - uint32_t rsv : 28; -} rtMemGrpShareAttr_t; - -#define RT_MEM_GRP_QUERY_GROUPS_OF_PROCESS 1 - -typedef struct { - int pid; -} rtMemGrpQueryByProc_t; - -typedef union { - rtMemGrpQueryByProc_t grpQueryByProc; -} rtMemGrpQueryInput_t; - -#define RT_MEM_GRP_NAME_LEN 32 - -typedef struct { - char groupName[RT_MEM_GRP_NAME_LEN]; - rtMemGrpShareAttr_t attr; -} rtMemGrpOfProc_t; - -typedef struct { - rtMemGrpOfProc_t *groupsOfProc; - size_t maxNum; - size_t resultNum; -} rtMemGrpQueryOutput_t; - -#define RT_MEM_CACHE_MAX_NUM 1024 -typedef struct { - uint64_t memSize; - uint32_t memFlag; - int32_t rsv[RT_MEM_CACHE_MAX_NUM]; -} rtMemGrpCacheAllocPara; - -RTS_API rtError_t rtMemGrpCreate(const char *name, const rtMemGrpConfig_t *cfg) WEAKFUC; - -RTS_API rtError_t rtBuffConfirm(void *buff, const uint64_t size) WEAKFUC; - -RTS_API rtError_t rtBuffAlloc(const uint64_t size, void **const buff) WEAKFUC; - -RTS_API rtError_t rtBuffFree(void *buff) WEAKFUC; - -RTS_API rtError_t rtMbufBuild(void *buff, const uint64_t size, rtMbufPtr_t *mbuf) WEAKFUC; - -RTS_API rtError_t rtMbufUnBuild(rtMbufPtr_t mbuf, void **buff, uint64_t *const size) WEAKFUC; - -RTS_API rtError_t rtMemGrpAddProc(const char *name, int32_t pid, const rtMemGrpShareAttr_t *attr) WEAKFUC; - -RTS_API rtError_t rtMemGrpAttach(const char *name, int32_t timeout) WEAKFUC; - -RTS_API rtError_t rtMemGrpQuery(int32_t cmd, const rtMemGrpQueryInput_t *input, rtMemGrpQueryOutput_t *output) WEAKFUC; - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif -#endif // CCE_RUNTIME_MEM_QUEUE_H diff --git a/inc/runtime/rt_model.h b/inc/runtime/rt_model.h 
deleted file mode 100644 index 482486a8f..000000000 --- a/inc/runtime/rt_model.h +++ /dev/null @@ -1,456 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ - -#ifndef __CCE_RUNTIME_MODEL_H__ -#define __CCE_RUNTIME_MODEL_H__ - -#include "base.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -typedef enum tagModelTaskType { - RT_MODEL_TASK_KERNEL = 0, - RT_MODEL_TASK_EVENT_RECORD, - RT_MODEL_TASK_EVENT_WAIT, - RT_MODEL_TASK_FUSION_START, - RT_MODEL_TASK_FUSION_END, - RT_MODEL_TASK_KERNEL_EX, - RT_MODEL_TASK_HCCL, - RT_MODEL_TASK_STREAM_SWITCH, - RT_MODEL_TASK_STREAM_ACTIVE, - RT_MODEL_TASK_LABEL_SET, - RT_MODEL_TASK_LABEL_SWITCH, - RT_MODEL_TASK_LABEL_GOTO, - RT_MODEL_TASK_PROFILER_TRACE, - RT_MODEL_TASK_MEMCPY_ASYNC, - RT_MODEL_TASK_NOTIFY_RECORD, - RT_MODEL_TASK_NOTIFY_WAIT, - RT_MODEL_TASK_REDUCE_ASYNC, - RT_MODEL_TASK_RDMA_SEND, - RT_MODEL_TASK_EVENT_RESET = 18, - RT_MODEL_TASK_MODEL_END_GRAPH, - RT_MODEL_TASK_STREAM_SWITCH_N, - RT_MODEL_TASK_RDMA_DB_SEND, - RT_MODEL_TASK_MEMCPY_ADDR_ASYNC, - RT_MODEL_TASK_STREAM_LABEL_SWITCH_BY_INDEX, - RT_MODEL_TASK_STREAM_LABEL_GOTO, - RT_MODEL_TASK_MODEL_EXIT, -} rtModelTaskType_t; - -typedef enum tagModelStreamType { - RT_MODEL_HEAD_STREAM = 0, - RT_MODEL_WAIT_ACTIVE_STREAM = 1 -} rtModelStreamType_t; - -typedef enum tagModelQueueFlag { - RT_MODEL_INPUT_QUEUE = 0, - RT_MODEL_OUTPUT_QUEUE = 1 -} rtModelQueueFlag_t; - -#define EXECUTOR_NONE ((uint32_t)0x0) -#define EXECUTOR_TS ((uint32_t)0x01) -#define EXECUTOR_AICPU ((uint32_t)0x02) - -/* - * @ingroup rt_model - * @brief debug flag for kernel exception dump - */ -#define RT_DEBUG_FLAG_AICORE_OVERFLOW (0x1 << 0) -#define RT_DEBUG_FLAG_ATOMIC_ADD_OVERFLOW (0x1 << 1) - -/** - * @ingroup - * @brief the type defination of aicpu model task command - */ -typedef enum tagTsAicpuModelCmd { - TS_AICPU_MODEL_LOAD = 1, - TS_AICPU_MODEL_EXECUTE, - TS_AICPU_MODEL_DESTROY, - TS_AICPU_MODEL_ABORT, - TS_AICPU_MODEL_RESERVED, -} tsAicpuModelCmd; - -typedef struct tagAicpuTaskInfo { - uint32_t taskID; - uint32_t streamID; - uint32_t kernelType; - uint64_t kernelName; - uint64_t kernelSo; - uint64_t paraBase; - uint32_t taskFlag; -} rtAicpuTaskInfo_t; - -typedef struct tagModelStreamInfo { - uint32_t streamID; - uint32_t streamFlag; -} rtModelStreamInfo_t; - -typedef struct tagModelQueueInfo { - uint32_t queueID; - uint32_t flag; -} rtModelQueueInfo_t; - -typedef struct tagAicpuModelInfo { - uint32_t moduleID; - uint32_t tsId; - uint16_t streamInfoNum; - uint16_t aicpuTaskNum; - uint64_t streamInfoPtr; - uint64_t aicpuTaskPtr; - uint16_t queueSize; - uint64_t queueInfoPtr; -} rtAicpuModelInfo_t; - -typedef struct tagKernelTaskInfo { - uint16_t blockDim; - uint16_t argsCount; - uint16_t argsSize; - uint16_t reserved; - char *stubFunc; - uint8_t *smDesc; - uint8_t *args; - uint16_t *argsOffset; -} rtKernelTaskInfo_t; - -typedef struct tagKernelTaskInfoEx { - uint32_t flags; - uint32_t argsSize; - void *args; - uint32_t 
reserved[6]; -} rtKernelTaskInfoEx_t; - -typedef struct tagEventTaskInfo { - uint32_t eventID; - uint32_t reserved[9]; -} rtEventTaskInfo_t; - -typedef struct tagStreamSwitchTaskInfo { - int64_t value; - uint64_t pValuePtr; - uint32_t trueStreamID; - uint32_t dataType; - uint32_t reserved[4]; -} rtStreamSwitchTaskInfo_t; - -typedef struct tagStreamSwitchNTaskInfo { - uint64_t pValuePtr; - uint64_t pTrueStreamPtr; - uint32_t size; - uint32_t elementSize; - uint32_t dataType; - uint32_t reserved[3]; -} rtStreamSwitchNTaskInfo_t; - -typedef struct tagStreamActiveTaskInfo { - uint32_t activeStreamID; - uint32_t reserved[9]; -} rtStreamActiveTaskInfo_t; - -typedef struct tagSetTaskInfo { - uint16_t labelId; - uint32_t reserved[9]; -} rtLabelSetTaskInfo_t; - -typedef struct tagSwitchTaskInfo { - uint32_t value; - uint32_t reserved[9]; -} rtLabelSwitchTaskInfo_t; - -typedef struct tagLabelGotoTaskInfo { - uint16_t labelId; - uint32_t reserved[9]; -} rtLabelGotoTaskInfo_t; - -typedef struct tagProfilerTraceTaskInfo { - uint64_t profilerTraceId; - uint32_t notify : 8; - uint32_t reserved_ : 24; - uint32_t flags; - uint32_t reserved[6]; -} rtProfilerTrace_t; - -typedef struct tagrtMemcpyAsyncTaskInfo { - void *dst; - uint64_t destMax; - void *src; - uint64_t count; - uint32_t kind; - uint32_t reserved; -} rtMemcpyAsyncTaskInfo_t; - -typedef struct tagrtNotifyTaskInfo { - uint32_t notifyID; - uint32_t reserved[9]; -} rtNotifyTaskInfo_t; - -typedef struct tagrtReduceAsyncTaskInfo { - void *dst; - uint64_t destMax; - void *src; - uint64_t count; - uint32_t kind; - uint32_t type; -} rtReduceAsyncTaskInfo_t; - -typedef struct tagrtRdmaSendTaskInfo { - uint32_t index; - uint32_t wqe_index; - uint32_t reserved[8]; -} rtRdmaSendTaskInfo_t; - -typedef struct tagrtRdmaDbSendTaskInfo { - uint64_t dbInfo; - uint32_t dbIndex; - uint32_t reserved[7]; // offset 7 -} rtRdmaDbSendTaskInfo_t; - -typedef struct tagrtModelEndGraphTaskInfo { - uint32_t modelId; - uint32_t executorFlag; - uint32_t reserved[8]; -} rtModelEndGraphTaskInfo_t; - -typedef struct tagrtModelExitInfo { - uint32_t modelId; - uint32_t streamId; - uint32_t reserved[8]; -} rtModelExitTaskInfo_t; - - -typedef struct tagrtStreamLabelSwitchByIndexTask_t { - uint64_t indexPtr; - uint64_t labelInfoPtr; - uint32_t max; - uint8_t reserved[20]; -} rtStreamLabelSwitchByIndexTask_t; - -typedef struct tagrtStreamLabelGotoTask_t { - uint16_t labelId; - uint16_t modelId; - uint8_t reserved[36]; -} rtStreamLabelGotoTask_t; - -typedef struct tagTaskInfo { - uint32_t type; - uint32_t streamID; - union { - rtKernelTaskInfoEx_t kernelTaskEx; - rtKernelTaskInfo_t kernelTask; - rtEventTaskInfo_t eventTask; - rtStreamSwitchTaskInfo_t streamSwitchTask; - rtStreamActiveTaskInfo_t streamActiveTask; - rtLabelSetTaskInfo_t labelSetTask; - rtLabelSwitchTaskInfo_t labelSwitchTask; - rtLabelGotoTaskInfo_t labelGotoTask; - rtProfilerTrace_t profilertraceTask; - rtMemcpyAsyncTaskInfo_t memcpyAsyncTask; - rtNotifyTaskInfo_t notifyTask; - rtReduceAsyncTaskInfo_t reduceAsyncTask; - rtRdmaSendTaskInfo_t rdmaSendTask; - rtRdmaDbSendTaskInfo_t rdmaDbSendTask; - rtModelEndGraphTaskInfo_t modelEndGraphTask; - rtModelExitTaskInfo_t modelExitTask; - rtStreamSwitchNTaskInfo_t streamSwitchNTask; - rtStreamLabelSwitchByIndexTask_t streamLabelSwitchIndexTask; - rtStreamLabelGotoTask_t streamLabelGotoTask; - uint32_t reserved[10]; - } u; -} rtTaskInfo_t; - -typedef struct tagLabelDevInfo_t { - uint16_t modelId; - uint16_t streamId; - uint16_t labelId; -}rtLabelDevInfo; - -typedef rtError_t 
(*rtTaskGenCallback)(rtModel_t model, rtTaskInfo_t *taskInfo); - -/** - * @ingroup rt_model - * @brief set callback for generate model - * @param [in] callBack callback function - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtSetTaskGenCallback(rtTaskGenCallback callback); - -/** - * @ingroup rt_model - * @brief create model instance - * @param [out] model created model - * @param [in] flag reserved - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelCreate(rtModel_t *model, uint32_t flag); - -/** - * @ingroup rt_model - * @brief destroy model instance - * @param [in] model model to destroy - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelDestroy(rtModel_t model); - -/** - * @ingroup rt_model - * @brief bind model and stream instance - * @param [in] model binded model - * @param [in] stream binded stream - * @param [in] flag reserved - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelBindStream(rtModel_t model, rtStream_t stream, uint32_t flag); - -/** - * @ingroup rt_model - * @brief unbind model and stream instance - * @param [in] model unbinded model - * @param [in] stream unbinded stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelUnbindStream(rtModel_t model, rtStream_t stream); - -/** - * @ingroup rt_model - * @brief tell runtime Model has been Loaded - * @param [in] model model to execute - * @return RT_ERROR_NONE for ok - */ -RTS_API rtError_t rtModelLoadComplete(rtModel_t model); - -/** - * @ingroup rt_model - * @brief execute model instance - * @param [in] model model to execute - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelExecute(rtModel_t model, rtStream_t stream, uint32_t flag); - -/** - * @ingroup rt_model - * @brief get model the last persist task id - * @param [in] model model to execute - * @param [out] taskid last task id of the model - * @param [out] streamid last steam id of the model - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelGetTaskId(rtModel_t model, uint32_t *taskid, uint32_t *streamid); - -/** - * @ingroup rt_model - * @brief add a end graph task to stream - * @param [in] model model to execute - * @param [in] end graph stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtEndGraph(rtModel_t model, rtStream_t stream); - -/** - * @ingroup rt_model - * @brief add a end graph task with flag to stream - * @param [in] model model to execute - * @param [in] end graph stream - * @param [in] flags AICPU datadump - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtEndGraphEx(rtModel_t model, rtStream_t stream, uint32_t flags); - -/** - * @ingroup rt_model - * @brief add a end graph task to stream - * @param [in] model model to execute - * @param [in] flags EXECUTOR_TS | EXECUTOR_AICPU - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelExecutorSet(rtModel_t model, uint8_t flags); - -/** - * @ingroup rt_model - * @brief abort model - * @param [in] model model to abort - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE 
for error input - */ -RTS_API rtError_t rtModelAbort(rtModel_t model); - -/** - * @ingroup rt_model - * @brief end graph task to model default stream - * @param [in] model model to execute - * @param [in] end graph stream - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelExit(rtModel_t model, rtStream_t stream); - -/** - * @ingroup rt_model - * @brief bind queue - * @param [in] model model to bind - * @param [in] queueId queueId to bind - * @param [in] flag - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelBindQueue(rtModel_t model, uint32_t queueId, rtModelQueueFlag_t flag); - -/** - * @ingroup rt_model - * @brief get model id - * @param [in] model - * @param [out] modelId model id - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtModelGetId(rtModel_t model, uint32_t *modelId); - -/* - * @ingroup rt_model - * @brief enable debug for dump overflow exception - * @param [in] addr: ddr address of kernel exception dumpped - * @param [in] model: model handle - * @param [in] flag: debug flag - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDebugRegister(rtModel_t model, uint32_t flag, const void *addr, - uint32_t *streamId, uint32_t *taskId); - -/* - * @ingroup rt_model - * @brief disable debug for dump overflow exception - * @param [in] model: model handle - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtDebugUnRegister(rtModel_t model); - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_MODEL_H__ diff --git a/inc/runtime/stream.h b/inc/runtime/stream.h deleted file mode 100644 index 6b9f80ae1..000000000 --- a/inc/runtime/stream.h +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
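
[Illustrative sketch, not part of the patch] Since rt_model.h disappears with this change, a compact usage sketch may help reviewers confirm nothing in-tree still depends on it. The create, bind, load-complete, execute, tear-down order follows the function comments above; the stream calls come from stream.h, removed further below; the zero flag values and install paths are assumptions.

#include "runtime/rt_model.h"   // assumed install paths of the deleted headers
#include "runtime/stream.h"

static rtError_t RunModelOnce(void)
{
    rtModel_t model = nullptr;
    rtStream_t stream = nullptr;

    rtError_t ret = rtModelCreate(&model, 0U);             // flag is reserved
    if (ret != RT_ERROR_NONE) {
        return ret;
    }
    ret = rtStreamCreate(&stream, RT_STREAM_PRIORITY_DEFAULT);
    if (ret != RT_ERROR_NONE) {
        (void)rtModelDestroy(model);
        return ret;
    }

    (void)rtModelBindStream(model, stream, 0U);
    // ... tasks are generated onto the bound stream here, e.g. through the
    //     rtTaskGenCallback registered with rtSetTaskGenCallback() ...
    (void)rtModelLoadComplete(model);                      // tell the runtime loading is done
    ret = rtModelExecute(model, stream, 0U);
    (void)rtStreamSynchronize(stream);                     // wait for execution to finish

    (void)rtModelUnbindStream(model, stream);
    (void)rtStreamDestroy(stream);
    (void)rtModelDestroy(model);
    return ret;
}
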
-*/ - -#ifndef __CCE_RUNTIME_STREAM_H__ -#define __CCE_RUNTIME_STREAM_H__ - -#include "base.h" -#include "event.h" - -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -extern "C" { -#endif - -/** - * @ingroup stream_flags - * @brief stream op bit flags - */ -#define RT_STREAM_DEFAULT (0x00) -#define RT_STREAM_PERSISTENT (0x01) -#define RT_STREAM_FORCE_COPY (0x02) -#define RT_STREAM_HUGE (0x04) -#define RT_STREAM_AICPU (0x08) -#define RT_STREAM_FORBIDDEN_DEFAULT (0x10) -#define RT_STREAM_HEAD (0x20) -#define RT_STREAM_PRIMARY_DEFAULT (0x40) -#define RT_STREAM_PRIMARY_FIRST_DEFAULT (0x80) - -/** - * @ingroup stream_type - * @brief stream type - */ -#define RT_NORMAL_STREAM (0x00) -#define RT_HUGE_STREAM (0x01) - -/** - * priority level default value when create a stream - */ -#define RT_STREAM_PRIORITY_DEFAULT (0) - -/** - * @ingroup dvrt_stream - * @brief create stream instance - * @param [in|out] stream created stream - * @param [in] priority stream priority - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStreamCreate(rtStream_t *stream, int32_t priority); - -/** - * @ingroup dvrt_stream - * @brief create stream instance - * @param [in|out] stream created stream - * @param [in] priority stream priority - * @param [in] flags stream op flags - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStreamCreateWithFlags(rtStream_t *stream, int32_t priority, uint32_t flags); - -/** - * @ingroup dvrt_stream - * @brief destroy stream instance. - * @param [in] stream the stream to destroy - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStreamDestroy(rtStream_t stream); - -/** - * @ingroup dvrt_stream - * @brief wait an recorded event for stream - * @param [in] stream the wait stream - * @param [in] event the event to wait - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStreamWaitEvent(rtStream_t stream, rtEvent_t event); - -/** - * @ingroup dvrt_stream - * @brief wait stream to be complete - * @param [in] stream stream to wait - * @return RT_ERROR_NONE for ok - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStreamSynchronize(rtStream_t stream); - -/** - * @ingroup dvrt_stream - * @brief queries an asynchronous stream for completion status - * @param [in] stream stream to query - * @return RT_ERROR_NONE for complete - * @return RT_ERROR_STREAM_NOT_COMPLETE for not complete - */ -RTS_API rtError_t rtStreamQuery(rtStream_t stream); - -/** - * @ingroup dvrt_stream - * @brief get stream id from a stream handle - * @param [in] stream stream hadle - * @param [in] streamId stream id - * @return RT_ERROR_NONE for complete - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetStreamId(rtStream_t stream, int32_t *streamId); - -/** - * @ingroup dvrt_stream - * @brief inquire max stream count and max task count per stream - * @param [in] streamType Stream Type - * @param [in] MaxStrCount Max stream count - * @param [in] MaxTaskCount max task count per stream - * @return RT_ERROR_NONE for complete - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtGetMaxStreamAndTask(uint32_t streamType, uint32_t *maxStrCount, uint32_t *maxTaskCount); - -/** - * @ingroup dvrt_stream - * @brief Name a stream - * @param [in] stream stream to be named - * @param [in] name identification name - * @return 
RT_ERROR_NONE for complete - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtNameStream(rtStream_t stream, const char *name); - -/** - * @ingroup dvrt_stream - * @brief switch to the corresponding stream according to the contents of the ptr - * @param [in] ptr Determine the address where the value of the true and false branches is located - * @param [in] condition switch condition - * @param [in] value switch value - * @param [in] trueStream Stream that needs to be activated when the value is non-zero - * @param [in] stream input stream to init task - * @return RT_ERROR_NONE for complete - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStreamSwitch(void *ptr, rtCondition_t condition, int64_t value, rtStream_t trueStream, - rtStream_t stream); - -/** - * @brief execute extensible stream switch task - * @param [in] ptr pointer of value - * @param [in] condition judge condition - * @param [in] value_ptr pointer of target value - * @param [in] true_stream stream to be activated when value is not zero - * @param [in] stream stream id - * @param [in] dataType data type of target value - * @return RT_ERROR_NONE for complete - */ -RTS_API rtError_t rtStreamSwitchEx(void *ptr, rtCondition_t condition, void *valuePtr, rtStream_t trueStream, - rtStream_t stream, rtSwitchDataType_t dataType); - -/** - * @ingroup dvrt_stream - * @brief Active a stream - * @param [in] activeStream stream to be activated - * @param [in] stream input stream to init task - * @return RT_ERROR_NONE for complete - * @return RT_ERROR_INVALID_VALUE for error input - */ -RTS_API rtError_t rtStreamActive(rtStream_t activeStream, rtStream_t stream); - -/** - * @brief execute extensible stream case switch task - * @param [in] ptr pointer of value - * @param [in] size pointer num of value - * @param [in] valuePtr pointer of target value, length = size * elementSize - * @param [in] trueStreamPtr streams to be activated - * @param [in] elementSize size of to be activated true streams - * @param [in] stream input stream to init task - * @param [in] dataType data type of target value - * @return RT_ERROR_NONE for complete - */ -RTS_API rtError_t rtStreamSwitchN(void *ptr, uint32_t size, void *valuePtr, rtStream_t *trueStreamPtr, - uint32_t elementSize, rtStream_t stream, rtSwitchDataType_t dataType); -#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) -} -#endif - -#endif // __CCE_RUNTIME_STREAM_H__ diff --git a/inc/tdt/data_common.h b/inc/tdt/data_common.h deleted file mode 100644 index 7b1d631bd..000000000 --- a/inc/tdt/data_common.h +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef HOST_INNER_INC_DATA_COMMON_H_ -#define HOST_INNER_INC_DATA_COMMON_H_ - -namespace tdt { -#ifndef TDT_DATA_TYPE -#define TDT_DATA_TYPE - -/** - * @ingroup Tdt data. - * - * Tdt data type. 
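
[Illustrative sketch, not part of the patch] For the stream API deleted above, the snippet below contrasts non-blocking completion polling via rtStreamQuery() with the blocking rtStreamSynchronize(), and shows the platform-limit query. The install path is an assumption; the return-code handling mirrors the function comments above.

#include "runtime/stream.h"   // assumed install path of the deleted header

// Returns true only when every task queued on the stream has finished.
static bool StreamIsIdle(rtStream_t stream)
{
    const rtError_t ret = rtStreamQuery(stream);
    if (ret == RT_ERROR_NONE) {
        return true;
    }
    // RT_ERROR_STREAM_NOT_COMPLETE means work is still pending; treat any other
    // code as "not idle" too and let the caller decide how to handle the error.
    return false;
}

static void QueryStreamLimits(uint32_t *maxStreams, uint32_t *maxTasksPerStream)
{
    if (rtGetMaxStreamAndTask(RT_NORMAL_STREAM, maxStreams, maxTasksPerStream) != RT_ERROR_NONE) {
        *maxStreams = 0U;
        *maxTasksPerStream = 0U;
    }
}
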
- */ -enum TdtDataType { - TDT_IMAGE_LABEL = 0, /**< Image label*/ - TDT_TFRECORD, /**< TF Record*/ - TDT_DATA_LABEL, /**< Data label*/ - TDT_END_OF_SEQUENCE, /**< End of Sequence*/ - TDT_TENSOR, /**< Tensor*/ - TDT_ABNORMAL, /**< ABNORMAL*/ - TDT_DATATYPE_MAX /**< Max*/ -}; -#endif - -/** - * @ingroup Tdt data. - * - * Tdt push data between host and device. - */ -struct TdtDataItem { - TdtDataType dataType_; /**< Input data type*/ - uint64_t label_; /**< Input data label*/ - uint64_t dataLen_; /**< Input data type length*/ - uint64_t realDataLen_; /**< Real Input data type length*/ - std::string tensorShape_; /**< Tensor shape*/ - std::string tensorType_; /**< Tensor type*/ - uint32_t cnt_; /**< Data count*/ - uint32_t currentCnt_; /**< Data current count*/ - uint64_t index_; /**< Data inde*/ - std::string tensorName_; /**< Tensor name*/ - uint64_t md5ValueHead_; /**< Data md5*/ - uint64_t md5ValueTail_; /**< Data md5*/ - std::shared_ptr dataPtr_; /**< Data pointer*/ - std::string headMD5_; /**< MD5 header, 8byte*/ - std::string tailMD5_; /**< MD5 tail, 8byte*/ -}; - -/** - * @ingroup Tdt data. - * - * Tdt push data for queuedataset ort mind-data. - */ -struct DataItem { - TdtDataType dataType_; /**< Input data type*/ - std::string tensorName_; /**< Tensor name*/ - std::string tensorShape_; /**< Tensor shape*/ - std::string tensorType_; /**< Tensor type*/ - uint64_t dataLen_; /**< Input data type length*/ - std::shared_ptr dataPtr_; /**< Data pointer*/ -}; - -/** - * @ingroup Tsdclient. - * - * tsdclient func type; - */ -enum TsdCmdType { - TSDCLOSE = 0, - TSDOPEN = 1 -}; - -/** - * @ingroup Tsdclient. - * - * tsdclient func input value object. - */ -enum InputItem { - OPEN_DEVICEID = 0, - OPEN_RANKSIZE, - CLOSE_DEVICEID -}; - -} // namespace tdt -#endif // HOST_INNER_INC_DATA_COMMON_H_ diff --git a/inc/tdt/status.h b/inc/tdt/status.h deleted file mode 100644 index 5afc89ace..000000000 --- a/inc/tdt/status.h +++ /dev/null @@ -1,755 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_TDT_STATUS_H_ -#define INC_TDT_STATUS_H_ - -#include - -#ifdef __cplusplus -#include -#include -#include -#include -#else -#include -#endif - -#ifdef __cplusplus -using TDT_StatusT = uint32_t; -#else -typedef uint32_t TDT_StatusT; -#endif - -#ifndef TDT_LIB_EXPORT -#define TDT_LIB_EXPORT __attribute__((visibility("default"))) -#endif -/** - * @ingroup tdt status. 
- * - * Tdt debug level - */ -enum { - TDT_DEBUG = 0, /**< Debug*/ - TDT_INFO = 1, /**< Info*/ - TDT_WARNING = 2, /**< Warning*/ - TDT_ERROR = 3, /**< Error*/ - TDT_FATAL = 4, /**< Fatal*/ - TDT_EVENT = 5, /**< Event*/ - TDT_OPLOG = 6, /**< Oplog*/ - TDT_TRACE = 7 /**< Trace*/ -}; - -enum { - TDT_OK_CODE = 0, - TDT_DEBUG_INFO_CODE, - TDT_INTERNAL_ERROR_CODE, - TDT_COMMON_WARNING_CODE, - TDT_PREFETCH_STOPED_CODE, - TDT_FILE_SIZE_TOO_LARGE_CODE, - TDT_FILE_INVALID_PATH_CODE, - TDT_MEMORY_EXHAUSTED_CODE, - TDT_INTERGER_REVERSED_CODE, - TDT_FILE_NOT_EXIST_CODE, - TDT_DEFAULT_CONFIG_FILE_NOT_EXIST_CODE, - TDT_INSTANCE_NOT_INITIALED_CODE, - TDT_INITIAL_FAILED_CODE, - TDT_INSTANCE_NOT_FOUND_CODE, - TDT_HDC_CREATE_SESSION_FAILED_CODE, - TDT_HDC_DESTROY_SESSION_FAILED_CODE, - TDT_HDC_SESSION_DO_NOT_EXIST_CODE, - TDT_PID_IS_EXIST_CODE, - TDT_HDC_SRV_INIT_ERROR_CODE, - TDT_HDC_SRV_CREATE_ERROR_CODE, - TDT_HDC_SRV_DESTROY_ERROR_CODE, - TDT_HDC_SRV_ACCEPT_ERROR_CODE, - TDT_HDC_SRV_CLOSED_ERROR_CODE, - TDT_HDC_INTERNAL_ERROR_CODE, - TDT_HDC_INFO_CODE, - TDT_HDC_SEND_ERROR_CODE, - TDT_MESSAGE_PARSE_ERROR_CODE, - TDT_HDC_SEG_SIZE_ERROR_CODE, - TDT_HDC_MESSAGE_NULL_CODE, - TDT_HDC_SEARFUNC_IS_NULL_CODE, - TDT_HDC_SENDMSG_FAILED_CODE, - TDT_HDC_SRV_CLOSE_CHILD_SESSION_ERROR_CODE, - TDT_HDC_SRV_CLOSE_SERVER_SESSION_ERROR_CODE, - TDT_HDC_SRV_HEART_BEAT_TIMEOUT_CODE, // 30 - TDT_HDC_DRV_ERROR_CODE, - TDT_HDC_SERVER_CLIENT_SOCKET_CLOSED_CODE, - TDT_TSD_START_FAIL_CODE, - TDT_TSD_CLEANPROC_FIRST_GETPID_FAILED_CODE, - TDT_TSD_CLEANPROC_KILL_PROCESS_FAILED_CODE, - TDT_TSD_CLEANPROC_SECOND_GETPID_FAILED_CODE, - TDT_TSD_CLEANPROC_FINAL_FAILED_CODE, - TDT_TSD_INIT_STATE_FAILED_CODE, - TDT_TSD_INIT_HDCSERVER_FAILED_CODE, - TDT_TSD_SEND_HEARTBEAT_FAILED_CODE, - TDT_TSD_CLEAN_RESOURCE_FAILED_CODE, - TDT_TSD_SEND_MSG_FAILED_CODE, - TDT_TSD_AICPU_SD_PROCESS_ABNORMAL_CODE, - TDT_TSD_CUSTOM_PROCESS_ABNORMAL_CODE, - TDT_PPC_DRIVER_INIT_FAIL_CODE, - TDT_PPC_SERVER_CLIENT_CREATE_FAIL_CODE, - TDT_PPC_SERVER_CLIENT_DESTORY_FAIL_CODE, - TDT_PPC_SERVER_CLOSE_CODE, - TDT_PPC_GET_SET_MSG_BUFFER_FAIL_CODE, - TDT_PPC_SESSION_CONNECT_FAIL_CODE, // 40 - TDT_PPC_SESSION_NOT_EXISTED_CODE, - TDT_PPC_SEND_RECEIVE_MSG_FAIL_CODE, - TDT_PPC_MSG_FREE_FAIL_CODE, - TDT_PPC_ALLOC_MSG_FAIL_CODE, - TDT_PPC_MSG_LEN_NOT_MATCH_CODE, - TDT_PPC_MSG_BUF_NULL_CODE, - TDT_PPC_CLIENT_INVALID_PARAM_CODE, - TDT_PPC_SERVER_INVALID_PARAM_CODE, - TDT_PPC_CLIENT_RECVDATA_CONTINUE_CODE, - TDT_PPC_SERVER_CLIENT_SOCKET_CLOSED_CODE, // 50 - TDT_PPC_RECV_MSG_ERROR_CODE, - TDT_PPC_SESSION_CLOSE_ERROR_CODE, - TDT_SHUFFLE_SHUFFLE_SIZE_ILLEGAL_CODE, - TDT_SHUFFLE_ONLINE_UNIQUE_SEED_ILLEGAL_CODE, - TDT_SHUFFLE_UNABLE_TO_CREATE_SHUFFLE_LIST_CODE, - TDT_SHUFFLE_ILLEGAL_SHUFFLE_TYPE_CODE, - TDT_PREFETCH_ILLEGAL_DATATYPE_CODE, - TDT_SUPERVISOR_UNKOWN_JOB_STATE_CODE, - TDT_MAP_BUFFER_ERROR_CODE, - TDT_ALLOC_BUFFER_FAILED_CODE, - TDT_FREE_HDC_BUFFER_FAILED_CODE, - TDT_DATA_SIZE_WRONG_CODE, - TDT_MEMORY_POOL_INITED_CODE, - TDT_SENDMSG_FAILED_CODE, - TDT_INVALID_VALUE_CODE, - TDT_NO_USEFUL_MEMORY_CODE, - TDT_MESSAGE_NULL_CODE, - TDT_MEMORY_POOL_STOPPED_CODE, - TDT_HDC_MEMORY_ADDR_NOT_ALIGN_CODE, - TDT_MEMORY_POOL_GET_NULL_CODE, - TDT_MEMORY_POOL_NOT_EXISTED_CODE, - TDT_RECOVER_DATA_FAILED_CODE, - TDT_MEMORY_STATUS_ERROR_CODE, - TDT_MEMORY_POOL_UPDATE_FAILED_CODE, - TDT_MEMORY_POOL_RESIZE_FAILED_CODE, - TDT_MEMORY_DESTROY_FAILED_CODE, - TDT_EXCEED_MAX_THREAD_CODE, - TDT_WARNING_SET_THREAD_NAME_FAILED_CODE, - TDT_WRONG_PRIORITY_CODE, - TDT_JOIN_TASK_ERROR_CODE, - TDT_NULL_FUNC_CODE, 
- TDT_INIT_FAIL_CODE, - TDT_EXISTED_FUNC_CODE, - TDT_FILE_GET_FILE_STATE_FAIL_CODE, - TDT_FILE_OPEN_FILE_FAIL_CODE, - TDT_FILE_FILE_DESTROYED_CODE, - TDT_FILE_UNABLE_TO_GET_FILE_MEMORY_CODE, - TDT_PREFETCH_UNABLE_TO_GET_TDTDATAITEM_CODE, - TDT_HDCSERVER_DO_NOT_EXIST_CODE, - TDT_HDCSESSIONID_NOT_AVAILABLE_CODE, - TDT_SET_HDCSESSION_REFERENCE_FAILED_CODE, - TDT_HDC_RECV_MSG_ERROR_CODE, - TDT_HDC_SEND_MSG_ERROR_CODE, - TDT_FILE_CONTENT_EMPTY_CODE, - TDT_TDTSEVER_ACCEPT_FAILED_CODE, - TDT_CHANNEL_DO_NOT_EXIST_CODE, - TDT_NULL_POINTER_MSG_CODE, - TDT_TRAN_UNKNOWN_RSP_CODE, - TDT_TRAN_TIMEOUT_CODE, - TDT_TRAN_NOT_EXIST_CODE, - TDT_TRAN_ID_GEN_ERROR_CODE, - TDT_SEND_CHANNEL_FAILED_CODE, - TDT_SEND_CHANNEL_TIMEOUT_CODE, - TDT_QUEUE_STOPPED_CODE, - TDT_QUEUE_POP_FAILED_CODE, - TDT_QUEUE_PUSH_FAILED_CODE, - TDT_QUEUE_NOT_FIND_CODE, - TDT_QUEUE_CREATE_FAILED_CODE, - TDT_QUEUE_FULL_CODE, - TDT_QUEUE_EMPTY_CODE, - TDT_DATA_ENTO_CP_FAILED_CODE, - TDT_STOP_CP_QUEUE_FAILED_CODE, - TDT_RECV_MSG_NO_CHANNEL_INFO_ERROR_CODE, - TDT_CHANNEL_HAS_NO_SESSION_ERROR_CODE, - TDT_PREFETCH_SAMPLE_HAS_NO_LABEL_CODE, - TDT_HDC_CLIENT_INIT_ERROR_CODE, - TDT_HDC_CLIENT_CREATE_SESSION_ERROR_CODE, - TDT_HDC_CLIENT_DO_NOT_EXIST_CODE, - TDT_HDC_CLIENT_DESTROY_ERROR_CODE, - TDT_BIND_CPUCORE_FAILED_CODE, - TDT_HDC_CLIENT_CLOSED_CODE, - TDT_HDC_SRV_CLOSED_CODE, - TDT_HDC_SRV_TYPE_ERROR_CODE, - TDT_TSD_CLT_OPEN_FAILED_CODE, - TDT_TSD_CLT_CLOSE_FAILED_CODE, - TDT_TSD_CLT_UPDATE_PROFILING_FAILED_CODE, - TDT_TSD_CLT_INTERFACE_NOT_SUPPORT_CODE, - TDT_SUPERVISOR_ILLEGAL_HEARTBEAT_TIME_CODE, - TDT_SUPERVISOR_INOTIFY_READ_SIZE_ERROR_CODE, - TDT_SUPERVISOR_INOTIFY_INTERRUPT_CODE, - TDT_SUPERVISOR_INOTIFY_INIT_ERROR_CODE, - TDT_SUPERVISOR_CLOSE_INOTIFYFD_FAIL_CODE, - TDT_SUPERVISOR_INOTIFY_WATCH_ERROR_CODE, - TDT_TRANSFER_CANNOT_OPEN_CONFIGFILE_CODE, - TDT_TRANSFER_PARSE_FILE_FAILED_CODE, - TDT_TRANSFER_NO_CHANNEL_DATA_CODE, - TDT_PREFETCH_CREATE_FAILED_CODE, - TDT_TRANSFER_NO_PARAMETER_CODE, - TDT_TRANSFER_NO_PARAMETER_ARG_CODE, - TDT_FILE_TYPE_UNSUPPORT_CODE, - TDT_FILE_DIR_IS_NULL_CODE, - TDT_FILE_GET_DIR_TREE_ERROR_CODE, - TDT_FILE_CANNOT_OPEN_DIR_CODE, - TDT_PREFETCH_SAMPLE_CANNOT_BE_READ_CODE, - TDT_PREFETCH_DATA_QUEUE_IS_CLOSED_CODE, - TDT_PREFETCH_GET_SHUFFLE_RESULT_FAIL_CODE, - TDT_FILE_CANNOT_DFREE_FILE_MEMORY_CODE, - TDT_TRANSFER_CREATE_DELIVER_FAILED_CODE, - TDT_TRANSFER_TRAIN_DATA_DELIVER_IS_NULLPTR_CODE, - TDT_TRANSFER_EMPTY_GROUPNAME_IN_MULTI_GROUPS_CODE, - TDT_TRANSFER_DUPLICATE_GROUPNAME_CODE, - TDT_TRANSFER_DUPLICATE_DEVICE_CODE, - TDT_TRANSFER_FIND_DEVICE_FAIL_CODE, - TDT_SUPERVISOR_FAIL_TO_WRITE_PID_FILE_CODE, - TDT_SUPERVISOR_HEARTBEAT_FILE_NOT_INITED_CODE, - TDT_SUPERVISOR_JOB_COMMAND_FILE_NOT_INITED_CODE, - TDT_SUPERVISOR_JOB_STATE_FILE_NOT_INITED_CODE, - TDT_PREFETCH_LABEL_FILE_NOT_INITED_CODE, - TDT_PREFETCH_SAMPLE_FILE_DIR_NOT_INITED_CODE, - TDT_PREFETCH_NOT_INITED_CODE, - TDT_PREFETCH_SHUFFLER_NOT_CREATED_CODE, - TDT_SHUFFLE_NOT_INITED_CODE, - TDT_PREFETCH_SHUFFLED_ITEM_OUT_OF_FILE_LIST_CODE, - TDT_TRANSFER_INIT_FAILED_CODE, - TDT_TRANSFER_START_FAILED_CODE, - TDT_FOLDER_CANNOT_BE_CREATED_CODE, - TDT_CANNOT_GET_STAT_OF_FOLDER_CODE, - TDT_FOLDER_IS_FILE_CODE, - TDT_TRANSFER_CONFIG_FIEL_SYNTAX_ERROR_CODE, - TDT_CHECKSUM_ILLEGAL_MD5_PARAM_CODE, - TDT_CHECKSUM_MD5_INIT_FAILED_CODE, - TDT_CHECKSUM_MD5_UPDATE_FAILED_CODE, - TDT_CHECKSUM_MD5_FINAL_FAILED_CODE, - TDT_TRANSFER_DELIVER_IS_NONE_CODE, - TDT_SUPERVISOR_FAIL_TO_DEL_JOB_CMD_FILE_CODE, - TDT_TRANSFER_FAIL_TO_GET_ENV_VARIABLE_CODE, - 
TDT_MONITOR_INOTIFY_INIT_ERROR_CODE, - TDT_MONITOR_INOTIFY_WATCH_ERROR_CODE, - TDT_MONITOR_CLOSE_INOTIFYFD_FAIL_CODE, - TDT_MONITOR_INOTIFY_READ_SIZE_ERROR_CODE, - TDT_MONITOR_UNSUPPORT_CFGITEM_CODE, - TDT_MONITOR_FAIL_TO_SET_CFGITEM_CODE, - TDT_MONITOR_READ_FILE_FAIL_CODE, - TDT_MONITOR_CONFIG_FILE_FORMAT_ERROR_CODE, - TDT_MONITOR_STRCAT_FAILED_CODE, - TDT_MONITOR_CREATE_CONFIG_FILE_FAIL_CODE, - TDT_PREFETCH_FAIL_TO_GENERATE_MD5_CODE, - TDT_RECV_MSG_MD5_WRONG_CODE, - TDT_RECV_MSG_FAIL_TO_GENERATE_MD5_CODE, - TDT_RECV_MSG_SEQUENCE_ERROR_CODE, - TDT_SERVER_MEMORY_COPY_FAILED_CODE, - TDT_DEVICEID_ERROR_CODE, - TDT_MEMORY_DATA_TYPE_FACTORY_MAKE_SHARED_FAILED_CODE, - TDT_PREFETCH_FILELIST_NOT_EXIST_CODE, - TDT_PREFETCH_SAMPLE_FILE_NOT_FOUND_CODE, - TDT_PREFETCH_FILE_OPEN_FAIL_CODE, - TDT_PREFETCH_FILE_STAT_FAIL_CODE, - TDT_PREFETCH_FILE_MMAP_FAIL_CODE, - TDT_PREFETCH_FILE_UNMAP_FAIL_CODE, - TDT_PREFETCH_FILE_CLOSE_FAIL_CODE, - TDT_PREFETCH_FILE_PARSE_FAIL_CODE, - TDT_PREFETCH_CRC32_SIZE_FAIL_CODE, - TDT_PREFETCH_CRC32_DATA_FAIL_CODE, - TDT_PREFETCH_DATA_QUEUE_CLOSED_CODE, - TDT_PREFETCH_INITIALIZE_FAILED_CODE, - TDT_PREFETCH_MAP_INSERT_FAILED_CODE, - TDT_PREFETCH_INVALID_FILELIST_LINE_CODE, - TDT_FILE_STRINGSTREAM_TO_VALUE_FAILED_CODE, - TDT_LIST_ID_OFFSET_LENGTH_POSITIVE_INTEGER_FAILED_CODE, - TDT_SHUFFLE_ILLEGAL_SHUFFLE_PARAM_CODE, - TDT_FILE_SHUFFLER_CREATE_FAILED_CODE, - TDT_FILE_UPLOADER_CREATE_FAILED_CODE, - TDT_FILE_DOWNLOADER_CREATE_FAILED_CODE, - TDT_OBS_CONFIG_INFORMATION_FAIL_CODE, - TDT_OBS_CALLBACK_ARGUMENT_FAIL_CODE, - TDT_OBS_DOWNLOAD_CREATE_THREAD_FAILED_CODE, - TDT_OBS_DOWNLOAD_FILE_FAIL_CODE, - TDT_OBS_DOWNLOAD_INIT_FAIL_CODE, - TDT_OBS_DOWNLOAD_METADATA_FAIL_CODE, - TDT_OBS_LIST_BUCKET_OBJECTS_FAIL_CODE, - TDT_MEMORY_MEMCPY_FAILED_CODE, - TDT_MEMORY_MEMSET_FAILED_CODE, - TDT_MKDIR_CMD_FAILED_CODE, - TDT_CP_CMD_FAILED_CODE, - TDT_HOST_INIT_FAILED_CODE, - TDT_HOST_CHANNEL_NAME_EMPTY_CODE, - TDT_HOST_ALLOCATE_MEMORY_FAILED_CODE, - TDT_HOST_MEMORY_COPY_FAILED_CODE, - TDT_HOST_UNABLE_GET_TDTDATAELEM_CODE, - TDT_HOST_PUSH_NOT_INIT_CODE, - TDT_TUNING_DATA_TRANSFER_INIT_FAILED_CODE, - TDT_TUNING_DATA_RECEIVE_CHECK_PARA_ERROR_CODE, - TDT_TUNING_DATA_TRANSFER_PARAMETER_ERROR_CODE, - TDT_RECV_MSG_CHECKSUM_WRONG_ERROR_CODE, - TDT_SVM_INIT_FAILED_CODE, - TDT_SVM_FREE_PIN_FAILED_CODE, - TDT_SVM_FREE_SVM_FAILED_CODE, - TDT_SVM_ADD_BUFFER_MAP_FAILED_CODE, - TDT_STATUS_CODE_TOTAL -}; - -/** - * @ingroup Tdt status - * @brief Regiter error code - * @param moduleId [IN] Module ID - * @param logLevel [IN] Log level - * @param CODE_NAME [out] Error name - * @param codeDesc [IN] Error description - */ -#ifdef __cplusplus -#define TDT_DEF_ERROR_CODE(moduleId, logLevel, CODE_NAME, codeDesc) \ - constexpr TDT_StatusT CODE_NAME = ((0xFFFF & ((uint16_t)moduleId)) << 16) | \ - (0xF000 & (((uint16_t)logLevel) << 12)) | (0x0FFF & (CODE_NAME##_CODE)); \ - const tdt::ErrorNoRegisterar g_##CODE_NAME##_errorno(CODE_NAME, codeDesc); -#else -#define TDT_DEF_ERROR_CODE(moduleId, logLevel, CODE_NAME, codeDesc) \ - static const TDT_StatusT CODE_NAME = \ - ((0xFFFF & ((uint16_t)moduleId)) << 16) | (0xF000 & (((uint16_t)logLevel) << 12)) | (0x0FFF & CODE_NAME##_CODE); -#endif - -/** - * @ingroup Tdt status - * @brief Get error level according error name - * @param CODE_NAME [IN] Error code - * @param codeDesc [OUT] Error description - */ -#define TDT_GET_ERROR_LEVEL(CODE_NAME) ((CODE_NAME & 0x0000F000) >> 12) - -#ifdef __cplusplus -#define TDT_GET_ERROR_STR(CODE_NAME) 
(tdt::StatusFactory::GetInstance()->GetErrDesc(CODE_NAME)) -#endif - -// Register module id: 0xAABB, AA means system level number, BB means module level number -constexpr uint16_t MODID_TDT_CLIENT = 0x0101; // TDT_CLIENT module ID -constexpr uint16_t MODID_TSD_SERVER = 0x0102; // TSD_SERVER -constexpr uint16_t MODID_HDC = 0x0103; // HDC_SERVER -constexpr uint16_t MODID_TDT_SHUFFLE = 0x0104; // TDT shuffle module ID -constexpr uint16_t MODID_TDT_PREFETCH = 0x0105; // TDT prefetch module ID -constexpr uint16_t MODID_TDT_TRANSFER = 0x0106; // TDT TrainDataTransfer module ID -constexpr uint16_t MODID_TDT_SUPERVISOR = 0x0107; // TDT supervisor模块ID -constexpr uint16_t MODID_MEM_POOL = 0x0108; // MEMORY_POOL -constexpr uint16_t MODID_PPC = 0x0109; // TDT PPC -constexpr uint16_t MODID_TDT_FILE = 0x0110; // TDT file operation module ID -constexpr uint16_t MODID_HDC_SERVER = 0x0111; // HDC_SERVER module ID -constexpr uint16_t MODID_TDT_SERVER = 0x0112; // TDTServer module ID -constexpr uint16_t MODID_HDC_CLIENT = 0x0113; // HDC_CLIENT module ID -constexpr uint16_t MODID_TSD_CLIENT = 0x0114; // TSD_CLIENT module ID -constexpr uint16_t MODID_CHECKSUM = 0x0115; // Checksum module ID -constexpr uint16_t MODID_TDT_MONITOR = 0x0116; // TDT monitor module ID -constexpr uint16_t MODID_TDT_HOST = 0x0117; // GE adapts the TDT HOST module ID -constexpr uint16_t MODID_SVM = 0x0118; // SVM Driver module ID - -constexpr uint32_t TDT_API_MAX_SUB_VERSION = 100; -static const int32_t TDT_INVAILED_DEVICE_ID = 0xFFFFFFFF; - -typedef enum tdt_api_version { - TDT_API_VERSION_V1_00 = 100, - TDT_API_VERSION_V1_01 = 101, - TDT_API_VERSION_V2_00 = 200 -} TDT_API_VERSION; - -#ifdef __cplusplus -namespace tdt { -class StatusFactory { - public: - /** - * @ingroup hiaiengine - * @brief Get a pointer to StatusFactory - * @param [in]: - * @return StatusFactory pointer - */ - TDT_LIB_EXPORT static StatusFactory *GetInstance(); - - /** - * @ingroup hiaiengine - * @brief Registration error code - * @param [in]err error code - * @param [in]desc Description string of the error code - */ - TDT_LIB_EXPORT void RegisterErrorNo(const uint32_t err, const std::string &desc); - - /** - * @ingroup hiaiengine - * @brief Get error code description string - * @param [in]err error code - */ - std::string GetErrDesc(const uint32_t err); - - /** - * @ingroup hiaiengine - * @brief Static function: Get error code description string - * @param [in]err error code - * return : If there is a problem, return the empty string "" - */ - static std::string GetErrCodeDesc(uint32_t errCode); - - protected: - /** - * @ingroup hiaiengine - * @brief Constructor - * @param [in] void - */ - StatusFactory(); - - /** - * @ingroup hiaiengine - * @brief Destructor - * @param [in] void - */ - ~StatusFactory() {} - - StatusFactory(const StatusFactory &) = delete; - StatusFactory(StatusFactory &&) = delete; - StatusFactory &operator=(const StatusFactory &) = delete; - StatusFactory &operator=(StatusFactory &&) = delete; - - static std::mutex &GetMutex(); - - private: - std::mutex rwMutex_; - std::map errDesc_; -}; - -class ErrorNoRegisterar { - public: - /** - * @ingroup hiaiengine - * @brief Registration error code - * @param [in]err error code - * @param [in]desc Description of the registration error code - */ - ErrorNoRegisterar(const uint32_t &err, const std::string &desc) { - StatusFactory::GetInstance()->RegisterErrorNo(err, desc); - } - - ~ErrorNoRegisterar() {} - ErrorNoRegisterar(const ErrorNoRegisterar &) = delete; - ErrorNoRegisterar(ErrorNoRegisterar &&) = 
delete; - ErrorNoRegisterar &operator=(const ErrorNoRegisterar &) = delete; - ErrorNoRegisterar &operator=(ErrorNoRegisterar &&) = delete; -}; -} // namespace tdt -#endif - -// register error code -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_INFO, TDT_OK, "running ok"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_DEBUG, TDT_DEBUG_INFO, "debug info"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_INTERNAL_ERROR, "internal error"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_WARNING, TDT_COMMON_WARNING, "warnging"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_INFO, TDT_PREFETCH_STOPED, "stopped"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_FILE_NOT_EXIST, "File is not existed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_DEFAULT_CONFIG_FILE_NOT_EXIST, "Default config file not exist"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_FILE_SIZE_TOO_LARGE, "file size is too large"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_FILE_INVALID_PATH, "file path is invalid"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_MEMORY_EXHAUSTED, "memory exhausted error"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_INTERGER_REVERSED, "interger reached reverse"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_INSTANCE_NOT_INITIALED, - "call member function before instance initialed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_INITIAL_FAILED, "initial failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_INSTANCE_NOT_FOUND, "instance not found"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_HDC_CREATE_SESSION_FAILED, "create hdc session failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_HDC_DESTROY_SESSION_FAILED, "destory hdc session failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_HDC_SESSION_DO_NOT_EXIST, "hdc session id do not exist"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_PID_IS_EXIST, "tdtMain pid is exist"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SRV_INIT_ERROR, "hdc server init error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SRV_CREATE_ERROR, "hdc server create error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SRV_DESTROY_ERROR, "hdc server destroy error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SRV_ACCEPT_ERROR, "hdc server accept error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SRV_CLOSED_ERROR, "hdc server closed error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_INTERNAL_ERROR, "hdc fail"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_DEVICEID_ERROR, "hdc device id error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SRV_CLOSE_CHILD_SESSION_ERROR, "hdc server close child session error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SEARFUNC_IS_NULL, "serarfunc is null"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SENDMSG_FAILED, "hdc send msg failed"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SRV_CLOSE_SERVER_SESSION_ERROR, - "hdc server close server session error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SRV_HEART_BEAT_TIMEOUT, "hdc server heart beat timeout"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_DRV_ERROR, "hiai drv return error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_INFO, TDT_HDC_INFO, "hdc info"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SEND_ERROR, "hdc send message failed"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_SEG_SIZE_ERROR, "hiai seg size error"); -TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_HDC_MESSAGE_NULL, "Message input is null"); 
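
[Illustrative sketch, not part of the patch] The registrations that follow only make sense against the status layout packed by TDT_DEF_ERROR_CODE and decoded by TDT_GET_ERROR_LEVEL above, so here is a brief decoding sketch. The module-extraction macro and the install path are assumptions derived from the packing expression; TDT_GET_ERROR_STR and the example constants come from the header itself.

#include <string>
#include "tdt/status.h"   // assumed install path of the deleted header

// moduleId occupies bits 31..16 per the packing in TDT_DEF_ERROR_CODE (assumption).
#define TDT_GET_ERROR_MODULE(code) (((code) >> 16) & 0xFFFFU)

static void ReportTdtFailure(const TDT_StatusT status)
{
    if (status == TDT_OK) {
        return;
    }
    const uint32_t moduleId = TDT_GET_ERROR_MODULE(status);  // e.g. MODID_HDC == 0x0103
    const uint32_t level = TDT_GET_ERROR_LEVEL(status);      // TDT_DEBUG .. TDT_TRACE
    const std::string desc = TDT_GET_ERROR_STR(status);      // registered description text
    // ... forward moduleId / level / desc to the caller's logging facility ...
    (void)moduleId;
    (void)level;
    (void)desc;
}
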
-TDT_DEF_ERROR_CODE(MODID_HDC, TDT_ERROR, TDT_MESSAGE_PARSE_ERROR, "hdc message parse error"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_HDCSERVER_DO_NOT_EXIST, "hdc server do not exist"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_HDCSESSIONID_NOT_AVAILABLE, "hdc sessionid vector is empty"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_SET_HDCSESSION_REFERENCE_FAILED, - "hdc set hdc session reference failed"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_HDC_RECV_MSG_ERROR, "hdc recv message failed"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_HDC_SEND_MSG_ERROR, "hdc send message failed"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_HDC_SRV_TYPE_ERROR, "hdc service type is not supported"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_HDC_SERVER_CLIENT_SOCKET_CLOSED, - "hdc service or client socket closed"); - -/*********************TSDAEMON************************/ -// create TSDAEMON error level error -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_START_FAIL, "Tsdaemon start fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_CLEANPROC_FIRST_GETPID_FAILED, "Tsdaemon first get pid fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_CLEANPROC_KILL_PROCESS_FAILED, "Tsdaemon kill processfail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_CLEANPROC_SECOND_GETPID_FAILED, "Tsdaemon second get pid fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_CLEANPROC_FINAL_FAILED, "Tsdaemon clean process final fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_INIT_STATE_FAILED, "Tsdaemon init state fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_INIT_HDCSERVER_FAILED, "Tsdaemon init hdcserver fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_SEND_HEARTBEAT_FAILED, "Tsdaemon get pid fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_CLEAN_RESOURCE_FAILED, "Tsdaemon clean resource fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_SEND_MSG_FAILED, "Tsdaemon send msg fail"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_ERROR, TDT_TSD_AICPU_SD_PROCESS_ABNORMAL, "aicpu_sd process abnormal"); -TDT_DEF_ERROR_CODE(MODID_TSD_SERVER, TDT_INFO, TDT_TSD_CUSTOM_PROCESS_ABNORMAL, "custom_aicpu_sd process abnormal"); - -/********************* PPC ****************************/ -// create PPC error level error -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_DRIVER_INIT_FAIL, "Init PPC driver fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SERVER_CLIENT_CREATE_FAIL, "Create PPC server or PPC client fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SERVER_CLIENT_DESTORY_FAIL, "Destory PPC server or PPC client fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SERVER_CLOSE, "PPC server is closed"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_GET_SET_MSG_BUFFER_FAIL, "PPC get or set msg buffer fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SESSION_CONNECT_FAIL, "PPC connect is failed"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SESSION_NOT_EXISTED, "PPC session is not existed"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SEND_RECEIVE_MSG_FAIL, "PPC send or receive msg fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_MSG_FREE_FAIL, "PPC msg free fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_ALLOC_MSG_FAIL, "PPC alloc memory for msg fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_MSG_LEN_NOT_MATCH, "PPC message length not match"); 
-TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_MSG_BUF_NULL, "PPC message buffer is null"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_CLIENT_INVALID_PARAM, "PPC message client invalid param fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SERVER_INVALID_PARAM, "PPC message server invalid param fail"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_CLIENT_RECVDATA_CONTINUE, - "PPC message client receive not expected msg continue"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SERVER_CLIENT_SOCKET_CLOSED, - "PPC message server receive server or client socket closed msg"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_RECV_MSG_ERROR, "PPC receive msg failed"); -TDT_DEF_ERROR_CODE(MODID_PPC, TDT_ERROR, TDT_PPC_SESSION_CLOSE_ERROR, "PPC close session failed"); - -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_GET_FILE_STATE_FAIL, "can not get file state"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_OPEN_FILE_FAIL, "can not open file"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_CONTENT_EMPTY, "file content is empty"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_FILE_DESTROYED, "file is destroyed"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_UNABLE_TO_GET_FILE_MEMORY, "fail to get memory for file"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_TYPE_UNSUPPORT, "file type is not supported"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_DIR_IS_NULL, "pointer to dir is null"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_GET_DIR_TREE_ERROR, "can not get the tree of dir"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_CANNOT_OPEN_DIR, "dir cannot be opened"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_CANNOT_DFREE_FILE_MEMORY, "DFree memory of file failed"); - -TDT_DEF_ERROR_CODE(MODID_TDT_SHUFFLE, TDT_ERROR, TDT_SHUFFLE_SHUFFLE_SIZE_ILLEGAL, - "shuffle size is less or equal to 0"); -TDT_DEF_ERROR_CODE(MODID_TDT_SHUFFLE, TDT_ERROR, TDT_SHUFFLE_ONLINE_UNIQUE_SEED_ILLEGAL, - "online unique seed is equal to 0"); -TDT_DEF_ERROR_CODE(MODID_TDT_SHUFFLE, TDT_ERROR, TDT_SHUFFLE_UNABLE_TO_CREATE_SHUFFLE_LIST, - "unable to create shuffle list"); -TDT_DEF_ERROR_CODE(MODID_TDT_SHUFFLE, TDT_ERROR, TDT_SHUFFLE_ILLEGAL_SHUFFLE_TYPE, "illegal shuffle type"); -TDT_DEF_ERROR_CODE(MODID_TDT_SHUFFLE, TDT_ERROR, TDT_SHUFFLE_NOT_INITED, "shuffler has not been inited"); - -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_SAMPLE_HAS_NO_LABEL, "the sample has no label"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_SAMPLE_CANNOT_BE_READ, "the sample cannot be read"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_ILLEGAL_DATATYPE, "illegal data type"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_CREATE_FAILED, "creating prefetcher failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_UNABLE_TO_GET_TDTDATAITEM, "fail to get TDTDataItem"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_DATA_QUEUE_IS_CLOSED, "data queue is closed"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_GET_SHUFFLE_RESULT_FAIL, "fail to get shuffle result"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_LABEL_FILE_NOT_INITED, "label file has not been inited"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_SAMPLE_FILE_DIR_NOT_INITED, - "directory of sample files has not been inited"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, 
TDT_PREFETCH_NOT_INITED, "prefetcher in deliver has not been inited"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_SHUFFLER_NOT_CREATED, - "shuffler in prefetcher has not been created"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_SHUFFLED_ITEM_OUT_OF_FILE_LIST, - "shuffled item is out of file list"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_FAIL_TO_GENERATE_MD5, "fail to generate md5 of data"); -TDT_DEF_ERROR_CODE(MODID_CHECKSUM, TDT_ERROR, TDT_CHECKSUM_ILLEGAL_MD5_PARAM, "params to generate md5 is illegal"); -TDT_DEF_ERROR_CODE(MODID_CHECKSUM, TDT_ERROR, TDT_CHECKSUM_MD5_INIT_FAILED, "md5_init failed"); -TDT_DEF_ERROR_CODE(MODID_CHECKSUM, TDT_ERROR, TDT_CHECKSUM_MD5_UPDATE_FAILED, "md5_update failed"); -TDT_DEF_ERROR_CODE(MODID_CHECKSUM, TDT_ERROR, TDT_CHECKSUM_MD5_FINAL_FAILED, "md5_final failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_CANNOT_OPEN_CONFIGFILE, "can not open config file"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_PARSE_FILE_FAILED, "parse file failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_NO_CHANNEL_DATA, - "no channel can be found in config file"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_NO_PARAMETER, "no parameter can be found"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_NO_PARAMETER_ARG, - "the argment is not --configfile or stop"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_CREATE_DELIVER_FAILED, - "fail to create train data deliver"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_TRAIN_DATA_DELIVER_IS_NULLPTR, - "train data deliver in the list is nullptr"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_INIT_FAILED, "train data deliver init failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_START_FAILED, "train data deliver start failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_CONFIG_FIEL_SYNTAX_ERROR, - "config file has syntax error"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_DELIVER_IS_NONE, "no deliver is existed"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_MKDIR_CMD_FAILED, "mkdir cmd failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_CP_CMD_FAILED, "cp cmd failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_EMPTY_GROUPNAME_IN_MULTI_GROUPS, "empty group_name"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_DUPLICATE_GROUPNAME, - "the same group_name already exists"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_DUPLICATE_DEVICE, "the same device already exists"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_ERROR, TDT_TRANSFER_FIND_DEVICE_FAIL, "cannot find device"); - -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_WARNING, TDT_SUPERVISOR_INOTIFY_INTERRUPT, "inotify is interrupted"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_UNKOWN_JOB_STATE, "unknow job state"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_ILLEGAL_HEARTBEAT_TIME, "illegal heartbeat time"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_INOTIFY_READ_SIZE_ERROR, - "read size of inotify is error"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_INOTIFY_INIT_ERROR, - "Initialization of inotify failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_CLOSE_INOTIFYFD_FAIL, "Close inotifyFd failed"); 
-TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_INOTIFY_WATCH_ERROR, "Add watch of inotify failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_FAIL_TO_WRITE_PID_FILE, "fail to write pid file"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_HEARTBEAT_FILE_NOT_INITED, - "heart beat file has not been inited"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_JOB_COMMAND_FILE_NOT_INITED, - "job command file has not been inited"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_JOB_STATE_FILE_NOT_INITED, - "job state file has not been inited"); -TDT_DEF_ERROR_CODE(MODID_TDT_SUPERVISOR, TDT_ERROR, TDT_SUPERVISOR_FAIL_TO_DEL_JOB_CMD_FILE, - "fail to delete job command file"); -TDT_DEF_ERROR_CODE(MODID_TDT_TRANSFER, TDT_WARNING, TDT_TRANSFER_FAIL_TO_GET_ENV_VARIABLE, - "can not get environment variable"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_ERROR, TDT_MONITOR_INOTIFY_INIT_ERROR, "Initialization of inotify failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_ERROR, TDT_MONITOR_INOTIFY_WATCH_ERROR, "Add watch of inotify failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_ERROR, TDT_MONITOR_CLOSE_INOTIFYFD_FAIL, "Close inotifyFd failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_WARNING, TDT_MONITOR_INOTIFY_READ_SIZE_ERROR, - "read size of inotify is not correct"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_WARNING, TDT_MONITOR_UNSUPPORT_CFGITEM, "unsupported config item"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_WARNING, TDT_MONITOR_FAIL_TO_SET_CFGITEM, "can not set local config item"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_ERROR, TDT_MONITOR_READ_FILE_FAIL, "read file fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_ERROR, TDT_MONITOR_CONFIG_FILE_FORMAT_ERROR, - "config file is incorrectly formatted"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_ERROR, TDT_MONITOR_STRCAT_FAILED, "strcat failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_MONITOR, TDT_ERROR, TDT_MONITOR_CREATE_CONFIG_FILE_FAIL, - "create ConfigFile pointer failed"); - -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MAP_BUFFER_ERROR, "host buffer map to device failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_ALLOC_BUFFER_FAILED, "memory pool alloc buffer failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_DATA_SIZE_WRONG, "Input datasize is wrong"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_FREE_HDC_BUFFER_FAILED, "memory pool free buffer failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_INVALID_VALUE, "invalid parameter"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_NO_USEFUL_MEMORY, "no usable memory in memory pool"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MESSAGE_NULL, "recv msg is null"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_SENDMSG_FAILED, "send msg failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_POOL_STOPPED, "mempool has stopped"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_HDC_MEMORY_ADDR_NOT_ALIGN, "buffer not aligned"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_POOL_INITED, "memory pool has inited"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_POOL_GET_NULL, "mempool not exist"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_POOL_NOT_EXISTED, "mempool not exist"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_RECOVER_DATA_FAILED, "Recover recv data failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_STATUS_ERROR, "Memory status error"); 
-TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_POOL_UPDATE_FAILED, "update memory pool status failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_POOL_RESIZE_FAILED, "resize memory pool status failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_EXCEED_MAX_THREAD, "thread size is too large"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_WARNING, TDT_WARNING_SET_THREAD_NAME_FAILED, "rename thread failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_WARNING, TDT_WRONG_PRIORITY, "priority is invalid"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_WARNING, TDT_JOIN_TASK_ERROR, "join task failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_WARNING, TDT_NULL_FUNC, "func is null"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_WARNING, TDT_INIT_FAIL, "sear/dear init failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_WARNING, TDT_EXISTED_FUNC, "func has already existed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_DESTROY_FAILED, "mempool destroy failed"); -TDT_DEF_ERROR_CODE(MODID_MEM_POOL, TDT_ERROR, TDT_MEMORY_DATA_TYPE_FACTORY_MAKE_SHARED_FAILED, - "data type factory make shared failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_TDTSEVER_ACCEPT_FAILED, "tdt server accept hdc session failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_CHANNEL_DO_NOT_EXIST, "channel do not exist"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_NULL_POINTER_MSG, "message is null"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_TRAN_UNKNOWN_RSP, "transcation status error"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_TRAN_TIMEOUT, "transcation time out"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_TRAN_NOT_EXIST, "transcation requst id is not exist"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_TRAN_ID_GEN_ERROR, "transcation generateid failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_SEND_CHANNEL_FAILED, "send channel info failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_SEND_CHANNEL_TIMEOUT, "send channel info time out"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_INFO, TDT_QUEUE_STOPPED, "queue has been stopped"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_INFO, TDT_QUEUE_POP_FAILED, "failed to pop data from queue"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_INFO, TDT_QUEUE_PUSH_FAILED, "failed to push data from queue"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_QUEUE_CREATE_FAILED, "queue create fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_QUEUE_NOT_FIND, "queue not find"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_QUEUE_FULL, "queue is full"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_QUEUE_EMPTY, "queue is empty"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_DATA_ENTO_CP_FAILED, "enqueue to computer process failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_STOP_CP_QUEUE_FAILED, "stop computer process queue failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_RECV_MSG_NO_CHANNEL_INFO_ERROR, "no channel in first msg"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_RECV_MSG_MD5_WRONG, "md5 of recv msg is wrong"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_RECV_MSG_CHECKSUM_WRONG_ERROR, "checksum of recv msg is wrong"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_RECV_MSG_FAIL_TO_GENERATE_MD5, "md5 of recv msg is wrong"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, TDT_RECV_MSG_SEQUENCE_ERROR, "sequence recv msg is wrong"); -TDT_DEF_ERROR_CODE(MODID_TDT_SERVER, TDT_ERROR, 
TDT_SERVER_MEMORY_COPY_FAILED, "memory copy failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_CHANNEL_HAS_NO_SESSION_ERROR, "channel has no session"); -TDT_DEF_ERROR_CODE(MODID_HDC_CLIENT, TDT_ERROR, TDT_HDC_CLIENT_INIT_ERROR, "hdc client init error"); -TDT_DEF_ERROR_CODE(MODID_HDC_CLIENT, TDT_ERROR, TDT_HDC_CLIENT_CREATE_SESSION_ERROR, "hdc client create error"); -TDT_DEF_ERROR_CODE(MODID_HDC_CLIENT, TDT_ERROR, TDT_HDC_CLIENT_DO_NOT_EXIST, "hdc client do not exist"); -TDT_DEF_ERROR_CODE(MODID_HDC_CLIENT, TDT_ERROR, TDT_HDC_CLIENT_DESTROY_ERROR, "hdc server destroy error"); -TDT_DEF_ERROR_CODE(MODID_HDC_CLIENT, TDT_ERROR, TDT_HDC_CLIENT_CLOSED, "hdc client has been closed"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_BIND_CPUCORE_FAILED, "thread function bind cpu core failed"); -TDT_DEF_ERROR_CODE(MODID_HDC_SERVER, TDT_ERROR, TDT_HDC_SRV_CLOSED, "hdc server has been closed"); -TDT_DEF_ERROR_CODE(MODID_TSD_CLIENT, TDT_ERROR, TDT_TSD_CLT_OPEN_FAILED, "tsd client open failed"); -TDT_DEF_ERROR_CODE(MODID_TSD_CLIENT, TDT_ERROR, TDT_TSD_CLT_CLOSE_FAILED, "tsd client close failed"); -TDT_DEF_ERROR_CODE(MODID_TSD_CLIENT, TDT_ERROR, TDT_TSD_CLT_UPDATE_PROFILING_FAILED,"tsd client update profiling failed"); -TDT_DEF_ERROR_CODE(MODID_TSD_CLIENT, TDT_ERROR, TDT_TSD_CLT_INTERFACE_NOT_SUPPORT, "tsd client func not support"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_FILELIST_NOT_EXIST, "tdt filelist open failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_SAMPLE_FILE_NOT_FOUND, "tdt sample file is empty"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_FILE_OPEN_FAIL, "tdt open sample file fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_FILE_STAT_FAIL, "tdt stat sample file fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_FILE_MMAP_FAIL, "tdt mmap sample file fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_FILE_UNMAP_FAIL, "tdt unmap sample file fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_FILE_CLOSE_FAIL, "tdt close sample file fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_FILE_PARSE_FAIL, "tdt parse sample file fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_CRC32_SIZE_FAIL, "tdt crc32 of size mismatch"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_CRC32_DATA_FAIL, "tdt crc32 of data mismatch"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_DATA_QUEUE_CLOSED, "tdt prefetch data queue closed"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_MAP_INSERT_FAILED, "map insert fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_INITIALIZE_FAILED, "prefetch init fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_PREFETCH_INVALID_FILELIST_LINE, "invalid filelist line"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_STRINGSTREAM_TO_VALUE_FAILED, "string to value fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_LIST_ID_OFFSET_LENGTH_POSITIVE_INTEGER_FAILED, - "value positive integer fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_SHUFFLE, TDT_ERROR, TDT_SHUFFLE_ILLEGAL_SHUFFLE_PARAM, "Illegal shuffle parameter"); -TDT_DEF_ERROR_CODE(MODID_TDT_SHUFFLE, TDT_ERROR, TDT_FILE_SHUFFLER_CREATE_FAILED, "Create file shuffler fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FILE_UPLOADER_CREATE_FAILED, "Create uploader fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, 
TDT_FILE_DOWNLOADER_CREATE_FAILED, "Create downloader fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FOLDER_CANNOT_BE_CREATED, "folder cannot been created"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_CANNOT_GET_STAT_OF_FOLDER, "cannot get stat of folder"); -TDT_DEF_ERROR_CODE(MODID_TDT_FILE, TDT_ERROR, TDT_FOLDER_IS_FILE, "folder is a file"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_OBS_CONFIG_INFORMATION_FAIL, "OBS configuration fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_OBS_CALLBACK_ARGUMENT_FAIL, "OBS callback argument fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_OBS_DOWNLOAD_CREATE_THREAD_FAILED, - "OBS download create thread fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_OBS_DOWNLOAD_FILE_FAIL, "OBS download file fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_OBS_DOWNLOAD_INIT_FAIL, "OBS download init fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_OBS_DOWNLOAD_METADATA_FAIL, "OBS download metadata fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_OBS_LIST_BUCKET_OBJECTS_FAIL, "OBS list bucket fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_MEMORY_MEMCPY_FAILED, "tdt securec memcpy fail"); -TDT_DEF_ERROR_CODE(MODID_TDT_PREFETCH, TDT_ERROR, TDT_MEMORY_MEMSET_FAILED, "tdt securec memset fail"); -// TDT HOST -TDT_DEF_ERROR_CODE(MODID_TDT_HOST, TDT_ERROR, TDT_HOST_INIT_FAILED, "tdt host init failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_HOST, TDT_ERROR, TDT_HOST_CHANNEL_NAME_EMPTY, "channel name is empty"); -TDT_DEF_ERROR_CODE(MODID_TDT_HOST, TDT_ERROR, TDT_HOST_ALLOCATE_MEMORY_FAILED, "allocate memory failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_HOST, TDT_ERROR, TDT_HOST_MEMORY_COPY_FAILED, "memory copy failed"); -TDT_DEF_ERROR_CODE(MODID_TDT_HOST, TDT_WARNING, TDT_HOST_UNABLE_GET_TDTDATAELEM, "can not get data element"); -TDT_DEF_ERROR_CODE(MODID_TDT_HOST, TDT_WARNING, TDT_HOST_PUSH_NOT_INIT, "push data but not init"); - -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_TUNING_DATA_TRANSFER_INIT_FAILED, - "failed to init the channel of tuning-data"); - -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_TUNING_DATA_RECEIVE_CHECK_PARA_ERROR, "the index is error"); - -TDT_DEF_ERROR_CODE(MODID_TDT_CLIENT, TDT_ERROR, TDT_TUNING_DATA_TRANSFER_PARAMETER_ERROR, "the parameter is error"); -TDT_DEF_ERROR_CODE(MODID_SVM, TDT_ERROR, TDT_SVM_INIT_FAILED, "SVM driver init failed"); -TDT_DEF_ERROR_CODE(MODID_SVM, TDT_ERROR, TDT_SVM_FREE_PIN_FAILED, "SVM driver free host pin memory failed"); -TDT_DEF_ERROR_CODE(MODID_SVM, TDT_ERROR, TDT_SVM_FREE_SVM_FAILED, "SVM driver free device svm memory failed"); -TDT_DEF_ERROR_CODE(MODID_SVM, TDT_ERROR, TDT_SVM_ADD_BUFFER_MAP_FAILED, "add svm buffer info to map failed"); -#endif // INC_TDT_STATUS_H_ diff --git a/inc/tdt/tdt_host_interface.h b/inc/tdt/tdt_host_interface.h deleted file mode 100644 index 06841ff55..000000000 --- a/inc/tdt/tdt_host_interface.h +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef HOST_INNER_INC_TDT_HOST_INTERFACE_H_ -#define HOST_INNER_INC_TDT_HOST_INTERFACE_H_ - -#include -#include -#include -#include "tdt/data_common.h" - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -namespace tdt { -/** -* @ingroup TdtHostInit -* @brief Initialize the interface, start and initialize various general thread, log and other services -* -* @par Function -* Initialize the interface, start and initialize various general thread, log and other services -* -* @param deviceId [IN] type #unsigned int. Physical device ID -* @retval #0 Success -* @retval #Not 0 Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -*/ -int32_t TdtHostInit(uint32_t deviceId); - -/** -* @ingroup TdtHostPushData -* @brief Blocking queue. When the queue is full, the Push interface will block. -* -* @par Function -* Blocking queue. When the queue is full, the Push interface will block. -* -* @param channelName [IN] type #String. queue channel name -* @param items [IN] type #vector DataItem is defined in data_common.h. input data -* @retval 0 Success -* @retval OtherValues 0 Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -* @li data_common.h: Header file where 'DataItem' defined -*/ -int32_t TdtHostPushData(const std::string &channelName, std::vector &item, uint32_t deviceId = 0); - -/** -* @ingroup TdtHostDestroy -* @brief Notify TDT component to close related resources -* -* @par Function -* Notify TDT component to close related resources -* -* @param NA -* @retval 0 Success -* @retval OtherValues Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -*/ -int32_t TdtHostDestroy(); - -/** -* @ingroup TdtHostPreparePopData -* @brief Prepare pop data from Tdt data storage queue -* -* @par Function -* Prepare pop data from Tdt data storage queue -* -* @param NA -* @retval 0 Success -* @retval OtherValues 0 Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -* @li data_common.h: Header file where 'DataItem' defined -*/ -int32_t TdtHostPreparePopData(); - -/** -* @ingroup TdtHostPopData -* @brief POP data from Tdt data storage queue -* -* @par Function -* POP data from Tdt data storage queue -* -* @param channelName [IN] type #String. queue channel name -* @param items [IN] type #vector DataItem is defined in data_common.h. input data -* @retval 0 Success -* @retval OtherValues 0 Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. 
-* @li data_common.h: Header file where 'DataItem' defined -*/ -int32_t TdtHostPopData(const std::string &channelName, std::vector &item); - -/** -* @ingroup TdtHostStop -* @brief Activate the thread that reads data externally from Tdt and -* send end of sequence data so that the external thread can exit -* -* @par Function -* Activate the thread that reads data externally from Tdt and send -* end of sequence data so that the external thread can exit -* -* @param channelName [IN] type #String. queue channel name -* @retval 0 Success -* @retval OtherValues Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -*/ -int32_t TdtHostStop(const std::string &channelName); - -/** -* @ingroup TdtInFeedInit -* @brief Initialize the interface, start and initialize various general thread, log and other services -* -* @par Function -* Initialize the interface, start and initialize various general thread, log and other services -* -* @param deviceId [IN] type #unsigned int. logic device ID -* @retval #0 Success -* @retval #Not 0 Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -*/ -int32_t TdtInFeedInit(uint32_t deviceId); - -/** -* @ingroup TdtOutFeedInit -* @brief Initialize the interface, start and initialize various general thread, log and other services -* -* @par Function -* Initialize the interface, start and initialize various general thread, log and other services -* -* @param deviceId [IN] type #unsigned int. logic device ID -* @retval #0 Success -* @retval #Not 0 Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -*/ -int32_t TdtOutFeedInit(uint32_t deviceId); - -/** -* @ingroup TdtInFeedDestroy -* @brief Notify TDT component to close related resources -* -* @par Function -* Notify TDT component to close related resources -* -* @param NA -* @retval 0 Success -* @retval OtherValues Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -*/ -int32_t TdtInFeedDestroy(uint32_t deviceId); - -/** -* @ingroup TdtOutFeedDestroy -* @brief Notify TDT component to close related resources -* -* @par Function -* Notify TDT component to close related resources -* -* @param NA -* @retval 0 Success -* @retval OtherValues Fail -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tdt_host_interface.h: Header file where the interface declaration is located. -*/ -int32_t TdtOutFeedDestroy(); - -} // namespace tdt -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // HOST_INNER_INC_TDT_HOST_INTERFACE_H_ diff --git a/inc/tdt/tsd_client.h b/inc/tdt/tsd_client.h deleted file mode 100644 index 155788832..000000000 --- a/inc/tdt/tsd_client.h +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TDT_HOST_INNER_INC_TSD_CLIENT_H_ -#define TDT_HOST_INNER_INC_TSD_CLIENT_H_ - -#include -#include -#include -#include -#include "tdt/status.h" -#include "tdt/data_common.h" -#include "toolchain/prof_callback.h" - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -/** -* @ingroup Open -* @brief Used for the Framework process to communicate with the TSDDaemon process, -* and notify TSD to complete the initialization of other processes -* -* @par Function -* Used for the Framework process to communicate with the TSDDaemon process, -* and notify TSD to complete the initialization of other processes -* -* @param phyDeviceId [IN] type #unsigned int. Physical device ID -* @param rankSize [IN] type #unsigned int. The rankSize of the training. -* The default value is 1. When rankSize is greater than 1, -* HCCP will be pulled to perform set communication related operations. -* @retval TDT_OK Success -* @retval OtherValues Failure -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tsd_client.h: Header file where the interface declaration is located. -* @li data_common.h: Header file where 'TDT_StatusT' defined -*/ -TDT_LIB_EXPORT TDT_StatusT TsdOpen(const uint32_t logicDeviceId, const uint32_t rankSize); - -/** -* @ingroup Close -* @brief notify TSDClient close resource -* -* @par Function -* notify TSDClient close resource -* -* @param NA -* @retval TDT_OK Success -* @retval OtherValues Failure -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li tsd_client.h: Header file where the interface declaration is located. -* @li data_common.h: Header file where 'TDT_StatusT' defined -*/ -TDT_LIB_EXPORT TDT_StatusT TsdClose(const uint32_t logicDeviceId); - -TDT_LIB_EXPORT TDT_StatusT UpdateProfilingMode(const uint32_t logicDeviceId, const uint32_t flag); -TDT_LIB_EXPORT TDT_StatusT TsdSetMsprofReporterCallback(MsprofReporterCallback callback); - -/** -* @ingroup CreateCmdParameterObj -* @brief creat tsdclient func parameter obj. -* -* @par Function -* creat tsdclient func parameter obj. -* -* @param type [IN] type tdt::TsdCmdType, tsd func type. -* @param cmdParameterObj [IN] type void *, func parameter obj. -* @retval TDT_OK Success -* @retval TDT_INTERFACE_NOT_SUPPORT -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined. -* @li status.h: Header file where 'TDT_StatusT' defined -*/ -TDT_StatusT CreateCmdParameterObj(tdt::TsdCmdType type, void **cmdParameterObj); - -/** -* @ingroup SetCmdParameterObjAttribute -* @brief set cmdParameterObj input value. -* -* @par Function -* set cmdParameterObj input value. -* -* @param type [IN] type tdt::TsdCmdType, tsd func type. -* @param cmdParameterObj [IN] type void *, func parameter obj. -* @param itemType [IN] type tdt::InputItem, func input type. -* @param valuePtr [IN] type const void *, input value. -* @param valueLength [IN] type int, input value length. 
-* @retval TDT_OK Success -* @retval TDT_INTERFACE_NOT_SUPPORT -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined. -* @li status.h: Header file where 'TDT_StatusT' defined -*/ -TDT_StatusT SetCmdParameterObjAttribute(tdt::TsdCmdType type, void *cmdParameterObj, tdt::InputItem itemType, const void *valuePtr, int valueLength); - -/** -* @ingroup GetCmdParameterObjAttribute -* @brief set cmdParameterObj input value. -* -* @par Function -* set cmdParameterObj input value. -* -* @param type [IN] type tdt::TsdCmdType, tsd func type. -* @param cmdParameterObj [IN] type void *, func parameter obj. -* @param itemType [IN] type tdt::InputItem, func input type. -* @param valuePtr [IN] type const void *, input value. -* @param valueLength [IN] type int, input value length. -* @retval TDT_OK Success -* @retval TDT_INTERFACE_NOT_SUPPORT -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined. -* @li status.h: Header file where 'TDT_StatusT' defined -*/ -TDT_StatusT GetCmdParameterObjAttribute(tdt::TsdCmdType type, void *cmdParameterObj, tdt::InputItem itemType, void *valuePtr, int &valueLength); - -/** -* @ingroup TsdClientCmd -* @brief creat tsdclient func parameter obj. -* -* @par Function -* creat tsdclient func parameter obj. -* -* @param type [IN] type tdt::TsdCmdType, tsd func type. -* @param cmdParameterObj [IN] type void *, func parameter obj. -* @retval TDT_OK Success -* @retval TDT_INTERFACE_NOT_SUPPORT -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined. -* @li status.h: Header file where 'TDT_StatusT' defined -*/ -TDT_StatusT TsdClientCmd(tdt::TsdCmdType cmd, void *cmdParameterObj); - -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // TDT_HOST_INNER_INC_TSD_CLIENT_H_ diff --git a/inc/toolchain/adx_datadump_server.h b/inc/toolchain/adx_datadump_server.h deleted file mode 100644 index a1c39a51a..000000000 --- a/inc/toolchain/adx_datadump_server.h +++ /dev/null @@ -1,36 +0,0 @@ -/** -* @file adx_datadump_server.h -* -* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ - -#ifndef ADX_DATADUMP_SERVER_H -#define ADX_DATADUMP_SERVER_H -#ifdef __cplusplus -extern "C" { -#endif -/** - * @brief initialize server for normal datadump function. - * @return - * IDE_DAEMON_OK: datadump server init success - * IDE_DAEMON_ERROR: datadump server init failed - */ -int AdxDataDumpServerInit(); - -/** - * @brief uninitialize server for normal datadump function. - * @return - * IDE_DAEMON_OK: datadump server uninit success - * IDE_DAEMON_ERROR: datadump server uninit failed - */ -int AdxDataDumpServerUnInit(); - -#ifdef __cplusplus -} -#endif -#endif - diff --git a/inc/toolchain/plog.h b/inc/toolchain/plog.h deleted file mode 100644 index 6134c3e6b..000000000 --- a/inc/toolchain/plog.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef _PLOG_H_ -#define _PLOG_H_ - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -#ifndef LINUX -#define LINUX 0 -#endif // LINUX - -#ifndef WIN -#define WIN 1 -#endif - -#ifndef OS_TYPE -#define OS_TYPE 0 -#endif // OS_TYPE - -#if (OS_TYPE == LINUX) -#define DLL_EXPORT __attribute__((visibility("default"))) -#else -#define DLL_EXPORT _declspec(dllexport) -#endif - -/** - * @ingroup plog - * @brief DlogReportInitialize: init log in service process before all device setting. - * @return: 0: SUCCEED, others: FAILED - */ -DLL_EXPORT int DlogReportInitialize(); - -/** - * @ingroup plog - * @brief DlogReportFinalize: release log resource in service process after all device reset. - * @return: 0: SUCCEED, others: FAILED - */ -DLL_EXPORT int DlogReportFinalize(); - -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // D_PLOG_H_ diff --git a/inc/toolchain/prof_callback.h b/inc/toolchain/prof_callback.h deleted file mode 100644 index 69a3e6c95..000000000 --- a/inc/toolchain/prof_callback.h +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright 2020-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * @file prof_callback.h - * @brief declaraion of profiling callbacks - */ - -#ifndef MSPROFILER_PROF_CALLBACK_H_ -#define MSPROFILER_PROF_CALLBACK_H_ - -#ifdef __cplusplus -#include -#include -extern "C" { -#endif // __cplusplus - -/** - * @name MsprofErrorCode - * @brief error code - */ -enum MsprofErrorCode { - MSPROF_ERROR_NONE = 0, - MSPROF_ERROR_MEM_NOT_ENOUGH, - MSPROF_ERROR_GET_ENV, - MSPROF_ERROR_CONFIG_INVALID, - MSPROF_ERROR_ACL_JSON_OFF, - MSPROF_ERROR, -}; - -#define MSPROF_ENGINE_MAX_TAG_LEN (31) - -/** - * @name ReporterData - * @brief struct of data to report - */ -struct ReporterData { - char tag[MSPROF_ENGINE_MAX_TAG_LEN + 1]; // the sub-type of the module, data with different tag will be writen - int deviceId; // the index of device - size_t dataLen; // the length of send data - unsigned char *data; // the data content -}; - -/** - * @name MsprofReporterModuleId - * @brief module id of data to report - */ -enum MsprofReporterModuleId { - MSPROF_MODULE_DATA_PREPROCESS = 0, // DATA_PREPROCESS - MSPROF_MODULE_HCCL, // HCCL - MSPROF_MODULE_ACL, // AclModule - MSPROF_MODULE_FRAMEWORK, // Framework - MSPROF_MODULE_RUNTIME // runtime -}; - -/** - * @name MsprofReporterCallbackType - * @brief reporter callback request type - */ -enum MsprofReporterCallbackType { - MSPROF_REPORTER_REPORT = 0, // report data - MSPROF_REPORTER_INIT, // init reporter - MSPROF_REPORTER_UNINIT, // uninit reporter -}; - -/** - * @name MsprofReporterCallback - * @brief callback to start reporter/stop reporter/report date - * @param moduleId [IN] enum MsprofReporterModuleId - * @param type [IN] enum MsprofReporterCallbackType - * @param data [IN] callback data (nullptr on INTI/UNINIT) - * @param len [IN] callback data size (0 on INIT/UNINIT) - * @return enum MsprofErrorCode - */ -typedef int32_t (*MsprofReporterCallback)(uint32_t moduleId, uint32_t type, void *data, uint32_t len); - - -#define MSPROF_OPTIONS_DEF_LEN_MAX (2048) - -/** - * @name MsprofGeOptions - * @brief struct of MSPROF_CTRL_INIT_GE_OPTIONS - */ -struct MsprofGeOptions { - char jobId[MSPROF_OPTIONS_DEF_LEN_MAX]; - char options[MSPROF_OPTIONS_DEF_LEN_MAX]; -}; - -/** - * @name MsprofCtrlCallbackType - * @brief ctrl callback request type - */ -enum MsprofCtrlCallbackType { - MSPROF_CTRL_INIT_ACL_ENV = 0, // start profiling with acl env - MSPROF_CTRL_INIT_ACL_JSON, // start profiling with acl.json - MSPROF_CTRL_INIT_GE_OPTIONS, // start profiling with ge env and options - MSPROF_CTRL_FINALIZE // stop profiling -}; - -/** - * @name MsprofCtrlCallback - * @brief callback to start/stop profiling - * @param type [IN] enum MsprofCtrlCallbackType - * @param data [IN] callback data - * @param len [IN] callback data size - * @return enum MsprofErrorCode - */ -typedef int32_t (*MsprofCtrlCallback)(uint32_t type, void *data, uint32_t len); - -/** - * @name MsprofSetDeviceCallback - * @brief callback to notify set/reset device - * @param devId [IN] device id - * @param isOpenDevice [IN] true: set device, false: reset device - */ -typedef void (*MsprofSetDeviceCallback)(uint32_t devId, bool isOpenDevice); - -#ifdef __cplusplus -} -#endif - -#endif // MSPROFILER_PROF_CALLBACK_H_ diff --git a/inc/toolchain/prof_engine.h b/inc/toolchain/prof_engine.h deleted file mode 100644 index 0e757dcfb..000000000 --- a/inc/toolchain/prof_engine.h +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with 
the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MSPROF_ENGINE_PROF_ENGINE_H_ -#define MSPROF_ENGINE_PROF_ENGINE_H_ -#define MSVP_PROF_API __attribute__((visibility("default"))) - -#include -#include -#include "prof_reporter.h" - -/** - * @file prof_engine.h - * @defgroup ModuleJobConfig the ModuleJobConfig group - * This is the ModuleJobConfig group - */ -namespace Msprof { -namespace Engine { -/** - * @ingroup ModuleJobConfig - * @brief struct ModuleJobConfig - * record config info - */ -struct ModuleJobConfig { - std::map switches; /**< key is the config name, value is the config value(on or off) */ -}; - -/** - * @defgroup PluginIntf the pluginInf group - * This is the pluginInf group - */ - -/** - * @ingroup PluginIntf - * @brief class PluginIntf - */ -class MSVP_PROF_API PluginIntf { - public: - virtual ~PluginIntf() {} - - public: - /** - * @ingroup PluginIntf - * @name : Init - * @brief : API of user plugin, libmsporf call this API to send a Reporter to user plugin - * @par description : - * API of user plugin, libmsporf call this API to send a Reporter to user plugin. - * @param reporter [IN] const Reporter* the Reporter from libmsprof - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_engine.h - * @since c60 - * @see UnInit - */ - virtual int Init(const Reporter *reporter) = 0; - - /** - * @ingroup PluginIntf - * @name : OnNewConfig - * @brief : API of user plugin, libmsprof call this API to send config info to user plugin \n - If the user plugin needn't config, no need to redefine this function - * @param config [IN] const ModuleJobConfig * the config from libmsprof - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_engine.h - * @since c60 - * @see Init | UnInit - */ - virtual int OnNewConfig(const ModuleJobConfig *config) { return 0; } - - /** - * @ingroup PluginIntf - * @name : UnInit - * @brief : API of user plugin, libmsprof call this API to notify plugin stop to send data - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_engine.h - * @since c60 - * @see Init - */ - virtual int UnInit() = 0; -}; - -/** - * @defgroup EngineIntf the EngineIntf group - * This is the EngineIntf group - */ - -/** - * @ingroup EngineIntf - * @brief class EngineIntf - */ -class MSVP_PROF_API EngineIntf { - public: - virtual ~EngineIntf() {} - - public: - /** - * @ingroup EngineIntf - * @name : CreatePlugin - * @brief : API of user engine, libmsporf call this API to get a plugin - * @retval PluginIntf * The pointer of the new plugin - * - * @par depend: - * @li libmsprof - * @li prof_engine.h - * @since c60 - * @see ReleasePlugin - */ - virtual PluginIntf *CreatePlugin() = 0; - - /** - * @ingroup EngineIntf - * @name : ReleasePlugin - * @brief : API of user engine, libmsprof call this API to release a plugin - * @param plugin [IN] PluginIntf * the plugin to release - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * 
@par depend: - * @li libmsprof - * @li prof_engine.h - * @since c60 - * @see CreatePlugin - */ - virtual int ReleasePlugin(PluginIntf *plugin) = 0; -}; - -/** - * @defgroup EngineMgr the EngineMgr group - * This is the EngineMgr group - */ - -/** - * @ingroup EngineMgr - * @name : RegisterEngine - * @brief : API of libmsprof, register an engine with a name - * @param module [IN] const std::string the name of plugin - * @param engine [IN] const EngineIntf* the plugin - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_engine.h - * @since c60 - */ -MSVP_PROF_API int RegisterEngine(const std::string &module, const EngineIntf *engine); - -/** - * @ingroup EngineMgr - * @name : Init - * @brief : API of libmsprof, init an engine with a name - * @param module [IN] const std::string the name of plugin - * @param module [IN] const EngineIntf* the plugin - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_engine.h - * @since c60 - * @see UnInit - */ -MSVP_PROF_API int Init(const std::string &module, const EngineIntf *engine); - -/** - * @ingroup EngineMgr - * @name : Init - * @brief : API of libmsprof, uninit an engine with a name - * @param module [IN] const std::string the name of plugin - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_engine.h - * @since c60 - * @see Init - */ -MSVP_PROF_API int UnInit(const std::string &module); -} // namespace Engine -} // namespace Msprof - -#endif // MSPROF_ENGINE_PROF_ENGINE_H_ \ No newline at end of file diff --git a/inc/toolchain/prof_mgr_core.h b/inc/toolchain/prof_mgr_core.h deleted file mode 100644 index 4f013eef0..000000000 --- a/inc/toolchain/prof_mgr_core.h +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MSPROF_ENGINE_PROF_MGR_CORE_H_ -#define MSPROF_ENGINE_PROF_MGR_CORE_H_ -#define MSVP_PROF_API __attribute__((visibility("default"))) - -#include -#include - -/** - * @file prof_mgr_core.h - * @brief : struct ProfMgrCfg - */ -struct ProfMgrCfg { - std::string startCfg; /**< start cfg. 
json format */ -}; - -/** - * @name : ProfMgrConf - * @brief : struct ProfMgrConf for example [{"ai_core_events":"0xa"}].the vector size means Number of iterations - */ -struct ProfMgrConf { - std::vector conf; /**< for op trace.Ge call this api to get each iteration profiling cfg.json format.*/ -}; - -/** - * @name : ProfMgrStartUP - * @brief : start Profiling task - * @param cfg [IN]ProfMgrCfg cfg : config of start_up profiling - * @retval void * (success) - * @retval nullptr (failed) - * - * @par depend: - * @li libmsprof - * @li prof_mgr_core.h - * @since c60 - * @see ProfMgrStop - */ -MSVP_PROF_API void *ProfMgrStartUp(const ProfMgrCfg *cfg); - -/** - * @name : ProfMgrStop - * @brief : stop Profiling task - * @param handle [in] void * handle return by ProfMgrStartUP - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_mgr_core.h - * @since c60 - * @see ProfMgrStartUp - */ -MSVP_PROF_API int ProfMgrStop(void *handle); - -/** - * @name : ProfMgrGetConf - * @brief : get profiler events conf - * @param conf [OUT]ProfMgrConf * return by ProfMgrGetConf - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * @par depend: - * @li libmsprof - * @li prof_mgr_core.h - * @since c60 - * @see ProfMgrStartUp - */ -MSVP_PROF_API int ProfMgrGetConf(const std::string &aicoreMetricsType, ProfMgrConf *conf); - -#endif // MSPROF_ENGINE_PROF_MGR_CORE_H_ \ No newline at end of file diff --git a/inc/toolchain/prof_reporter.h b/inc/toolchain/prof_reporter.h deleted file mode 100644 index ff91351b6..000000000 --- a/inc/toolchain/prof_reporter.h +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MSPROF_ENGINE_PROF_REPORTER_H_ -#define MSPROF_ENGINE_PROF_REPORTER_H_ -#ifndef OS_TYPE -#define OS_TYPE 0 -#endif // OS_TYPE - -#if (OS_TYPE != LINUX) -#define MSVP_PROF_API __declspec(dllexport) -#else -#define MSVP_PROF_API __attribute__((visibility("default"))) -#endif - -#include "prof_callback.h" - -/** - * @file prof_reporter.h - * @defgroup reporter the reporter group - * This is the reporter group - */ -namespace Msprof { -namespace Engine { -/** - * @ingroup reporter - * @brief class Reporter - * the Reporter class .used to send data to profiling - */ -class MSVP_PROF_API Reporter { - public: - virtual ~Reporter() {} - - public: - /** - * @ingroup reporter - * @name : Report - * @brief : API of libmsprof, report data to libmsprof, it's a non-blocking function \n - The data will be firstly appended to cache, if the cache is full, data will be ignored - * @param data [IN] const ReporterData * the data send to libmsporf - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_reporter.h - * @since c60 - * @see Flush - */ - virtual int Report(const ReporterData *data) = 0; - - /** - * @ingroup reporter - * @name : Flush - * @brief : API of libmsprof, notify libmsprof send data over, it's a blocking function \n - The all datas of cache will be write to file or send to host - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_reporter.h - * @since c60 - * @see ProfMgrStop - */ - virtual int Flush() = 0; -}; - -} // namespace Engine -} // namespace Msprof - -#endif // MSPROF_ENGINE_PROF_REPORTER_H_ diff --git a/inc/toolchain/slog.h b/inc/toolchain/slog.h deleted file mode 100644 index 7c4f7be24..000000000 --- a/inc/toolchain/slog.h +++ /dev/null @@ -1,510 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef D_SYSLOG_H_ -#define D_SYSLOG_H_ - -#ifdef __cplusplus -#ifndef LOG_CPP -extern "C" { -#endif -#endif // __cplusplus - -#ifndef LINUX -#define LINUX 0 -#endif // LINUX - -#ifndef WIN -#define WIN 1 -#endif - -#ifndef OS_TYPE -#define OS_TYPE 0 -#endif // OS_TYPE - -#if (OS_TYPE == LINUX) -#define DLL_EXPORT __attribute__((visibility("default"))) -#else -#define DLL_EXPORT _declspec(dllexport) -#endif - -/** - * @ingroup slog - * - * debug level id - */ -#define DLOG_DEBUG 0 - -/** - * @ingroup slog - * - * info level id - */ -#define DLOG_INFO 1 - -/** - * @ingroup slog - * - * warning level id - */ -#define DLOG_WARN 2 - -/** - * @ingroup slog - * - * error level id - */ -#define DLOG_ERROR 3 - -/** - * @ingroup slog - * - * don't print log - */ -#define DLOG_NULL 4 - -/** - * @ingroup slog - * - * trace log print level id - */ -#define DLOG_TRACE 5 - -/** - * @ingroup slog - * - * oplog log print level id - */ -#define DLOG_OPLOG 6 - -/** - * @ingroup slog - * - * event log print level id - */ -#define DLOG_EVENT 0x10 - -/** - * @ingroup slog - * - * max log length - */ -#define MSG_LENGTH 1024 -#define DEBUG_LOG_MASK (0x00010000) -#define SECURITY_LOG_MASK (0x00100000) -#define RUN_LOG_MASK (0x01000000) -#define OPERATION_LOG_MASK (0x10000000) -#define RESERVERD_LENGTH 52 - -typedef struct tagDCODE { - const char *cName; - int cVal; -} DCODE; - -typedef struct tagKV { - char *kname; - char *value; -} KeyValue; - -typedef enum { - APPLICATION = 0, - SYSTEM -} ProcessType; - -typedef struct { - ProcessType type; - unsigned int pid; - unsigned int deviceId; - char reserved[RESERVERD_LENGTH]; -} LogAttr; - -/** - * @ingroup slog - * - * module id - */ -enum { - SLOG, /**< Slog */ - IDEDD, /**< IDE daemon device */ - IDEDH, /**< IDE daemon host */ - HCCL, /**< HCCL */ - FMK, /**< Framework */ - HIAIENGINE, /**< Matrix */ - DVPP, /**< DVPP */ - RUNTIME, /**< Runtime */ - CCE, /**< CCE */ -#if (OS_TYPE == LINUX) - HDC, /**< HDC */ -#else - HDCL, -#endif // OS_TYPE - DRV, /**< Driver */ - MDCFUSION, /**< Mdc fusion */ - MDCLOCATION, /**< Mdc location */ - MDCPERCEPTION, /**< Mdc perception */ - MDCFSM, - MDCCOMMON, - MDCMONITOR, - MDCBSWP, /**< MDC base software platform */ - MDCDEFAULT, /**< MDC undefine */ - MDCSC, /**< MDC spatial cognition */ - MDCPNC, - MLL, - DEVMM, /**< Dlog memory managent */ - KERNEL, /**< Kernel */ - LIBMEDIA, /**< Libmedia */ - CCECPU, /**< ai cpu */ - ASCENDDK, /**< AscendDK */ - ROS, /**< ROS */ - HCCP, - ROCE, - TEFUSION, - PROFILING, /**< Profiling */ - DP, /**< Data Preprocess */ - APP, /**< User Application */ - TS, /**< TS module */ - TSDUMP, /**< TSDUMP module */ - AICPU, /**< AICPU module */ - LP, /**< LP module */ - TDT, - FE, - MD, - MB, - ME, - IMU, - IMP, - GE, /**< Fmk */ - MDCFUSA, - CAMERA, - ASCENDCL, - TEEOS, - ISP, - SIS, - HSM, - DSS, - PROCMGR, // Process Manager, Base Platform - BBOX, - AIVECTOR, - TBE, - FV, - MDCMAP, - TUNE, - INVLID_MOUDLE_ID -}; - -/** - * @ingroup slog - * @brief External log interface, which called by modules - */ -DLL_EXPORT void dlog_init(void); - -/** - * @ingroup slog - * @brief dlog_getlevel: get module loglevel and enableEvent - * - * @param [in]moduleId: moudule id(see slog.h, eg: CCE), others: invalid - * @param [out]enableEvent: 1: enable; 0: disable - * @return: module level(0: debug, 1: info, 2: warning, 3: error, 4: null output) - */ -DLL_EXPORT int dlog_getlevel(int moduleId, int *enableEvent); - -/** - * @ingroup slog - * @brief dlog_setlevel: set module loglevel and enableEvent - * - * @param 
[in]moduleId: moudule id(see slog.h, eg: CCE), -1: all modules, others: invalid - * @param [in]level: log level(0: debug, 1: info, 2: warning, 3: error, 4: null output) - * @param [in]enableEvent: 1: enable; 0: disable, others:invalid - * @return: 0: SUCCEED, others: FAILED - */ -DLL_EXPORT int dlog_setlevel(int moduleId, int level, int enableEvent); - -/** - * @ingroup slog - * @brief CheckLogLevel: check module level enable or not - * users no need to call it because all dlog interface(include inner interface) has already called - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]logLevel: eg: DLOG_EVENT/DLOG_ERROR/DLOG_WARN/DLOG_INFO/DLOG_DEBUG - * @return: 1:enable, 0:disable - */ -DLL_EXPORT int CheckLogLevel(int moduleId, int logLevel); - -/** - * @ingroup slog - * @brief DlogSetAttr: set log attr, default pid is 0, default device id is 0, default process type is APPLICATION - * @param [in]logAttr: attr info, include pid(must be larger than 0), process type and device id(chip ID) - * @return: 0: SUCCEED, others: FAILED - */ -DLL_EXPORT int DlogSetAttr(LogAttr logAttr); - -/** - * @ingroup slog - * @brief dlog_error: print error log - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]fmt: log content - */ -#define dlog_error(moduleId, fmt, ...) \ - do { \ - DlogErrorInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } while (0) - -/** - * @ingroup slog - * @brief dlog_warn: print warning log - * call CheckLogLevel in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]fmt: log content - */ -#define dlog_warn(moduleId, fmt, ...) \ - do { \ - if(CheckLogLevel(moduleId, DLOG_WARN) == 1) { \ - DlogWarnInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief dlog_info: print info log - * call CheckLogLevel in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]fmt: log content - */ -#define dlog_info(moduleId, fmt, ...) \ - do { \ - if(CheckLogLevel(moduleId, DLOG_INFO) == 1) { \ - DlogInfoInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief dlog_debug: print debug log - * call CheckLogLevel in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]fmt: log content - */ -#define dlog_debug(moduleId, fmt, ...) \ - do { \ - if(CheckLogLevel(moduleId, DLOG_DEBUG) == 1) { \ - DlogDebugInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief dlog_event: print event log - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]fmt: log content - */ -#define dlog_event(moduleId, fmt, ...) \ - do { \ - DlogEventInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } while (0) - -/** - * @ingroup slog - * @brief Dlog: print log, need caller to specify level - * call CheckLogLevel in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]level(0: debug, 1: info, 2: warning, 3: error, 5: trace, 6: oplog, 16: event) - * @param [in]fmt: log content - */ -#define Dlog(moduleId, level, fmt, ...) 
\ - do { \ - if(CheckLogLevel(moduleId, level) == 1) { \ - DlogInner(moduleId, level, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief DlogSub: print log, need caller to specify level and submodule - * call CheckLogLevel in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]submodule: eg: engine - * @param [in]level(0: debug, 1: info, 2: warning, 3: error, 5: trace, 6: oplog, 16: event) - * @param [in]fmt: log content - */ -#define DlogSub(moduleId, submodule, level, fmt, ...) \ - do { \ - if(CheckLogLevel(moduleId, level) == 1) { \ - DlogInner(moduleId, level, "[%s:%d][%s]" fmt, __FILE__, __LINE__, submodule, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief DlogWithKV: print log, need caller to specify level and other paramters - * call CheckLogLevel in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]level(0: debug, 1: info, 2: warning, 3: error, 5: trace, 6: oplog, 16: event) - * @param [in]pstKVArray: key-value array - * @param [in]kvNum: key-value element num in array - * @param [in]fmt: log content - */ -#define DlogWithKV(moduleId, level, pstKVArray, kvNum, fmt, ...) \ - do { \ - if(CheckLogLevel(moduleId, level) == 1) { \ - DlogWithKVInner(moduleId, level, pstKVArray, kvNum, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief DlogFlush: flush log buffer to file - */ -DLL_EXPORT void DlogFlush(void); - -/** - * @ingroup slog - * @brief Internal log interface, other modules are not allowed to call this interface - */ -void DlogErrorInner(int moduleId, const char *fmt, ...); -void DlogWarnInner(int moduleId, const char *fmt, ...); -void DlogInfoInner(int moduleId, const char *fmt, ...); -void DlogDebugInner(int moduleId, const char *fmt, ...); -void DlogEventInner(int moduleId, const char *fmt, ...); -void DlogInner(int moduleId, int level, const char *fmt, ...); -void DlogWithKVInner(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...); - -#ifdef __cplusplus -#ifndef LOG_CPP -} -#endif // LOG_CPP -#endif // __cplusplus - -#ifdef LOG_CPP -#ifdef __cplusplus -extern "C" { -#endif -/** - * @ingroup slog - * @brief DlogGetlevelForC: get module loglevel and enableEvent - * - * @param [in]moduleId: moudule id(see slog.h, eg: CCE), others: invalid - * @param [out]enableEvent: 1: enable; 0: disable - * @return: module level(0: debug, 1: info, 2: warning, 3: error, 4: null output) - */ -DLL_EXPORT int DlogGetlevelForC(int moduleId, int *enableEvent); - -/** - * @ingroup slog - * @brief DlogSetlevelForC: set module loglevel and enableEvent - * - * @param [in]moduleId: moudule id(see slog.h, eg: CCE), -1: all modules, others: invalid - * @param [in]level: log level(0: debug, 1: info, 2: warning, 3: error, 4: null output) - * @param [in]enableEvent: 1: enable; 0: disable, others:invalid - * @return: 0: SUCCEED, others: FAILED - */ -DLL_EXPORT int DlogSetlevelForC(int moduleId, int level, int enableEvent); - -/** - * @ingroup slog - * @brief CheckLogLevelForC: check module level enable or not - * users no need to call it because all dlog interface(include inner interface) has already called - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]logLevel: eg: DLOG_EVENT/DLOG_ERROR/DLOG_WARN/DLOG_INFO/DLOG_DEBUG - * @return: 1:enable, 0:disable - */ 
-DLL_EXPORT int CheckLogLevelForC(int moduleId, int logLevel); - -/** - * @ingroup slog - * @brief DlogSetAttrForC: set log attr, default pid is 0, default device id is 0, default process type is APPLICATION - * @param [in]logAttr: attr info, include pid(must be larger than 0), process type and device id(chip ID) - * @return: 0: SUCCEED, others: FAILED - */ -DLL_EXPORT int DlogSetAttrForC(LogAttr logAttr); - -/** - * @ingroup slog - * @brief DlogForC: print log, need caller to specify level - * call CheckLogLevelForC in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]level(0: debug, 1: info, 2: warning, 3: error, 5: trace, 6: oplog, 16: event) - * @param [in]fmt: log content - */ -#define DlogForC(moduleId, level, fmt, ...) \ - do { \ - if(CheckLogLevelForC(moduleId, level) == 1) { \ - DlogInnerForC(moduleId, level, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief DlogSubForC: print log, need caller to specify level and submodule - * call CheckLogLevelForC in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]submodule: eg: engine - * @param [in]level(0: debug, 1: info, 2: warning, 3: error, 5: trace, 6: oplog, 16: event) - * @param [in]fmt: log content - */ -#define DlogSubForC(moduleId, submodule, level, fmt, ...) \ - do { \ - if(CheckLogLevelForC(moduleId, level) == 1) { \ - DlogInnerForC(moduleId, level, "[%s:%d][%s]" fmt, __FILE__, __LINE__, submodule, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief DlogWithKVForC: print log, need caller to specify level and other paramters - * call CheckLogLevelForC in advance to optimize performance, call interface with fmt input take time - * - * @param [in]moduleId: module id, eg: CCE - * @param [in]level(0: debug, 1: info, 2: warning, 3: error, 5: trace, 6: oplog, 16: event) - * @param [in]pstKVArray: key-value array - * @param [in]kvNum: key-value element num in array - * @param [in]fmt: log content - */ -#define DlogWithKVForC(moduleId, level, pstKVArray, kvNum, fmt, ...) \ - do { \ - if(CheckLogLevelForC(moduleId, level) == 1) { \ - DlogWithKVInnerForC(moduleId, level, pstKVArray, kvNum, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ - } \ - } while (0) - -/** - * @ingroup slog - * @brief DlogFlushForC: flush log buffer to file - */ -DLL_EXPORT void DlogFlushForC(void); - -/** - * @ingroup slog - * @brief Internal log interface, other modules are not allowed to call this interface - */ -void DlogInnerForC(int moduleId, int level, const char *fmt, ...); -void DlogWithKVInnerForC(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...); - -#ifdef __cplusplus -} -#endif -#endif // LOG_CPP -#endif // D_SYSLOG_H_ diff --git a/inc/toolchain/tuning_tool/aoe_tuning_api.h b/inc/toolchain/tuning_tool/aoe_tuning_api.h deleted file mode 100644 index 4af5725f4..000000000 --- a/inc/toolchain/tuning_tool/aoe_tuning_api.h +++ /dev/null @@ -1,84 +0,0 @@ -/** - * @file aoe_tuning_api.h - * - * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. 
All rights reserved.\n - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n - * 描述:mstune调优接口头文件 - */ - -#ifndef AOE_TUNING_API_H -#define AOE_TUNING_API_H -#include -#include -#include "ge/ge_api.h" -#include "aoe_types.h" -#include "graph/ascend_string.h" - -namespace Aoe { -using SessionId = uint64_t; -using AoeStatus = int32_t; -/** - * @brief : initialize aoe tuning api - * @param [in] : map &globalOptions - * @return : == AOE_SUCESS : sucess,!= AOE_SUCESS : failed - */ -extern "C" AoeStatus AoeInitialize(const std::map &globalOptions); - -/** - * @brief : fialize aoe tuning api - * @return : == AOE_SUCESS : sucess,!= AOE_SUCESS : failed - */ -extern "C" AoeStatus AoeFinalize(); - -/** - * @brief : destroy aoe session - * @param [out] : SessionId SessionId session id - * @return : == AOE_SUCESS : sucess,!= AOE_SUCESS : failed - */ -extern "C" AoeStatus AoeDestroySession(SessionId SessionId); - -/** - * @brief : create aoe session - * @param [in] : map &sessionOptions session options - * @param [out] : SessionId SessionId session id - * @return : == AOE_SUCESS : sucess,!= AOE_SUCESS : failed - */ -extern "C" AoeStatus AoeCreateSession(SessionId &SessionId); - -/** - * @brief : set ge session for session id - * @param [in] : SessionId SessionId session id - * @param [in] : ge::Session* geSession ge session handle - * @return : == AOE_SUCESS : sucess,!= AOE_SUCESS : failed - */ -extern "C" AoeStatus AoeSetGeSession(SessionId SessionId, ge::Session* geSession); - -/** - * @brief : set depend graphs for session id - * @param [in] : SessionId SessionId session id -* @param [in] : std::vector &dependGraph depend graphs - * @return : == AOE_SUCESS : sucess,!= AOE_SUCESS : failed - */ -extern "C" AoeStatus AoeSetDependGraphs(SessionId SessionId, std::vector &dependGraph); - -/** - * @brief : set tuning graphs for session id - * @param [in] : SessionId SessionId session id -* @param [in] : ge::Graph &tuningGraph tuning graph - * @return : == AOE_SUCESS : sucess,!= AOE_SUCESS : failed - */ -extern "C" AoeStatus AoeSetTuningGraph(SessionId SessionId, ge::Graph &tuningGraph); - -/** - * @brief : tuning graph - * @param [in] : SessionId SessionId session id -* @param [in] : map &tuningOptions tuning options - * @return : == AOE_SUCESS : sucess,!= AOE_SUCESS : failed - */ -extern "C" AoeStatus AoeTuningGraph(SessionId SessionId, - const std::map &tuningOptions); -} // namespace Aoe -#endif diff --git a/inc/toolchain/tuning_tool/aoe_types.h b/inc/toolchain/tuning_tool/aoe_types.h deleted file mode 100644 index d125dd706..000000000 --- a/inc/toolchain/tuning_tool/aoe_types.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * @file aoe_types.h - * - * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. 
All rights reserved.\n - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n - * 描述:mstune调优接口头文件 - */ -/** @defgroup mstune mstune调优接口 */ -#ifndef AOE_TYPES_H -#define AOE_TYPES_H - -#include -#include -#include "graph/graph.h" - -namespace Aoe { -using SessionId = uint64_t; -using AoeStatus = int32_t; -const AoeStatus AOE_SUCCESS = 0; -const AoeStatus AOE_FALLURE = -1; -// 此枚举量需要与aoe保持一致 -const AoeStatus AOE_ERROR_NO_AICORE_GRAPH = 8; -} - - -/** - * @ingroup mstune - * - * mstune status - */ -enum MsTuneStatus { - MSTUNE_SUCCESS, /** tune success */ - MSTUNE_FAILED, /** tune failed */ -}; - -// Option key: for train options sets -const std::string MSTUNE_SELF_KEY = "mstune"; -const std::string MSTUNE_GEINIT_KEY = "initialize"; -const std::string MSTUNE_GESESS_KEY = "session"; - -#endif \ No newline at end of file diff --git a/tf_adapter/kernels/geop_npu.h b/tf_adapter/kernels/geop_npu.h index aa7e4390a..bf102b2ff 100644 --- a/tf_adapter/kernels/geop_npu.h +++ b/tf_adapter/kernels/geop_npu.h @@ -40,10 +40,10 @@ using AoeFinalizeFunc = AoeStatus (*)(); using AoeCreateSessionFunc = AoeStatus (*)(SessionId &); using AoeDestroySessionFunc = AoeStatus (*)(SessionId); using AoeSetGeSessionFunc = AoeStatus (*)(SessionId, ge::Session*); -using AoeSetDependGraphFunc = AoeStatus (*)(SessionId, std::vector&); -using AoeSetDependGraphsInputsFunc = AoeStatus (*)(SessionId, std::vector> &); -using AoeSetTuningGraphInputFunc = AoeStatus (*)(SessionId, std::vector &); -using AoeSetTuningGraphFunc = AoeStatus (*)(SessionId, ge::Graph &); +using AoeSetDependGraphFunc = AoeStatus (*)(SessionId, const std::vector&); +using AoeSetDependGraphsInputsFunc = AoeStatus (*)(SessionId, const std::vector> &); +using AoeSetTuningGraphInputFunc = AoeStatus (*)(SessionId, const std::vector &); +using AoeSetTuningGraphFunc = AoeStatus (*)(SessionId, const ge::Graph &); using AoeTuningGraphFunc = AoeStatus (*)(SessionId, const std::map &); class GeOp : public AsyncOpKernel { diff --git a/tf_adapter/tests/depends/aoe/CMakeLists.txt b/tf_adapter/tests/depends/aoe/CMakeLists.txt index 30727690e..d2ed742e3 100644 --- a/tf_adapter/tests/depends/aoe/CMakeLists.txt +++ b/tf_adapter/tests/depends/aoe/CMakeLists.txt @@ -19,9 +19,8 @@ file(GLOB_RECURSE SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "src/*.cc" ) -include_directories(${TFADAPTER_DIR}/inc/) - add_library(aoe_tuning SHARED ${SRC_FILES}) target_link_libraries(aoe_tuning PRIVATE $ + ge_runner_stub ) \ No newline at end of file diff --git a/tf_adapter/tests/depends/aoe/src/aoe_stub.cc b/tf_adapter/tests/depends/aoe/src/aoe_stub.cc index 2c7dc8c0f..7aede95f1 100644 --- a/tf_adapter/tests/depends/aoe/src/aoe_stub.cc +++ b/tf_adapter/tests/depends/aoe/src/aoe_stub.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "toolchain/tuning_tool/aoe_tuning_api.h" +#include "aoe_tuning_api.h" namespace Aoe { extern "C" AoeStatus AoeInitialize(const std::map &globalOptions) { @@ -25,43 +25,43 @@ extern "C" AoeStatus AoeFinalize() { return Aoe::AOE_SUCCESS; } -extern "C" AoeStatus AoeCreateSession(SessionId &SessionId) { +extern "C" AoeStatus AoeCreateSession(uint64_t &sessionId) { return Aoe::AOE_SUCCESS; } -extern "C" AoeStatus AoeDestroySession(SessionId SessionId) { - if (SessionId >= 9999) { - return Aoe::AOE_FALLURE; +extern "C" AoeStatus AoeDestroySession(uint64_t sessionId) { + if (sessionId >= 9999) { + return Aoe::AOE_FAILURE; } return Aoe::AOE_SUCCESS; } -extern "C" AoeStatus AoeSetGeSession(SessionId SessionId, ge::Session* geSession) { - if (SessionId >= 9999) { - return Aoe::AOE_FALLURE; +extern "C" AoeStatus AoeSetGeSession(uint64_t sessionId, ge::Session *geSession) { + if (sessionId >= 9999) { + return Aoe::AOE_FAILURE; } return Aoe::AOE_SUCCESS; } -extern "C" AoeStatus AoeSetDependGraphs(SessionId SessionId, std::vector &dependGraph) { +extern "C" AoeStatus AoeSetDependGraphs(uint64_t sessionId, const std::vector &dependGraphs) { return Aoe::AOE_SUCCESS; } -extern "C" AoeStatus AoeSetTuningGraph(SessionId SessionId, ge::Graph &tuningGraph) { +extern "C" AoeStatus AoeSetTuningGraph(uint64_t sessionId, const ge::Graph &tuningGraph) { return Aoe::AOE_SUCCESS; } -extern "C" AoeStatus AoeTuningGraph(SessionId SessionId, - const std::map &tuningOptions) { +extern "C" AoeStatus AoeTuningGraph(uint64_t sessionId, + const std::map &tuningOptions) { return Aoe::AOE_SUCCESS; } -extern "C" AoeStatus AoeSetDependGraphsInputs(SessionId SessionId, - std::vector> &input) { +extern "C" AoeStatus AoeSetDependGraphsInputs(uint64_t sessionId, + const std::vector> &inputs) { return Aoe::AOE_SUCCESS; } -extern "C" AoeStatus AoeSetTuningGraphInput(SessionId SessionId, std::vector &input) { +extern "C" AoeStatus AoeSetTuningGraphInput(uint64_t sessionId, const std::vector &input) { return Aoe::AOE_SUCCESS; } } // namespace Aoe \ No newline at end of file diff --git a/tf_adapter/tests/depends/ge_runner/CMakeLists.txt b/tf_adapter/tests/depends/ge_runner/CMakeLists.txt index 4f98e09b7..3681db1d7 100644 --- a/tf_adapter/tests/depends/ge_runner/CMakeLists.txt +++ b/tf_adapter/tests/depends/ge_runner/CMakeLists.txt @@ -24,15 +24,7 @@ set(BASE_DIR ${TOP_DIR}/../) add_library(ge_runner_stub SHARED ${SRC_FILES}) target_include_directories(ge_runner_stub PRIVATE - ${TFADAPTER_DIR}/inc/ ${TF_INSTALL_LIB_PATH}/include/ - ${TFADAPTER_DIR}/inc/external/ - ${TFADAPTER_DIR}/inc/common/ - ${TFADAPTER_DIR}/inc/graphengine/inc/ - ${TFADAPTER_DIR}/inc/graphengine/inc/external/ - ${TFADAPTER_DIR}/inc/graphengine/inc/framework/ - ${TFADAPTER_DIR}/metadef/inc/ - ${TFADAPTER_DIR}/metadef/inc/external/ ${TFADAPTER_DIR}/tf_adapter/tests/depends/ascendcl/src ${TFADAPTER_DIR}/tf_adapter/common ) diff --git a/tf_adapter/tests/st/util/testcase/util_test.cc b/tf_adapter/tests/st/util/testcase/util_test.cc index a7d847fb5..aa046f141 100644 --- a/tf_adapter/tests/st/util/testcase/util_test.cc +++ b/tf_adapter/tests/st/util/testcase/util_test.cc @@ -1,7 +1,7 @@ #include "tf_adapter/util/util.h" #include #include "gtest/gtest.h" -#include "inc/metadef/inc/graph/def_types.h" +#include "graph/def_types.h" namespace tensorflow { namespace { diff --git a/tf_adapter/tests/ut/util/testcase/util_test.cc b/tf_adapter/tests/ut/util/testcase/util_test.cc index a7d847fb5..aa046f141 100644 --- a/tf_adapter/tests/ut/util/testcase/util_test.cc +++ 
@@ -1,7 +1,7 @@
 #include "tf_adapter/util/util.h"
 #include
 #include "gtest/gtest.h"
-#include "inc/metadef/inc/graph/def_types.h"
+#include "graph/def_types.h"
 
 namespace tensorflow {
 namespace {
diff --git a/tf_adapter/util/mbuf_allocator.cc b/tf_adapter/util/mbuf_allocator.cc
index 9326f4e3d..1be52a2aa 100644
--- a/tf_adapter/util/mbuf_allocator.cc
+++ b/tf_adapter/util/mbuf_allocator.cc
@@ -23,7 +23,7 @@
 #include "tensorflow/core/platform/mutex.h"
 #include "tensorflow/core/platform/types.h"
 #include "tf_adapter/common/adapter_logger.h"
-#include "inc/external/acl/error_codes/rt_error_codes.h"
+#include "acl/error_codes/rt_error_codes.h"
 #include "runtime/rt_mem_queue.h"
 #include "infershape_util.h"
 #include "runtime/dev.h"
diff --git a/tf_adapter/util/util.cc b/tf_adapter/util/util.cc
index 9a1bb8bf4..a763e1c51 100644
--- a/tf_adapter/util/util.cc
+++ b/tf_adapter/util/util.cc
@@ -22,7 +22,7 @@
 #include "tf_adapter/common/adapter_logger.h"
 #include "tf_adapter/common/common.h"
 #include "tf_adapter/common/compat_tf1_tf2.h"
-#include "inc/metadef/inc/graph/def_types.h"
+#include "graph/def_types.h"
 #include "securec.h"
 namespace tensorflow {
 namespace {
diff --git a/tf_adapter/util/util.h b/tf_adapter/util/util.h
index 0390d1ad1..017799fca 100644
--- a/tf_adapter/util/util.h
+++ b/tf_adapter/util/util.h
@@ -20,7 +20,7 @@
 #include "tensorflow/core/framework/tensor.h"
 #include "tensorflow/core/framework/types.h"
 #include "tensorflow/core/graph/graph.h"
-#include "inc/tdt/data_common.h"
+#include "tdt/data_common.h"
 #include "tf_adapter/util/host_queue.h"
 
 namespace tensorflow {
diff --git a/tf_adapter_2.x/cmake/aoe/module.cmake b/tf_adapter_2.x/cmake/aoe/module.cmake
index e2889b7ff..513e2c5e5 100644
--- a/tf_adapter_2.x/cmake/aoe/module.cmake
+++ b/tf_adapter_2.x/cmake/aoe/module.cmake
@@ -1,13 +1,13 @@
 add_library(aoe_libs INTERFACE)
 
 if (DEFINED ASCEND_INSTALLED_PATH)
-    include_directories(${CMAKE_CURRENT_LIST_DIR}/../../../inc/toolchain)
-    include_directories(${CMAKE_CURRENT_LIST_DIR}/../../../inc/toolchain/tuning_tool)
+    include_directories(${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/aoe)
     target_link_libraries(aoe_libs INTERFACE
             ${ASCEND_INSTALLED_PATH}/tools/aoe/lib64/libaoe_tuning.so)
 else ()
-    include_directories(${ASCEND_CI_BUILD_DIR}/asl/tfadaptor/inc/toolchain)
-    include_directories(${ASCEND_CI_BUILD_DIR}/asl/tfadaptor/inc/toolchain/tuning_tool)
+    include_directories(${ASCEND_CI_BUILD_DIR}/asl/aoetools/inc/aoe)
+    include_directories(${ASCEND_CI_BUILD_DIR}/abl/slog/inc)
+    include_directories(${ASCEND_CI_BUILD_DIR}/abl/msprof/inc)
     add_custom_command(
             OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/_fake.cc
             COMMAND touch ${CMAKE_CURRENT_BINARY_DIR}/_fake.cc
diff --git a/tf_adapter_2.x/cmake/graph_engine/module.cmake b/tf_adapter_2.x/cmake/graph_engine/module.cmake
index 499aa6c9e..7f042725e 100644
--- a/tf_adapter_2.x/cmake/graph_engine/module.cmake
+++ b/tf_adapter_2.x/cmake/graph_engine/module.cmake
@@ -1,10 +1,10 @@
 add_library(ge_libs INTERFACE)
 
 if(DEFINED ASCEND_INSTALLED_PATH)
-    include_directories(${CMAKE_CURRENT_LIST_DIR}/../../../inc/graphengine/inc)
-    include_directories(${CMAKE_CURRENT_LIST_DIR}/../../../inc/graphengine/inc/external)
-    include_directories(${CMAKE_CURRENT_LIST_DIR}/../../../inc/metadef/inc)
-    include_directories(${CMAKE_CURRENT_LIST_DIR}/../../../inc/metadef/inc/external)
+    include_directories(${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/air)
+    include_directories(${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/air/external)
+    include_directories(${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/metadef)
+    include_directories(${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/metadef/external)
     target_link_libraries(ge_libs INTERFACE
             ${ASCEND_INSTALLED_PATH}/compiler/lib64/libge_runner.so
             ${ASCEND_INSTALLED_PATH}/compiler/lib64/libfmk_parser.so)
diff --git a/tf_adapter_2.x/compat_v1/CMakeLists.txt b/tf_adapter_2.x/compat_v1/CMakeLists.txt
index ba9bad364..7a03f969c 100644
--- a/tf_adapter_2.x/compat_v1/CMakeLists.txt
+++ b/tf_adapter_2.x/compat_v1/CMakeLists.txt
@@ -51,12 +51,29 @@ add_dependencies(_tf_adapter generate_compat_headers)
 
 target_compile_definitions(_tf_adapter PRIVATE LOG_CPP TF_VERSION_TF2)
 
-target_include_directories(_tf_adapter PRIVATE
+if (DEFINED ASCEND_CI_BUILD_DIR)
+    target_include_directories(_tf_adapter PRIVATE
+        ${CMAKE_CURRENT_BINARY_DIR}
+        ${ADAPTER_ROOT}/
+        ${ASCEND_CI_BUILD_DIR}/abl/mmpa/inc
+        ${ASCEND_CI_BUILD_DIR}/asl/aoetools/inc/aoe
+        ${ASCEND_CI_BUILD_DIR}/abl/slog/inc
+        ${ASCEND_CI_BUILD_DIR}/ace/npuruntime/inc
+        ${ASCEND_CI_BUILD_DIR}/abl/msprof/inc
+        ${ASCEND_CI_BUILD_DIR}/parser/inc/external
+    )
+else ()
+    target_include_directories(_tf_adapter PRIVATE
         ${CMAKE_CURRENT_BINARY_DIR}
         ${ADAPTER_ROOT}/
         ${ADAPTER_ROOT}/inc/
-        ${ADAPTER_ROOT}/inc/toolchain/tuning_tool
-    )
+        ${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include
+        ${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/aoe
+        ${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/slog
+        ${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/runtime
+        ${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/msprof
+    )
+endif ()
 
 add_custom_command(
         OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/_fake.cc
diff --git a/tf_adapter_2.x/npu_device/core/npu_aoe.cpp b/tf_adapter_2.x/npu_device/core/npu_aoe.cpp
index d1b47a77f..3cffa2a0e 100644
--- a/tf_adapter_2.x/npu_device/core/npu_aoe.cpp
+++ b/tf_adapter_2.x/npu_device/core/npu_aoe.cpp
@@ -83,7 +83,8 @@ tensorflow::Status NpuAoe::AoeTuningInitialize(const std::string &work_path, con
   DLOG() << "Start to run aoe initialize";
 
   handle_ = dlopen("libaoe_tuning.so", RTLD_NOW);
-  NPU_REQUIRES(handle_ != nullptr, tensorflow::errors::Internal("libaoe_tuning.so dlopen failed"));
+  NPU_REQUIRES(handle_ != nullptr,
+               tensorflow::errors::Internal("libaoe_tuning.so dlopen failed, dlerror: ", dlerror()));
 
   NPU_REQUIRES_OK(LoadAoeFunc());
 
diff --git a/tf_adapter_2.x/npu_device/core/npu_aoe.h b/tf_adapter_2.x/npu_device/core/npu_aoe.h
index d3fe68a25..b52c958e8 100644
--- a/tf_adapter_2.x/npu_device/core/npu_aoe.h
+++ b/tf_adapter_2.x/npu_device/core/npu_aoe.h
@@ -29,10 +29,10 @@ using AoeFinalizeFunc = AoeStatus (*)();
 using AoeCreateSessionFunc = AoeStatus (*)(SessionId &);
 using AoeDestroySessionFunc = AoeStatus (*)(SessionId);
 using AoeSetGeSessionFunc = AoeStatus (*)(SessionId, ge::Session *);
-using AoeSetDependGraphFunc = AoeStatus (*)(SessionId, std::vector<ge::Graph> &);
-using AoeSetDependGraphsInputsFunc = AoeStatus (*)(SessionId, std::vector<std::vector<ge::Tensor>> &);
-using AoeSetTuningGraphInputFunc = AoeStatus (*)(SessionId, std::vector<ge::Tensor> &);
-using AoeSetTuningGraphFunc = AoeStatus (*)(SessionId, ge::Graph &);
+using AoeSetDependGraphFunc = AoeStatus (*)(SessionId, const std::vector<ge::Graph> &);
+using AoeSetDependGraphsInputsFunc = AoeStatus (*)(SessionId, const std::vector<std::vector<ge::Tensor>> &);
+using AoeSetTuningGraphInputFunc = AoeStatus (*)(SessionId, const std::vector<ge::Tensor> &);
+using AoeSetTuningGraphFunc = AoeStatus (*)(SessionId, const ge::Graph &);
 using AoeTuningGraphFunc = AoeStatus (*)(SessionId,
                                          const std::map<ge::AscendString, ge::AscendString> &);
 struct AoeFunc {
diff --git a/tf_adapter_2.x/tests/CMakeLists.txt b/tf_adapter_2.x/tests/CMakeLists.txt
index 033d33f34..4ca02cae9 100644
--- a/tf_adapter_2.x/tests/CMakeLists.txt
+++ b/tf_adapter_2.x/tests/CMakeLists.txt
@@ -14,12 +14,14 @@ link_libraries(-lgcov)
 
 if (NOT EXISTS ${CMAKE_CURRENT_LIST_DIR}/tools/COMPILE_FLAGS OR NOT EXISTS
         ${CMAKE_CURRENT_LIST_DIR}/tools/TF_INSTALLED_PATH OR NOT EXISTS
+        ${CMAKE_CURRENT_LIST_DIR}/tools/ASCEND_INSTALLED_PATH OR NOT EXISTS
         ${CMAKE_CURRENT_LIST_DIR}/tools/PYTHON_BIN_PATH OR NOT EXISTS
         ${CMAKE_CURRENT_LIST_DIR}/tools/PYTHON_LD_LIBRARY)
     message(FATAL_ERROR "No validate configuration found. Did you forget to configure first?")
 endif ()
 
 file(STRINGS "${CMAKE_CURRENT_LIST_DIR}/tools/TF_INSTALLED_PATH" TF_INSTALLED_PATH)
+file(STRINGS "${CMAKE_CURRENT_LIST_DIR}/tools/ASCEND_INSTALLED_PATH" ASCEND_INSTALLED_PATH)
 file(STRINGS "${CMAKE_CURRENT_LIST_DIR}/tools/PYTHON_BIN_PATH" PYTHON_BIN_PATH)
 file(STRINGS "${CMAKE_CURRENT_LIST_DIR}/tools/COMPILE_FLAGS" CUSTOM_COMPILE_FLAGS)
 file(STRINGS "${CMAKE_CURRENT_LIST_DIR}/tools/PYTHON_LD_LIBRARY" PYTHON_LD_LIBRARY)
@@ -28,8 +30,8 @@ foreach (COMPILE_FLAG ${CUSTOM_COMPILE_FLAGS})
     set(CMAKE_CXX_FLAGS "${COMPILE_FLAG} ${CMAKE_CXX_FLAGS}")
 endforeach (COMPILE_FLAG)
 
-include_directories(${CMAKE_CURRENT_LIST_DIR}/../../inc/external) # just for acl
-include_directories(${CMAKE_CURRENT_LIST_DIR}/../../inc/toolchain/tuning_tool)
+include_directories(${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/ascendcl/external) # just for acl
+include_directories(${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/aoe)
 include_directories(${CMAKE_CURRENT_LIST_DIR}/stub/include)
 include_directories(${CMAKE_CURRENT_LIST_DIR}/../npu_device/core)
 
diff --git a/tf_adapter_2.x/tests/cmake/acl/module.cmake b/tf_adapter_2.x/tests/cmake/acl/module.cmake
index c48a25ef2..5a05d64d2 100644
--- a/tf_adapter_2.x/tests/cmake/acl/module.cmake
+++ b/tf_adapter_2.x/tests/cmake/acl/module.cmake
@@ -3,7 +3,7 @@ add_library(acl_libs INTERFACE)
 
 add_library(acl_stub STATIC ${CMAKE_CURRENT_LIST_DIR}/../../stub/acl_stub.cpp)
 target_include_directories(acl_stub PRIVATE
-        ${CMAKE_CURRENT_LIST_DIR}/../../../inc/external
+        ${ASCEND_INSTALLED_PATH}/opensdk/opensdk/include/ascendcl/external
         ${CMAKE_CURRENT_LIST_DIR}/../../stub/include)
 
 target_link_libraries(acl_libs INTERFACE acl_stub)
diff --git a/tf_adapter_2.x/tests/cmake/aoe/module.cmake b/tf_adapter_2.x/tests/cmake/aoe/module.cmake
index 71cdb0dde..ee8507856 100644
--- a/tf_adapter_2.x/tests/cmake/aoe/module.cmake
+++ b/tf_adapter_2.x/tests/cmake/aoe/module.cmake
@@ -2,7 +2,4 @@ add_library(aoe_tuning INTERFACE)
 
 add_library(aoe_stub STATIC ${CMAKE_CURRENT_LIST_DIR}/../../stub/aoe_stub.cpp)
 
-target_include_directories(aoe_stub PRIVATE
-        ${CMAKE_CURRENT_LIST_DIR}/../../../../inc/toolchain/tuning_tool)
-
 target_link_libraries(aoe_tuning INTERFACE aoe_stub)
diff --git a/tf_adapter_2.x/tests/configure.py b/tf_adapter_2.x/tests/configure.py
index 62937426a..d64e4eaf4 100644
--- a/tf_adapter_2.x/tests/configure.py
+++ b/tf_adapter_2.x/tests/configure.py
@@ -30,6 +30,7 @@ except ImportError:
 
 _COMPAT_TENSORFLOW_VERSION = "2.6"
 _PYTHON_BIN_PATH_ENV = "ADAPTER_TARGET_PYTHON_PATH"
+_ASCEND_INSTALLED_PATH_ENV = "ASCEND_HOME_PATH"
 
 
 def run_command(cmd):
@@ -113,9 +114,35 @@ def setup_python(env_path):
             break
 
 
+def setup_ascend(env_path):
+    """Get ascend install path."""
+    default_ascend_path = os.path.realpath("/usr/local/Ascend/latest")
+    ask_ascend_path = ('Please specify the location of ascend. [Default is '
+                       '%s]\n(You can make this quiet by set env [ASCEND_INSTALLED_PATH]): ') % default_ascend_path
+    custom_ascend_path = env_path
+    while True:
+        if not custom_ascend_path:
+            ascend_path = get_input(ask_ascend_path)
+        else:
+            ascend_path = custom_ascend_path
+            custom_ascend_path = None
+        if not ascend_path:
+            ascend_path = default_ascend_path
+        # Check if the path is valid
+        if os.path.isdir(ascend_path) and os.access(ascend_path, os.X_OK):
+            break
+        if not os.path.exists(ascend_path):
+            print('Invalid ascend path: %s cannot be found.' % ascend_path)
+
+    with open(real_config_path('ASCEND_INSTALLED_PATH'), 'w') as f:
+        f.write(ascend_path)
+
+
 def main():
+    """Entry point for configuration"""
     env_snapshot = dict(os.environ)
     setup_python(env_snapshot.get(_PYTHON_BIN_PATH_ENV))
+    setup_ascend(env_snapshot.get(_ASCEND_INSTALLED_PATH_ENV))
 
 
 if __name__ == '__main__':
diff --git a/tf_adapter_2.x/tests/stub/aoe_stub.cpp b/tf_adapter_2.x/tests/stub/aoe_stub.cpp
index b5f65909a..d6dc58fca 100644
--- a/tf_adapter_2.x/tests/stub/aoe_stub.cpp
+++ b/tf_adapter_2.x/tests/stub/aoe_stub.cpp
@@ -23,43 +23,43 @@ extern "C" AoeStatus AoeFinalize() {
   return Aoe::AOE_SUCCESS;
 }
 
-extern "C" AoeStatus AoeCreateSession(SessionId &SessionId) {
+extern "C" AoeStatus AoeCreateSession(uint64_t &sessionId) {
   return Aoe::AOE_SUCCESS;
 }
 
-extern "C" AoeStatus AoeDestroySession(SessionId SessionId) {
-  if (SessionId >= 9999) {
-    return Aoe::AOE_FALLURE;
+extern "C" AoeStatus AoeDestroySession(uint64_t sessionId) {
+  if (sessionId >= 9999) {
+    return Aoe::AOE_FAILURE;
   }
   return Aoe::AOE_SUCCESS;
 }
 
-extern "C" AoeStatus AoeSetGeSession(SessionId SessionId, ge::Session* geSession) {
-  if (SessionId >= 9999) {
-    return Aoe::AOE_FALLURE;
+extern "C" AoeStatus AoeSetGeSession(uint64_t sessionId, ge::Session *geSession) {
+  if (sessionId >= 9999) {
+    return Aoe::AOE_FAILURE;
  }
   return Aoe::AOE_SUCCESS;
 }
 
-extern "C" AoeStatus AoeSetDependGraphs(SessionId SessionId, std::vector<ge::Graph> &dependGraph) {
+extern "C" AoeStatus AoeSetDependGraphs(uint64_t sessionId, const std::vector<ge::Graph> &dependGraphs) {
   return Aoe::AOE_SUCCESS;
 }
 
-extern "C" AoeStatus AoeSetTuningGraph(SessionId SessionId, ge::Graph &tuningGraph) {
+extern "C" AoeStatus AoeSetTuningGraph(uint64_t sessionId, const ge::Graph &tuningGraph) {
   return Aoe::AOE_SUCCESS;
 }
 
-extern "C" AoeStatus AoeTuningGraph(SessionId SessionId,
+extern "C" AoeStatus AoeTuningGraph(uint64_t sessionId,
                                     const std::map<ge::AscendString, ge::AscendString> &tuningOptions) {
   return Aoe::AOE_SUCCESS;
 }
 
-extern "C" AoeStatus AoeSetDependGraphsInputs(SessionId SessionId,
-                                              std::vector<std::vector<ge::Tensor>> &input) {
+extern "C" AoeStatus AoeSetDependGraphsInputs(uint64_t sessionId,
+                                              const std::vector<std::vector<ge::Tensor>> &inputs) {
   return Aoe::AOE_SUCCESS;
 }
 
-extern "C" AoeStatus AoeSetTuningGraphInput(SessionId SessionId, std::vector<ge::Tensor> &input) {
+extern "C" AoeStatus AoeSetTuningGraphInput(uint64_t sessionId, const std::vector<ge::Tensor> &input) {
   return Aoe::AOE_SUCCESS;
 }
-} // namespace Aoe
\ No newline at end of file
+} // namespace Aoe
-- 
Gitee
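For reference, a minimal usage sketch (not part of the patch) of how the const-qualified AOE entry points above are typically resolved and invoked through dlopen/dlsym, mirroring what npu_aoe.cpp does. The include name follows the new header layout introduced by the patch; the qualified type names (Aoe::SessionId, Aoe::AoeStatus, Aoe::AOE_SUCCESS), the graph name, and the wrapper program itself are illustrative assumptions, not the adapter's actual implementation.

// Illustrative sketch only: probe libaoe_tuning.so and call two of its entry points.
#include <dlfcn.h>
#include <cstdio>
#include "graph/graph.h"
#include "aoe_tuning_api.h"   // assumed to declare Aoe::SessionId, Aoe::AoeStatus, Aoe::AOE_SUCCESS

using AoeCreateSessionFunc = Aoe::AoeStatus (*)(Aoe::SessionId &);
using AoeSetTuningGraphFunc = Aoe::AoeStatus (*)(Aoe::SessionId, const ge::Graph &);

int main() {
  void *handle = dlopen("libaoe_tuning.so", RTLD_NOW);
  if (handle == nullptr) {
    // Same diagnostic the patch adds in npu_aoe.cpp: include dlerror() in the failure message.
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  auto create_session = reinterpret_cast<AoeCreateSessionFunc>(dlsym(handle, "AoeCreateSession"));
  auto set_tuning_graph = reinterpret_cast<AoeSetTuningGraphFunc>(dlsym(handle, "AoeSetTuningGraph"));
  if ((create_session != nullptr) && (set_tuning_graph != nullptr)) {
    Aoe::SessionId session = 0U;
    if (create_session(session) == Aoe::AOE_SUCCESS) {
      const ge::Graph graph("tuning_graph");   // const reference binds directly to a const graph
      (void)set_tuning_graph(session, graph);
    }
  }
  dlclose(handle);
  return 0;
}

Build such a probe with -ldl and the GE/graph libraries on the link line; the const qualifiers added by this patch are what allow const containers and graphs to be passed to these entry points without copies or const_casts.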