From 8d2dc2e99cf48dddcca27093a4f27140b97a64ba Mon Sep 17 00:00:00 2001 From: HuKang Date: Mon, 28 Nov 2022 09:40:19 +0800 Subject: [PATCH] ad OH demo code --- .../android/classification/.clang-format | 7 + .../android/classification/.clang-tidy | 1 + .../android/classification/.clangd | 8 + .../android/classification/.gitignore | 5 + .../android/classification/AppScope/app.json5 | 11 + .../resources/base/element/string.json | 8 + .../resources/base/media/app_icon.png | Bin 0 -> 6790 bytes .../classification/build-profile.json5 | 40 + .../android/classification/entry/.gitignore | 4 + .../classification/entry/build-profile.json5 | 21 + .../classification/entry/hvigorfile.js | 2 + .../classification/entry/package-lock.json | 12 + .../android/classification/entry/package.json | 16 + .../entry/src/main/cpp/CMakeLists.txt | 42 + .../entry/src/main/cpp/Detection.cpp | 301 + .../entry/src/main/cpp/Detection.h | 266 + .../entry/src/main/cpp/common/plugin_common.h | 26 + .../entry/src/main/cpp/hello.cpp | 525 + .../classification/entry/src/main/cpp/hello.h | 841 + .../cpp/minddata-lite/include/api/allocator.h | 97 + .../include/api/callback/callback.h | 94 + .../include/api/callback/ckpt_saver.h | 33 + .../include/api/callback/loss_monitor.h | 35 + .../include/api/callback/lr_scheduler.h | 51 + .../include/api/callback/time_monitor.h | 34 + .../include/api/callback/train_accuracy.h | 41 + .../main/cpp/minddata-lite/include/api/cell.h | 135 + .../main/cpp/minddata-lite/include/api/cfg.h | 53 + .../cpp/minddata-lite/include/api/context.h | 442 + .../cpp/minddata-lite/include/api/data_type.h | 44 + .../cpp/minddata-lite/include/api/delegate.h | 117 + .../include/api/dual_abi_helper.h | 164 + .../cpp/minddata-lite/include/api/format.h | 46 + .../cpp/minddata-lite/include/api/graph.h | 46 + .../cpp/minddata-lite/include/api/kernel.h | 115 + .../include/api/metrics/accuracy.h | 36 + .../include/api/metrics/metrics.h | 40 + .../cpp/minddata-lite/include/api/model.h | 235 + 
.../include/api/model_parallel_runner.h | 110 + .../main/cpp/minddata-lite/include/api/net.h | 141 + .../minddata-lite/include/api/serialization.h | 100 + .../cpp/minddata-lite/include/api/status.h | 166 + .../cpp/minddata-lite/include/api/types.h | 360 + .../minddata-lite/include/c_api/context_c.h | 179 + .../minddata-lite/include/c_api/data_type_c.h | 52 + .../minddata-lite/include/c_api/format_c.h | 46 + .../cpp/minddata-lite/include/c_api/model_c.h | 144 + .../minddata-lite/include/c_api/status_c.h | 76 + .../minddata-lite/include/c_api/tensor_c.h | 146 + .../cpp/minddata-lite/include/c_api/types_c.h | 48 + .../main/cpp/minddata-lite/include/context.h | 78 + .../minddata-lite/include/dataset/constants.h | 264 + .../include/dataset/data_helper.h | 460 + .../minddata-lite/include/dataset/datasets.h | 574 + .../minddata-lite/include/dataset/execute.h | 139 + .../minddata-lite/include/dataset/iterator.h | 167 + .../include/dataset/lite_cv/image_process.h | 306 + .../include/dataset/lite_cv/lite_mat.h | 377 + .../minddata-lite/include/dataset/samplers.h | 256 + .../include/dataset/transforms.h | 401 + .../include/dataset/vision_lite.h | 371 + .../cpp/minddata-lite/include/errorcode.h | 74 + .../minddata-lite/include/ir/dtype/type_id.h | 97 + .../minddata-lite/include/kernel_interface.h | 49 + .../cpp/minddata-lite/include/lite_session.h | 236 + .../cpp/minddata-lite/include/lite_types.h | 53 + .../cpp/minddata-lite/include/lite_utils.h | 673 + .../include/mindapi/base/format.h | 47 + .../include/mindapi/base/type_id.h | 105 + .../include/mindapi/base/types.h | 124 + .../main/cpp/minddata-lite/include/model.h | 82 + .../cpp/minddata-lite/include/ms_tensor.h | 134 + .../include/registry/register_kernel.h | 148 + .../registry/register_kernel_interface.h | 109 + .../include/schema/model_generated.h | 2364 +++ .../include/schema/ops_generated.h | 13470 ++++++++++++++++ .../include/schema/ops_types_generated.h | 744 + .../include/third_party/flatbuffers/base.h | 437 + 
.../third_party/flatbuffers/code_generators.h | 235 + .../third_party/flatbuffers/flatbuffers.h | 2954 ++++ .../include/third_party/flatbuffers/flatc.h | 100 + .../third_party/flatbuffers/flexbuffers.h | 1636 ++ .../include/third_party/flatbuffers/grpc.h | 331 + .../include/third_party/flatbuffers/hash.h | 127 + .../include/third_party/flatbuffers/idl.h | 1198 ++ .../third_party/flatbuffers/minireflect.h | 419 + .../third_party/flatbuffers/pch/flatc_pch.h | 39 + .../include/third_party/flatbuffers/pch/pch.h | 38 + .../third_party/flatbuffers/reflection.h | 502 + .../flatbuffers/reflection_generated.h | 1278 ++ .../third_party/flatbuffers/registry.h | 127 + .../third_party/flatbuffers/stl_emulation.h | 673 + .../include/third_party/flatbuffers/util.h | 698 + .../include/train/accuracy_metrics.h | 49 + .../include/train/accuracy_monitor.h | 47 + .../minddata-lite/include/train/ckpt_saver.h | 53 + .../classification_train_accuracy_monitor.h | 54 + .../include/train/loss_monitor.h | 56 + .../include/train/lr_scheduler.h | 59 + .../cpp/minddata-lite/include/train/metrics.h | 37 + .../minddata-lite/include/train/train_cfg.h | 77 + .../minddata-lite/include/train/train_loop.h | 105 + .../include/train/train_loop_callback.h | 86 + .../include/train/train_session.h | 50 + .../main/cpp/minddata-lite/include/version.h | 38 + .../src/main/cpp/types/libentry/index.d.ts | 5 + .../src/main/cpp/types/libentry/package.json | 4 + .../main/ets/Application/MyAbilityStage.ts | 9 + .../src/main/ets/MainAbility/MainAbility.ts | 69 + .../src/main/ets/common/ImageGallery.ets | 71 + .../entry/src/main/ets/common/ImageInfo.ets | 71 + .../entry/src/main/ets/common/LogUtils.ts | 44 + .../src/main/ets/model/ClassificationBean.ets | 14 + .../src/main/ets/model/DetectionBean.ets | 17 + .../entry/src/main/ets/model/ImageModel.ts | 128 + .../main/ets/pages/ClassificationResult.ets | 514 + .../src/main/ets/pages/DetectionResult.ets | 216 + .../entry/src/main/ets/pages/Second.ets | 170 + 
.../entry/src/main/ets/pages/index.ets | 126 + .../entry/src/main/module.json5 | 68 + .../main/resources/base/element/color.json | 8 + .../main/resources/base/element/string.json | 23 + .../main/resources/base/media/ic_camera.png | Bin 0 -> 6971 bytes .../src/main/resources/base/media/icon.png | Bin 0 -> 6790 bytes .../resources/base/profile/main_pages.json | 8 + .../entry/src/main/resources/rawfile/ssd1.ms | Bin 0 -> 17591576 bytes .../src/main/resources/rawfile/testhu.ms | Bin 0 -> 11447840 bytes .../android/classification/hvigorfile.js | 2 + .../android/classification/package-lock.json | 1226 ++ .../android/classification/package.json | 18 + 130 files changed, 41778 insertions(+) create mode 100644 application_example/android/classification/.clang-format create mode 100644 application_example/android/classification/.clang-tidy create mode 100644 application_example/android/classification/.clangd create mode 100644 application_example/android/classification/.gitignore create mode 100644 application_example/android/classification/AppScope/app.json5 create mode 100644 application_example/android/classification/AppScope/resources/base/element/string.json create mode 100644 application_example/android/classification/AppScope/resources/base/media/app_icon.png create mode 100644 application_example/android/classification/build-profile.json5 create mode 100644 application_example/android/classification/entry/.gitignore create mode 100644 application_example/android/classification/entry/build-profile.json5 create mode 100644 application_example/android/classification/entry/hvigorfile.js create mode 100644 application_example/android/classification/entry/package-lock.json create mode 100644 application_example/android/classification/entry/package.json create mode 100644 application_example/android/classification/entry/src/main/cpp/CMakeLists.txt create mode 100644 application_example/android/classification/entry/src/main/cpp/Detection.cpp create mode 100644 
application_example/android/classification/entry/src/main/cpp/Detection.h create mode 100644 application_example/android/classification/entry/src/main/cpp/common/plugin_common.h create mode 100644 application_example/android/classification/entry/src/main/cpp/hello.cpp create mode 100644 application_example/android/classification/entry/src/main/cpp/hello.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/allocator.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/callback.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/ckpt_saver.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/loss_monitor.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/lr_scheduler.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/time_monitor.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/train_accuracy.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/cell.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/cfg.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/context.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/data_type.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/delegate.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/dual_abi_helper.h create mode 100644 
application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/format.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/graph.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/kernel.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/metrics/accuracy.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/metrics/metrics.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/model.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/model_parallel_runner.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/net.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/serialization.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/status.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/types.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/context_c.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/data_type_c.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/format_c.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/model_c.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/status_c.h create mode 100644 
application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/tensor_c.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/types_c.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/context.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/constants.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/data_helper.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/datasets.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/execute.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/iterator.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/lite_cv/image_process.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/lite_cv/lite_mat.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/samplers.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/transforms.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/vision_lite.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/errorcode.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/ir/dtype/type_id.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/kernel_interface.h create mode 100644 
application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_session.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_types.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_utils.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/format.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/type_id.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/types.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/model.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/ms_tensor.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/registry/register_kernel.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/registry/register_kernel_interface.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/model_generated.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/ops_generated.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/ops_types_generated.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/base.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/code_generators.h create mode 100644 
application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/flatbuffers.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/flatc.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/flexbuffers.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/grpc.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/hash.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/idl.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/minireflect.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/pch/flatc_pch.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/pch/pch.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/reflection.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/reflection_generated.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/registry.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/stl_emulation.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/third_party/flatbuffers/util.h create mode 100644 
application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/accuracy_metrics.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/accuracy_monitor.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/ckpt_saver.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/classification_train_accuracy_monitor.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/loss_monitor.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/lr_scheduler.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/metrics.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/train_cfg.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/train_loop.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/train_loop_callback.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/train/train_session.h create mode 100644 application_example/android/classification/entry/src/main/cpp/minddata-lite/include/version.h create mode 100644 application_example/android/classification/entry/src/main/cpp/types/libentry/index.d.ts create mode 100644 application_example/android/classification/entry/src/main/cpp/types/libentry/package.json create mode 100644 application_example/android/classification/entry/src/main/ets/Application/MyAbilityStage.ts create mode 100644 application_example/android/classification/entry/src/main/ets/MainAbility/MainAbility.ts create mode 100644 
application_example/android/classification/entry/src/main/ets/common/ImageGallery.ets create mode 100644 application_example/android/classification/entry/src/main/ets/common/ImageInfo.ets create mode 100644 application_example/android/classification/entry/src/main/ets/common/LogUtils.ts create mode 100644 application_example/android/classification/entry/src/main/ets/model/ClassificationBean.ets create mode 100644 application_example/android/classification/entry/src/main/ets/model/DetectionBean.ets create mode 100644 application_example/android/classification/entry/src/main/ets/model/ImageModel.ts create mode 100644 application_example/android/classification/entry/src/main/ets/pages/ClassificationResult.ets create mode 100644 application_example/android/classification/entry/src/main/ets/pages/DetectionResult.ets create mode 100644 application_example/android/classification/entry/src/main/ets/pages/Second.ets create mode 100644 application_example/android/classification/entry/src/main/ets/pages/index.ets create mode 100644 application_example/android/classification/entry/src/main/module.json5 create mode 100644 application_example/android/classification/entry/src/main/resources/base/element/color.json create mode 100644 application_example/android/classification/entry/src/main/resources/base/element/string.json create mode 100644 application_example/android/classification/entry/src/main/resources/base/media/ic_camera.png create mode 100644 application_example/android/classification/entry/src/main/resources/base/media/icon.png create mode 100644 application_example/android/classification/entry/src/main/resources/base/profile/main_pages.json create mode 100644 application_example/android/classification/entry/src/main/resources/rawfile/ssd1.ms create mode 100644 application_example/android/classification/entry/src/main/resources/rawfile/testhu.ms create mode 100644 application_example/android/classification/hvigorfile.js create mode 100644 
application_example/android/classification/package-lock.json create mode 100644 application_example/android/classification/package.json diff --git a/application_example/android/classification/.clang-format b/application_example/android/classification/.clang-format new file mode 100644 index 0000000..6d18def --- /dev/null +++ b/application_example/android/classification/.clang-format @@ -0,0 +1,7 @@ +Language: Cpp +# BasedOnStyle: LLVM +ColumnLimit: 0 +SortIncludes: false +TabWidth: 4 +IndentWidth: 4 +UseTab: Never diff --git a/application_example/android/classification/.clang-tidy b/application_example/android/classification/.clang-tidy new file mode 100644 index 0000000..bda79a0 --- /dev/null +++ b/application_example/android/classification/.clang-tidy @@ -0,0 +1 @@ +Checks: 'readability-system-capabilities' diff --git a/application_example/android/classification/.clangd b/application_example/android/classification/.clangd new file mode 100644 index 0000000..fe13376 --- /dev/null +++ b/application_example/android/classification/.clangd @@ -0,0 +1,8 @@ +CompileFlags: + Add: [-Wunreachable-code-aggressive,-Wunused-variable] +Diagnostics: + ClangTidy: + Add: [misc-unused-parameters] + Remove: [] + CheckOptions: + misc-unused-parameters.StrictMode: 1 diff --git a/application_example/android/classification/.gitignore b/application_example/android/classification/.gitignore new file mode 100644 index 0000000..91d237b --- /dev/null +++ b/application_example/android/classification/.gitignore @@ -0,0 +1,5 @@ +/node_modules +/local.properties +/.idea +**/build +/.hvigor \ No newline at end of file diff --git a/application_example/android/classification/AppScope/app.json5 b/application_example/android/classification/AppScope/app.json5 new file mode 100644 index 0000000..881ed31 --- /dev/null +++ b/application_example/android/classification/AppScope/app.json5 @@ -0,0 +1,11 @@ +{ + "app": { + "bundleName": "com.mindspore.myapplication", + "vendor": "example", + "versionCode": 
1000000, + "versionName": "1.0.0", + "icon": "$media:app_icon", + "label": "$string:app_name", + "distributedNotificationEnabled": true + } +} diff --git a/application_example/android/classification/AppScope/resources/base/element/string.json b/application_example/android/classification/AppScope/resources/base/element/string.json new file mode 100644 index 0000000..8fc944c --- /dev/null +++ b/application_example/android/classification/AppScope/resources/base/element/string.json @@ -0,0 +1,8 @@ +{ + "string": [ + { + "name": "app_name", + "value": "classification" + } + ] +} diff --git a/application_example/android/classification/AppScope/resources/base/media/app_icon.png b/application_example/android/classification/AppScope/resources/base/media/app_icon.png new file mode 100644 index 0000000000000000000000000000000000000000..ce307a8827bd75456441ceb57d530e4c8d45d36c GIT binary patch literal 6790 zcmX|G1ymHk)?T_}Vd;>R?p|tHQo6fg38|$UVM!6BLrPFWk?s;$LOP{GmJpBl$qoSA!PUg~PA65-S00{{S`XKG6NkG0RgjEntPrmV+?0|00mu7;+5 zrdpa{2QLqPJ4Y{j7=Mrl{BaxrkdY69+c~(w{Fv-v&aR%aEI&JYSeRTLWm!zbv;?)_ ziZB;fwGbbeL5Q}YLx`J$lp~A09KK8t_z}PZ=4ZzgdeKtgoc+o5EvN9A1K1_<>M?MBqb#!ASf&# zEX?<)!RH(7>1P+j=jqG(58}TVN-$psA6K}atCuI!KTJD&FMmH-78ZejBm)0qc{ESp z|LuG1{QnBUJRg_E=h1#XMWt2%fcoN@l7eAS!Es?Q+;XsRNPhiiE=@AqlLkJzF`O18 zbsbSmKN=aaq8k3NFYZfDWpKmM!coBU0(XnL8R{4=i|wi{!uWYM2je{U{B*K2PVdu&=E zTq*-XsEsJ$u5H4g6DIm2Y!DN`>^v|AqlwuCD;w45K0@eqauiqWf7l&o)+YLHm~|L~ z7$0v5mkobriU!H<@mVJHLlmQqzQ3d6Rh_-|%Yy2li*tHO>_vcnuZ7OR_xkAIuIU&x z-|8Y0wj|6|a6_I(v91y%k_kNw6pnkNdxjqG8!%Vz_d%c_!X+6-;1`GC9_FpjoHev5fEV7RhJ>r=mh-jp$fqbqRJ=obwdgLDVP5+s zy1=_DWG0Y-Jb3t^WXmkr(d9~08k-|#Ly zaNOmT(^9tIb&eb4%CzIT zAm3CUtWSr1t4?h1kk#NBi{U|pJslvME{q|_eS^3En>SOqSxyuN1x;Is@8~m?*>}** znrRFArP!K_52RpX*&JHMR<^lVdm8ypJ}0R(SD(51j;6@ni$6bQ+2XL+R^|NnSp5}(kzvMZ^(@4fD_{QVu$(&K6H|C37TG1Am9Re{<<3gd zh@`>;BqkXMW&p0T6rt|iB$)~CvFe(XC)F9WgAZn*0@t$oZo;!*}r@_`h?KKH&6A@3= zISXoQB+~`op>NP-buiA*^0n{@i{_?MRG)&k)c)k_F+-2Lud!S9pc+i`s74NpBCaGF 
zXN+pHkubw*msGBTY27BKHv)RRh3;nMg4&$fD_6X9Vt~;_4D+5XPH~#Kn-yjcy!$}1 zigv#FNY>TqMhtIBb@UoF!cE~Q8~;!Pek>SQQwHnHuWKoVBosAiOr}q>!>aE*Krc)V zBUMEcJ5NU0g8}-h6i1zpMY9>m4ne?=U2~`w7K7Q0gB_=p@$5K7p6}thw z-~3dMj?YNX2X$lZ+7ngQ$=s}3mizNN@kE%OtB)?c&i~2L55z8^=yz;xMHLmlY>&Q# zJj?!)M#q_SyfkQh)k?j8IfLtB)ZCp|*vf4_B zos?73yd^h-Ac+;?E4*bpf=o*^3x3-`TVjbY4n6!EN10K6o@fxdyps05Vo3PU)otB} z`3kR+2w7_C#8Z!q`J)p{Vh!+m9-UP!$STp+Hb}}#@#_u^SsUQg<}59< zTvH3%XS4G+6FF^(m6bVF&nSUIXcl;nw{=H$%fgeJ>CgDYiLdpDXr{;-AnG z8dvcrHYVMI&`R6;GWekI@Ir3!uo)oz4^{6q0m^}@f2tM9&=YHNi6-?rh0-{+k@cQm zdp`g#YdQn%MDVg2GR>wZ`n2<0l4)9nx1Wfr&!Dvz=bPwU!h2S?ez6MVc5APE4-xLB zi&W9Q8k2@0w!C53g?iAIQ}~p*3O(@zja6KQ=M3zfW*_6o5SwR-)6VBh~m7{^-=MC-owYH5-u40a}a0liho3QZZ5L{bS_xM1)4}19)zTU$$MY zq3eZML1WC{K%YFd`Be0M-rkO^l?h{kM{$2oK1*A@HVJ57*yhDkUF!2WZ&oA4Y-sK( zCY69%#`mBCi6>6uw(x4gbFaP0+FD*JKJ-q!F1E?vLJ+d35!I5d7@^eU?(CS|C^tmI5?lv@s{{*|1F zFg|OzNpZ0hxljdjaW%45O0MOttRrd(Z?h{HYbB-KFUx&9GfFL3b8NwZ$zNu)WbBD` zYkj$^UB5%3Pj1MDr>S2Ejr9pUcgA!;ZG!@{uAy12)vG=*^9-|dNQBc8&`oxBlU~#y zs!anJX&T?57Jdr^sb>e+V`MVfY>Y0ESg7MG<7W0g&bR-ZYzzZ%2H&Etcp zcd6QeXO1D!5A#zM0lx*GH}`M)2~ZFLE;sP^RSB5wVMNfiZXPd(cmO>j=OSA3`o5r& zna(|^jGXbdN7PK)U8b7^zYtYkkeb%<%F~=OqB~kXMQkq}ii|skh@WSRt>5za;cjP0 zZ~nD%6)wzedqE}BMLt~qKwlvTr33))#uP~xyw#*Eaa|DbMQ_%mG0U8numf8)0DX`r zRoG2bM;#g|p-8gWnwRV5SCW0tLjLO&9Z?K>FImeIxlGUgo0Zk`9Qzhj1eco~7XZy+hXc@YF&ZQ=? 
zn*^1O56yK^x{y}q`j7}blGCx%dydV!c7)g~tJzmHhV=W~jbWRRR{1<^oDK+1clprm zz$eCy7y9+?{E|YgkW~}}iB#I4XoJ*xr8R?i_Hv$=Cof5bo-Nj~f`-DLebH}&0% zfQj9@WGd4;N~Y?mzQsHJTJq6!Qzl^-vwol(+fMt#Pl=Wh#lI5Vmu@QM0=_r+1wHt` z+8WZ~c2}KQQ+q)~2Ki77QvV&`xb|xVcTms99&cD$Zz4+-^R4kvUBxG8gDk7Y`K*)JZ^2rL(+ZWV~%W(@6 z)0bPArG#BROa_PHs~&WplQ_UIrpd)1N1QGPfv!J(Z9jNT#i%H?CE6|pPZb9hJ1JW4 z^q;ft#!HRNV0YgPojzIYT`8LuET2rUe-J|c!9l4`^*;4WtY@Ew@pL>wkjmMgGfN7 ze}}GtmU0@<_#08~I-Suk=^*9GLW=H4xhsml;vAV{%hy5Eegl@!6qKqbG024%n2HHw zCc@ivW_$@5ZoHP70(7D+(`PvgjW1Pd`wsiuv-aCukMrafwDm)B!xXVy*j2opohhoU zcJz%ADmj>i3`-3-$7nQKBQQuGY;2Qt&+(L~C>vSGFj5{Mlv?T_^dql;{zkpe4R1}R z%XfZyQ}wr*sr>jrKgm*PWLjuVc%6&&`Kbf1SuFpHPN&>W)$GmqC;pIoBC`=4-hPY8 zT*>%I2fP}vGW;R=^!1be?ta2UQd2>alOFFbVl;(SQJ4Jk#)4Z0^wpWEVvY4=vyDk@ zqlModi@iVPMC+{?rm=4(n+<;|lmUO@UKYA>EPTS~AndtK^Wy^%#3<;(dQdk3WaUkRtzSMC9}7x2||CNpF#(3T4C)@ z$~RWs`BNABKX|{cmBt>Q=&gkXl&x!!NK_%5hW0LS)Z4PB>%sV?F-{Wyj#s7W%$F{D zXdK^Fp3wvy+48+GP6F_|^PCRx=ddcTO3sG;B23A49~Qaw31SZ0Rc~`r4qqt%#OGW{ zCA_(LG5^N>yzUn&kAgVmxb=EA8s&tBXC}S1CZ(KoW)(%^JjLTPo^fs`Va;`=YlVPgmB$!yB}<(4ym6OeZ3xAJJ#;)2+B%p3P1Wt+d$eo`vz`T zXfUP2))kBDPoscH;Jc7I3NU<({|@wM$&GaDt`n7WLgIY3IA7A6-_R?z8N3mz|}*i z(zl5ot--Oq@f2-nv{X(ujT2T(k1vY_qh93pK@>H-qc%2Xta)IP0Q%zt%bqYgI`o!wv!0QerB`nCN^1n|@$sVOQ!V0teVG!I z_fD%JvfDeT1cK#-{o6Gv7}& zY0#NWin~kVaf$aufV&;63Hbs|`QVZWpDX6IMk1Hj2G}fiH9e-^6u2zf^FIr^BwD<6zjw63+{yUe8PUFvk8v{sJ=R{d#`O!sz`Q13~< zPT$JS(w=yQfU2`zPCNfSw=&zup@DXc(98afjhv@1w_f!m2Z>rMJ19AB&dB%P#Ls3b z=lK7OILM+SQ&VEd=1GN6o&>YVVtIzoZ%=Z_SdqJN2}E43{bE`>w+A;=y->@^k{oCC z$F*WTY&?34;kfyFV?b*Xb1Pq`Z=%OgwEg)Rz)tx=`f%5#w_INP=x&z5!jI;#;N$ma zhO)+MDm;SxOEVL15; zGq(v2pL3&P1Sl)8P*;G-fd{l1QJsv@e@d8)1PK4w2m*M%V3j-V~L^$i|&C@b?D?9tfwE{B^}Z$k8e5FmQ>v7Xz)sG32g9t}YBt zyR$+*_00RmPx+0mW+vVG4mxd(n$(eQf3-w>JPl2UJpafrPaL5@2j}%{VE-) zBI%6Qpj*dsdH<;g!S!avA~bv^0E+ zfyJbSjPb+j;J52U)<|cIcntQBI2T#>2;tOxu{%D?kML476AErF(qN9hPva5Nkc@BF zC-tLF@3ZFb%Kpj)M<{)x*l|*Ia@ECeXo2E4h2f!aV=cHAhi_E_mfUth(sM4^hJq7B zQsGWqdZUm9S%F`$nQ*_#NcuD`&)Ek%_s{&^78{9Hm ztri&rYLOxgFdG>O@+XHy 
z9#;|&vBCPXH5Mon^I`jSuR$&~ZWtyB67ujzFSj!51>#C}C17~TffQ{c-!QFQkTQ%! zIR^b1`zHx|*1GU?tbBx23weFLz5H?y_Q%N&t$}k?w+``2A=aotj0;2v$~AL z{scF-cL{wsdrmPvf#a9OHyYLcwQD4Kcm)`LLwMh4WT~p29f7M!iafJSU`IV}QY5Wa z(n44-9oA}?J{a+ah*@31WTs#&J#o1`H98#6IQf;Wv0N_!);f&9g7o-k(lW5rWnDUR zQBFIRG+X=6NnsI@mxnwm;tf5;_Uxg?jZ8m-m0}&6+DA!qam(p$mN5R})yA_7m$q@| zFEd|dpS595rxQr-n#GjI5i-AhnUE>Cr;jpCqSrD~EwK_DqI^7%3#p5)%T_od!t3SOmH9MyXeeGO2(UQL;ax|x?Ncixmeo1=$ z{-);Au{*tfzOG?KQ~K|ak8-HQ?`Pekhe2WM(8s{xv-p>Zmu_6{G!-oE$7$mY`MOJorI=+mMx?H;`pr!;fVYz?5~yXBACruWB`Ph zZM}90_<^OBxIhyZ9BW$`>6JvO;%VFpqVr8|7t3~AmxYak6?`Pp#c;**_SYmi`&z23 z`p6_~ePvH)C6x-G9$hgL=eVALq`-AiamN>!3~Lxw&{H(b{B(7xSRm6<3<{%{yXiH# zos5Rv1L+8fUKJLo%P>4I&$}y +#include +#include "Detection.h" +#include "common/plugin_common.h" + +Detection::~Detection(void) {} + +/** + * SSD model util constructor. + * @param srcImgWidth The width of the original input image. + * @param srcImgHeight The height of the original input image. + */ +Detection::Detection(int srcImgWidth, int srcImgHeight) { + inputImageWidth = srcImgWidth; + inputImageHeight = srcImgHeight; + + getDefaultBoxes(); // To fill the vectordefaultboxes. +} + +std::string Detection::getDecodeResult(float *branchScores, float *branchBoxData) { + + LOGE("mindsporeTag Native getDecodeResult>>>>>>>>>>>>>>>>>>>>>>>>>>>>"); + + std::string result = ""; + NormalBox tmpBox[1917] = {0}; + float mScores[1917][sizeof(g_thres_map) / sizeof(int)] = {0}; + + float outBuff[1917][7] = {0}; + + float scoreWithOneClass[1917] = {0}; + int outBoxNum = 0; + YXBoxes decodedBoxes[1917] = {0}; + LOGE("mindsporeTag Native getDecodeResult>>>>>111111111111"); + + // Copy branch outputs box data to tmpBox. + for (int i = 0; i < 1917; ++i) { + tmpBox[i].y = branchBoxData[i * 4 + 0]; + tmpBox[i].x = branchBoxData[i * 4 + 1]; + tmpBox[i].h = branchBoxData[i * 4 + 2]; + tmpBox[i].w = branchBoxData[i * 4 + 3]; + } + + // Copy branch outputs score to mScores. 
+ for (int i = 0; i < 1917; ++i) { + for (int j = 0; j < sizeof(g_thres_map) / sizeof(int); ++j) { + mScores[i][j] = branchScores[i * sizeof(g_thres_map) / sizeof(int) + j]; + } + } + // NMS processing. + ssd_boxes_decode(tmpBox, decodedBoxes, 0.1, 0.2, 1917); + const float nms_threshold = 0.3; + for (int i = 1; i < sizeof(g_thres_map) / sizeof(int); i++) { + std::vector in_indexes; + for (int j = 0; j < 1917; j++) { + scoreWithOneClass[j] = mScores[j][i]; + if (mScores[j][i] > g_thres_map[i]) { + in_indexes.push_back(j); + } + } + LOGE("mindsporeTag in_indexes.size() %{public}d", in_indexes.size()); + + if (in_indexes.size() == 0) { + continue; + } + + sort(in_indexes.begin(), in_indexes.end(), + [&](int a, int b) { return scoreWithOneClass[a] > scoreWithOneClass[b]; }); + std::vector out_indexes; + + nonMaximumSuppression(decodedBoxes, scoreWithOneClass, in_indexes, &out_indexes, + nms_threshold); + LOGE("mindsporeTag out_indexes.size() %{public}d", out_indexes.size()); + + for (int k = 0; k < out_indexes.size(); k++) { + // image id + outBuff[outBoxNum][0] = out_indexes[k]; + // labelid + outBuff[outBoxNum][1] = i; + // scores + outBuff[outBoxNum][2] = scoreWithOneClass[out_indexes[k]]; + outBuff[outBoxNum][3] = + decodedBoxes[out_indexes[k]].xmin * inputImageWidth / 300; + outBuff[outBoxNum][4] = + decodedBoxes[out_indexes[k]].ymin * inputImageHeight / 300; + outBuff[outBoxNum][5] = + decodedBoxes[out_indexes[k]].xmax * inputImageWidth / 300; + outBuff[outBoxNum][6] = + decodedBoxes[out_indexes[k]].ymax * inputImageHeight / 300; + outBoxNum++; + } + } + LOGE("mindsporeTag Native outBoxNum %{public}d", outBoxNum); + + for (int i = 0; i < outBoxNum; ++i) { + std::string tmpid_str = std::to_string(outBuff[i][0]); + result += tmpid_str; + result += "_"; + LOGD("mindsporeTag Native label_classes i %{public}d, outBuff %{public}d", i, (int)outBuff[i][1]); + tmpid_str = std::to_string(outBuff[i][1]); + // label id + result += tmpid_str; + result += "_"; + tmpid_str = 
std::to_string(outBuff[i][2]); + // scores + result += tmpid_str; + result += "_"; + tmpid_str = std::to_string(outBuff[i][3]); + // xmin + result += tmpid_str; + result += "_"; + tmpid_str = std::to_string(outBuff[i][4]); + // ymin + result += tmpid_str; + result += "_"; + tmpid_str = std::to_string(outBuff[i][5]); + // xmax + result += tmpid_str; + result += "_"; + tmpid_str = std::to_string(outBuff[i][6]); + // ymax + result += tmpid_str; + result += ";"; + } + + return result; +} + +void Detection::getDefaultBoxes() { + float fk[6] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; + std::vector all_sizes; + struct Product mProductData[19 * 19] = {0}; + + for (int i = 0; i < 6; i++) { + fk[i] = config.model_input_height / config.steps[i]; + } + float scale_rate = + (config.max_scale - config.min_scale) / (sizeof(config.num_default) / sizeof(int) - 1); + float scales[7] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0}; + for (int i = 0; i < sizeof(config.num_default) / sizeof(int); i++) { + scales[i] = config.min_scale + scale_rate * i; + } + + for (int idex = 0; idex < sizeof(config.feature_size) / sizeof(int); idex++) { + float sk1 = scales[idex]; + float sk2 = scales[idex + 1]; + float sk3 = sqrt(sk1 * sk2); + struct WHBox tempWHBox; + + all_sizes.clear(); + + // idex == 0时, len(all_sizes) = 3. + if (idex == 0) { + float w = sk1 * sqrt(2); + float h = sk1 / sqrt(2); + + // all_sizes = [(0.1, 0.1), (w, h), (h, w)] + tempWHBox.boxw = 0.1; + tempWHBox.boxh = 0.1; + all_sizes.push_back(tempWHBox); + + tempWHBox.boxw = w; + tempWHBox.boxh = h; + all_sizes.push_back(tempWHBox); + + tempWHBox.boxw = h; + tempWHBox.boxh = w; + all_sizes.push_back(tempWHBox); + } else { + // len(all_sizes) = 6. 
+ tempWHBox.boxw = sk1; + tempWHBox.boxh = sk1; + all_sizes.push_back(tempWHBox); + + for (int j = 0; j < sizeof(config.aspect_ratios[idex]) / sizeof(int); j++) { + float w = sk1 * sqrt(config.aspect_ratios[idex][j]); + float h = sk1 / sqrt(config.aspect_ratios[idex][j]); + + tempWHBox.boxw = w; + tempWHBox.boxh = h; + all_sizes.push_back(tempWHBox); + + tempWHBox.boxw = h; + tempWHBox.boxh = w; + all_sizes.push_back(tempWHBox); + } + + tempWHBox.boxw = sk3; + tempWHBox.boxh = sk3; + all_sizes.push_back(tempWHBox); + } + + for (int i = 0; i < config.feature_size[idex]; i++) { + for (int j = 0; j < config.feature_size[idex]; j++) { + mProductData[i * config.feature_size[idex] + j].x = i; + mProductData[i * config.feature_size[idex] + j].y = j; + } + } + + int productLen = config.feature_size[idex] * config.feature_size[idex]; + + for (int i = 0; i < productLen; i++) { + for (int j = 0; j < all_sizes.size(); j++) { + struct NormalBox tempBox; + + float cx = (mProductData[i].y + 0.5) / fk[idex]; + float cy = (mProductData[i].x + 0.5) / fk[idex]; + + tempBox.y = cy; + tempBox.x = cx; + tempBox.h = all_sizes[j].boxh; + tempBox.w = all_sizes[j].boxw; + + mDefaultBoxes.push_back(tempBox); + } + } + } +} + +void Detection::ssd_boxes_decode(const NormalBox *boxes, + YXBoxes *const decoded_boxes, const float scale0, + const float scale1, const int count) { + LOGE("mindsporeTag Native ssd_boxes_decode*******************"); + + if (mDefaultBoxes.size() == 0) { + LOGE("mindsporeTag Native get default boxes error."); + return; + } + + for (int i = 0; i < count; ++i) { + float cy = boxes[i].y * scale0 * mDefaultBoxes[i].h + mDefaultBoxes[i].y; + float cx = boxes[i].x * scale0 * mDefaultBoxes[i].w + mDefaultBoxes[i].x; + float h = exp(boxes[i].h * scale1) * mDefaultBoxes[i].h; + float w = exp(boxes[i].w * scale1) * mDefaultBoxes[i].w; + decoded_boxes[i].ymin = + std::min(1.0f, std::max(0.0f, cy - h / 2)) * config.model_input_height; + decoded_boxes[i].xmin = + std::min(1.0f, 
std::max(0.0f, cx - w / 2)) * config.model_input_width; + decoded_boxes[i].ymax = + std::min(1.0f, std::max(0.0f, cy + h / 2)) * config.model_input_height; + decoded_boxes[i].xmax = + std::min(1.0f, std::max(0.0f, cx + w / 2)) * config.model_input_width; + } +} + +void Detection::nonMaximumSuppression(const YXBoxes *const decoded_boxes, + const float *const scores, + const std::vector &in_indexes, + std::vector *out_indexes_p, const float nmsThreshold, + const int count, const int max_results) { + LOGE("mindsporeTag Native nonMaximumSuppression@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); + int nR = 0; + std::vector &out_indexes = *out_indexes_p; + std::vector del(count, false); + for (size_t i = 0; i < in_indexes.size(); i++) { + if (!del[in_indexes[i]]) { + out_indexes.push_back(in_indexes[i]); + if (++nR == max_results) { + break; + } + for (size_t j = i + 1; j < in_indexes.size(); j++) { + const auto boxi = decoded_boxes[in_indexes[i]], boxj = decoded_boxes[in_indexes[j]]; + float a[4] = {boxi.xmin, boxi.ymin, boxi.xmax, boxi.ymax}; + float b[4] = {boxj.xmin, boxj.ymin, boxj.xmax, boxj.ymax}; + if (IOU(a, b) > nmsThreshold) { + del[in_indexes[j]] = true; + } + } + } + } +} + +double Detection::IOU(float r1[4], float r2[4]) { + float x1 = std::max(r1[0], r2[0]); + float y1 = std::max(r1[1], r2[1]); + float x2 = std::min(r1[2], r2[2]); + float y2 = std::min(r1[3], r2[3]); + // if max(min) > min(max), there is no intersection + if (x2 - x1 + 1 <= 0 || y2 - y1 + 1 <= 0) + return 0; + double insect_area = (x2 - x1 + 1) * (y2 - y1 + 1); + double union_area = + (r1[2] - r1[0] + 1) * (r1[3] - r1[1] + 1) + (r2[2] - r2[0] + 1) * (r2[3] - r2[1] + 1) - + insect_area; + double iou = insect_area / union_area; + return (iou > 0) ? 
iou : 0; +} diff --git a/application_example/android/classification/entry/src/main/cpp/Detection.h b/application_example/android/classification/entry/src/main/cpp/Detection.h new file mode 100644 index 0000000..92ec8be --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/Detection.h @@ -0,0 +1,266 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_Detection_H +#define MINDSPORE_Detection_H + +#include +#include + +#include +#include + +class Detection { + public: + // Constructor. + Detection(int srcImageWidth, int srcImgHeight); + + ~Detection(); + + /** + * Return the SSD model post-processing result. 
+ * @param branchScores + * @param branchBoxData + * @return + */ + std::string getDecodeResult(float *branchScores, float *branchBoxData); + + struct NormalBox { + float y; + float x; + float h; + float w; + }; + + struct YXBoxes { + float ymin; + float xmin; + float ymax; + float xmax; + }; + + struct Product { + int x; + int y; + }; + + struct WHBox { + float boxw; + float boxh; + }; + + private: + std::vector mDefaultBoxes; + int inputImageHeight; + int inputImageWidth; + + void getDefaultBoxes(); + + void ssd_boxes_decode(const NormalBox *boxes, + YXBoxes *const decoded_boxes, + const float scale0 = 0.1, const float scale1 = 0.2, + const int count = 1917); + + void nonMaximumSuppression(const YXBoxes *const decoded_boxes, const float *const scores, + const std::vector &in_indexes, std::vector *out_indexes_p, + const float nmsThreshold = 0.6, + const int count = 1917, const int max_results = 100); + + double IOU(float r1[4], float r2[4]); + + // ============= variables =============. 
+ struct network { + int model_input_height = 300; + int model_input_width = 300; + + int num_default[6] = {3, 6, 6, 6, 6, 6}; + int feature_size[6] = {19, 10, 5, 3, 2, 1}; + double min_scale = 0.2; + float max_scale = 0.95; + float steps[6] = {16, 32, 64, 100, 150, 300}; + float prior_scaling[2] = {0.1, 0.2}; + float gamma = 2.0; + float alpha = 0.75; + int aspect_ratios[6][2] = {{2, 0}, + {2, 3}, + {2, 3}, + {2, 3}, + {2, 3}, + {2, 3}}; + } config; + + float g_thres_map[81] = { + 0, + 0.635, + 0.627, + 0.589, + 0.585, + 0.648, + 0.664, + 0.655, + 0.481, + 0.529, + 0.611, + 0.641, + 0.774, + 0.549, + 0.513, + 0.652, + 0.552, + 0.590, + 0.650, + 0.575, + 0.583, + 0.650, + 0.656, + 0.696, + 0.653, + 0.438, + 0.515, + 0.459, + 0.561, + 0.545, + 0.635, + 0.540, + 0.560, + 0.721, + 0.544, + 0.548, + 0.511, + 0.611, + 0.592, + 0.542, + 0.512, + 0.635, + 0.531, + 0.437, + 0.525, + 0.445, + 0.484, + 0.546, + 0.490, + 0.581, + 0.566, + 0.516, + 0.445, + 0.541, + 0.613, + 0.560, + 0.483, + 0.509, + 0.464, + 0.543, + 0.538, + 0.490, + 0.576, + 0.617, + 0.577, + 0.595, + 0.640, + 0.585, + 0.598, + 0.592, + 0.514, + 0.397, + 0.592, + 0.504, + 0.548, + 0.642, + 0.581, + 0.497, + 0.545, + 0.154, + 0.580, + }; + + std::string object_detection[81] = { + " 背景", "人", "自行车", "汽车", "摩托车 ", "飞机 ", "公共汽车 ", + "火车 ", + "卡车 ", + "船 ", + "交通灯 ", + "消防栓 ", + "停车标志 ", + "停车计时器 ", + "长凳 ", + "鸟 ", + "猫 ", + "狗 ", + "马 ", + "绵羊 ", + "牛 ", + "大象 ", + "熊 ", + "斑马 ", + "长颈鹿 ", + "背包 ", + "雨伞 ", + "手提包 ", + "领带 ", + "手提箱 ", + "飞盘 ", + "滑雪板 ", + "雪板 ", + "运动用的球 ", + "风筝 ", + "棒球棒 ", + "棒球手套 ", + "滑板 ", + "冲浪板 ", + "网球拍 ", + "瓶子 ", + "酒杯 ", + "杯 ", + "叉子 ", + "刀 ", + "勺子 ", + "碗 ", + "香蕉 ", + "苹果 ", + "三明治 ", + "橙子 ", + "西兰花 ", + "胡萝卜 ", + "热狗 ", + "披萨 ", + "甜甜圈 ", + "蛋糕 ", + "椅子 ", + "沙发 ", + "盆栽植物 ", + "床 ", + "餐桌 ", + "厕所 ", + "电视 ", + "笔记本电脑 ", + "鼠标 ", + "遥控器 ", + "键盘 ", + "手机 ", + "微波炉 ", + "烤箱 ", + "烤面包机 ", + "水槽 ", + "冰箱 ", + "书 ", + "时钟 ", + "花瓶 ", + "剪刀 ", + "泰迪熊 ", + "吹风机 ", + "牙刷 "}; +}; 
+#endif diff --git a/application_example/android/classification/entry/src/main/cpp/common/plugin_common.h b/application_example/android/classification/entry/src/main/cpp/common/plugin_common.h new file mode 100644 index 0000000..6d1a5c5 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/common/plugin_common.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _PLUGIN_COMMON_H_ +#define _PLUGIN_COMMON_H_ + +#include + +#define LOGI(...) ((void)OH_LOG_Print(LOG_APP, LOG_INFO, LOG_DOMAIN, "[NativeAPI]", __VA_ARGS__)) +#define LOGD(...) ((void)OH_LOG_Print(LOG_APP, LOG_DEBUG, LOG_DOMAIN, "[NativeAPI]", __VA_ARGS__)) +#define LOGW(...) ((void)OH_LOG_Print(LOG_APP, LOG_WARN, LOG_DOMAIN, "[NativeAPI]", __VA_ARGS__)) +#define LOGE(...) 
((void)OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_DOMAIN, "[NativeAPI]", __VA_ARGS__)) + +#endif // _PLUGIN_COMMON_H_ \ No newline at end of file diff --git a/application_example/android/classification/entry/src/main/cpp/hello.cpp b/application_example/android/classification/entry/src/main/cpp/hello.cpp new file mode 100644 index 0000000..9d97859 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/hello.cpp @@ -0,0 +1,525 @@ +#include "napi/native_api.h" + +#include "common/plugin_common.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mindspore/context.h" +#include "mindspore/data_type.h" +#include "mindspore/format.h" +#include "mindspore/model.h" +#include "mindspore/status.h" +#include "mindspore/tensor.h" +#include "mindspore/types.h" + +#include "include/api/model.h" +#include "include/api/context.h" +#include "include/api/types.h" +#include "include/api/serialization.h" +#include "include/dataset/vision_lite.h" +#include "include/dataset/execute.h" +#include "include/dataset/transforms.h" + +#include "api/types.h" +#include "dataset/lite_cv/lite_mat.h" +#include "dataset/lite_cv/image_process.h" +#include "dataset/vision_lite.h" +#include "dataset/execute.h" + +#include "Detection.h" +#include "hello.h" + +using mindspore::Context; +using mindspore::GraphCell; +using mindspore::kSuccess; +using mindspore::Model; +using mindspore::ModelType; +using mindspore::MSTensor; +using mindspore::Serialization; +using mindspore::Status; +using mindspore::dataset::Execute; +using mindspore::dataset::LDataType; +using mindspore::dataset::LiteMat; +using mindspore::dataset::PaddBorderType; +using mindspore::dataset::TensorTransform; +using mindspore::dataset::vision::Crop; +using mindspore::dataset::vision::Decode; +using mindspore::dataset::vision::Normalize; +using 
mindspore::dataset::vision::Resize; + +using namespace mindspore; +using namespace std; + +static napi_value Add(napi_env env, napi_callback_info info) { + LOGE("mindsporeTag Native Add Add"); + size_t requireArgc = 2; + size_t argc = 2; + napi_value args[2] = {nullptr}; + + napi_get_cb_info(env, info, &argc, args, nullptr, nullptr); + + napi_valuetype valuetype0; + napi_typeof(env, args[0], &valuetype0); + + napi_valuetype valuetype1; + napi_typeof(env, args[1], &valuetype1); + + double value0; + napi_get_value_double(env, args[0], &value0); + + double value1; + napi_get_value_double(env, args[1], &value1); + + napi_value sum; + napi_create_double(env, value0 + value1, &sum); + LOGE("mindsporeTag Native Add sum>>>> %{public}f", sum); + + return sum; +} + +static napi_value InitClassificationModel(napi_env env, napi_callback_info info) { + LOGI("mindsporeTag Native InitClassificationModel"); + size_t requireArgc = 3; + size_t argc = 3; + napi_value args[3] = {nullptr}; + + napi_get_cb_info(env, info, &argc, args, nullptr, nullptr); + + napi_valuetype valuetype0; + napi_typeof(env, args[0], &valuetype0); + + napi_valuetype valuetype1; + napi_typeof(env, args[1], &valuetype1); + + int value1; + napi_get_value_int32(env, args[1], &value1); + + size_t ret = 0; + void *buffer_tmp = (void *)malloc(value1); + void *buffer_ptr = nullptr; + napi_get_arraybuffer_info(env, args[0], &buffer_ptr, &ret); + memcpy(buffer_tmp, buffer_ptr, value1); + LOGE("mindsporeTag Native ********** buffer_ptr>> %{public}u", buffer_ptr); + LOGE("mindsporeTag Native ********** value1>> %{public}d", value1); + + LOGI("mindsporeTag Native ret length>> %{public}u", ret); + + napi_valuetype valuetype2; + napi_typeof(env, args[2], &valuetype0); + + char *imagePath = (char *)malloc(64); + size_t typeLen = 0; + napi_get_value_string_utf8(env, args[2], imagePath, 64, &typeLen); + LOGI("mindsporeTag Native InitImage2 %{public}s", imagePath); + + ifstream ifs(imagePath); + if (!ifs.is_open() || 
!ifs.good()) { + LOGE("mindsporeTag Native fail to load image, check image path"); + } + + ifs.seekg(0, ios::end); + size_t size = ifs.tellg(); + LOGI("mindsporeTag Native size_t size %{public}d ", size); + + + mindspore::MSTensor *image = MSTensor::CreateTensor("file", mindspore::DataType::kNumberTypeUInt8, {static_cast(size)}, nullptr, 0); + if (image == nullptr) { + LOGE("mindsporeTag image is nullptr"); + } + ifs.seekg(0, ios::beg); + ifs.read(reinterpret_cast(image->MutableData()), size); + ifs.close(); + + shared_ptr decode = make_shared(); + shared_ptr resize = make_shared(vector{256, 256}); + shared_ptr scale = make_shared(vector{0, 0, 0}, vector{255, 255, 255}); + shared_ptr crop = make_shared(vector{16, 16}, vector{224, 224}); + shared_ptr normalize = make_shared( + vector{0.485, 0.456, 0.406}, vector{0.229, 0.224, 0.225}); + + vector> trans_list; + trans_list = {decode, resize, scale, crop, normalize}; + auto executor = Execute(trans_list); + executor(*image, image); + + // Create and init context, add CPU device info + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + if (context == NULL) { + LOGE("mindsporeTag Native_ContextCreate failed."); + } else { + LOGI("mindsporeTag Native_ContextCreate SUCCESS."); + } + const int thread_num = 2; + OH_AI_ContextSetThreadNum(context, thread_num); + OH_AI_ContextSetThreadAffinityMode(context, 1); + + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + if (cpu_device_info == NULL) { + LOGE("mindsporeTag Native_DeviceInfoCreate failed."); + OH_AI_ContextDestroy(&context); + } else { + LOGI("mindsporeTag Native_DeviceInfoCreate SUCCESS."); + } + OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); + + // Create model + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + if (model == NULL) { + LOGE("mindsporeTag Native_ModelCreate failed."); + OH_AI_ContextDestroy(&context); + } else { + LOGI("mindsporeTag 
Native_ModelCreate SUCCESS."); + } + // Build model + int ret2 = OH_AI_ModelBuild(model, buffer_tmp, ret, OH_AI_MODELTYPE_MINDIR, context); + if (ret2 != OH_AI_STATUS_SUCCESS) { + LOGE("mindsporeTag Native_ModelBuild failed, ret: %{public}u", ret2); + OH_AI_ModelDestroy(&model); + } else { + LOGI("mindsporeTag Native_ModelBuild SUCCESS, ret: %{public}u", ret2); + } + // Get Inputs + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + if (inputs.handle_list == NULL) { + LOGE("mindsporeTag Native_ModelGetInputs failed, inputs.handle_num: %{public}d", inputs.handle_num); + OH_AI_ModelDestroy(&model); + } else { + LOGI("mindsporeTag Native_ModelGetInputs SUCCESS, inputs.handle_num: %{public}d", inputs.handle_num); + } + + OH_AI_TensorSetData(inputs.handle_list[0], image->MutableData()); + OH_AI_TensorSetDataType(inputs.handle_list[0], OH_AI_DATATYPE_NUMBERTYPE_FLOAT32); + OH_AI_TensorSetName(inputs.handle_list[0], image->Name().c_str()); + OH_AI_TensorSetFormat(inputs.handle_list[0], OH_AI_FORMAT_NCHW); + + // Get Outputs + OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model); + if (outputs.handle_list == NULL) { + LOGE("mindsporeTag Native_ModelGetOutputs failed, ret: %{public}u", outputs.handle_num); + OH_AI_ModelDestroy(&model); + } else { + LOGI("mindsporeTag Native_ModelGetOutputs SUCCESS."); + } + + // Model Predict + OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, NULL, NULL); + if (predict_ret != OH_AI_STATUS_SUCCESS) { + OH_AI_ModelDestroy(&model); + LOGE("mindsporeTag Native Predict model error "); + } else { + LOGI("mindsporeTag Native Predict model SUCCESS."); + } + + // 获取模型的输出张量,并打印 + OH_AI_TensorHandle tensor = outputs.handle_list[0]; + int64_t element_num = OH_AI_TensorGetElementNum(tensor); + LOGE("mindsporeTag Native Tensor name: %{public}s, tensor size is %{public}u ,elements num: %{public}d", + OH_AI_TensorGetName(tensor), OH_AI_TensorGetDataSize(tensor), element_num); + const float *data = (const float 
*)OH_AI_TensorGetData(tensor); + + float scores[hello::RET_CATEGORY_SUM]; + for (int i = 0; i < hello::RET_CATEGORY_SUM; ++i) { + scores[i] = data[i]; + } + + const float unifiedThre = 0.5; + const float probMax = 1.0; + + map my_map; + for (int i = 0; i < hello::RET_CATEGORY_SUM; ++i) { + float threshold = hello::g_thres_map[i]; + float tmpProb = scores[i]; + if (tmpProb < threshold) { + tmpProb = tmpProb / threshold * unifiedThre; + } else { + tmpProb = (tmpProb - threshold) / (probMax - threshold) * unifiedThre + unifiedThre; + } + scores[i] = tmpProb; + my_map.insert(pair(hello::category[i], scores[i])); + } + + // Score for each category. + // Converted to text information that needs to be displayed in the APP. + string categoryScore = ""; + for (int i = 0; i < hello::RET_CATEGORY_SUM; ++i) { + categoryScore += to_string(i); + categoryScore += ":"; + string score_str = to_string(scores[i]); + categoryScore += score_str; + categoryScore += ";"; + } + LOGI("mindsporeTag Native categoryScore:%{public}s", categoryScore.c_str()); + + // vector> vtMap; + // for (auto it = my_map.begin(); it != my_map.end(); it++) { + // vtMap.push_back(make_pair(it->first, it->second)); + // } + // sort(vtMap.begin(), vtMap.end(), + // [](const pair &x, const pair &y) -> float { + // return y.second < x.second; + // }); + // + // napi_value obj; + // for (auto n : vtMap) { + // napi_value info; + // + // const char *key = n.first.c_str(); + // napi_value keyApi; + // napi_create_string_utf8(env, key, n.first.length(), &keyApi); + // + // napi_value valueApi; + // napi_create_double(env, n.second, &valueApi); + // + // const char *name = "name"; + // napi_value nameApi; + // napi_create_string_utf8(env, name, strlen(name), &nameApi); + // napi_set_property(env, info, nameApi, keyApi); + // + // const char *score = "score"; + // napi_value scoreApi; + // napi_create_string_utf8(env, score, strlen(score), &scoreApi); + // napi_set_property(env, info, scoreApi, valueApi); + // + // 
napi_set_property(env, obj, keyApi, valueApi); + // } + + // free(buffer_tmp); + + const char *resultCharData = categoryScore.c_str(); + napi_value result; + napi_create_string_utf8(env, resultCharData, categoryScore.length(), &result); + + OH_AI_ModelDestroy(&model); + + return result; +} + +static napi_value InitDetectionModel(napi_env env, napi_callback_info info) { + LOGI("mindsporeTag Native InitDetectionModel"); + size_t requireArgc = 3; + size_t argc = 3; + napi_value args[3] = {nullptr}; + + napi_get_cb_info(env, info, &argc, args, nullptr, nullptr); + + napi_valuetype valuetype0; + napi_typeof(env, args[0], &valuetype0); + + napi_valuetype valuetype1; + napi_typeof(env, args[1], &valuetype1); + + int value1; + napi_get_value_int32(env, args[1], &value1); + + size_t ret = 0; + void *buffer_tmp = (void *)malloc(value1); + napi_get_arraybuffer_info(env, args[0], &buffer_tmp, &ret); + + LOGI("mindsporeTag Native ret length>> %{public}u", ret); + + napi_valuetype valuetype2; + napi_typeof(env, args[2], &valuetype0); + + char *imagePath = (char *)malloc(64); + size_t typeLen = 0; + napi_get_value_string_utf8(env, args[2], imagePath, 64, &typeLen); + + ifstream ifs(imagePath); + if (!ifs.is_open() || !ifs.good()) { + LOGE("mindsporeTag Native fail to load image, check image path"); + } + + LOGI("mindsporeTag Native ifstream succeed "); + + ifs.seekg(0, ios::end); + size_t size = ifs.tellg(); + LOGI("mindsporeTag Native size_t size %{public}d ", size); + + mindspore::MSTensor image("file", mindspore::DataType::kNumberTypeUInt8, {static_cast(size)}, nullptr, 0); + + if (image == nullptr) { + LOGE("mindsporeTag image is nullptr"); + } + + ifs.seekg(0, ios::beg); + ifs.read(reinterpret_cast(image.MutableData()), size); + ifs.close(); + + auto decode = Decode(); + auto executor = Execute(decode); + executor(image, &image); + + int imgWidth = image.Shape()[0]; + int imgHeght = image.Shape()[1]; + + uint8_t *temp_scores = static_cast(image.MutableData()); + 
LOGI("mindsporeTag Native image.size() :%{public}d image[0]>>>%{public}d ,1 >>>%{public}d ,2 >>>%{public}d ,3 >>>%{public}d ,4 >>>%{public}d", + image.DataSize(), temp_scores[0], temp_scores[1], temp_scores[2], temp_scores[3], temp_scores[4]); + + shared_ptr resize = make_shared(vector{300, 300}); + shared_ptr scale = make_shared(vector{0, 0, 0}, + vector{255, 255, 255}); + shared_ptr normalize = make_shared( + vector{0.485 * 0.229, 0.456 * 0.224, 0.406 * 0.225}, vector{0.229, 0.224, 0.225}); + + uint8_t *temp_scores2 = static_cast(image.MutableData()); + + mindspore::dataset::Execute ss2({resize, scale, normalize}); + auto ret4 = ss2(image, &image); + float *temp_scores4 = static_cast(image.MutableData()); + LOGI("mindsporeTag Native >>>>>>>>> image.size() :%{public}d image[0]>>>%{public}f ,1 >>>%{public}f ,2 >>>%{public}f ,3 >>>%{public}f ,4 >>>%{public}f", + image.DataSize(), temp_scores4[0], temp_scores4[1], temp_scores4[2], temp_scores4[3], temp_scores4[4]); + + // Create and init context, add CPU device info + OH_AI_ContextHandle context = OH_AI_ContextCreate(); + if (context == NULL) { + LOGE("mindsporeTag Native_ContextCreate failed."); + } else { + LOGI("mindsporeTag Native_ContextCreate SUCCESS."); + } + const int thread_num = 2; + OH_AI_ContextSetThreadNum(context, thread_num); + OH_AI_ContextSetThreadAffinityMode(context, 1); + + OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU); + if (cpu_device_info == NULL) { + LOGE("mindsporeTag Native_DeviceInfoCreate failed."); + OH_AI_ContextDestroy(&context); + } else { + LOGI("mindsporeTag Native_DeviceInfoCreate SUCCESS."); + } + OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false); + OH_AI_ContextAddDeviceInfo(context, cpu_device_info); + + // Create model + OH_AI_ModelHandle model = OH_AI_ModelCreate(); + if (model == NULL) { + LOGE("mindsporeTag Native_ModelCreate failed."); + OH_AI_ContextDestroy(&context); + } else { + LOGI("mindsporeTag Native_ModelCreate SUCCESS."); 
+ } + // Build model + int ret2 = OH_AI_ModelBuild(model, buffer_tmp, ret, OH_AI_MODELTYPE_MINDIR, context); + if (ret2 != OH_AI_STATUS_SUCCESS) { + LOGE("mindsporeTag Native_ModelBuild failed, ret: %{public}u", ret2); + OH_AI_ModelDestroy(&model); + } else { + LOGE("mindsporeTag Native_ModelBuild SUCCESS, ret: %{public}u", ret2); + } + // Get Inputs + OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model); + if (inputs.handle_list == NULL) { + LOGE("mindsporeTag Native_ModelGetInputs failed, inputs.handle_num: %{public}d", inputs.handle_num); + OH_AI_ModelDestroy(&model); + } else { + LOGI("mindsporeTag Native_ModelGetInputs SUCCESS, inputs.handle_num: %{public}d", inputs.handle_num); + } + + OH_AI_TensorSetData(inputs.handle_list[0], image.MutableData()); + OH_AI_TensorSetDataType(inputs.handle_list[0], OH_AI_DATATYPE_NUMBERTYPE_FLOAT32); + OH_AI_TensorSetName(inputs.handle_list[0], image.Name().c_str()); + OH_AI_TensorSetFormat(inputs.handle_list[0], OH_AI_FORMAT_NCHW); + + // Get Outputs + OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model); + if (outputs.handle_list == NULL) { + LOGE("mindsporeTag Native_ModelGetOutputs failed, ret: %{public}u", outputs.handle_num); + OH_AI_ModelDestroy(&model); + } else { + LOGI("mindsporeTag Native_ModelGetOutputs SUCCESS."); + } + + // Model Predict + OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, NULL, NULL); + if (predict_ret != OH_AI_STATUS_SUCCESS) { + OH_AI_ModelDestroy(&model); + LOGE("mindsporeTag Native Predict model error "); + } else { + LOGI("mindsporeTag Native Predict model SUCCESS."); + } + + unordered_map msOutputs; + for (size_t i = 0; i < outputs.handle_num; ++i) { + + OH_AI_TensorHandle tensor = outputs.handle_list[i]; + int64_t element_num = OH_AI_TensorGetElementNum(tensor); + const float *data = (const float *)OH_AI_TensorGetData(tensor); + string name = OH_AI_TensorGetName(tensor); + float *ptr = static_cast(OH_AI_TensorGetMutableData(tensor)); + 
msOutputs.insert(pair{name, tensor}); + } + + unordered_map::iterator iter; + iter = msOutputs.begin(); + auto branch2_string = iter->first; + auto branch2_tensor = iter->second; + + ++iter; + auto branch1_string = iter->first; + auto branch1_tensor = iter->second; + LOGI("mindsporeTag Native ProcessRunnetResult %{public}s ,ProcessRunnetResult >>>%{public}s", branch1_string.c_str(), branch2_string.c_str()); + + float *tmpscores2 = reinterpret_cast(OH_AI_TensorGetMutableData(branch1_tensor)); + float *tmpdata = reinterpret_cast(OH_AI_TensorGetMutableData(branch2_tensor)); + LOGI("mindsporeTag Native Result %{public}f ,Result >>>%{public}f", *tmpscores2, *tmpdata); + + // Using ssd model util to process model branch outputs. + Detection Detection(500, 500); + LOGI("mindsporeTag Native Result %{public}f ,Result >>>%{public}f", *tmpscores2, *tmpdata); + + string retStr = Detection.getDecodeResult(tmpdata, tmpscores2); + LOGI("mindsporeTag Native ProcessRunnetResult retStr %{public}s", retStr.c_str()); + + free(buffer_tmp); + const char *resultCharData = retStr.c_str(); + napi_value result; + napi_create_string_utf8(env, resultCharData, retStr.length(), &result); + return result; +} + +EXTERN_C_START +static napi_value Init(napi_env env, napi_value exports) { + napi_property_descriptor desc[] = { + {"add", nullptr, Add, nullptr, nullptr, nullptr, napi_default, nullptr}, + {"initClassificationModel", nullptr, InitClassificationModel, nullptr, nullptr, nullptr, napi_default, nullptr}, + {"initDetectionModel", nullptr, InitDetectionModel, nullptr, nullptr, nullptr, napi_default, nullptr}}; + napi_define_properties(env, exports, sizeof(desc) / sizeof(desc[0]), desc); + return exports; +} +EXTERN_C_END + +static napi_module demoModule = { + .nm_version = 1, + .nm_flags = 0, + .nm_filename = nullptr, + .nm_register_func = Init, + .nm_modname = "hello", + .nm_priv = ((void *)0), + .reserved = {0}, +}; + +extern "C" __attribute__((constructor)) void RegisterHelloModule(void) { 
+ napi_module_register(&demoModule); +} diff --git a/application_example/android/classification/entry/src/main/cpp/hello.h b/application_example/android/classification/entry/src/main/cpp/hello.h new file mode 100644 index 0000000..b5b1024 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/hello.h @@ -0,0 +1,841 @@ +// +// Created on 2022/10/24. +// +// Node APIs are not fully supported. To solve the compilation error of the interface cannot be found, +// please include "napi/native_api.h". +#include +#include + +#include + +#ifndef classification_hello_H +#define classification_hello_H + +using namespace std; + +class hello { + public: + static const int RET_CATEGORY_SUM = 410; + static constexpr const char *category[RET_CATEGORY_SUM]{"放牧", "苹果浏览器", "手镯", + "坐垫", + "台面", + "舞会", + "科", + "体育", + "天空", + "社区", + "轮", + "可乐", + "男士礼服", + "花盆", + "球队", + "电脑", + "独轮车", + "双桅船", + "航空航天工程", + "水肺潜水", + "风镜", + "水果", + "羽毛球", + "马", + "墨镜", + "好玩", + "草原", + "海报", + "旗", + "快艇", + "睫毛", + "面纱", + "移动电话", + "独轮车", + "碟", + "皮革", + "抽屉", + "纸", + "码头", + "水禽", + "紧身衣", + "人力车", + "蔬菜", + "扶手", + "冰", + "金属", + "花", + "翅膀", + "银器", + "事件", + "天际线", + "钱", + "漫画", + "手提包", + "瓷", + "竞技者", + "窗帘", + "瓦", + "人类的嘴巴", + "团队", + "菜单", + "船", + "单板滑雪", + "凯恩狗", + "网", + "粘贴", + "杯子", + "橄榄球", + " 粉", + "帽", + "人的头发", + "冲浪", + "双人沙发", + "博物馆", + "沉船", + "树", + "长毛绒", + "单色", + "火山", + "岩石", + "枕头", + "介绍", + "星云", + "低音炮", + "湖", + "雪橇", + "刘海", + "桌布", + "项链", + "游泳衣", + "站立", + "牛仔裤", + "狂欢", + "垒球", + "中心", + "滑板手", + "蛋糕", + "龙", + "极光", + "滑雪", + "浴室", + "狗", + "针线活", + "雨伞", + "教会", + "火", + "钢琴", + "牛仔布", + "辔", + "内阁", + "口红", + "环", + "电视", + "滚筒", + "密封", + "音乐会", + "产品", + "新闻", + "快餐", + "角(动物)", + "纹身", + "鸟", + "新郎", + "爱", + "头盔", + "恐龙", + "刨冰", + "微型", + "胎", + "玩具", + "冰柱", + "夹克", + "咖啡", + "清真寺", + "划船", + "潜水衣", + "露营", + "水下", + "圣诞", + "格拉托", + "白板", + "领域", + "布偶猫", + "施工", + "灯罩", + "宫", + "膳食", + "厂", + "笼", + "快船(船)", + 
"体操", + "龟", + "人脚", + "婚姻", + "网页", + "人胡须", + "多雾路段", + "羊毛", + "卡布奇诺咖啡", + "灯塔", + "乐高", + "火花", + "莎丽", + "模型", + "寺庙", + "豆豆", + "建造", + "瀑布", + "企鹅", + "洞穴", + "体育场", + "微笑", + "人的手", + "公园", + "台", + "设得兰群岛牧羊犬", + "酒吧", + "吃", + "氖", + "斑点狗", + "鳄鱼", + "滑水", + "长板", + "路", + "种族", + "厨房", + "里程表", + "悬崖", + "小说", + "学校", + "相互作用", + "斗牛", + "义和团", + "袍", + "水族馆", + "超级英雄", + "馅饼", + "沥青", + "冲浪板", + "芝士汉堡", + "屏幕截图", + "晚餐", + "笑", + "午餐", + "派对", + "冰川", + "长凳", + "祖父母", + "水槽", + "雀鲷科", + "西装外套", + "砖", + "空间", + "背包", + "毛绒玩具", + "寿司", + "闪光", + "篝火", + "城堡", + "马拉松", + "比萨", + "海滩", + "人耳", + "赛车", + "坐着", + "冰山", + "架", + "车辆", + "流行音乐", + "操场", + "小丑", + "汽车", + "收紧", + "毛皮", + "音乐家", + "赌场", + "宝宝", + "醇", + "背带", + "礁", + "气球", + "外套", + "大教堂", + "竞争", + "小丑", + "黑板", + "双层床", + "熊", + "月亮", + "射箭", + "马球", + "河", + "钓鱼", + "摩天轮", + "砂浆板", + "手镯", + "肉", + "雕像", + "农场", + "沙漠", + "链", + "飞机", + "纺织品", + "热狗", + "针织", + "歌手", + "果汁", + "马戏团", + "椅子", + "乐器", + "房间", + "钩针", + "帆船", + "报纸", + "圣诞老人", + "沼泽", + "摩天大楼", + "皮肤", + "火箭", + "航空", + "客机", + "花园", + "废墟", + "风暴", + "眼镜", + "平衡", + "指甲(身体部位)", + "彩虹", + "泥", + "假期", + "胡子", + "桌巾", + "餐饮", + "新娘", + "牛", + "口袋", + "基础设施", + "培养", + "沙鼠", + "烟花", + "宠物", + "坝", + "船员", + "长椅", + "洗澡", + "退出", + "摩托车", + "蝴蝶", + "雪橇", + "水彩颜料", + "漂流", + "纪念碑", + "闪电", + "日落", + "保险杠", + "鞋子", + "滑水", + "球鞋", + "塔", + "昆虫", + "游泳池", + "餐垫", + "飞机", + "植物", + "丛林", + "扶手", + "鸭", + "连衣裙", + "餐具", + "花瓣", + "总线", + "光明节", + "森林", + "帽子", + "谷仓", + "管道", + "浮潜", + "凉", + "炊具和烤盘", + "循环", + "秋千(座位)", + "肌肉", + "猫", + "滑板", + "星", + "脚趾", + "垃圾", + "自行车", + "卧室", + "人", + "砂", + "峡谷", + "领带", + "枝条", + "加拿大无毛猫", + "超级反派", + "夜店", + "牧场", + "模式", + "短裤", + "喜马拉雅山", + "壁", + "绑腿", + "风帆冲浪", + "DJ", + "舞蹈", + "厢式货车", + "便当", + "睡觉", + "葡萄酒", + "野餐", + "休闲", + "沙丘", + "人群", + "皮艇", + "舞厅", + "自拍", + "毕业", + "护卫舰", + "山", + "男人", + "挡风玻璃", + "小艇", + "类", + "围巾", + "公牛", + "足球", + "袋", + "巴吉度猎犬", + "拖拉机", + "游泳的", + 
"正在运行", + "跟踪", + "直升机", + "沥青", + "时钟", + "歌曲", + "泽西岛", + "楼梯", + "襟翼", + "首饰", + "桥", + "美食", + "面包", + "探洞", + "贝壳", + "花圈", + "屋顶", + "曲奇饼", + "独木舟"}; + static constexpr const float g_thres_map[RET_CATEGORY_SUM] = { + 0.23, + 0.03, + 0.10, + 0.13, + 0.03, + 0.10, + 0.06, + 0.09, + 0.09, + 0.05, + 0.01, + 0.04, + 0.01, + 0.27, + 0.05, + 0.16, + 0.01, + 0.16, + 0.04, + 0.13, + 0.09, + 0.18, + 0.10, + 0.65, + 0.08, + 0.04, + 0.08, + 0.01, + 0.05, + 0.20, + 0.01, + 0.16, + 0.10, + 0.10, + 0.10, + 0.02, + 0.24, + 0.08, + 0.10, + 0.53, + 0.07, + 0.05, + 0.07, + 0.27, + 0.02, + 0.01, + 0.71, + 0.01, + 0.06, + 0.06, + 0.03, + 0.96, + 0.03, + 0.94, + 0.05, + 0.03, + 0.14, + 0.09, + 0.03, + 0.11, + 0.50, + 0.16, + 0.07, + 0.07, + 0.06, + 0.07, + 0.08, + 0.10, + 0.29, + 0.03, + 0.05, + 0.11, + 0.03, + 0.03, + 0.03, + 0.01, + 0.11, + 0.07, + 0.03, + 0.49, + 0.12, + 0.30, + 0.10, + 0.15, + 0.02, + 0.06, + 0.17, + 0.01, + 0.04, + 0.07, + 0.06, + 0.02, + 0.19, + 0.20, + 0.14, + 0.35, + 0.15, + 0.01, + 0.10, + 0.13, + 0.43, + 0.11, + 0.12, + 0.32, + 0.01, + 0.22, + 0.51, + 0.02, + 0.04, + 0.14, + 0.04, + 0.35, + 0.35, + 0.01, + 0.54, + 0.04, + 0.02, + 0.03, + 0.02, + 0.38, + 0.13, + 0.19, + 0.06, + 0.01, + 0.02, + 0.06, + 0.03, + 0.04, + 0.01, + 0.10, + 0.01, + 0.07, + 0.07, + 0.07, + 0.33, + 0.08, + 0.04, + 0.06, + 0.07, + 0.07, + 0.11, + 0.02, + 0.32, + 0.48, + 0.14, + 0.01, + 0.01, + 0.04, + 0.05, + 0.04, + 0.16, + 0.50, + 0.11, + 0.03, + 0.04, + 0.02, + 0.55, + 0.17, + 0.13, + 0.84, + 0.18, + 0.03, + 0.16, + 0.02, + 0.06, + 0.03, + 0.11, + 0.96, + 0.36, + 0.68, + 0.02, + 0.08, + 0.02, + 0.01, + 0.03, + 0.05, + 0.14, + 0.09, + 0.06, + 0.03, + 0.20, + 0.15, + 0.62, + 0.03, + 0.10, + 0.08, + 0.02, + 0.02, + 0.06, + 0.03, + 0.04, + 0.01, + 0.10, + 0.05, + 0.04, + 0.02, + 0.07, + 0.03, + 0.32, + 0.11, + 0.03, + 0.02, + 0.03, + 0.01, + 0.03, + 0.03, + 0.25, + 0.20, + 0.19, + 0.03, + 0.11, + 0.03, + 0.02, + 0.03, + 0.15, + 0.14, + 0.06, + 0.11, + 0.03, + 0.02, + 0.02, + 0.52, + 
0.03, + 0.02, + 0.02, + 0.02, + 0.09, + 0.56, + 0.01, + 0.22, + 0.01, + 0.48, + 0.14, + 0.10, + 0.08, + 0.73, + 0.39, + 0.09, + 0.10, + 0.85, + 0.31, + 0.03, + 0.05, + 0.01, + 0.01, + 0.01, + 0.10, + 0.28, + 0.02, + 0.03, + 0.04, + 0.03, + 0.07, + 0.14, + 0.20, + 0.10, + 0.01, + 0.05, + 0.37, + 0.12, + 0.04, + 0.44, + 0.04, + 0.26, + 0.08, + 0.07, + 0.27, + 0.10, + 0.03, + 0.01, + 0.03, + 0.16, + 0.41, + 0.16, + 0.34, + 0.04, + 0.30, + 0.04, + 0.05, + 0.18, + 0.33, + 0.03, + 0.21, + 0.03, + 0.04, + 0.22, + 0.01, + 0.04, + 0.02, + 0.01, + 0.06, + 0.02, + 0.08, + 0.87, + 0.11, + 0.15, + 0.05, + 0.14, + 0.09, + 0.08, + 0.22, + 0.09, + 0.07, + 0.06, + 0.06, + 0.05, + 0.43, + 0.70, + 0.03, + 0.07, + 0.06, + 0.07, + 0.14, + 0.04, + 0.01, + 0.03, + 0.05, + 0.65, + 0.06, + 0.04, + 0.23, + 0.06, + 0.75, + 0.10, + 0.01, + 0.63, + 0.41, + 0.09, + 0.01, + 0.01, + 0.18, + 0.10, + 0.03, + 0.01, + 0.05, + 0.13, + 0.18, + 0.03, + 0.23, + 0.01, + 0.04, + 0.03, + 0.38, + 0.90, + 0.21, + 0.18, + 0.10, + 0.48, + 0.08, + 0.46, + 0.03, + 0.01, + 0.02, + 0.03, + 0.10, + 0.01, + 0.09, + 0.01, + 0.01, + 0.01, + 0.10, + 0.41, + 0.01, + 0.06, + 0.75, + 0.08, + 0.01, + 0.01, + 0.08, + 0.21, + 0.06, + 0.02, + 0.05, + 0.02, + 0.05, + 0.09, + 0.12, + 0.03, + 0.06, + 0.11, + 0.03, + 0.01, + 0.01, + 0.06, + 0.84, + 0.04, + 0.81, + 0.39, + 0.02, + 0.29, + 0.77, + 0.07, + 0.06, + 0.22, + 0.23, + 0.23, + 0.01, + 0.02, + 0.13, + 0.04, + 0.19, + 0.04, + 0.08, + 0.27, + 0.09, + 0.06, + 0.01, + 0.03, + 0.21, + 0.04, + }; +}; + +#endif //classification_hello_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/allocator.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/allocator.h new file mode 100644 index 0000000..9c8a16a --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/allocator.h @@ -0,0 +1,97 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_INCLUDE_API_ALLOCATOR_H +#define MINDSPORE_INCLUDE_API_ALLOCATOR_H + +#include +#include "include/api/types.h" + +namespace mindspore { +/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically. +class MS_API Allocator { + public: + /// \brief Destructor of MindSpore Allocator. + virtual ~Allocator() = default; + + /// \brief Method to request memory. + /// + /// \param[in] size Define the memory size to request. + virtual void *Malloc(size_t size) = 0; + + /// \brief Method to request memory. + /// + /// \param[in] weight Defines the width of memory to request + /// \param[in] height Defines the height of memory to request + /// \param[in] type Defines the data type of memory to request + virtual void *Malloc(size_t weight, size_t height, DataType type) { + return nullptr; + } + + /// \brief Method to free memory. + /// + /// \param[in] ptr Define the pointer of a certain memory. + virtual void Free(void *ptr) = 0; + + /// \brief Reference count of a certain memory. + /// + /// \param[in] ptr Define the pointer of a certain memory. + /// + /// \return Reference count of a certain memory currently. + virtual int RefCount(void *ptr) = 0; + + /// \brief Set reference count of a certain memory. + /// + /// \param[in] ptr Define the pointer of a certain memory. + /// \param[in] ref_count Define the reference count to set. 
+ /// + /// \return Reference count of a certain memory after setting. + virtual int SetRefCount(void *ptr, int ref_count) = 0; + + /// \brief Decrease the reference count of a certain memory. + /// + /// \param[in] ptr Define the pointer of a certain memory. + /// \param[in] ref_count Define the reference count to reduce. + /// + /// \return Reference count of a certain memory after decreating. + virtual int DecRefCount(void *ptr, int ref_count) = 0; + + /// \brief Increase the reference count of a certain memory. + /// + /// \param[in] ptr Define the pointer of a certain memory. + /// \param[in] ref_count Define the reference count to increase. + /// + /// \return Reference count of a certain memory after increasing. + virtual int IncRefCount(void *ptr, int ref_count) = 0; + + /// \brief Static method to create an allocator. + /// + /// \return Smart pointer of an allocator. + static std::shared_ptr Create(); + + /// \brief Prepare a certain memory. + /// + /// \param[in] ptr Define the pointer of a certain memory to prepare. + /// + /// \return Pointer of ready memory. + virtual void *Prepare(void *ptr) { return ptr; } + + protected: + // memory aligned bytes + size_t aligned_size_ = 32; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_ALLOCATOR_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/callback.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/callback.h new file mode 100644 index 0000000..d10cffe --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/callback.h @@ -0,0 +1,94 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_CALLBACK_CALLBACK_H +#define MINDSPORE_INCLUDE_API_CALLBACK_CALLBACK_H + +#include +#include +#include +#include +#include "include/api/data_type.h" +#include "include/api/dual_abi_helper.h" + +namespace mindspore { +class Model; +class ModelImpl; +class CallbackImpl; + +struct TrainCallBackData { + TrainCallBackData(bool train_mode, int epoch, int step, Model *model): train_mode_(train_mode), epoch_(epoch), + step_(step), model_(model) {} + + bool train_mode_; /**< training mode of LiteSession object */ + unsigned int epoch_; /**< the current training epoch (starts at 0) */ + unsigned int step_ = 0; /**< the current step within the epoch */ + Model *model_; /**< pointer to the Model object */ +}; + +enum CallbackRetValue : uint32_t { + kContinue = 0, + kStopTraining = 1, + kExit = 2, + kUnknownRetValue = 0xFFFFFFFF +}; + +class TrainCallBack { + public: + virtual ~TrainCallBack() = default; + + /// \brief This method is called once before the network executing + /// + /// \param[in] cb_data info about current execution + virtual void Begin(const TrainCallBackData &cb_data) {} + + /// \brief This method is called once following the network execution + /// + /// \param[in] cb_data info about current execution + virtual void End(const TrainCallBackData &cb_data) {} + + /// \brief This method is called at the beginning of each epoch + /// + /// \param[in] cb_data info about current execution + virtual void EpochBegin(const TrainCallBackData &cb_data) {} + + /// \brief This method is called after the run of each 
epoch + /// + /// \param[in] cb_data info about current execution + /// + /// \return indication if to continue in the train loop: + /// RET_CONTINUE -- continue training + /// RET_STOP_TRAINING -- stop training (e.g., due to achieved accuracy) + /// RET_EXIT -- Exit training (due to error of some sort) + virtual CallbackRetValue EpochEnd(const TrainCallBackData &cb_data) { return kContinue; } + + /// \brief This method is called at the beginning of each step + /// + /// \param[in] cb_data info about current execution + virtual void StepBegin(const TrainCallBackData &cb_data) {} + + /// \brief This method is called after each step is ran + /// + /// \param[in] cb_data info about current execution + virtual void StepEnd(const TrainCallBackData &cb_data) {} + + protected: + friend class Model; + friend class ModelImpl; + CallbackImpl* callback_impl_ = nullptr; +}; + +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CALLBACK_CALLBACK_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/ckpt_saver.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/ckpt_saver.h new file mode 100644 index 0000000..2c67d3a --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/ckpt_saver.h @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_CALLBACK_CKPT_SAVER_H +#define MINDSPORE_INCLUDE_API_CALLBACK_CKPT_SAVER_H + +#include +#include +#include +#include +#include "include/api/callback/callback.h" + +namespace mindspore { + +class CkptSaver: public TrainCallBack { + public: + explicit CkptSaver(int save_every_n, const std::string &filename_prefix); + virtual ~CkptSaver(); +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CALLBACK_CKPT_SAVER_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/loss_monitor.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/loss_monitor.h new file mode 100644 index 0000000..48684f3 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/loss_monitor.h @@ -0,0 +1,35 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_CALLBACK_LOSS_MONITOR_H +#define MINDSPORE_INCLUDE_API_CALLBACK_LOSS_MONITOR_H + +#include +#include +#include +#include "include/api/callback/callback.h" + +using GraphPoint = std::pair; + +namespace mindspore { + +class LossMonitor: public TrainCallBack { + public: + explicit LossMonitor(int print_every_n_steps = INT_MAX); + virtual ~LossMonitor(); + const std::vector &GetLossPoints(); +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CALLBACK_LOSS_MONITOR_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/lr_scheduler.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/lr_scheduler.h new file mode 100644 index 0000000..2eddc66 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/lr_scheduler.h @@ -0,0 +1,51 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_CALLBACK_LR_SCHEDULER_H +#define MINDSPORE_INCLUDE_API_CALLBACK_LR_SCHEDULER_H + +#include +#include +#include +#include +#include "include/api/callback/callback.h" + +namespace mindspore { + +constexpr int DONT_UPDATE_LR = 0; +constexpr int UPDATE_LR = 1; + +using LR_Lambda = std::function; + +/// \brief Multiply the LR by a factor of gamma every epoch +int MultiplicativeLRLambda(float *lr, int epoch, void *multiplication); + +/// \brief Multiply the LR by a factor of gamma every step_size +int StepLRLambda(float *lr, int epoch, void *step_size); +struct StepLRLambda { + StepLRLambda(int step, float g) : step_size(step), gamma(g) {} + + int step_size; // period of LR decay + float gamma; // LR decay factor +}; + +class LRScheduler: public TrainCallBack { + public: + explicit LRScheduler(LR_Lambda lambda_func, void *lr_cb_data = nullptr, int step = 1); + virtual ~LRScheduler(); +}; + +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CALLBACK_LR_SCHEDULER_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/time_monitor.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/time_monitor.h new file mode 100644 index 0000000..7e85784 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/time_monitor.h @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_CALLBACK_TIME_MONITOR_H +#define MINDSPORE_INCLUDE_API_CALLBACK_TIME_MONITOR_H + +#include +#include +#include +#include +#include "include/api/callback/callback.h" + +namespace mindspore { + +class TimeMonitor: public TrainCallBack { + public: + virtual ~TimeMonitor() = default; + void EpochBegin(const TrainCallBackData &cb_data) override; + CallbackRetValue EpochEnd(const TrainCallBackData &cb_data) override; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CALLBACK_TIME_MONITOR_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/train_accuracy.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/train_accuracy.h new file mode 100644 index 0000000..0b31cfb --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/callback/train_accuracy.h @@ -0,0 +1,41 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_CALLBACK_TRAIN_ACCURACY_H +#define MINDSPORE_INCLUDE_API_CALLBACK_TRAIN_ACCURACY_H + +#include +#include +#include +#include +#include +#include "include/api/callback/callback.h" +#include "include/api/metrics/accuracy.h" + +using GraphPoint = std::pair; + +namespace mindspore { + +class TrainAccuracy: public TrainCallBack { + public: + explicit TrainAccuracy(int print_every_n = INT_MAX, + int accuracy_metrics = METRICS_CLASSIFICATION, + const std::vector &input_indexes = {1}, + const std::vector &output_indexes = {0}); + virtual ~TrainAccuracy(); + const std::vector &GetAccuracyPoints(); +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CALLBACK_TRAIN_ACCURACY_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/cell.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/cell.h new file mode 100644 index 0000000..8f33aa5 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/cell.h @@ -0,0 +1,135 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_CELL_H +#define MINDSPORE_INCLUDE_API_CELL_H +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" +#include "include/api/graph.h" + +namespace mindspore { +class InputAndOutput; +class Context; +using Input = InputAndOutput; +using Output = InputAndOutput; + +class MS_API CellBase { + public: + CellBase() = default; + virtual ~CellBase() = default; + virtual std::vector Construct(const std::vector &inputs) { return {}; } + virtual std::shared_ptr Clone() const = 0; + virtual Status Run(const std::vector &inputs, std::vector *outputs) { return kSuccess; } + std::vector operator()(const std::vector &inputs) const; +}; + +template +class MS_API Cell : public CellBase { + public: + virtual ~Cell() = default; + std::shared_ptr Clone() const override { return std::make_shared(static_cast(*this)); } +}; + +class MS_API ParameterCell final : public Cell { + public: + ParameterCell() = default; + ~ParameterCell() override = default; + + ParameterCell(const ParameterCell &); + ParameterCell &operator=(const ParameterCell &); + + ParameterCell(ParameterCell &&); + ParameterCell &operator=(ParameterCell &&); + + explicit ParameterCell(const MSTensor &); + ParameterCell &operator=(const MSTensor &); + + explicit ParameterCell(MSTensor &&); + ParameterCell &operator=(MSTensor &&); + + MSTensor GetTensor() const { return tensor_; } + + private: + MSTensor tensor_; +}; + +class MS_API OpCellBase : public CellBase { + public: + explicit OpCellBase(const std::string &name) : name_(name) {} + ~OpCellBase() override = default; + const std::string &GetOpType() const { return name_; } + + protected: + std::string name_; +}; + +template +class MS_API OpCell : public OpCellBase, public std::enable_shared_from_this { + public: + explicit OpCell(const std::string &name) : OpCellBase(name) {} + ~OpCell() override = default; + std::shared_ptr Clone() const override { return 
std::make_shared(static_cast(*this)); } +}; + +class MS_API GraphCell final : public Cell { + public: + class GraphImpl; + + GraphCell() = default; + ~GraphCell() override = default; + + explicit GraphCell(const Graph &); + explicit GraphCell(Graph &&); + explicit GraphCell(const std::shared_ptr &); + + void SetContext(const std::shared_ptr &context); + const std::shared_ptr &GetGraph() const { return graph_; } + Status Run(const std::vector &inputs, std::vector *outputs) override; + std::vector GetInputs(); + std::vector GetOutputs(); + Status Load(uint32_t device_id); + + private: + friend class Model; + + std::shared_ptr graph_; + std::shared_ptr executor_; +}; + +class MS_API InputAndOutput { + public: + InputAndOutput(); + ~InputAndOutput() = default; + + // no explicit + InputAndOutput(const MSTensor &); // NOLINT(runtime/explicit) + InputAndOutput(MSTensor &&); // NOLINT(runtime/explicit) + + InputAndOutput(const std::shared_ptr &, const std::vector &, int32_t index); + + int32_t GetIndex() const { return index_; } + void SetIndex(int32_t index) { index_ = index; } + + private: + std::shared_ptr cell_; + std::vector prev_; + int32_t index_; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CELL_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/cfg.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/cfg.h new file mode 100644 index 0000000..f4438e1 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/cfg.h @@ -0,0 +1,53 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_CFG_H +#define MINDSPORE_INCLUDE_API_CFG_H + +#include +#include +#include +#include +#include "include/api/data_type.h" +#include "include/api/dual_abi_helper.h" + +namespace mindspore { + +class MixPrecisionCfg { + public: + MixPrecisionCfg() { + this->dynamic_loss_scale_ = false; + this->loss_scale_ = 128.0f; + this->num_of_not_nan_iter_th_ = 1000; + } + + bool dynamic_loss_scale_ = false; /**< Enable\disable dynamic loss scale during mix precision training */ + float loss_scale_; /**< Initial loss scale factor */ + uint32_t num_of_not_nan_iter_th_; /**< a threshold for modifying loss scale when dynamic loss scale is enabled */ + bool is_raw_mix_precision_ = false; /**< Is mix precision model export from mindspore */ +}; + +class TrainCfg { + public: + TrainCfg() { this->loss_name_ = "_loss_fn"; } + + OptimizationLevel optimization_level_ = kO0; + std::string loss_name_; /**< Set part of the name that identify a loss kernel */ + MixPrecisionCfg mix_precision_cfg_; /**< Mix precision configuration */ + bool accumulate_gradients_ = false; +}; + +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CFG_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/context.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/context.h new file mode 100644 index 0000000..10f5eea --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/context.h @@ -0,0 +1,442 @@ +/** + * Copyright 
2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_CONTEXT_H +#define MINDSPORE_INCLUDE_API_CONTEXT_H + +#include +#include +#include +#include +#include "include/api/types.h" +#include "include/api/dual_abi_helper.h" + +namespace mindspore { +enum DeviceType { + kCPU = 0, + kGPU, + kKirinNPU, + kAscend910, + kAscend310, + // add new type here + kInvalidDeviceType = 100, +}; + +class Allocator; +class Delegate; +class DeviceInfoContext; + +/// \brief Context is used to store environment variables during execution. +class MS_API Context { + public: + Context(); + ~Context() = default; + + /// \brief Set the number of threads at runtime. Only valid for Lite. + /// + /// \param[in] thread_num the number of threads at runtime. + void SetThreadNum(int32_t thread_num); + + /// \brief Get the current thread number setting. Only valid for Lite. + /// + /// \return The current thread number setting. + int32_t GetThreadNum() const; + + /// \brief Set the thread affinity to CPU cores. Only valid for Lite. + /// + /// \param[in] mode: 0: no affinities, 1: big cores first, 2: little cores first + void SetThreadAffinity(int mode); + + /// \brief Get the thread affinity of CPU cores. Only valid for Lite. + /// + /// \return Thread affinity to CPU cores. 0: no affinities, 1: big cores first, 2: little cores first + int GetThreadAffinityMode() const; + + /// \brief Set the thread lists to CPU cores. 
Only valid for Lite. + /// + /// \note If core_list and mode are set by SetThreadAffinity at the same time, the core_list is effective, but the + /// mode is not effective. + /// + /// \param[in] core_list: a vector of thread core lists. + void SetThreadAffinity(const std::vector &core_list); + + /// \brief Get the thread lists of CPU cores. Only valid for Lite. + /// + /// \return core_list: a vector of thread core lists. + std::vector GetThreadAffinityCoreList() const; + + /// \brief Set the status whether to perform model inference or training in parallel. Only valid for Lite. + /// + /// \param[in] is_parallel: true, parallel; false, not in parallel. + void SetEnableParallel(bool is_parallel); + + /// \brief Get the status whether to perform model inference or training in parallel. Only valid for Lite. + /// + /// \return Bool value that indicates whether in parallel. + bool GetEnableParallel() const; + + /// \brief Set Delegate to access third-party AI framework. Only valid for Lite. + /// + /// \param[in] Pointer to the custom delegate. + void SetDelegate(const std::shared_ptr &delegate); + + /// \brief Get the delegate of the third-party AI framework. Only valid for Lite. + /// + /// \return Pointer to the custom delegate. + std::shared_ptr GetDelegate() const; + + /// \brief Get a mutable reference of DeviceInfoContext vector in this context. Only MindSpore Lite supports + /// heterogeneous scenarios with multiple members in the vector. + /// + /// \return Mutable reference of DeviceInfoContext vector in this context. + std::vector> &MutableDeviceInfo(); + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief DeviceInfoContext defines different device contexts. +class MS_API DeviceInfoContext : public std::enable_shared_from_this { + public: + struct Data; + + DeviceInfoContext(); + virtual ~DeviceInfoContext() = default; + + /// \brief Get the type of this DeviceInfoContext. + /// + /// \return Type of this DeviceInfoContext. 
+ virtual enum DeviceType GetDeviceType() const = 0; + + /// \brief A similar function to RTTI is provided when the -fno-rtti compilation option is turned on, which converts + /// DeviceInfoContext to a shared pointer of type T, and returns nullptr if the conversion fails. + /// + /// \param T Type + /// \return A pointer of type T after conversion. If the conversion fails, it will be nullptr. + template + std::shared_ptr Cast() { + static_assert(std::is_base_of::value, "Wrong cast type."); + if (GetDeviceType() != T().GetDeviceType()) { + return nullptr; + } + + return std::static_pointer_cast(shared_from_this()); + } + /// \brief obtain provider's name + /// + /// \return provider's name. + std::string GetProvider() const; + /// \brief set provider's name. + /// + /// \param[in] provider define the provider's name. + + void SetProvider(const std::string &provider); + /// \brief obtain provider's device type. + /// + /// \return provider's device type. + + std::string GetProviderDevice() const; + /// \brief set provider's device type. + /// + /// \param[in] device define the provider's device type.EG: CPU. + void SetProviderDevice(const std::string &device); + + /// \brief set memory allocator. + /// + /// \param[in] allocator define the memory allocator which can be defined by user. + void SetAllocator(const std::shared_ptr &allocator); + + /// \brief obtain memory allocator. + /// + /// \return memory allocator. + std::shared_ptr GetAllocator() const; + + protected: + std::shared_ptr data_; +}; + +/// \brief Derived from DeviceInfoContext, The configuration of the model running on the CPU. This option is only valid +/// for MindSpore Lite. +class MS_API CPUDeviceInfo : public DeviceInfoContext { + public: + /// \brief Get the type of this DeviceInfoContext. + /// + /// \return Type of this DeviceInfoContext. 
+ enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; }; + + /// \brief Set enables to perform the float16 inference + /// + /// \param[in] is_fp16 Enable float16 inference or not. + void SetEnableFP16(bool is_fp16); + + /// \brief Get enables to perform the float16 inference + /// + /// \return Whether enable float16 inference. + bool GetEnableFP16() const; +}; + +/// \brief Derived from DeviceInfoContext, The configuration of the model running on the NPU. This option is only valid +/// for MindSpore Lite. +class MS_API KirinNPUDeviceInfo : public DeviceInfoContext { + public: + /// \brief Get the type of this DeviceInfoContext. + /// + /// \return Type of this DeviceInfoContext. + enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; }; + + /// \brief Set the NPU frequency. + /// + /// \param[in] frequency Can be set to 1 (low power consumption), 2 (balanced), 3 (high performance), 4 (extreme + /// performance), default as 3. + void SetFrequency(int frequency); + + /// \brief Get the NPU frequency. + /// + /// \return NPU frequency + int GetFrequency() const; +}; + +/// \brief Derived from DeviceInfoContext, The configuration of the model running on the GPU. +class MS_API GPUDeviceInfo : public DeviceInfoContext { + public: + /// \brief Get the type of this DeviceInfoContext. + /// + /// \return Type of this DeviceInfoContext. + enum DeviceType GetDeviceType() const override { return DeviceType::kGPU; }; + + /// \brief Set device id. + /// + /// \param[in] device_id The device id. + void SetDeviceID(uint32_t device_id); + + /// \brief Get the device id. + /// + /// \return The device id. + uint32_t GetDeviceID() const; + + /// \brief Set the precision mode. + /// + /// \param[in] precision_mode Optional "origin", "fp16". "origin" is set as default. + inline void SetPrecisionMode(const std::string &precision_mode); + + /// \brief Get the precision mode. + /// + /// \return The precision mode. 
+ inline std::string GetPrecisionMode() const; + + /// \brief Set enables to perform the float16 inference + /// + /// \param[in] is_fp16 Enable float16 inference or not. + void SetEnableFP16(bool is_fp16); + + /// \brief Get enables to perform the float16 inference + /// + /// \return Whether enable float16 inference. + bool GetEnableFP16() const; + + private: + void SetPrecisionMode(const std::vector &precision_mode); + std::vector GetPrecisionModeChar() const; +}; + +void GPUDeviceInfo::SetPrecisionMode(const std::string &precision_mode) { + SetPrecisionMode(StringToChar(precision_mode)); +} +std::string GPUDeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); } + +/// \brief Derived from DeviceInfoContext, The configuration of the model running on the Ascend910. This option is +/// invalid for MindSpore Lite. +class MS_API Ascend910DeviceInfo : public DeviceInfoContext { + public: + /// \brief Get the type of this DeviceInfoContext. + /// + /// \return Type of this DeviceInfoContext. + enum DeviceType GetDeviceType() const override { return DeviceType::kAscend910; }; + + /// \brief Set device id. + /// + /// \param[in] device_id The device id. + void SetDeviceID(uint32_t device_id); + + /// \brief Get the device id. + /// + /// \return The device id. + uint32_t GetDeviceID() const; +}; + +/// \brief Derived from DeviceInfoContext, The configuration of the model running on the Ascend310. This option is +/// invalid for MindSpore Lite. +class MS_API Ascend310DeviceInfo : public DeviceInfoContext { + public: + /// \brief Get the type of this DeviceInfoContext. + /// + /// \return Type of this DeviceInfoContext. + enum DeviceType GetDeviceType() const override { return DeviceType::kAscend310; }; + + /// \brief Set device id. + /// + /// \param[in] device_id The device id. + void SetDeviceID(uint32_t device_id); + + /// \brief Get the device id. + /// + /// \return The device id. 
+ uint32_t GetDeviceID() const; + + /// \brief Set AIPP configuration file path. + /// + /// \param[in] cfg_path AIPP configuration file path. + inline void SetInsertOpConfigPath(const std::string &cfg_path); + + /// \brief Get AIPP configuration file path. + /// + /// \return AIPP configuration file path. + inline std::string GetInsertOpConfigPath() const; + + /// \brief Set format of model inputs. + /// + /// \param[in] format Optional "NCHW", "NHWC", etc. + inline void SetInputFormat(const std::string &format); + + /// \brief Get format of model inputs. + /// + /// \return The format of model inputs. + inline std::string GetInputFormat() const; + + /// \brief Set shape of model inputs. + /// + /// \param[in] shape e.g. "input_op_name1: 1,2,3,4;input_op_name2: 4,3,2,1". + inline void SetInputShape(const std::string &shape); + + /// \brief Get shape of model inputs. + /// + /// \return The shape of model inputs. + inline std::string GetInputShape() const; + + /// \brief Set shape of model inputs. + /// + /// \param[in] shape e.g. {{1, {1,2,3,4}}, {2, {4,3,2,1}}} means the first input shape 1,2,3,4 and the second input + /// shape 4,3,2,1. + void SetInputShapeMap(const std::map> &shape); + + /// \brief Get shape of model inputs. + /// + /// \return The shape of model inputs. + std::map> GetInputShapeMap() const; + + void SetDynamicBatchSize(const std::vector &dynamic_batch_size); + inline std::string GetDynamicBatchSize() const; + + /// \brief Set type of model outputs. + /// + /// \param[in] output_type FP32, UINT8 or FP16, default as FP32. + void SetOutputType(enum DataType output_type); + + /// \brief Get type of model outputs. + /// + /// \return The set type of model outputs. + enum DataType GetOutputType() const; + + /// \brief Set precision mode of model. 
+ /// + /// \param[in] precision_mode Optional "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" and + /// "allow_mix_precision", "force_fp16" is set as default + inline void SetPrecisionMode(const std::string &precision_mode); + + /// \brief Get precision mode of model. + /// + /// \return The set type of model outputs + inline std::string GetPrecisionMode() const; + + /// \brief Set op select implementation mode. + /// + /// \param[in] op_select_impl_mode Optional "high_performance" and "high_precision", "high_performance" is set as + /// default. + inline void SetOpSelectImplMode(const std::string &op_select_impl_mode); + + /// \brief Get op select implementation mode. + /// + /// \return The set op select implementation mode. + inline std::string GetOpSelectImplMode() const; + + inline void SetFusionSwitchConfigPath(const std::string &cfg_path); + inline std::string GetFusionSwitchConfigPath() const; + + // Optional "l1_optimize", "l2_optimize", "off_optimize" or "l1_and_l2_optimize", default as "l2_optimize" + inline void SetBufferOptimizeMode(const std::string &buffer_optimize_mode); + inline std::string GetBufferOptimizeMode() const; + + private: + void SetInsertOpConfigPath(const std::vector &cfg_path); + std::vector GetInsertOpConfigPathChar() const; + + void SetInputFormat(const std::vector &format); + std::vector GetInputFormatChar() const; + + void SetInputShape(const std::vector &shape); + std::vector GetInputShapeChar() const; + + std::vector GetDynamicBatchSizeChar() const; + + void SetPrecisionMode(const std::vector &precision_mode); + std::vector GetPrecisionModeChar() const; + + void SetOpSelectImplMode(const std::vector &op_select_impl_mode); + std::vector GetOpSelectImplModeChar() const; + + void SetFusionSwitchConfigPath(const std::vector &cfg_path); + std::vector GetFusionSwitchConfigPathChar() const; + + void SetBufferOptimizeMode(const std::vector &buffer_optimize_mode); + std::vector GetBufferOptimizeModeChar() const; +}; + +void 
Ascend310DeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) { + SetInsertOpConfigPath(StringToChar(cfg_path)); +} +std::string Ascend310DeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); } + +void Ascend310DeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); } +std::string Ascend310DeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); } + +void Ascend310DeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); } +std::string Ascend310DeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); } + +std::string Ascend310DeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); } + +void Ascend310DeviceInfo::SetPrecisionMode(const std::string &precision_mode) { + SetPrecisionMode(StringToChar(precision_mode)); +} +std::string Ascend310DeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); } + +void Ascend310DeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) { + SetOpSelectImplMode(StringToChar(op_select_impl_mode)); +} +std::string Ascend310DeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); } + +void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) { + SetFusionSwitchConfigPath(StringToChar(cfg_path)); +} +std::string Ascend310DeviceInfo::GetFusionSwitchConfigPath() const { + return CharToString(GetFusionSwitchConfigPathChar()); +} + +void Ascend310DeviceInfo::SetBufferOptimizeMode(const std::string &buffer_optimize_mode) { + SetBufferOptimizeMode(StringToChar(buffer_optimize_mode)); +} +std::string Ascend310DeviceInfo::GetBufferOptimizeMode() const { return CharToString(GetBufferOptimizeModeChar()); } +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CONTEXT_H diff --git 
a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/data_type.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/data_type.h new file mode 100644 index 0000000..61eb1d5 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/data_type.h @@ -0,0 +1,44 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_ +#define MINDSPORE_INCLUDE_API_DATA_TYPE_H_ + +namespace mindspore { +enum class DataType : int { + kTypeUnknown = 0, + kObjectTypeString = 12, + kObjectTypeList = 13, + kObjectTypeTuple = 14, + kObjectTypeTensorType = 17, + kNumberTypeBegin = 29, + kNumberTypeBool = 30, + kNumberTypeInt8 = 32, + kNumberTypeInt16 = 33, + kNumberTypeInt32 = 34, + kNumberTypeInt64 = 35, + kNumberTypeUInt8 = 37, + kNumberTypeUInt16 = 38, + kNumberTypeUInt32 = 39, + kNumberTypeUInt64 = 40, + kNumberTypeFloat16 = 42, + kNumberTypeFloat32 = 43, + kNumberTypeFloat64 = 44, + kNumberTypeEnd = 46, + // add new enum here + kInvalidType = INT32_MAX, +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_DATA_TYPE_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/delegate.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/delegate.h new file mode 100644 index 0000000..4c1b28f --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/delegate.h @@ -0,0 +1,117 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_INCLUDE_API_DELEGATE_H +#define MINDSPORE_INCLUDE_API_DELEGATE_H + +#include +#include +#include +#include "schema/model_generated.h" +#include "include/api/kernel.h" +#include "include/api/status.h" + +namespace mindspore { +typedef enum { + SCHEMA_INVALID = -1, /**< invalid version */ + SCHEMA_CUR, /**< current version for ms model defined in model.fbs*/ + SCHEMA_V0, /**< previous version for ms model defined in model_v0.fbs*/ +} SchemaVersion; + +using KernelIter = std::vector::iterator; +class MS_API DelegateModel { + public: + /// \brief Constructor of MindSpore Lite DelegateModel. + DelegateModel(std::vector *kernels, const std::vector &inputs, + const std::vector &outputs, + const std::map &primitives, SchemaVersion version) + : kernels_(kernels), inputs_(inputs), outputs_(outputs), primitives_(primitives), version_(version) {} + + /// \brief Destructor of MindSpore Lite DelegateModel. + ~DelegateModel() = default; + + /// \brief Get Primitive of kernel::Kernel. + /// + /// \param[in] a kernel in DelegateModel kernels vector. + /// + /// \return The schema::Primitive of The kernel. + const schema::Primitive *GetPrimitive(kernel::Kernel *kernel) const; + + /// \brief Get the begin iterator of the DelegateModel kernels vector. + /// + /// \return The begin iterator of the DelegateModel kernels vector. + KernelIter BeginKernelIterator(); + + /// \brief Get the end iterator of the DelegateModel kernels vector. + /// + /// \return The end iterator of the DelegateModel kernels vector. + KernelIter EndKernelIterator(); + + /// \brief Replace the continuous kernel supported by the delegate with a delegate graph kernel. + /// + /// \param[in] from Define the begin iterator of continuous kernel supported by the delegate. + /// \param[in] end Define the end iterator of continuous kernel supported by the delegate. + /// + /// \return The next iterator after graph_kernel, point to the next kernel that is not visited. 
+ KernelIter Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel); + + /// \brief Get the input tensors of DelegateModel. + /// + /// \return The input tensor vector of DelegateModel. + const std::vector &inputs() { return this->inputs_; } + + /// \brief Get the output tensors of DelegateModel. + /// + /// \return The ioutput tensor vector of DelegateModel. + const std::vector &outputs() { return this->outputs_; } + + /// \brief Get the ms model version. + /// + /// \return The schema version for the primitives map. + const SchemaVersion GetVersion() { return version_; } + + protected: + std::vector *kernels_; + const std::vector &inputs_; + const std::vector &outputs_; + const std::map &primitives_; + SchemaVersion version_; +}; + +class MS_API Delegate { + public: + /// \brief Constructor of MindSpore Lite Delegate. + Delegate() = default; + + /// \brief Destructor of MindSpore Lite Delegate. + virtual ~Delegate() = default; + + /// \brief Init delegate. + /// + /// \note Init willed be called in Model::Build. + /// + /// \return Status. If Status is kLiteNotSupport, the program will return to the MindSpore Lite inner inference. + virtual Status Init() = 0; + + /// \brief Build delegate graph for MindSpore Lite model. + /// + /// \note Build willed be called in Model::Build. + /// + /// \param[in] model Define the delegate model to be built. 
+ virtual Status Build(DelegateModel *model) = 0; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_DELEGATE_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/dual_abi_helper.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/dual_abi_helper.h new file mode 100644 index 0000000..862830e --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/dual_abi_helper.h @@ -0,0 +1,164 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ +#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace mindspore { +inline std::vector StringToChar(const std::string &s) { return std::vector(s.begin(), s.end()); } + +inline std::string CharToString(const std::vector &c) { return std::string(c.begin(), c.end()); } + +inline std::pair, int32_t> PairStringToChar(const std::pair &s) { + return std::pair, int32_t>(std::vector(s.first.begin(), s.first.end()), s.second); +} + +inline std::pair PairCharToString(const std::pair, int32_t> &c) { + return std::pair(std::string(c.first.begin(), c.first.end()), c.second); +} + +inline std::vector> VectorStringToChar(const std::vector &s) { + std::vector> ret; + std::transform(s.begin(), s.end(), std::back_inserter(ret), + [](auto str) { return std::vector(str.begin(), str.end()); }); + return ret; +} + +inline std::vector VectorCharToString(const std::vector> &c) { + std::vector ret; + std::transform(c.begin(), c.end(), std::back_inserter(ret), + [](auto ch) { return std::string(ch.begin(), ch.end()); }); + return ret; +} + +inline std::set> SetStringToChar(const std::set &s) { + std::set> ret; + std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), + [](auto str) { return std::vector(str.begin(), str.end()); }); + return ret; +} + +inline std::set SetCharToString(const std::set> &c) { + std::set ret; + std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), + [](auto ch) { return std::string(ch.begin(), ch.end()); }); + return ret; +} + +inline std::map, int32_t> MapStringToChar(const std::map &s) { + std::map, int32_t> ret; + std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) { + return std::pair, int32_t>(std::vector(str.first.begin(), str.first.end()), str.second); + }); + return ret; +} + +inline std::map MapCharToString(const std::map, int32_t> &c) { + std::map 
ret; + std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) { + return std::pair(std::string(ch.first.begin(), ch.first.end()), ch.second); + }); + return ret; +} + +inline std::map, std::vector> UnorderedMapStringToChar( + const std::unordered_map &s) { + std::map, std::vector> ret; + std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) { + return std::pair, std::vector>(std::vector(str.first.begin(), str.first.end()), + std::vector(str.second.begin(), str.second.end())); + }); + return ret; +} + +inline std::unordered_map UnorderedMapCharToString( + const std::map, std::vector> &c) { + std::unordered_map ret; + std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) { + return std::pair(std::string(ch.first.begin(), ch.first.end()), + std::string(ch.second.begin(), ch.second.end())); + }); + return ret; +} + +inline std::vector, std::vector>> ClassIndexStringToChar( + const std::vector>> &s) { + std::vector, std::vector>> ret; + std::transform(s.begin(), s.end(), std::back_inserter(ret), [](auto str) { + return std::pair, std::vector>(std::vector(str.first.begin(), str.first.end()), + str.second); + }); + return ret; +} + +inline std::vector>> ClassIndexCharToString( + const std::vector, std::vector>> &c) { + std::vector>> ret; + std::transform(c.begin(), c.end(), std::back_inserter(ret), [](auto ch) { + return std::pair>(std::string(ch.first.begin(), ch.first.end()), ch.second); + }); + return ret; +} + +inline std::vector, int64_t>> PairStringInt64ToPairCharInt64( + const std::vector> &s) { + std::vector, int64_t>> ret; + std::transform(s.begin(), s.end(), std::back_inserter(ret), [](auto str) { + return std::pair, int64_t>(std::vector(str.first.begin(), str.first.end()), str.second); + }); + return ret; +} + +template +inline std::map, T> PadInfoStringToChar(const std::map &s_pad_info) { + std::map, T> ret; + std::transform(s_pad_info.begin(), s_pad_info.end(), std::inserter(ret, 
ret.begin()), [](auto str) { + return std::pair, T>(std::vector(str.first.begin(), str.first.end()), str.second); + }); + return ret; +} + +template +inline std::map PadInfoCharToString(const std::map, T> &c_pad_info) { + std::map ret; + std::transform(c_pad_info.begin(), c_pad_info.end(), std::inserter(ret, ret.begin()), [](auto ch) { + return std::pair(std::string(ch.first.begin(), ch.first.end()), ch.second); + }); + return ret; +} + +template +inline void TensorMapCharToString(const std::map, T> *c, std::unordered_map *s) { + if (c == nullptr || s == nullptr) { + return; + } + for (auto ch : *c) { + auto key = std::string(ch.first.begin(), ch.first.end()); + auto val = ch.second; + s->insert(std::pair(key, val)); + } +} +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/format.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/format.h new file mode 100644 index 0000000..782760e --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/format.h @@ -0,0 +1,46 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). + * + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_INCLUDE_API_FORMAT_H +#define MINDSPORE_INCLUDE_API_FORMAT_H + +#include + +namespace mindspore { +enum Format : int64_t { + NCHW = 0, + NHWC = 1, + NHWC4 = 2, + HWKC = 3, + HWCK = 4, + KCHW = 5, + CKHW = 6, + KHWC = 7, + CHWK = 8, + HW = 9, + HW4 = 10, + NC = 11, + NC4 = 12, + NC4HW4 = 13, + NUM_OF_FORMAT = 14, + NCDHW = 15, + NWC = 16, + NCW = 17 +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_FORMAT_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/graph.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/graph.h new file mode 100644 index 0000000..f25a621 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/graph.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_GRAPH_H +#define MINDSPORE_INCLUDE_API_GRAPH_H + +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" + +namespace mindspore { +class MS_API Graph { + public: + class GraphData; + Graph(); + explicit Graph(const std::shared_ptr &graph_data); + explicit Graph(std::shared_ptr &&graph_data); + explicit Graph(std::nullptr_t); + ~Graph(); + + enum ModelType ModelType() const; + bool operator==(std::nullptr_t) const; + bool operator!=(std::nullptr_t) const; + + private: + friend class GraphCell; + friend class ModelImpl; + std::shared_ptr graph_data_; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_GRAPH_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/kernel.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/kernel.h new file mode 100644 index 0000000..6ec62de --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/kernel.h @@ -0,0 +1,115 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_INCLUDE_API_KERNEL_H +#define MINDSPORE_INCLUDE_API_KERNEL_H +#include +#include +#include +#include "schema/model_generated.h" +#include "include/api/types.h" +#include "include/api/context.h" + +namespace mindspore::kernel { +/// \brief The Kernel class is used to define a MindSpore Kernel. +class Kernel { + public: + Kernel() = default; + /// \brief Constructor. + /// + /// \param[in] inputs define the input tensors for kernel. + /// \param[in] outputs define the output tensors for kernel. + /// \param[in] primitive define the primitive of kernel generated by flatbuffers. + /// \param[in] ctx define the context for kernel. + Kernel(const std::vector &inputs, const std::vector &outputs, + const schema::Primitive *primitive, const mindspore::Context *ctx) + : context_(ctx), inputs_(std::move(inputs)), outputs_(std::move(outputs)), primitive_(primitive) { + if (primitive != nullptr) { + type_ = primitive->value_type(); + } + } + /// \brief Destructor. + virtual ~Kernel() = default; + /// \brief prepare for executing kernel. + /// + /// \return result code. + virtual int Prepare() = 0; + /// \brief execute the kernel. + /// + /// \return result code. + virtual int Execute() = 0; + /// \brief resize the kernel input shape, memory need to refresh. + /// + /// \return result code. + virtual int ReSize() = 0; + /// \brief set kernel's input tensors. + /// + /// \param[in] in_tensors define the input tensors. + virtual void set_inputs(const std::vector &in_tensors) { this->inputs_ = in_tensors; } + /// \brief set kernel's input tensor. + /// + /// \param[in] in_tensor define the input tensor. + /// \param[in] index define the index of the input tensor. + virtual void set_input(mindspore::MSTensor in_tensor, int index) { this->inputs_[index] = in_tensor; } + /// \brief set kernel's output tensors. + /// + /// \param[in] out_tensors define the output tensors. 
+ virtual void set_outputs(const std::vector &out_tensors) { this->outputs_ = out_tensors; } + /// \brief set kernel's output tensor. + /// + /// \param[in] out_tensor define the output tensor. + /// \param[in] index define the index of the output tensor. + virtual void set_output(mindspore::MSTensor out_tensor, int index) { this->outputs_[index] = out_tensor; } + /// \brief obtain kernel's input tensors. + /// + /// \return input tensors. + virtual const std::vector &inputs() { return this->inputs_; } + /// \brief obtain kernel's output tensors. + /// + /// \return output tensors. + virtual const std::vector &outputs() { return this->outputs_; } + /// \brief obtain kernel's name. + /// + /// \return kernel's name. + std::string name() const { return this->name_; } + /// \brief set kernel's name. + /// + /// \param[in] name define the kernel's name. + void set_name(const std::string &name) { this->name_ = name; } + /// \brief obtain kernel's context. + /// + /// \return kernel's context. + const mindspore::Context *context() const { return this->context_; } + /// \brief obtain kernel's type. + /// + /// \return kernel's type. + virtual schema::PrimitiveType type() const { return type_; } + /// \brief obtain the primitive of kernel generated by flatbuffers. + /// + /// \return the primitive of kernel generated by flatbuffers. 
+ const schema::Primitive *primitive() const { return this->primitive_; } + + protected: + std::string name_; + const mindspore::Context *context_ = nullptr; + std::vector inputs_; + std::vector outputs_; + schema::PrimitiveType type_ = schema::PrimitiveType_NONE; + const schema::Primitive *primitive_ = nullptr; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_INCLUDE_API_KERNEL_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/metrics/accuracy.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/metrics/accuracy.h new file mode 100644 index 0000000..1d1732f --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/metrics/accuracy.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_METRICS_ACCURACY_H +#define MINDSPORE_INCLUDE_API_METRICS_ACCURACY_H +#include +#include "include/api/metrics/metrics.h" + +namespace mindspore { + +constexpr int METRICS_CLASSIFICATION = 0; +constexpr int METRICS_MULTILABEL = 1; + +class AccuracyMetrics : public Metrics { + public: + explicit AccuracyMetrics(int accuracy_metrics = METRICS_CLASSIFICATION, const std::vector &input_indexes = {1}, + const std::vector &output_indexes = {0}); + virtual ~AccuracyMetrics(); + void Clear() override; + float Eval() override; +}; + +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_METRICS_ACCURACY_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/metrics/metrics.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/metrics/metrics.h new file mode 100644 index 0000000..7154332 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/metrics/metrics.h @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_METRICS_METRICS_H +#define MINDSPORE_INCLUDE_API_METRICS_METRICS_H +#include +#include "include/api/model.h" + +namespace mindspore { + +class MetricsImpl; +class ModelImpl; +class MSTensor; + +class Metrics { + public: + virtual ~Metrics() = default; + virtual void Clear() {} + virtual float Eval() { return 0.0; } + virtual void Update(std::vector inputs, std::vector outputs) {} + protected: + friend class Model; + friend class ModelImpl; + MetricsImpl* metrics_impl_; +}; + +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_METRICS_METRICS_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/model.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/model.h new file mode 100644 index 0000000..ac1f9e2 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/model.h @@ -0,0 +1,235 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_MODEL_H +#define MINDSPORE_INCLUDE_API_MODEL_H + +#include +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" +#include "include/api/graph.h" +#include "include/api/context.h" +#include "include/api/callback/callback.h" +#include "include/api/cell.h" +#include "include/api/cfg.h" +#include "include/api/dual_abi_helper.h" + +namespace mindspore { +class ModelImpl; +class Metrics; + +namespace dataset { +class Dataset; +} // namespace dataset +/// \brief The Model class is used to define a MindSpore model, facilitating computational graph management. +class MS_API Model { + public: + Model(); + ~Model(); + Model(const Model &) = delete; + void operator=(const Model &) = delete; + + /// \brief Builds a model so that it can run on a device. + /// + /// \param[in] graph GraphCell is a derivative of Cell. Cell is not available currently. GraphCell can be constructed + /// from Graph, for example, model.Build(GraphCell(graph), context). + /// \param[in] model_context A context used to store options during execution. + /// \param[in] train_cfg A config used by training. + /// + /// \return Status. + Status Build(GraphCell graph, const std::shared_ptr &model_context = nullptr, + const std::shared_ptr &train_cfg = nullptr); + + /// \brief Resizes the shapes of inputs. + /// + /// \param[in] inputs A vector that includes all input tensors in order. + /// \param[in] dims Defines the new shapes of inputs, should be consistent with inputs. + /// + /// \return Status. + Status Resize(const std::vector &inputs, const std::vector> &dims); + + /// \brief Inference model. + /// + /// \param[in] inputs A vector where model inputs are arranged in sequence. + /// \param[out] outputs Which is a pointer to a vector. The model outputs are filled in the container in sequence. + /// \param[in] before CallBack before predict. + /// \param[in] after CallBack after predict. + /// + /// \return Status. 
+ Status Predict(const std::vector &inputs, std::vector *outputs, + const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr); + + /// \brief Inference model with preprocess in model. + /// + /// \param[in] inputs A vector where model inputs are arranged in sequence. + /// \param[out] outputs Which is a pointer to a vector. The model outputs are filled in the container in sequence. + /// \param[in] whether to use data preprocess in model. + /// \param[in] before CallBack before predict. + /// \param[in] after CallBack after predict. + /// + /// \return Status. + Status PredictWithPreprocess(const std::vector &inputs, std::vector *outputs, + const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr); + + /// \brief Apply data preprocess if it exits in model. + /// + /// \param[in] inputs A vector where model inputs are arranged in sequence. + /// \param[out] outputs Which is a pointer to a vector. The model outputs are filled in the container in sequence. + /// + /// \return Status. + Status Preprocess(const std::vector &inputs, std::vector *outputs); + + /// \brief Check if data preprocess exists in model. + /// \return true if data preprocess exists. + bool HasPreprocess(); + + /// \brief Load config file. + /// + /// \param[in] config_path config file path. + /// + /// \return Status. + Status LoadConfig(const std::string &config_path); + + /// \brief Obtains all input tensors of the model. + /// + /// \return The vector that includes all input tensors. + std::vector GetInputs(); + + /// \brief Obtains the input tensor of the model by name. + /// + /// \return The input tensor with the given name, if the name is not found, an invalid tensor is returned. + inline MSTensor GetInputByTensorName(const std::string &tensor_name); + + /// \brief Obtains all gradient tensors of the model. + /// + /// \return The vector that includes all gradient tensors. 
+ std::vector GetGradients() const; + + /// \brief update gradient tensors of the model. + /// + /// \param[in] inputs A vector new gradients. + /// \return Status of operation + Status ApplyGradients(const std::vector &gradients); + + /// \brief Obtains optimizer params tensors of the model. + /// + /// \return The vector that includes all params tensors. + std::vector GetOptimizerParams() const; + + /// \brief update the optimizer parameters + /// + /// \param[in] inputs A vector new optimizer params. + /// \return Status of operation + Status SetOptimizerParams(const std::vector ¶ms); + + Status InitMetrics(std::vector metrics); + std::vector GetMetrics(); + + /// \brief Obtains all output tensors of the model. + /// + /// \return The vector that includes all output tensors. + std::vector GetOutputs(); + + /// \brief Obtains names of all output tensors of the model. + /// + /// \return A vector that includes names of all output tensors. + inline std::vector GetOutputTensorNames(); + + /// \brief Obtains the output tensor of the model by name. + /// + /// \return The output tensor with the given name, if the name is not found, an invalid tensor is returned. + inline MSTensor GetOutputByTensorName(const std::string &tensor_name); + + /// \brief Get output MSTensors of model by node name. + /// + /// \param[in] node_name Define node name. + /// + /// \note Deprecated, replace with GetOutputByTensorName + /// + /// \return The vector of output MSTensor. + inline std::vector GetOutputsByNodeName(const std::string &node_name); + + /// \brief Inference model. + /// + /// \param[in] device_type Device type,options are kGPU, kAscend910, etc. + /// \param[in] model_type The type of model file, options are ModelType::kMindIR, ModelType::kOM. + /// + /// \return Is supported or not. 
+ static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type); + + Status SetTrainMode(bool train); + bool GetTrainMode() const; + Status Train(int epochs, std::shared_ptr ds, std::vector cbs); + Status Evaluate(std::shared_ptr ds, std::vector cbs); + + /// \brief Build a model from model buffer so that it can run on a device. Only valid for Lite. + /// + /// \param[in] model_data Define the buffer read from a model file. + /// \param[in] size Define bytes number of model buffer. + /// \param[in] model_type Define The type of model file. Options: ModelType::kMindIR, ModelType::kOM. Only + /// ModelType::kMindIR is valid for Lite. + /// \param[in] model_context Define the context used to store options during execution. + /// \param[in] dec_key Define the key used to decrypt the ciphertext model. The key length is 16, 24, or 32. + /// \param[in] dec_mode Define the decryption mode. Options: AES-GCM, AES-CBC. + /// + /// \return Status. + Status Build(const void *model_data, size_t data_size, ModelType model_type, + const std::shared_ptr &model_context = nullptr, const Key &dec_key = {}, + const std::string &dec_mode = kDecModeAesGcm); + + /// \brief Load and build a model from model buffer so that it can run on a device. Only valid for Lite. + /// + /// \param[in] model_path Define the model path. + /// \param[in] model_type Define The type of model file. Options: ModelType::kMindIR, ModelType::kOM. Only + /// ModelType::kMindIR is valid for Lite. + /// \param[in] model_context Define the context used to store options during execution. + /// \param[in] dec_key Define the key used to decrypt the ciphertext model. The key length is 16, 24, or 32. + /// \param[in] dec_mode Define the decryption mode. Options: AES-GCM, AES-CBC. + /// + /// \return Status. 
+ Status Build(const std::string &model_path, ModelType model_type, + const std::shared_ptr &model_context = nullptr, const Key &dec_key = {}, + const std::string &dec_mode = kDecModeAesGcm); + + private: + friend class Serialization; + // api without std::string + MSTensor GetInputByTensorName(const std::vector &tensor_name); + std::vector> GetOutputTensorNamesChar(); + MSTensor GetOutputByTensorName(const std::vector &tensor_name); + std::vector GetOutputsByNodeName(const std::vector &node_name); + + std::shared_ptr impl_; +}; + +MSTensor Model::GetInputByTensorName(const std::string &tensor_name) { + return GetInputByTensorName(StringToChar(tensor_name)); +} + +std::vector Model::GetOutputTensorNames() { return VectorCharToString(GetOutputTensorNamesChar()); } + +MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) { + return GetOutputByTensorName(StringToChar(tensor_name)); +} + +std::vector Model::GetOutputsByNodeName(const std::string &node_name) { + return GetOutputsByNodeName(StringToChar(node_name)); +} +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_MODEL_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/model_parallel_runner.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/model_parallel_runner.h new file mode 100644 index 0000000..159f4ce --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/model_parallel_runner.h @@ -0,0 +1,110 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_MODEL_PARALLEL_RUNNER_H +#define MINDSPORE_INCLUDE_API_MODEL_PARALLEL_RUNNER_H +#include +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/context.h" +namespace mindspore { +/// \brief The RunnerConfig class is used to store environment variables during execution +/// management. +class RunnerConfig { + public: + struct Data; + RunnerConfig(); + ~RunnerConfig() = default; + + /// \brief Set the number of workers at runtime. Only valid for ModelParallelRunner. + /// + /// \param[in] workers_num the number of workers at runtime. + void SetWorkersNum(int32_t workers_num); + + /// \brief Set the context at runtime. Only valid for ModelParallelRunner. + /// + /// \param[in] context store environment variables at runtime. + void SetContext(const std::shared_ptr &context); + + /// \brief Set the config before runtime. Only valid for ModelParallelRunner. + /// + /// \param[in] config store environment variables before runtime. + void SetConfigInfo(const std::string §ion, const std::map &config); + + /// \brief Get the current config setting. Only valid for ModelParallelRunner. + /// + /// \return The current config setting. + std::map> GetConfigInfo() const; + + /// \brief Get the current operators parallel workers number setting. Only valid for ModelParallelRunner. + /// + /// \return The current operators parallel workers number setting. + int32_t GetWorkersNum() const; + + /// \brief Get the current context setting. Only valid for ModelParallelRunner. 
+ /// + /// \return The current operators context setting. + std::shared_ptr GetContext() const; + + private: + std::shared_ptr data_ = nullptr; +}; + +class ModelPool; + +/// \brief The ModelParallelRunner class is used to define a MindSpore ModelParallelRunner, facilitating Model +/// management. +class MS_API ModelParallelRunner { + public: + ModelParallelRunner() = default; + ~ModelParallelRunner() = default; + + /// \brief build a model parallel runner from model path so that it can run on a device. Only valid for Lite. + /// + /// \param[in] model_path Define the model path. + /// \param[in] runner_config Define the config used to store options during model pool init. + /// + /// \return Status. + Status Init(const std::string &model_path, const std::shared_ptr &runner_config = nullptr); + + /// \brief Obtains all input tensors information of the model. + /// + /// \return The vector that includes all input tensors. + std::vector GetInputs(); + + /// \brief Obtains all output tensors information of the model. + /// + /// \return The vector that includes all output tensors. + std::vector GetOutputs(); + + /// \brief Inference ModelParallelRunner. + /// + /// \param[in] inputs A vector where model inputs are arranged in sequence. + /// \param[out] outputs Which is a pointer to a vector. The model outputs are filled in the container in sequence. + /// \param[in] before CallBack before predict. + /// \param[in] after CallBack after predict. + /// + /// \return Status. 
+ Status Predict(const std::vector &inputs, std::vector *outputs, + const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr); + + private: + std::shared_ptr model_pool_ = nullptr; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_MODEL_PARALLEL_RUNNER_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/net.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/net.h new file mode 100644 index 0000000..c7a3a9b --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/net.h @@ -0,0 +1,141 @@ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_INCLUDE_API_NET_H +#define MINDSPORE_INCLUDE_API_NET_H + +#include +#include +#include +#include +#include "include/api/types.h" +#include "include/api/data_type.h" +#include "include/api/cfg.h" + +namespace mindspore { +/// \brief Register node or sub network +#define REG(_name) Register(_name, #_name) + +class Expr; +class NodeImpl; +class NetImpl; +class NodeSet; +class Graph; +class NetData; + +class NetBase { + public: + NetBase() = default; + virtual std::vector operator()(const std::vector &inputs) = 0; + virtual uint32_t type() = 0; +}; + +class Node : public NetBase { + public: + Node(); + virtual ~Node(); + /// \brief Create output expression from node + + /// \param[in] name Name of input (like "labels" etc.) + /// + /// \return Expression + Expr *Create(std::string name); + /// \brief Run node on inputs. This operator is used in Net::construct() + /// + /// \param[in] inputs Inputs expression for the node. + /// \return Output node expression vector + std::vector operator()(const std::vector &inputs) override; + uint32_t type() final; + + private: + friend NodeImpl; + std::shared_ptr impl_ = nullptr; +}; + +class Net : public NetBase, public std::enable_shared_from_this { + public: + Net(); + virtual ~Net(); + explicit Net(std::string name); + explicit Net(const Graph &g); + /// \brief Define the relation between network inputs and outputs + /// + /// \param[in] inputs expression vector + /// + /// \return expression vector + + virtual std::vector construct(const std::vector &inputs); + /// \brief Addition operation + /// + /// \param[in] inputs Two elements to add + /// + /// \return expression vector (single element) + + /// \brief Execution operator. 
Connect inputs to outputs via user defined construct + /// + /// \return expression vector + + std::vector operator()(const std::vector &inputs); + void Register(Net *net, std::string &&name); + void Register(Node *node, std::string &&name); + /// \brief Find the trainable params for the trained network + /// + /// \return NodeSet for all trainable nodes + std::shared_ptr trainable_params(); + virtual void Add(NetBase *element); + /// \brief Input shape + /// + /// \param[in] idx input index + /// + /// \return Specific input shape vector + const std::vector InputShape(int idx); + /// \brief Output shape + /// + /// \param[in] idx Output index + /// + /// \return Specific output shape vector + const std::vector OutputShape(int idx); + uint32_t type() final; + + private: + friend NetImpl; + friend NetData; + std::shared_ptr impl_; +}; + +class SoftMaxCrossEntropyCfg { + public: + std::string reduction = "mean"; /**< Specifies reduction mode. The optional values are "none", "mean", "sum" */ +}; + +class AdamConfig { + public: + float learning_rate_ = 1e-3; + float beta1_ = 0.9; + float beta2_ = 0.999; + float eps_ = 1e-08; + bool use_nesterov_ = false; +}; + +namespace NN { +Net *NetWithLoss(Net *net, Node *loss); +Graph *GraphWithLoss(Graph *g, Node *loss); +Node *Adam(std::shared_ptr learn, const AdamConfig &cfg); +Node *SoftmaxCrossEntropy(const SoftMaxCrossEntropyCfg &cfg); +std::unique_ptr Input(std::vector dims, DataType data_type = DataType::kNumberTypeFloat32, int fmt = NHWC); +}; // namespace NN +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_NET_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/serialization.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/serialization.h new file mode 100644 index 0000000..613355d --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/serialization.h @@ -0,0 +1,100 
@@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_SERIALIZATION_H +#define MINDSPORE_INCLUDE_API_SERIALIZATION_H + +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" +#include "include/api/model.h" +#include "include/api/graph.h" +#include "include/api/dual_abi_helper.h" + +namespace mindspore { +/// \brief The Serialization class is used to summarize methods for reading and writing model files. +class MS_API Serialization { + public: + /// \brief Loads a model file from memory buffer. + /// + /// \param[in] model_data A buffer filled by model file. + /// \param[in] data_size The size of the buffer. + /// \param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM. + /// \param[out] graph The output parameter, an object saves graph data. + /// \param[in] dec_key The decryption key, key length is 16, 24, or 32. + /// \param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC. + /// + /// \return Status. + inline static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph, + const Key &dec_key = {}, const std::string &dec_mode = kDecModeAesGcm); + + /// \brief Loads a model file from path, is not supported on MindSpore Lite. + /// + /// \param[in] file The path of model file. 
+ /// \param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM. + /// \param[out] graph The output parameter, an object saves graph data. + /// \param[in] dec_key The decryption key, key length is 16, 24, or 32. + /// \param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC. + /// + /// \return Status. + inline static Status Load(const std::string &file, ModelType model_type, Graph *graph, const Key &dec_key = {}, + const std::string &dec_mode = kDecModeAesGcm); + + /// \brief Load multiple models from multiple files, MindSpore Lite does not provide this feature. + /// + /// \param[in] files The path of model files. + /// \param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM. + /// \param[out] graph The output parameter, an object saves graph data. + /// \param[in] dec_key The decryption key, key length is 16, 24, or 32. + /// \param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC. + /// + /// \return Status. 
+ inline static Status Load(const std::vector &files, ModelType model_type, std::vector *graphs, + const Key &dec_key = {}, const std::string &dec_mode = kDecModeAesGcm); + static Status SetParameters(const std::map ¶meters, Model *model); + static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data); + static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file, + QuantizationType quantization_type = kNoQuant, bool export_inference_only = true, + std::vector output_tensor_name = {}); + + private: + static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph, const Key &dec_key, + const std::vector &dec_mode); + static Status Load(const std::vector &file, ModelType model_type, Graph *graph); + static Status Load(const std::vector &file, ModelType model_type, Graph *graph, const Key &dec_key, + const std::vector &dec_mode); + static Status Load(const std::vector> &files, ModelType model_type, std::vector *graphs, + const Key &dec_key, const std::vector &dec_mode); +}; + +Status Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph, + const Key &dec_key, const std::string &dec_mode) { + return Load(model_data, data_size, model_type, graph, dec_key, StringToChar(dec_mode)); +} + +Status Serialization::Load(const std::string &file, ModelType model_type, Graph *graph, const Key &dec_key, + const std::string &dec_mode) { + return Load(StringToChar(file), model_type, graph, dec_key, StringToChar(dec_mode)); +} + +Status Serialization::Load(const std::vector &files, ModelType model_type, std::vector *graphs, + const Key &dec_key, const std::string &dec_mode) { + return Load(VectorStringToChar(files), model_type, graphs, dec_key, StringToChar(dec_mode)); +} +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H diff --git 
a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/status.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/status.h new file mode 100644 index 0000000..e59f16e --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/status.h @@ -0,0 +1,166 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_STATUS_H +#define MINDSPORE_INCLUDE_API_STATUS_H + +#include +#include +#include +#include +#include +#include "include/api/dual_abi_helper.h" +#include "include/api/types.h" + +namespace mindspore { +enum CompCode : uint32_t { + kCore = 0x00000000u, + kMD = 0x10000000u, + kME = 0x20000000u, + kMC = 0x30000000u, + kLite = 0xF0000000u, +}; + +enum StatusCode : uint32_t { + kSuccess = 0, + // Core + kCoreFailed = kCore | 0x1, + + // MD + kMDOutOfMemory = kMD | 1, + kMDShapeMisMatch = kMD | 2, + kMDInterrupted = kMD | 3, + kMDNoSpace = kMD | 4, + kMDPyFuncException = kMD | 5, + kMDDuplicateKey = kMD | 6, + kMDPythonInterpreterFailure = kMD | 7, + kMDTDTPushFailure = kMD | 8, + kMDFileNotExist = kMD | 9, + kMDProfilingError = kMD | 10, + kMDBoundingBoxOutOfBounds = kMD | 11, + kMDBoundingBoxInvalidShape = kMD | 12, + kMDSyntaxError = kMD | 13, + kMDTimeOut = kMD | 14, + kMDBuddySpaceFull = kMD | 15, + kMDNetWorkError = kMD | 16, + kMDNotImplementedYet = kMD 
| 17, + // Make this error code the last one. Add new error code above it. + kMDUnexpectedError = kMD | 127, + + // ME + kMEFailed = kME | 0x1, + kMEInvalidInput = kME | 0x2, + + // MC + kMCFailed = kMC | 0x1, + kMCDeviceError = kMC | 0x2, + kMCInvalidInput = kMC | 0x3, + kMCInvalidArgs = kMC | 0x4, + + // Lite // Common error code, range: [-1, -100) + kLiteError = kLite | (0x0FFFFFFF & -1), /**< Common error code. */ + kLiteNullptr = kLite | (0x0FFFFFFF & -2), /**< NULL pointer returned.*/ + kLiteParamInvalid = kLite | (0x0FFFFFFF & -3), /**< Invalid parameter.*/ + kLiteNoChange = kLite | (0x0FFFFFFF & -4), /**< No change. */ + kLiteSuccessExit = kLite | (0x0FFFFFFF & -5), /**< No error but exit. */ + kLiteMemoryFailed = kLite | (0x0FFFFFFF & -6), /**< Fail to create memory. */ + kLiteNotSupport = kLite | (0x0FFFFFFF & -7), /**< Fail to support. */ + kLiteThreadPoolError = kLite | (0x0FFFFFFF & -8), /**< Error occur in thread pool. */ + kLiteUninitializedObj = kLite | (0x0FFFFFFF & -9), /**< Object is not initialized. */ + kLiteFileError = kLite | (0x0FFFFFFF & -10), /**< Invalid file. */ + + // Executor error code, range: [-100,-200) + kLiteOutOfTensorRange = kLite | (0x0FFFFFFF & -100), /**< Failed to check range. */ + kLiteInputTensorError = kLite | (0x0FFFFFFF & -101), /**< Failed to check input tensor. */ + kLiteReentrantError = kLite | (0x0FFFFFFF & -102), /**< Exist executor running. */ + + // Graph error code, range: [-200,-300) + kLiteGraphFileError = kLite | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */ + + // Node error code, range: [-300,-400) + kLiteNotFindOp = kLite | (0x0FFFFFFF & -300), /**< Failed to find operator. */ + kLiteInvalidOpName = kLite | (0x0FFFFFFF & -301), /**< Invalid operator name. */ + kLiteInvalidOpAttr = kLite | (0x0FFFFFFF & -302), /**< Invalid operator attr. */ + kLiteOpExecuteFailure = kLite | (0x0FFFFFFF & -303), /**< Failed to execution operator. 
*/ + + // Tensor error code, range: [-400,-500) + kLiteFormatError = kLite | (0x0FFFFFFF & -400), /**< Failed to checking tensor format. */ + + // InferShape error code, range: [-500,-600) + kLiteInferError = kLite | (0x0FFFFFFF & -500), /**< Failed to infer shape. */ + kLiteInferInvalid = kLite | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */ + + // User input param error code, range: [-600, 700) + kLiteInputParamInvalid = kLite | (0x0FFFFFFF & -600), /**< Invalid input param by user. */ +}; + +class MS_API Status { + public: + Status(); + inline Status(enum StatusCode status_code, const std::string &status_msg = ""); // NOLINT(runtime/explicit) + inline Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = ""); + + ~Status() = default; + + enum StatusCode StatusCode() const; + inline std::string ToString() const; + + int GetLineOfCode() const; + inline std::string GetErrDescription() const; + inline std::string SetErrDescription(const std::string &err_description); + + friend std::ostream &operator<<(std::ostream &os, const Status &s); + + bool operator==(const Status &other) const; + bool operator==(enum StatusCode other_code) const; + bool operator!=(const Status &other) const; + bool operator!=(enum StatusCode other_code) const; + + explicit operator bool() const; + explicit operator int() const; + + static Status OK(); + + bool IsOk() const; + + bool IsError() const; + + static inline std::string CodeAsString(enum StatusCode c); + + private: + // api without std::string + Status(enum StatusCode status_code, const std::vector &status_msg); + Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::vector &extra); + std::vector ToCString() const; + std::vector GetErrDescriptionChar() const; + std::vector SetErrDescription(const std::vector &err_description); + static std::vector CodeAsCString(enum StatusCode c); + + struct Data; + std::shared_ptr data_; +}; + 
+Status::Status(enum StatusCode status_code, const std::string &status_msg) + : Status(status_code, StringToChar(status_msg)) {} +Status::Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::string &extra) + : Status(code, line_of_code, file_name, StringToChar(extra)) {} +std::string Status::ToString() const { return CharToString(ToCString()); } +std::string Status::GetErrDescription() const { return CharToString(GetErrDescriptionChar()); } +std::string Status::SetErrDescription(const std::string &err_description) { + return CharToString(SetErrDescription(StringToChar(err_description))); +} +std::string Status::CodeAsString(enum StatusCode c) { return CharToString(CodeAsCString(c)); } +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_STATUS_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/types.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/types.h new file mode 100644 index 0000000..26e2e8f --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/api/types.h @@ -0,0 +1,360 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_TYPES_H +#define MINDSPORE_INCLUDE_API_TYPES_H + +#include +#include +#include +#include +#include +#include "include/api/data_type.h" +#include "include/api/dual_abi_helper.h" +#include "include/api/format.h" + +#ifndef MS_API +#ifdef _WIN32 +#define MS_API __declspec(dllexport) +#else +#define MS_API __attribute__((visibility("default"))) +#endif +#endif + +namespace mindspore { +enum ModelType : uint32_t { + kMindIR = 0, + kAIR = 1, + kOM = 2, + kONNX = 3, + kFlatBuffer = 4, + // insert new data type here + kUnknownType = 0xFFFFFFFF +}; + +enum QuantizationType : uint32_t { kNoQuant = 0, kWeightQuant = 1, kFullQuant = 2, kUnknownQuantType = 0xFFFFFFFF }; + +enum OptimizationLevel : uint32_t { + kO0 = 0, // Do not change + kO2 = 2, // Cast network to float16, keep batchnorm and loss in float32, + kO3 = 3, // Cast network to float16, including bacthnorm + kAuto = 4, // Choose optimization based on device + kOptimizationType = 0xFFFFFFFF +}; + +struct QuantParam { + int bit_num; + double scale; + int32_t zero_point; +}; + +class Allocator; +/// \brief The MSTensor class defines a tensor in MindSpore. +class MS_API MSTensor { + public: + class Impl; + /// \brief Creates a MSTensor object, whose data need to be copied before accessed by Model, must be used in pairs + /// with DestroyTensorPtr. + /// + /// \param[in] name The name of the MSTensor. + /// \param[in] type The data type of the MSTensor. + /// \param[in] shape The shape of the MSTensor. + /// \param[in] data The data pointer that points to allocated memory. + /// \param[in] data_len The length of the memory, in bytes. + /// + /// \return A pointer of MSTensor. + static inline MSTensor *CreateTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + + /// \brief Creates a MSTensor object, whose data can be directly accessed by Model, must be used in pairs with + /// DestroyTensorPtr. 
+ /// + /// \param[in] name The name of the MSTensor. + /// \param[in] type The data type of the MSTensor. + /// \param[in] shape The shape of the MSTensor. + /// \param[in] data The data pointer that points to allocated memory. + /// \param[in] data_len The length of the memory, in bytes. + /// + /// \return A pointer of MSTensor. + static inline MSTensor *CreateRefTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + + /// \brief Creates a MSTensor object, whose device data can be directly accessed by Model, must be used in pairs with + /// DestroyTensorPtr. + /// + /// \param[in] name The name of the MSTensor. + /// \param[in] type The data type of the MSTensor. + /// \param[in] shape The shape of the MSTensor. + /// \param[in] data The data pointer that points to device memory. + /// \param[in] data_len The length of the memory, in bytes. + /// + /// \return A pointer of MSTensor. + static inline MSTensor *CreateDevTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + + /// \brief Creates a MSTensor object from local image file, must be used in pairs with DestroyTensorPtr. + /// + /// \param[in] image_file Path of image file. + /// + /// \return A pointer of MSTensor. + static inline MSTensor *CreateImageTensor(const std::string &image_file) noexcept; + + /// \brief Create a string type MSTensor object whose data can be accessed by Model only after being copied, must be + /// used in pair with DestroyTensorPtr. + /// + /// \param[in] name The name of the MSTensor. + /// \param[in] str A vector container containing several strings. + /// + /// \return A pointer of MSTensor. + static inline MSTensor *StringsToTensor(const std::string &name, const std::vector &str); + + /// \brief Parse the string type MSTensor object into strings. + /// + /// \param[in] tensor A MSTensor object. 
+ /// + /// \return A vector container containing several strings. + static inline std::vector TensorToStrings(const MSTensor &tensor); + + /// \brief Destroy an object created by Clone, StringsToTensor, CreateRefTensor, CreateDevTensor or CreateTensor. Do + /// not use it to destroy MSTensor from other sources. + /// + /// \param[in] tensor A MSTensor object. + static void DestroyTensorPtr(MSTensor *tensor) noexcept; + + MSTensor(); + explicit MSTensor(const std::shared_ptr &impl); + inline MSTensor(const std::string &name, DataType type, const std::vector &shape, const void *data, + size_t data_len); + explicit MSTensor(std::nullptr_t); + ~MSTensor(); + + /// \brief Obtains the name of the MSTensor. + /// + /// \return The name of the MSTensor. + inline std::string Name() const; + + /// \brief Obtains the data type of the MSTensor. + /// + /// \return The data type of the MSTensor. + enum DataType DataType() const; + + /// \brief Obtains the shape of the MSTensor. + /// + /// \return The shape of the MSTensor. + const std::vector &Shape() const; + + /// \brief Obtains the number of elements of the MSTensor. + /// + /// \return The number of elements of the MSTensor. + int64_t ElementNum() const; + + /// \brief Obtains a shared pointer to the copy of data of the MSTensor. The data can be read on host. + /// + /// \return A shared pointer to the copy of data of the MSTensor. + std::shared_ptr Data() const; + + /// \brief Obtains the pointer to the data of the MSTensor. If the MSTensor is a device tensor, the data cannot be + /// accessed directly on host. + /// + /// \return A pointer to the data of the MSTensor. + void *MutableData(); + + /// \brief Obtains the length of the data of the MSTensor, in bytes. + /// + /// \return The length of the data of the MSTensor, in bytes. 
+ size_t DataSize() const; + + /// \brief Get whether the MSTensor data is const data + /// + /// \return Const flag of MSTensor + bool IsConst() const; + + /// \brief Gets the boolean value that indicates whether the memory of MSTensor is on device. + /// + /// \return The boolean value that indicates whether the memory of MSTensor is on device. + bool IsDevice() const; + + /// \brief Gets a deep copy of the MSTensor, must be used in pair with DestroyTensorPtr. + /// + /// \return A pointer points to a deep copy of the MSTensor. + MSTensor *Clone() const; + + /// \brief Gets the boolean value that indicates whether the MSTensor is valid. + /// + /// \return The boolean value that indicates whether the MSTensor is valid. + bool operator==(std::nullptr_t) const; + + /// \brief Gets the boolean value that indicates whether the MSTensor is valid. + /// + /// \return The boolean value that indicates whether the MSTensor is valid. + bool operator!=(std::nullptr_t) const; + + /// \brief Get the boolean value that indicates whether the MSTensor equals tensor. + /// + /// \param[in] another MSTensor. + /// + /// \return The boolean value that indicates whether the MSTensor equals tensor. + bool operator==(const MSTensor &tensor) const; + + /// \brief Set the shape of for the MSTensor. Only valid for Lite. + /// + /// \param[in] Shape of the MSTensor, a vector of int64_t. + void SetShape(const std::vector &shape); + + /// \brief Set the data type for the MSTensor. Only valid for Lite. + /// + /// \param[in] The data type of the MSTensor. + void SetDataType(enum DataType data_type); + + /// \brief Set the name for the MSTensor. Only valid for Lite. + /// + /// \param[in] The name of the MSTensor. + void SetTensorName(const std::string &name); + + /// \brief Set the Allocator for the MSTensor. Only valid for Lite. + /// + /// \param[in] A pointer to Allocator. + void SetAllocator(std::shared_ptr allocator); + + /// \brief Obtain the Allocator of the MSTensor. 
Only valid for Lite. + /// + /// \return A pointer to Allocator. + std::shared_ptr allocator() const; + + /// \brief Set the format for the MSTensor. Only valid for Lite. + /// + /// \param[in] The format of the MSTensor. + void SetFormat(mindspore::Format format); + + /// \brief Obtain the format of the MSTensor. Only valid for Lite. + /// + /// \return The format of the MSTensor. + mindspore::Format format() const; + + /// \brief Set the data for the MSTensor. Only valid for Lite. + /// + /// \param[in] A pointer to the data of the MSTensor. + void SetData(void *data); + + /// \brief Get the quantization parameters of the MSTensor. Only valid for Lite. + /// + /// \return The quantization parameters of the MSTensor. + std::vector QuantParams() const; + + /// \brief Set the quantization parameters for the MSTensor. Only valid for Lite. + /// + /// \param[in] The quantization parameters of the MSTensor. + void SetQuantParams(std::vector quant_params); + + const std::shared_ptr impl() const { return impl_; } + + private: + // api without std::string + static MSTensor *CreateTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static MSTensor *CreateRefTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static MSTensor *CreateDevTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static MSTensor *CreateImageTensor(const std::vector &image_file) noexcept; + static MSTensor *CharStringsToTensor(const std::vector &name, const std::vector> &str); + static std::vector> TensorToStringChars(const MSTensor &tensor); + + MSTensor(const std::vector &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len); + std::vector CharName() const; + + friend class ModelImpl; + std::shared_ptr impl_; +}; + +class MS_API Buffer { + 
public: + Buffer(); + Buffer(const void *data, size_t data_len); + ~Buffer(); + + const void *Data() const; + void *MutableData(); + size_t DataSize() const; + + bool ResizeData(size_t data_len); + bool SetData(const void *data, size_t data_len); + + Buffer Clone() const; + + private: + class Impl; + std::shared_ptr impl_; +}; + +MSTensor *MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + return CreateTensor(StringToChar(name), type, shape, data, data_len); +} + +MSTensor *MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + return CreateRefTensor(StringToChar(name), type, shape, data, data_len); +} + +MSTensor *MSTensor::CreateDevTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + return CreateDevTensor(StringToChar(name), type, shape, data, data_len); +} + +MSTensor *MSTensor::CreateImageTensor(const std::string &image_file) noexcept { + return CreateImageTensor(StringToChar(image_file)); +} + +MSTensor *MSTensor::StringsToTensor(const std::string &name, const std::vector &str) { + return CharStringsToTensor(StringToChar(name), VectorStringToChar(str)); +} + +std::vector MSTensor::TensorToStrings(const MSTensor &tensor) { + return VectorCharToString(TensorToStringChars(tensor)); +} + +MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : MSTensor(StringToChar(name), type, shape, data, data_len) {} + +std::string MSTensor::Name() const { return CharToString(CharName()); } + +using Key = struct Key { + const size_t max_key_len = 32; + size_t len; + unsigned char key[32]; + Key() : len(0) {} + explicit Key(const char *dec_key, size_t key_len); +}; + +constexpr char kDecModeAesGcm[] = "AES-GCM"; + +/// \brief CallBackParam defined 
input arguments for callBack function. +struct MSCallBackParam { + std::string node_name; /**< node name argument */ + std::string node_type; /**< node type argument */ +}; + +/// \brief KernelCallBack defined the function pointer for callBack. +using MSKernelCallBack = std::function &inputs, const std::vector &outputs, + const MSCallBackParam &opInfo)>; + +std::vector CharVersion(); +inline std::string Version() { return CharToString(CharVersion()); } + +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_TYPES_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/context_c.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/context_c.h new file mode 100644 index 0000000..980b55b --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/context_c.h @@ -0,0 +1,179 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_C_API_CONTEXT_C_H +#define MINDSPORE_INCLUDE_C_API_CONTEXT_C_H + +#include +#include +#include +#include "include/c_api/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *MSContextHandle; +typedef void *MSDeviceInfoHandle; + +/// \brief Create a context object. +/// +/// \return Context object handle. +MS_API MSContextHandle MSContextCreate(); + +/// \brief Destroy the context object. 
+/// +/// \param[in] context Context object handle address. +MS_API void MSContextDestroy(MSContextHandle *context); + +/// \brief Set the number of threads at runtime. +/// +/// \param[in] context Context object handle. +/// \param[in] thread_num the number of threads at runtime. +MS_API void MSContextSetThreadNum(MSContextHandle context, int32_t thread_num); + +/// \brief Obtain the current thread number setting. +/// +/// \param[in] context Context object handle. +/// +/// \return The current thread number setting. +MS_API int32_t MSContextGetThreadNum(const MSContextHandle context); + +/// \brief Set the thread affinity to CPU cores. +/// +/// \param[in] context Context object handle. +/// \param[in] mode: 0: no affinities, 1: big cores first, 2: little cores first +MS_API void MSContextSetThreadAffinityMode(MSContextHandle context, int mode); + +/// \brief Obtain the thread affinity of CPU cores. +/// +/// \param[in] context Context object handle. +/// +/// \return Thread affinity to CPU cores. 0: no affinities, 1: big cores first, 2: little cores first +MS_API int MSContextGetThreadAffinityMode(const MSContextHandle context); + +/// \brief Set the thread lists to CPU cores. +/// +/// \note If core_list and mode are set by MSContextSetThreadAffinityMode at the same time, +/// the core_list is effective, but the mode is not effective. +/// +/// \param[in] context Context object handle. +/// \param[in] core_list: an array of thread core lists. +/// \param[in] core_num The number of core. +MS_API void MSContextSetThreadAffinityCoreList(MSContextHandle context, const int32_t *core_list, size_t core_num); + +/// \brief Obtain the thread lists of CPU cores. +/// +/// \param[in] context Context object handle. +/// \param[out] core_num The number of core. +/// +/// \return an array of thread core lists.
+MS_API const int32_t *MSContextGetThreadAffinityCoreList(const MSContextHandle context, size_t *core_num); + +/// \brief Set the status whether to perform model inference or training in parallel. +/// +/// \param[in] context Context object handle. +/// \param[in] is_parallel: true, parallel; false, not in parallel. +MS_API void MSContextSetEnableParallel(MSContextHandle context, bool is_parallel); + +/// \brief Obtain the status whether to perform model inference or training in parallel. +/// +/// \param[in] context Context object handle. +/// +/// \return Bool value that indicates whether in parallel. +MS_API bool MSContextGetEnableParallel(const MSContextHandle context); + +/// \brief Add device info to context object. +/// +/// \param[in] context Context object handle. +/// \param[in] device_info Device info object handle. +MS_API void MSContextAddDeviceInfo(MSContextHandle context, MSDeviceInfoHandle device_info); + +/// \brief Create a device info object. +/// +/// \param[in] device_info Device info object handle. +/// +/// \return Device info object handle. +MS_API MSDeviceInfoHandle MSDeviceInfoCreate(MSDeviceType device_type); + +/// \brief Destroy the device info object. +/// +/// \param[in] device_info Device info object handle address. +MS_API void MSDeviceInfoDestroy(MSDeviceInfoHandle *device_info); + +/// \brief Set provider's name. +/// +/// \param[in] device_info Device info object handle. +/// \param[in] provider define the provider's name. +MS_API void MSDeviceInfoSetProvider(MSDeviceInfoHandle device_info, const char *provider); + +/// \brief Obtain provider's name +/// +/// \param[in] device_info Device info object handle. +/// +/// \return provider's name. +MS_API const char *MSDeviceInfoGetProvider(const MSDeviceInfoHandle device_info); + +/// \brief Set provider's device type. +/// +/// \param[in] device_info Device info object handle. +/// \param[in] device define the provider's device type. EG: CPU. 
+MS_API void MSDeviceInfoSetProviderDevice(MSDeviceInfoHandle device_info, const char *device); + +/// \brief Obtain provider's device type. +/// +/// \param[in] device_info Device info object handle. +/// +/// \return provider's device type. +MS_API const char *MSDeviceInfoGetProviderDevice(const MSDeviceInfoHandle device_info); + +/// \brief Obtain the device type of the device info. +/// +/// \param[in] device_info Device info object handle. +/// +/// \return Device Type of the device info. +MS_API MSDeviceType MSDeviceInfoGetDeviceType(const MSDeviceInfoHandle device_info); + +/// \brief Set enables to perform the float16 inference, Only valid for CPU/GPU. +/// +/// \param[in] device_info Device info object handle. +/// \param[in] is_fp16 Enable float16 inference or not. +MS_API void MSDeviceInfoSetEnableFP16(MSDeviceInfoHandle device_info, bool is_fp16); + +/// \brief Obtain enables to perform the float16 inference, Only valid for CPU/GPU. +/// +/// \param[in] device_info Device info object handle. +/// +/// \return Whether enable float16 inference. +MS_API bool MSDeviceInfoGetEnableFP16(const MSDeviceInfoHandle device_info); + +/// \brief Set the NPU frequency, Only valid for NPU. +/// +/// \param[in] device_info Device info object handle. +/// \param[in] frequency Can be set to 1 (low power consumption), 2 (balanced), 3 (high performance), 4 (extreme +/// performance), default as 3. +MS_API void MSDeviceInfoSetFrequency(MSDeviceInfoHandle device_info, int frequency); + +/// \brief Obtain the NPU frequency, Only valid for NPU. +/// +/// \param[in] device_info Device info object handle. 
+/// +/// \return NPU frequency +MS_API int MSDeviceInfoGetFrequency(const MSDeviceInfoHandle device_info); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_INCLUDE_C_API_CONTEXT_C_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/data_type_c.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/data_type_c.h new file mode 100644 index 0000000..3b736e0 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/data_type_c.h @@ -0,0 +1,52 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_C_API_DATA_TYPE_C_H +#define MINDSPORE_INCLUDE_C_API_DATA_TYPE_C_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum MSDataType { + kMSDataTypeUnknown = 0, + kMSDataTypeObjectTypeString = 12, + kMSDataTypeObjectTypeList = 13, + kMSDataTypeObjectTypeTuple = 14, + kMSDataTypeObjectTypeTensor = 17, + kMSDataTypeNumberTypeBegin = 29, + kMSDataTypeNumberTypeBool = 30, + kMSDataTypeNumberTypeInt8 = 32, + kMSDataTypeNumberTypeInt16 = 33, + kMSDataTypeNumberTypeInt32 = 34, + kMSDataTypeNumberTypeInt64 = 35, + kMSDataTypeNumberTypeUInt8 = 37, + kMSDataTypeNumberTypeUInt16 = 38, + kMSDataTypeNumberTypeUInt32 = 39, + kMSDataTypeNumberTypeUInt64 = 40, + kMSDataTypeNumberTypeFloat16 = 42, + kMSDataTypeNumberTypeFloat32 = 43, + kMSDataTypeNumberTypeFloat64 = 44, + kMSDataTypeNumberTypeEnd = 46, + // add new enum here + kMSDataTypeInvalid = INT32_MAX, +} MSDataType; + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_INCLUDE_C_API_DATA_TYPE_C_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/format_c.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/format_c.h new file mode 100644 index 0000000..7b73dab --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/format_c.h @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_C_API_FORMAT_C_H +#define MINDSPORE_INCLUDE_C_API_FORMAT_C_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum MSFormat { + kMSFormatNCHW = 0, + kMSFormatNHWC = 1, + kMSFormatNHWC4 = 2, + kMSFormatHWKC = 3, + kMSFormatHWCK = 4, + kMSFormatKCHW = 5, + kMSFormatCKHW = 6, + kMSFormatKHWC = 7, + kMSFormatCHWK = 8, + kMSFormatHW = 9, + kMSFormatHW4 = 10, + kMSFormatNC = 11, + kMSFormatNC4 = 12, + kMSFormatNC4HW4 = 13, + kMSFormatNCDHW = 15, + kMSFormatNWC = 16, + kMSFormatNCW = 17 +} MSFormat; + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_INCLUDE_C_API_FORMAT_C_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/model_c.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/model_c.h new file mode 100644 index 0000000..ddd31b5 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/model_c.h @@ -0,0 +1,144 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_C_API_MODEL_C_H +#define MINDSPORE_INCLUDE_C_API_MODEL_C_H + +#include "include/c_api/tensor_c.h" +#include "include/c_api/context_c.h" +#include "include/c_api/status_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *MSModelHandle; + +typedef struct MSTensorHandleArray { + size_t handle_num; + MSTensorHandle *handle_list; +} MSTensorHandleArray; + +#define MS_MAX_SHAPE_NUM 32 +typedef struct MSShapeInfo { + size_t shape_num; + int64_t shape[MS_MAX_SHAPE_NUM]; +} MSShapeInfo; + +typedef struct MSCallBackParamC { + char *node_name; + char *node_type; +} MSCallBackParamC; + +typedef bool (*MSKernelCallBackC)(const MSTensorHandleArray inputs, const MSTensorHandleArray outputs, + const MSCallBackParamC kernel_Info); + +/// \brief Create a model object. Only valid for Lite. +/// +/// \return Model object handle. +MS_API MSModelHandle MSModelCreate(); + +/// \brief Destroy the model object. Only valid for Lite. +/// +/// \param[in] model Model object handle address. +MS_API void MSModelDestroy(MSModelHandle *model); + +/// \brief Set workspace for the model object. Only valid for Iot. +/// +/// \param[in] model Model object handle. +/// \param[in] workspace Define the workspace address. +/// \param[in] workspace_size Define the workspace size. +MS_API void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size); + +/// \brief Build the model from model file buffer so that it can run on a device. Only valid for Lite. +/// +/// \param[in] model Model object handle. +/// \param[in] model_data Define the buffer read from a model file. +/// \param[in] data_size Define bytes number of model file buffer. +/// \param[in] model_type Define The type of model file. +/// \param[in] model_context Define the context used to store options during execution. +/// +/// \return MSStatus. 
+MS_API MSStatus MSModelBuild(MSModelHandle model, const void *model_data, size_t data_size, MSModelType model_type, + const MSContextHandle model_context); + +/// \brief Load and build the model from model path so that it can run on a device. Only valid for Lite. +/// +/// \param[in] model Model object handle. +/// \param[in] model_path Define the model file path. +/// \param[in] model_type Define The type of model file. +/// \param[in] model_context Define the context used to store options during execution. +/// +/// \return MSStatus. +MS_API MSStatus MSModelBuildFromFile(MSModelHandle model, const char *model_path, MSModelType model_type, + const MSContextHandle model_context); + +/// \brief Resizes the shapes of inputs. +/// +/// \param[in] model Model object handle. +/// \param[in] inputs The array that includes all input tensor handles. +/// \param[in] shape_infos Defines the new shapes of inputs, should be consistent with inputs. +/// \param[in] shape_info_num The num of shape_infos. +/// +/// \return MSStatus. +MS_API MSStatus MSModelResize(MSModelHandle model, const MSTensorHandleArray inputs, MSShapeInfo *shape_infos, + size_t shape_info_num); + +/// \brief Inference model. +/// +/// \param[in] model Model object handle. +/// \param[in] inputs The array that includes all input tensor handles. +/// \param[out] outputs The array that includes all output tensor handles. +/// \param[in] before CallBack before predict. +/// \param[in] after CallBack after predict. +/// +/// \return MSStatus. +MS_API MSStatus MSModelPredict(MSModelHandle model, const MSTensorHandleArray inputs, MSTensorHandleArray *outputs, + const MSKernelCallBackC before, const MSKernelCallBackC after); + +/// \brief Obtains all input tensor handles of the model. +/// +/// \param[in] model Model object handle. +/// +/// \return The array that includes all input tensor handles. 
+MS_API MSTensorHandleArray MSModelGetInputs(const MSModelHandle model); + +/// \brief Obtains all output tensor handles of the model. +/// +/// \param[in] model Model object handle. +/// +/// \return The array that includes all output tensor handles. +MS_API MSTensorHandleArray MSModelGetOutputs(const MSModelHandle model); + +/// \brief Obtains the input tensor handle of the model by name. +/// +/// \param[in] model Model object handle. +/// \param[in] tensor_name The name of tensor. +/// +/// \return The input tensor handle with the given name, if the name is not found, NULL is returned. +MS_API MSTensorHandle MSModelGetInputByTensorName(const MSModelHandle model, const char *tensor_name); + +/// \brief Obtains the output tensor handle of the model by name. +/// +/// \param[in] model Model object handle. +/// \param[in] tensor_name The name of tensor. +/// +/// \return The output tensor handle with the given name, if the name is not found, NULL is returned. +MS_API MSTensorHandle MSModelGetOutputByTensorName(const MSModelHandle model, const char *tensor_name); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_INCLUDE_C_API_MODEL_C_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/status_c.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/status_c.h new file mode 100644 index 0000000..62b19b9 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/status_c.h @@ -0,0 +1,76 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_C_API_STATUS_C_H +#define MINDSPORE_INCLUDE_C_API_STATUS_C_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +enum MSCompCode { + kMSCompCodeCore = 0x00000000u, + kMSCompCodeMD = 0x10000000u, + kMSCompCodeME = 0x20000000u, + kMSCompCodeMC = 0x30000000u, + kMSCompCodeLite = 0xF0000000u, +}; + +typedef enum MSStatus { + kMSStatusSuccess = 0, + // Core + kMSStatusCoreFailed = kMSCompCodeCore | 0x1, + + // Lite // Common error code, range: [-1, -100) + kMSStatusLiteError = kMSCompCodeLite | (0x0FFFFFFF & -1), /**< Common error code. */ + kMSStatusLiteNullptr = kMSCompCodeLite | (0x0FFFFFFF & -2), /**< NULL pointer returned.*/ + kMSStatusLiteParamInvalid = kMSCompCodeLite | (0x0FFFFFFF & -3), /**< Invalid parameter.*/ + kMSStatusLiteNoChange = kMSCompCodeLite | (0x0FFFFFFF & -4), /**< No change. */ + kMSStatusLiteSuccessExit = kMSCompCodeLite | (0x0FFFFFFF & -5), /**< No error but exit. */ + kMSStatusLiteMemoryFailed = kMSCompCodeLite | (0x0FFFFFFF & -6), /**< Fail to create memory. */ + kMSStatusLiteNotSupport = kMSCompCodeLite | (0x0FFFFFFF & -7), /**< Fail to support. */ + kMSStatusLiteThreadPoolError = kMSCompCodeLite | (0x0FFFFFFF & -8), /**< Error occur in thread pool. */ + kMSStatusLiteUninitializedObj = kMSCompCodeLite | (0x0FFFFFFF & -9), /**< Object is not initialized. */ + + // Executor error code, range: [-100,-200) + kMSStatusLiteOutOfTensorRange = kMSCompCodeLite | (0x0FFFFFFF & -100), /**< Failed to check range. 
*/ + kMSStatusLiteInputTensorError = kMSCompCodeLite | (0x0FFFFFFF & -101), /**< Failed to check input tensor. */ + kMSStatusLiteReentrantError = kMSCompCodeLite | (0x0FFFFFFF & -102), /**< Exist executor running. */ + + // Graph error code, range: [-200,-300) + kMSStatusLiteGraphFileError = kMSCompCodeLite | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */ + + // Node error code, range: [-300,-400) + kMSStatusLiteNotFindOp = kMSCompCodeLite | (0x0FFFFFFF & -300), /**< Failed to find operator. */ + kMSStatusLiteInvalidOpName = kMSCompCodeLite | (0x0FFFFFFF & -301), /**< Invalid operator name. */ + kMSStatusLiteInvalidOpAttr = kMSCompCodeLite | (0x0FFFFFFF & -302), /**< Invalid operator attr. */ + kMSStatusLiteOpExecuteFailure = kMSCompCodeLite | (0x0FFFFFFF & -303), /**< Failed to execution operator. */ + + // Tensor error code, range: [-400,-500) + kMSStatusLiteFormatError = kMSCompCodeLite | (0x0FFFFFFF & -400), /**< Failed to checking tensor format. */ + + // InferShape error code, range: [-500,-600) + kMSStatusLiteInferError = kMSCompCodeLite | (0x0FFFFFFF & -500), /**< Failed to infer shape. */ + kMSStatusLiteInferInvalid = kMSCompCodeLite | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */ + + // User input param error code, range: [-600, 700) + kMSStatusLiteInputParamInvalid = kMSCompCodeLite | (0x0FFFFFFF & -600), /**< Invalid input param by user. 
*/ +} MSStatus; +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_INCLUDE_C_API_STATUS_C_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/tensor_c.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/tensor_c.h new file mode 100644 index 0000000..9783bd9 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/tensor_c.h @@ -0,0 +1,146 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_C_API_TENSOE_C_H +#define MINDSPORE_INCLUDE_C_API_TENSOE_C_H + +#include +#include "include/c_api/types_c.h" +#include "include/c_api/data_type_c.h" +#include "include/c_api/format_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *MSTensorHandle; + +/// \brief Create a tensor object. +/// +/// \param[in] name The name of the tensor. +/// \param[in] type The data type of the tensor. +/// \param[in] shape The shape of the tensor. +/// \param[in] shape_num The num of the shape. +/// \param[in] data The data pointer that points to allocated memory. +/// \param[in] data_len The length of the memory, in bytes. +/// +/// \return Tensor object handle. 
+MS_API MSTensorHandle MSTensorCreate(const char *name, MSDataType type, const int64_t *shape, size_t shape_num, + const void *data, size_t data_len); + +/// \brief Destroy the tensor object. +/// +/// \param[in] tensor Tensor object handle address. +MS_API void MSTensorDestroy(MSTensorHandle *tensor); + +/// \brief Obtain a deep copy of the tensor. +/// +/// \param[in] tensor Tensor object handle. +/// +/// \return Tensor object handle. +MS_API MSTensorHandle MSTensorClone(MSTensorHandle tensor); + +/// \brief Set the name for the tensor. +/// +/// \param[in] tensor Tensor object handle. +/// \param[in] name The name of the tensor. +MS_API void MSTensorSetName(MSTensorHandle tensor, const char *name); + +/// \brief Obtain the name of the tensor. +/// +/// \param[in] tensor Tensor object handle. +/// +/// \return The name of the tensor. +MS_API const char *MSTensorGetName(const MSTensorHandle tensor); + +/// \brief Set the data type for the tensor. +/// +/// \param[in] tensor Tensor object handle. +/// \param[in] type The data type of the tensor. +MS_API void MSTensorSetDataType(MSTensorHandle tensor, MSDataType type); + +/// \brief Obtain the data type of the tensor. +/// +/// \param[in] tensor Tensor object handle. +/// +/// \return The date type of the tensor. +MS_API MSDataType MSTensorGetDataType(const MSTensorHandle tensor); + +/// \brief Set the shape for the tensor. +/// +/// \param[in] tensor Tensor object handle. +/// \param[in] shape The shape array. +/// \param[in] shape_num Dimension of shape. +MS_API void MSTensorSetShape(MSTensorHandle tensor, const int64_t *shape, size_t shape_num); + +/// \brief Obtain the shape of the tensor. +/// +/// \param[in] tensor Tensor object handle. +/// \param[out] shape_num Dimension of shape. +/// +/// \return The shape array of the tensor. +MS_API const int64_t *MSTensorGetShape(const MSTensorHandle tensor, size_t *shape_num); + +/// \brief Set the format for the tensor. 
+///
+/// \param[in] tensor Tensor object handle.
+/// \param[in] format The format of the tensor.
+MS_API void MSTensorSetFormat(MSTensorHandle tensor, MSFormat format);
+
+/// \brief Obtain the format of the tensor.
+///
+/// \param[in] tensor Tensor object handle.
+///
+/// \return The format of the tensor.
+MS_API MSFormat MSTensorGetFormat(const MSTensorHandle tensor);
+
+/// \brief Set the data for the tensor.
+///
+/// \param[in] tensor Tensor object handle.
+/// \param[in] data A pointer to the data of the tensor.
+MS_API void MSTensorSetData(MSTensorHandle tensor, void *data);
+
+/// \brief Obtain the data pointer of the tensor.
+///
+/// \param[in] tensor Tensor object handle.
+///
+/// \return The data pointer of the tensor.
+MS_API const void *MSTensorGetData(const MSTensorHandle tensor);
+
+/// \brief Obtain the mutable data pointer of the tensor. If the internal data is empty, it will allocate memory.
+///
+/// \param[in] tensor Tensor object handle.
+///
+/// \return The data pointer of the tensor.
+MS_API void *MSTensorGetMutableData(const MSTensorHandle tensor);
+
+/// \brief Obtain the element number of the tensor.
+///
+/// \param[in] tensor Tensor object handle.
+///
+/// \return The element number of the tensor.
+MS_API int64_t MSTensorGetElementNum(const MSTensorHandle tensor);
+
+/// \brief Obtain the data size of the tensor.
+///
+/// \param[in] tensor Tensor object handle.
+///
+/// \return The data size of the tensor.
+MS_API size_t MSTensorGetDataSize(const MSTensorHandle tensor); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_INCLUDE_C_API_TENSOE_C_H diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/types_c.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/types_c.h new file mode 100644 index 0000000..342e24b --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/c_api/types_c.h @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef MINDSPORE_INCLUDE_C_API_TYPES_C_H
+#define MINDSPORE_INCLUDE_C_API_TYPES_C_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef MS_API
+#ifdef _WIN32
+#define MS_API __declspec(dllexport)
+#else
+#define MS_API __attribute__((visibility("default")))
+#endif
+#endif
+
+typedef enum MSModelType {
+  kMSModelTypeMindIR = 0,
+  // insert new data type here
+  kMSModelTypeInvalid = 0xFFFFFFFF
+} MSModelType;
+
+typedef enum MSDeviceType {
+  kMSDeviceTypeCPU = 0,
+  kMSDeviceTypeGPU,
+  kMSDeviceTypeKirinNPU,
+  // add new type here
+  kMSDeviceTypeInvalid = 100,
+} MSDeviceType;
+
+#ifdef __cplusplus
+}
+#endif
+#endif // MINDSPORE_INCLUDE_C_API_TYPES_C_H
diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/context.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/context.h
new file mode 100644
index 0000000..c9a2139
--- /dev/null
+++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/context.h
@@ -0,0 +1,78 @@
+/**
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_INCLUDE_CONTEXT_H_
+#define MINDSPORE_LITE_INCLUDE_CONTEXT_H_
+#include <string>
+#include "include/ms_tensor.h"
+#include "include/lite_utils.h"
+#include "include/lite_types.h"
+
+namespace mindspore::lite {
+/// \brief CpuDeviceInfo defined for CPU's configuration information.
+typedef struct CpuDeviceInfo {
+  bool enable_float16_ = false; /**< prior enable float16 inference */
+  CpuBindMode cpu_bind_mode_ = MID_CPU;
+} CpuDeviceInfo;
+
+/// \brief GpuDeviceInfo defined for GPU's configuration information.
+typedef struct GpuDeviceInfo {
+  bool enable_float16_ = false; /**< prior enable float16 inference */
+  uint32_t gpu_device_id_ = 0;
+} GpuDeviceInfo;
+
+/// \brief NpuDeviceInfo defined for NPU's configuration information.
+typedef struct NpuDeviceInfo {
+  int frequency_ = 3; /**< npu frequency inference, low 1, medium 2, high 3, extreme 4, other values will be set to 3 */
+} NpuDeviceInfo;
+
+/// \brief Ascend310DeviceInfo defined for Ascend's configuration information.
+typedef struct AscendDeviceInfo {
+  uint32_t device_id_;
+} AscendDeviceInfo;
+/// \brief DeviceInfo defined for backend's configuration information.
+struct DeviceInfo {
+  CpuDeviceInfo cpu_device_info_;
+  GpuDeviceInfo gpu_device_info_;
+  NpuDeviceInfo npu_device_info_;
+  AscendDeviceInfo ascend310_device_info_;
+};
+
+/// \brief DeviceContext defined for holding backend's configuration information.
+struct DeviceContext {
+  DeviceType device_type_ = DT_CPU;
+  DeviceInfo device_info_;
+  std::string provider_{};
+  std::string provider_device_{};
+  AllocatorPtr allocator_ = nullptr;
+};
+
+/// \brief Context defined for holding environment variables during runtime.
+struct Context {
+  String vendor_name_;
+  int thread_num_ = 2; /**< thread number config for thread pool */
+  bool enable_parallel_ = false;
+  Vector<int> affinity_core_list_; /**< explicitly specify the core to be bound.
+                                   priority use affinity core list */
+  AllocatorPtr allocator = nullptr;
+#ifndef NOT_USE_STL
+  DeviceContextVector device_list_ = {{DT_CPU, {false, MID_CPU}}};
+#else
+  DeviceContextVector device_list_;
+#endif // NOT_USE_STL
+  DelegatePtr delegate = nullptr;
+};
+} // namespace mindspore::lite
+#endif // MINDSPORE_LITE_INCLUDE_CONTEXT_H_
diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/constants.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/constants.h
new file mode 100644
index 0000000..f73d9d1
--- /dev/null
+++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/constants.h
@@ -0,0 +1,264 @@
+/**
+ * Copyright 2019-2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_CONSTANTS_H_
+#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_CONSTANTS_H_
+
+#include <cstdint>
+#include <limits>
+#include <random>
+
+namespace mindspore {
+namespace dataset {
+// Various type defines for convenience
+using uchar = unsigned char;
+using dsize_t = int64_t;
+
+/// \brief The color conversion code
+enum class ConvertMode {
+  COLOR_BGR2BGRA = 0,              ///< Add alpha channel to BGR image.
+  COLOR_RGB2RGBA = COLOR_BGR2BGRA, ///< Add alpha channel to RGB image.
+  COLOR_BGRA2BGR = 1,              ///< Remove alpha channel to BGR image.
+ COLOR_RGBA2RGB = COLOR_BGRA2BGR, ///< Remove alpha channel to RGB image. + COLOR_BGR2RGBA = 2, ///< Convert BGR image to RGBA image. + COLOR_RGB2BGRA = COLOR_BGR2RGBA, ///< Convert RGB image to BGRA image. + COLOR_RGBA2BGR = 3, ///< Convert RGBA image to BGR image. + COLOR_BGRA2RGB = COLOR_RGBA2BGR, ///< Convert BGRA image to RGB image. + COLOR_BGR2RGB = 4, ///< Convert BGR image to RGB image. + COLOR_RGB2BGR = COLOR_BGR2RGB, ///< Convert RGB image to BGR image. + COLOR_BGRA2RGBA = 5, ///< Convert BGRA image to RGBA image. + COLOR_RGBA2BGRA = COLOR_BGRA2RGBA, ///< Convert RGBA image to BGRA image. + COLOR_BGR2GRAY = 6, ///< Convert BGR image to GRAY image. + COLOR_RGB2GRAY = 7, ///< Convert RGB image to GRAY image. + COLOR_GRAY2BGR = 8, ///< Convert GRAY image to BGR image. + COLOR_GRAY2RGB = COLOR_GRAY2BGR, ///< Convert GRAY image to RGB image. + COLOR_GRAY2BGRA = 9, ///< Convert GRAY image to BGRA image. + COLOR_GRAY2RGBA = COLOR_GRAY2BGRA, ///< Convert GRAY image to RGBA image. + COLOR_BGRA2GRAY = 10, ///< Convert BGRA image to GRAY image. + COLOR_RGBA2GRAY = 11 ///< Convert RGBA image to GRAY image. +}; + +/// \brief Target devices to perform map operation. +enum class MapTargetDevice { + kCpu, ///< CPU Device. + kGpu, ///< Gpu Device. + kAscend310 ///< Ascend310 Device. +}; + +/// \brief The initial type of tensor implementation. +enum class TensorImpl { + kNone, ///< None type tensor. + kFlexible, ///< Flexible type tensor, can be converted to any type. + kCv, ///< CV type tensor. + kNP ///< Numpy type tensor. +}; + +/// \brief The mode for shuffling data. +enum class ShuffleMode { + kFalse = 0, ///< No shuffling is performed. + kFiles = 1, ///< Shuffle files only. + kGlobal = 2, ///< Shuffle both the files and samples. + kInfile = 3 ///< Shuffle data within each file. +}; + +/// \brief Possible scale for input audio. +enum class ScaleType { + kMagnitude = 0, ///< Audio scale is magnitude. + kPower = 1, ///< Audio scale is power. 
+}; + +/// \brief The scale for gain type. +enum class GainType { + kAmplitude = 0, ///< Audio gain type is amplitude. + kPower = 1, ///< Audio gain type is power. + kDb = 2, ///< Audio gain type is db. +}; + +/// \brief The method of padding. +enum class BorderType { + kConstant = 0, ///< Fill the border with constant values. + kEdge = 1, ///< Fill the border with the last value on the edge. + kReflect = 2, ///< Reflect the values on the edge omitting the last value of edge. + kSymmetric = 3 ///< Reflect the values on the edge repeating the last value of edge. +}; + +/// \brief Possible fix rotation angle for Rotate Op. +enum class FixRotationAngle { + k0Degree = 1, ///< Rotate 0 degree. + k0DegreeAndMirror = 2, ///< Rotate 0 degree and apply horizontal flip. + k180Degree = 3, ///< Rotate 180 degree. + k180DegreeAndMirror = 4, ///< Rotate 180 degree and apply horizontal flip. + k90DegreeAndMirror = 5, ///< Rotate 90 degree and apply horizontal flip. + k90Degree = 6, ///< Rotate 90 degree. + k270DegreeAndMirror = 7, ///< Rotate 270 degree and apply horizontal flip. + k270Degree = 8, ///< Rotate 270 degree. +}; + +/// \brief Possible options for Image format types in a batch. +enum class ImageBatchFormat { + kNHWC = 0, ///< Indicate the input batch is of NHWC format. + kNCHW = 1 ///< Indicate the input batch is of NCHW format. +}; + +/// \brief Possible options for Image format types. +enum class ImageFormat { + HWC = 0, ///< Indicate the input batch is of NHWC format + CHW = 1, ///< Indicate the input batch is of NHWC format + HW = 2 ///< Indicate the input batch is of NHWC format +}; + +/// \brief Possible options for interpolation method. +enum class InterpolationMode { + kLinear = 0, ///< Interpolation method is linear interpolation. + kNearestNeighbour = 1, ///< Interpolation method is nearest-neighbor interpolation. + kCubic = 2, ///< Interpolation method is bicubic interpolation. + kArea = 3, ///< Interpolation method is pixel area interpolation. 
+ kCubicPil = 4 ///< Interpolation method is bicubic interpolation like implemented in pillow. +}; + +/// \brief Possible tokenize modes for JiebaTokenizer. +enum class JiebaMode { + kMix = 0, ///< Tokenize with MPSegment algorithm. + kMp = 1, ///< Tokenize with Hiddel Markov Model Segment algorithm. + kHmm = 2 ///< Tokenize with a mix of MPSegment and HMMSegment algorithm. +}; + +/// \brief Possible options for SPieceTokenizerOutType. +enum class SPieceTokenizerOutType { + kString = 0, ///< Output of sentencepiece tokenizer is string type. + kInt = 1 ///< Output of sentencepiece tokenizer is int type. +}; + +/// \brief Possible options for SPieceTokenizerLoadType. +enum class SPieceTokenizerLoadType { + kFile = 0, ///< Load sentencepiece tokenizer from local sentencepiece vocab file. + kModel = 1 ///< Load sentencepiece tokenizer from sentencepiece vocab instance. +}; + +/// \brief Type options for SentencePiece Model. +enum class SentencePieceModel { + kUnigram = 0, ///< Based on Unigram model. + kBpe = 1, ///< Based on Byte Pair Encoding (BPE) model. + kChar = 2, ///< Based on Char model. + kWord = 3 ///< Based on Word model. +}; + +/// \brief Possible options to specify a specific normalize mode. +enum class NormalizeForm { + kNone = 0, ///< Keep the input string tensor unchanged. + kNfc, ///< Normalize with Normalization Form C. + kNfkc, ///< Normalize with Normalization Form KC. + kNfd, ///< Normalize with Normalization Form D. + kNfkd, ///< Normalize with Normalization Form KD. +}; + +/// \brief Possible options for Mask. +enum class RelationalOp { + kEqual = 0, ///< equal to `==` + kNotEqual, ///< equal to `!=` + kLess, ///< equal to `<` + kLessEqual, ///< equal to `<=` + kGreater, ///< equal to `>` + kGreaterEqual, ///< equal to `>=` +}; + +/// \brief Possible modes for slice patches. +enum class SliceMode { + kPad = 0, ///< Pad some pixels before slice to patches. + kDrop = 1, ///< Drop remainder pixels before slice to patches. 
+};
+
+/// \brief Possible options for SamplingStrategy.
+enum class SamplingStrategy {
+  kRandom = 0,    ///< Random sampling with replacement.
+  kEdgeWeight = 1 ///< Sampling with edge weight as probability.
+};
+
+/// \brief Possible values for output format in get all neighbors function of gnn dataset
+enum class OutputFormat {
+  kNormal = 0, ///< Normal format.
+  kCoo = 1,    ///< COO format.
+  kCsr = 2     ///< CSR format.
+};
+
+/// \brief Possible options for fade shape.
+enum class FadeShape {
+  kLinear = 0,      ///< Fade shape is linear mode.
+  kExponential = 1, ///< Fade shape is exponential mode.
+  kLogarithmic = 2, ///< Fade shape is logarithmic mode.
+  kQuarterSine = 3, ///< Fade shape is quarter_sine mode.
+  kHalfSine = 4,    ///< Fade shape is half_sine mode.
+};
+
+/// \brief Convenience function to check bitmask for a 32bit int
+/// \param[in] bits a 32bit int to be tested
+/// \param[in] bitMask a 32bit int representing bit mask
+/// \return bool Result for the check
+inline bool BitTest(uint32_t bits, uint32_t bitMask) { return (bits & bitMask) == bitMask; }
+
+/// \brief Convenience function to set bitmask for a 32bit int
+/// \param[in] bits a 32bit int to deal with
+/// \param[in] bitMask a 32bit int representing bit mask
+inline void BitSet(uint32_t *bits, uint32_t bitMask) {
+  if (bits == nullptr) {
+    return;
+  }
+  *bits |= bitMask;
+}
+
+/// \brief Convenience function to clear bitmask from a 32bit int
+/// \param[in] bits a 32bit int to deal with
+/// \param[in] bitMask a 32bit int representing bit mask
+inline void BitClear(uint32_t *bits, uint32_t bitMask) {
+  if (bits == nullptr) {
+    return;
+  }
+  *bits &= (~bitMask);
+}
+
+constexpr int64_t kDeMaxDim = std::numeric_limits<int64_t>::max();
+constexpr int32_t kDeMaxRank = std::numeric_limits<int32_t>::max();
+constexpr int64_t kDeMaxFreq = std::numeric_limits<int64_t>::max(); // 9223372036854775807 or 2^(64-1)
+constexpr int64_t kDeMaxTopk = std::numeric_limits<int64_t>::max();
+
+constexpr uint32_t kCfgRowsPerBuffer = 1;
+constexpr
uint32_t kCfgParallelWorkers = 8; +constexpr uint32_t kCfgWorkerConnectorSize = 16; +constexpr uint32_t kCfgOpConnectorSize = 16; +constexpr uint32_t kCfgSendingBatch = 0; +constexpr int32_t kCfgDefaultRankId = -1; +constexpr uint32_t kCfgDefaultSeed = std::mt19937::default_seed; +constexpr uint32_t kCfgMonitorSamplingInterval = 1000; // timeout value for sampling interval in milliseconds +constexpr uint32_t kCfgCallbackTimeout = 60; // timeout value for callback in seconds +constexpr int32_t kCfgDefaultCachePort = 50052; +constexpr char kCfgDefaultCacheHost[] = "127.0.0.1"; +constexpr int32_t kDftPrefetchSize = 20; +constexpr int32_t kDftNumConnections = 12; +constexpr int32_t kDftAutoNumWorkers = false; +constexpr char kDftMetaColumnPrefix[] = "_meta-"; +constexpr int32_t kDecimal = 10; // used in strtol() to convert a string value according to decimal numeral system +constexpr int32_t kMinLegalPort = 1025; +constexpr int32_t kMaxLegalPort = 65535; + +// Invalid OpenCV type should not be from 0 to 7 (opencv4/opencv2/core/hal/interface.h) +constexpr uint8_t kCVInvalidType = 255; + +using connection_id_type = uint64_t; +using session_id_type = uint32_t; +using row_id_type = int64_t; +} // namespace dataset +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_CONSTANTS_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/data_helper.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/data_helper.h new file mode 100644 index 0000000..6111604 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/data_helper.h @@ -0,0 +1,460 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATA_HELPER_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATA_HELPER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "include/api/dual_abi_helper.h" +#include "include/api/status.h" + +namespace mindspore { +namespace dataset { + +/// \brief Simple class to do data manipulation, contains helper function to update json files in dataset +class DataHelper { + public: + /// \brief constructor + DataHelper() {} + + /// \brief Destructor + ~DataHelper() = default; + + /// \brief Create an Album dataset while taking in a path to a image folder + /// Creates the output directory if doesn't exist + /// \param[in] in_dir Image folder directory that takes in images + /// \param[in] out_dir Directory containing output json files + /// \return Status The status code returned + Status CreateAlbum(const std::string &in_dir, const std::string &out_dir) { + return CreateAlbumIF(StringToChar(in_dir), StringToChar(out_dir)); + } + + /// \brief Update a json file field with a vector of string values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional input for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return 
UpdateArrayIF(StringToChar(in_file), StringToChar(key), VectorStringToChar(value), StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of bool values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of int8 values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of uint8 values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of int16 values + /// \param 
in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of uint16 values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of int32 values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of uint32 values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, 
will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of int64 values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of uint64 values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of float values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const 
std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a vector of double values + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value array to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateArray(const std::string &in_file, const std::string &key, const std::vector &value, + const std::string &out_file = "") { + return UpdateArrayIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a string value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const std::string &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), StringToChar(value), StringToChar(out_file)); + } + + /// \brief Update a json file field with a bool value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const bool &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with an int8 value + /// \param in_file The input 
file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const int8_t &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with an uint8 value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const uint8_t &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with an int16 value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const int16_t &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with an uint16 value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned 
+ Status UpdateValue(const std::string &in_file, const std::string &key, const uint16_t &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with an int32 value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const int32_t &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with an uint32 value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const uint32_t &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with an int64 value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const int64_t &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief 
Update a json file field with an uint64 value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const uint64_t &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a float value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const float &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Update a json file field with a double value + /// \param in_file The input file name to read in + /// \param key Key of field to write to + /// \param value Value to write to file + /// \param out_file Optional parameter for output file path, will write to input file if not specified + /// \return Status The status code returned + Status UpdateValue(const std::string &in_file, const std::string &key, const double &value, + const std::string &out_file = "") { + return UpdateValueIF(StringToChar(in_file), StringToChar(key), value, StringToChar(out_file)); + } + + /// \brief Template function to write tensor to file + /// \param[in] in_file File to write to + /// \param[in] data Array of type T values + /// \return Status The status code returned + template + Status WriteBinFile(const std::string &in_file, const std::vector 
&data) { + try { + std::ofstream o(in_file, std::ios::binary | std::ios::out); + if (!o.is_open()) { + return Status(kMDUnexpectedError, "Error opening Bin file to write"); + } + size_t length = data.size(); + if (length == 0) { + return Status(kMDUnexpectedError, "size of data is 0 when written into file."); + } + o.write(reinterpret_cast(&data[0]), std::streamsize(length * sizeof(T))); + o.close(); + } + // Catch any exception and convert to Status return code + catch (const std::exception &err) { + return Status(kMDUnexpectedError, "Write bin file failed "); + } + return Status::OK(); + } + + /// \brief Write pointer to bin, use pointer to avoid memcpy + /// \note The value of `length`` must be equal to the length of `data` + /// \param[in] in_file File name to write to + /// \param[in] data Pointer to data + /// \param[in] length Length of values to write from pointer + /// \return Status The status code returned + template + Status WriteBinFile(const std::string &in_file, T *data, size_t length) { + try { + if (data == nullptr) { + return Status(kMDUnexpectedError, "input data can not be null"); + } + std::ofstream o(in_file, std::ios::binary | std::ios::out); + if (!o.is_open()) { + return Status(kMDUnexpectedError, "Error opening Bin file to write"); + } + o.write(reinterpret_cast(data), std::streamsize(length * sizeof(T))); + if (!o.good()) { + return Status(kMDUnexpectedError, "Error writing Bin file"); + } + o.close(); + } + // Catch any exception and convert to Status return code + catch (const std::exception &err) { + return Status(kMDUnexpectedError, "Write bin file failed"); + } + return Status::OK(); + } + + /// \brief Helper function to copy content of a tensor to buffer + /// \note This function iterates over the tensor in bytes, since + /// \param[in] tensor_addr The memory held by a tensor + /// \param[in] tensor_size The amount of data in bytes in tensor_addr, e.g. 
tensor->SizeInBytes() + /// \param[out] addr The address to copy tensor data to + /// \param[in] buffer_size The buffer size of addr + /// \return The size of the tensor (bytes copied + size_t DumpData(const unsigned char *tensor_addr, const size_t &tensor_size, void *addr, const size_t &buffer_size); + + /// \brief Helper function to delete key in json file + /// \note This function will return okay even if key not found + /// \param[in] in_file Json file to remove key from + /// \param[in] key The key to remove + /// \return Status The status code returned + Status RemoveKey(const std::string &in_file, const std::string &key, const std::string &out_file = "") { + return RemoveKeyIF(StringToChar(in_file), StringToChar(key), StringToChar(out_file)); + } + + /// \brief A print method typically used for debugging + /// \param out - The output stream to write output to + void Print(std::ostream &out) const; + + /// \brief << Stream output operator overload + /// \note This allows you to write the debug print info using stream operators + /// \param out Reference to the output stream being overloaded + /// \param dh Reference to the DataSchema to display + /// \return The output stream must be returned + friend std::ostream &operator<<(std::ostream &out, const DataHelper &dh) { + dh.Print(out); + return out; + } + + private: + // Helper function for dual ABI support + Status CreateAlbumIF(const std::vector &in_dir, const std::vector &out_dir); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, + const std::vector> &value, const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, const std::vector &value, + const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, const std::vector &value, + const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, + const std::vector &value, const std::vector &out_file); + 
Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, + const std::vector &value, const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, + const std::vector &value, const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, + const std::vector &value, const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, + const std::vector &value, const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, + const std::vector &value, const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, + const std::vector &value, const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, const std::vector &value, + const std::vector &out_file); + Status UpdateArrayIF(const std::vector &in_file, const std::vector &key, const std::vector &value, + const std::vector &out_file); + + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const std::vector &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const bool &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const int8_t &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const uint8_t &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const int16_t &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const uint16_t &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const int32_t &value, + const std::vector &out_file); + Status UpdateValueIF(const 
std::vector &in_file, const std::vector &key, const uint32_t &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const int64_t &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const uint64_t &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const float &value, + const std::vector &out_file); + Status UpdateValueIF(const std::vector &in_file, const std::vector &key, const double &value, + const std::vector &out_file); + Status RemoveKeyIF(const std::vector &in_file, const std::vector &key, const std::vector &out_file); +}; +} // namespace dataset +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATA_HELPER_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/datasets.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/datasets.h new file mode 100644 index 0000000..8905ebf --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/datasets.h @@ -0,0 +1,574 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATASETS_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATASETS_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "include/api/dual_abi_helper.h" +#include "include/api/types.h" +#include "include/dataset/iterator.h" +#include "include/dataset/samplers.h" +#include "include/dataset/transforms.h" + +namespace mindspore { +namespace dataset { + +class Tensor; +class TensorShape; +class TreeAdapter; +class TreeAdapterLite; +class TreeGetters; + +class DatasetCache; +class DatasetNode; + +class Iterator; + +class TensorOperation; +class SchemaObj; +class SamplerObj; + +// Dataset classes (in alphabetical order) +class BatchDataset; +class MapDataset; +class ProjectDataset; +class ShuffleDataset; +class DSCallback; + +/// \class Dataset datasets.h +/// \brief A base class to represent a dataset in the data pipeline. +class Dataset : public std::enable_shared_from_this { + public: + // need friend class so they can access the children_ field + friend class Iterator; + friend class TransferNode; + + /// \brief Constructor + Dataset(); + + /// \brief Destructor + ~Dataset() = default; + + /// \brief Gets the dataset size + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \return dataset size. If failed, return -1 + int64_t GetDatasetSize(bool estimate = false); + + /// \brief Gets the output type + /// \return a vector of DataType. If failed, return an empty vector + std::vector GetOutputTypes(); + + /// \brief Gets the output shape + /// \return a vector of TensorShape. 
If failed, return an empty vector + std::vector> GetOutputShapes(); + + /// \brief Gets the batch size + /// \return int64_t + int64_t GetBatchSize(); + + /// \brief Gets the repeat count + /// \return int64_t + int64_t GetRepeatCount(); + + /// \brief Gets the number of classes + /// \return number of classes. If failed, return -1 + int64_t GetNumClasses(); + + /// \brief Gets the column names + /// \return Names of the columns. If failed, return an empty vector + std::vector GetColumnNames() { return VectorCharToString(GetColumnNamesCharIF()); } + + /// \brief Gets the class indexing + /// \return a map of ClassIndexing. If failed, return an empty map + std::vector>> GetClassIndexing() { + return ClassIndexCharToString(GetClassIndexingCharIF()); + } + + /// \brief Setter function for runtime number of workers + /// \param[in] num_workers The number of threads in this operator + /// \return Shared pointer to the original object + std::shared_ptr SetNumWorkers(int32_t num_workers); + + /// \brief Function to create an PullBasedIterator over the Dataset + /// \param[in] columns List of columns to be used to specify the order of columns + /// \return Shared pointer to the Iterator + std::shared_ptr CreatePullBasedIterator(std::vector> columns = {}); + + /// \brief Function to create an Iterator over the Dataset pipeline + /// \param[in] columns List of columns to be used to specify the order of columns + /// \param[in] num_epochs Number of epochs to run through the pipeline, default -1 which means infinite epochs. + /// An empty row is returned at the end of each epoch + /// \return Shared pointer to the Iterator + std::shared_ptr CreateIterator(std::vector columns = {}, int32_t num_epochs = -1) { + return CreateIteratorCharIF(VectorStringToChar(columns), num_epochs); + } + + /// \brief Function to transfer data through a device. + /// \notes If device is Ascend, features of data will be transferred one by one. 
The limitation + /// of data transmission per time is 256M. + /// \param[in] queue_name Channel name (default="", create new unique name). + /// \param[in] device_type Type of device (default="", get from MSContext). + /// \param[in] device_id id of device (default=1, get from MSContext). + /// \param[in] num_epochs Number of epochs (default=-1, infinite epochs). + /// \param[in] send_epoch_end Whether to send end of sequence to device or not (default=true). + /// \param[in] total_batches Number of batches to be sent to the device (default=0, all data). + /// \param[in] create_data_info_queue Whether to create queue which stores types and shapes + /// of data or not(default=false). + /// \return Returns true if no error encountered else false. + bool DeviceQueue(std::string queue_name = "", std::string device_type = "", int32_t device_id = 0, + int32_t num_epochs = -1, bool send_epoch_end = true, int32_t total_batches = 0, + bool create_data_info_queue = false) { + return DeviceQueueCharIF(StringToChar(queue_name), StringToChar(device_type), device_id, num_epochs, send_epoch_end, + total_batches, create_data_info_queue); + } + + /// \brief Function to create a Saver to save the dynamic data processed by the dataset pipeline + /// \note Usage restrictions: + /// 1. Supported dataset formats: 'mindrecord' only + /// 2. To save the samples in order, set dataset's shuffle to false and num_files to 1. + /// 3. Before calling the function, do not use batch operator, repeat operator or data augmentation operators + /// with random attribute in map operator. + /// 4. Mindrecord does not support bool, uint64, multi-dimensional uint8(drop dimension) nor + /// multi-dimensional string. 
+ /// \param[in] file_name Path to dataset file + /// \param[in] num_files Number of dataset files (default=1) + /// \param[in] file_type Dataset format (default="mindrecord") + /// \return Returns true if no error encountered else false + bool Save(std::string dataset_path, int32_t num_files = 1, std::string dataset_type = "mindrecord") { + return SaveCharIF(StringToChar(dataset_path), num_files, StringToChar(dataset_type)); + } + + /// \brief Function to create a BatchDataset + /// \notes Combines batch_size number of consecutive rows into batches + /// \param[in] batch_size The number of rows each batch is created with + /// \param[in] drop_remainder Determines whether or not to drop the last possibly incomplete + /// batch. If true, and if there are less than batch_size rows + /// available to make the last batch, then those rows will + /// be dropped and not propagated to the next node + /// \return Shared pointer to the current BatchDataset + std::shared_ptr Batch(int32_t batch_size, bool drop_remainder = false); + + /// \brief Function to create a MapDataset + /// \notes Applies each operation in operations to this dataset + /// \param[in] operations Vector of raw pointers to TensorTransform objects to be applied on the dataset. Operations + /// are applied in the order they appear in this list + /// \param[in] input_columns Vector of the names of the columns that will be passed to the first + /// operation as input. The size of this list must match the number of + /// input columns expected by the first operator. The default input_columns + /// is the first column + /// \param[in] output_columns Vector of names assigned to the columns outputted by the last operation + /// This parameter is mandatory if len(input_columns) != len(output_columns) + /// The size of this list must match the number of output columns of the + /// last operation. 
The default output_columns will have the same + /// name as the input columns, i.e., the columns will be replaced + /// \param[in] project_columns A list of column names to project + /// \param[in] cache Tensor cache to use. (default=nullptr which means no cache is used). + /// \return Shared pointer to the current MapDataset + std::shared_ptr Map(std::vector operations, + const std::vector &input_columns = {}, + const std::vector &output_columns = {}, + const std::vector &project_columns = {}, + const std::shared_ptr &cache = nullptr, + std::vector> callbacks = {}) { + std::vector> transform_ops; + (void)std::transform( + operations.begin(), operations.end(), std::back_inserter(transform_ops), + [](TensorTransform *op) -> std::shared_ptr { return op != nullptr ? op->Parse() : nullptr; }); + return std::make_shared(shared_from_this(), transform_ops, VectorStringToChar(input_columns), + VectorStringToChar(output_columns), VectorStringToChar(project_columns), cache, + callbacks); + } + + /// \brief Function to create a MapDataset + /// \notes Applies each operation in operations to this dataset + /// \param[in] operations Vector of shared pointers to TensorTransform objects to be applied on the dataset. + /// Operations are applied in the order they appear in this list + /// \param[in] input_columns Vector of the names of the columns that will be passed to the first + /// operation as input. The size of this list must match the number of + /// input columns expected by the first operator. The default input_columns + /// is the first column + /// \param[in] output_columns Vector of names assigned to the columns outputted by the last operation + /// This parameter is mandatory if len(input_columns) != len(output_columns) + /// The size of this list must match the number of output columns of the + /// last operation. 
The default output_columns will have the same + /// name as the input columns, i.e., the columns will be replaced + /// \param[in] project_columns A list of column names to project + /// \param[in] cache Tensor cache to use. (default=nullptr which means no cache is used). + /// \return Shared pointer to the current MapDataset + std::shared_ptr Map(std::vector> operations, + const std::vector &input_columns = {}, + const std::vector &output_columns = {}, + const std::vector &project_columns = {}, + const std::shared_ptr &cache = nullptr, + std::vector> callbacks = {}) { + std::vector> transform_ops; + (void)std::transform(operations.begin(), operations.end(), std::back_inserter(transform_ops), + [](std::shared_ptr op) -> std::shared_ptr { + return op != nullptr ? op->Parse() : nullptr; + }); + return std::make_shared(shared_from_this(), transform_ops, VectorStringToChar(input_columns), + VectorStringToChar(output_columns), VectorStringToChar(project_columns), cache, + callbacks); + } + + /// \brief Function to create a MapDataset + /// \notes Applies each operation in operations to this dataset + /// \param[in] operations Vector of TensorTransform objects to be applied on the dataset. Operations are applied in + /// the order they appear in this list + /// \param[in] input_columns Vector of the names of the columns that will be passed to the first + /// operation as input. The size of this list must match the number of + /// input columns expected by the first operator. The default input_columns + /// is the first column + /// \param[in] output_columns Vector of names assigned to the columns outputted by the last operation + /// This parameter is mandatory if len(input_columns) != len(output_columns) + /// The size of this list must match the number of output columns of the + /// last operation. 
The default output_columns will have the same + /// name as the input columns, i.e., the columns will be replaced + /// \param[in] project_columns A list of column names to project + /// \param[in] cache Tensor cache to use. (default=nullptr which means no cache is used). + /// \return Shared pointer to the current MapDataset + std::shared_ptr Map(const std::vector> operations, + const std::vector &input_columns = {}, + const std::vector &output_columns = {}, + const std::vector &project_columns = {}, + const std::shared_ptr &cache = nullptr, + std::vector> callbacks = {}) { + std::vector> transform_ops; + (void)std::transform(operations.begin(), operations.end(), std::back_inserter(transform_ops), + [](TensorTransform &op) -> std::shared_ptr { return op.Parse(); }); + return std::make_shared(shared_from_this(), transform_ops, VectorStringToChar(input_columns), + VectorStringToChar(output_columns), VectorStringToChar(project_columns), cache, + callbacks); + } + + /// \brief Function to create a Project Dataset + /// \notes Applies project to the dataset + /// \param[in] columns The name of columns to project + /// \return Shared pointer to the current Dataset + std::shared_ptr Project(const std::vector &columns) { + return std::make_shared(shared_from_this(), VectorStringToChar(columns)); + } + + /// \brief Function to create a Shuffle Dataset + /// \notes Randomly shuffles the rows of this dataset + /// \param[in] buffer_size The size of the buffer (must be larger than 1) for shuffling + /// \return Shared pointer to the current ShuffleDataset + std::shared_ptr Shuffle(int32_t buffer_size) { + return std::make_shared(shared_from_this(), buffer_size); + } + + std::shared_ptr IRNode() { return ir_node_; } + + protected: + std::shared_ptr tree_getters_; + std::shared_ptr ir_node_; + + private: + // Char interface(CharIF) of GetColumnNames + std::vector> GetColumnNamesCharIF(); + + // Char interface(CharIF) of GetClassIndexing + std::vector, std::vector>> 
GetClassIndexingCharIF(); + + // Char interface(CharIF) of CreateIterator + std::shared_ptr CreateIteratorCharIF(std::vector> columns, int32_t num_epochs); + + // Char interface(CharIF) of DeviceQueue + bool DeviceQueueCharIF(const std::vector &queue_name, const std::vector &device_type, int32_t device_id, + int32_t num_epochs, bool send_epoch_end, int32_t total_batches, bool create_data_info_queue); + + // Char interface(CharIF) of Save + bool SaveCharIF(const std::vector &dataset_path, int32_t num_files, const std::vector &dataset_type); +}; + +class SchemaObj { + public: + /// \brief Constructor + explicit SchemaObj(const std::string &schema_file = "") : SchemaObj(StringToChar(schema_file)) {} + + /// \brief Destructor + ~SchemaObj() = default; + + /// \brief SchemaObj Init function + /// \return bool true if schema initialization is successful + Status Init(); + + /// \brief Add new column to the schema with unknown shape of rank 1 + /// \param[in] name Name of the column. + /// \param[in] ms_type Data type of the column(mindspore::DataType). + /// \return Status code + Status add_column(const std::string &name, mindspore::DataType ms_type) { + return add_column_char(StringToChar(name), ms_type); + } + + /// \brief Add new column to the schema with unknown shape of rank 1 + /// \param[in] name Name of the column. + /// \param[in] ms_type Data type of the column(std::string). + /// \param[in] shape Shape of the column. + /// \return Status code + Status add_column(const std::string &name, const std::string &ms_type) { + return add_column_char(StringToChar(name), StringToChar(ms_type)); + } + + /// \brief Add new column to the schema + /// \param[in] name Name of the column. + /// \param[in] ms_type Data type of the column(mindspore::DataType). + /// \param[in] shape Shape of the column. 
+ /// \return Status code + Status add_column(const std::string &name, mindspore::DataType ms_type, const std::vector &shape) { + return add_column_char(StringToChar(name), ms_type, shape); + } + + /// \brief Add new column to the schema + /// \param[in] name Name of the column. + /// \param[in] ms_type Data type of the column(std::string). + /// \param[in] shape Shape of the column. + /// \return Status code + Status add_column(const std::string &name, const std::string &ms_type, const std::vector &shape) { + return add_column_char(StringToChar(name), StringToChar(ms_type), shape); + } + + /// \brief Get a JSON string of the schema + /// \return JSON string of the schema + std::string to_json() { return CharToString(to_json_char()); } + + /// \brief Get a JSON string of the schema + std::string to_string() { return to_json(); } + + /// \brief Set a new value to dataset_type + void set_dataset_type(std::string dataset_type); + + /// \brief Set a new value to num_rows + void set_num_rows(int32_t num_rows); + + /// \brief Get the current num_rows + int32_t get_num_rows() const; + + /// \brief Get schema file from JSON file + /// \param[in] json_string Name of JSON file to be parsed. + /// \return Status code + Status FromJSONString(const std::string &json_string) { return FromJSONStringCharIF(StringToChar(json_string)); } + + /// \brief Parse and add column information + /// \param[in] json_string Name of JSON string for column dataset attribute information, decoded from schema file. 
+ /// \return Status code + Status ParseColumnString(const std::string &json_string) { + return ParseColumnStringCharIF(StringToChar(json_string)); + } + + private: + // Char constructor of SchemaObj + explicit SchemaObj(const std::vector &schema_file); + + // Char interface of add_column + Status add_column_char(const std::vector &name, mindspore::DataType ms_type); + + Status add_column_char(const std::vector &name, const std::vector &ms_type); + + Status add_column_char(const std::vector &name, mindspore::DataType ms_type, const std::vector &shape); + + Status add_column_char(const std::vector &name, const std::vector &ms_type, + const std::vector &shape); + + // Char interface of to_json + const std::vector to_json_char(); + + // Char interface of FromJSONString + Status FromJSONStringCharIF(const std::vector &json_string); + + // Char interface of ParseColumnString + Status ParseColumnStringCharIF(const std::vector &json_string); + + struct Data; + std::shared_ptr data_; +}; + +class BatchDataset : public Dataset { + public: + BatchDataset(std::shared_ptr input, int32_t batch_size, bool drop_remainder = false); + ~BatchDataset() = default; +}; + +class MapDataset : public Dataset { + public: + MapDataset(std::shared_ptr input, std::vector> operations, + const std::vector> &input_columns, const std::vector> &output_columns, + const std::vector> &project_columns, const std::shared_ptr &cache, + std::vector> callbacks); + ~MapDataset() = default; +}; + +class ProjectDataset : public Dataset { + public: + ProjectDataset(std::shared_ptr input, const std::vector> &columns); + ~ProjectDataset() = default; +}; + +class ShuffleDataset : public Dataset { + public: + ShuffleDataset(std::shared_ptr input, int32_t buffer_size); + ~ShuffleDataset() = default; +}; + +/// \brief Function to create a SchemaObj. +/// \param[in] schema_file Path of schema file. 
+/// \note The reason for using this API is that std::string will be constrained by the +/// compiler option '_GLIBCXX_USE_CXX11_ABI' while char is free of this restriction. +/// \return Shared pointer to the current schema. +std::shared_ptr SchemaCharIF(const std::vector &schema_file); + +/// \brief Function to create a SchemaObj. +/// \param[in] schema_file Path of schema file. +/// \return Shared pointer to the current schema. +inline std::shared_ptr Schema(const std::string &schema_file = "") { + return SchemaCharIF(StringToChar(schema_file)); +} + +class AlbumDataset : public Dataset { + public: + AlbumDataset(const std::vector &dataset_dir, const std::vector &data_schema, + const std::vector> &column_names, bool decode, const std::shared_ptr &sampler, + const std::shared_ptr &cache); + AlbumDataset(const std::vector &dataset_dir, const std::vector &data_schema, + const std::vector> &column_names, bool decode, const Sampler *sampler, + const std::shared_ptr &cache); + AlbumDataset(const std::vector &dataset_dir, const std::vector &data_schema, + const std::vector> &column_names, bool decode, + const std::reference_wrapper sampler, const std::shared_ptr &cache); + ~AlbumDataset() = default; +}; + +/// \brief Function to create an AlbumDataset +/// \notes The generated dataset is specified through setting a schema +/// \param[in] dataset_dir Path to the root directory that contains the dataset +/// \param[in] data_schema Path to dataset schema file +/// \param[in] column_names Column names used to specify columns to load, if empty, will read all columns. +/// (default = {}) +/// \param[in] decode the option to decode the images in dataset (default = false) +/// \param[in] sampler Shared pointer to a sampler object used to choose samples from the dataset. If sampler is not +/// given, +/// a `RandomSampler` will be used to randomly iterate the entire dataset (default = RandomSampler()) +/// \param[in] cache Tensor cache to use. 
(default=nullptr which means no cache is used). +/// \return Shared pointer to the current Dataset +inline std::shared_ptr Album(const std::string &dataset_dir, const std::string &data_schema, + const std::vector &column_names = {}, bool decode = false, + const std::shared_ptr &sampler = std::make_shared(), + const std::shared_ptr &cache = nullptr) { + return std::make_shared(StringToChar(dataset_dir), StringToChar(data_schema), + VectorStringToChar(column_names), decode, sampler, cache); +} +/// \brief Function to create an AlbumDataset +/// \notes The generated dataset is specified through setting a schema +/// \param[in] dataset_dir Path to the root directory that contains the dataset +/// \param[in] data_schema Path to dataset schema file +/// \param[in] column_names Column names used to specify columns to load +/// \param[in] decode the option to decode the images in dataset +/// \param[in] sampler Raw pointer to a sampler object used to choose samples from the dataset. +/// \param[in] cache Tensor cache to use. (default=nullptr which means no cache is used). +/// \return Shared pointer to the current Dataset +inline std::shared_ptr Album(const std::string &dataset_dir, const std::string &data_schema, + const std::vector &column_names, bool decode, + const Sampler *sampler, + const std::shared_ptr &cache = nullptr) { + return std::make_shared(StringToChar(dataset_dir), StringToChar(data_schema), + VectorStringToChar(column_names), decode, sampler, cache); +} +/// \brief Function to create an AlbumDataset +/// \notes The generated dataset is specified through setting a schema +/// \param[in] dataset_dir Path to the root directory that contains the dataset +/// \param[in] data_schema Path to dataset schema file +/// \param[in] column_names Column names used to specify columns to load +/// \param[in] decode the option to decode the images in dataset +/// \param[in] sampler Sampler object used to choose samples from the dataset. 
+/// \param[in] cache Tensor cache to use. (default=nullptr which means no cache is used). +/// \return Shared pointer to the current Dataset +inline std::shared_ptr Album(const std::string &dataset_dir, const std::string &data_schema, + const std::vector &column_names, bool decode, + const std::reference_wrapper sampler, + const std::shared_ptr &cache = nullptr) { + return std::make_shared(StringToChar(dataset_dir), StringToChar(data_schema), + VectorStringToChar(column_names), decode, sampler, cache); +} + +class MnistDataset : public Dataset { + public: + MnistDataset(const std::vector &dataset_dir, const std::vector &usage, + const std::shared_ptr &sampler, const std::shared_ptr &cache); + MnistDataset(const std::vector &dataset_dir, const std::vector &usage, const Sampler *sampler, + const std::shared_ptr &cache); + MnistDataset(const std::vector &dataset_dir, const std::vector &usage, + const std::reference_wrapper sampler, const std::shared_ptr &cache); + ~MnistDataset() = default; +}; + +/// \brief Function to create a MnistDataset +/// \notes The generated dataset has two columns ["image", "label"] +/// \param[in] dataset_dir Path to the root directory that contains the dataset +/// \param[in] usage of MNIST, can be "train", "test" or "all" (default = "all"). +/// \param[in] sampler Shared pointer to a sampler object used to choose samples from the dataset. If sampler is not +/// given, +/// a `RandomSampler` will be used to randomly iterate the entire dataset (default = RandomSampler()) +/// \param[in] cache Tensor cache to use. (default=nullptr which means no cache is used). 
+/// \return Shared pointer to the current MnistDataset +inline std::shared_ptr Mnist(const std::string &dataset_dir, const std::string &usage = "all", + const std::shared_ptr &sampler = std::make_shared(), + const std::shared_ptr &cache = nullptr) { + return std::make_shared(StringToChar(dataset_dir), StringToChar(usage), sampler, cache); +} + +/// \brief Function to create a MnistDataset +/// \notes The generated dataset has two columns ["image", "label"] +/// \param[in] dataset_dir Path to the root directory that contains the dataset +/// \param[in] usage of MNIST, can be "train", "test" or "all" +/// \param[in] sampler Raw pointer to a sampler object used to choose samples from the dataset. +/// \param[in] cache Tensor cache to use. (default=nullptr which means no cache is used). +/// \return Shared pointer to the current MnistDataset +inline std::shared_ptr Mnist(const std::string &dataset_dir, const std::string &usage, + const Sampler *sampler, + const std::shared_ptr &cache = nullptr) { + return std::make_shared(StringToChar(dataset_dir), StringToChar(usage), sampler, cache); +} + +/// \brief Function to create a MnistDataset +/// \notes The generated dataset has two columns ["image", "label"] +/// \param[in] dataset_dir Path to the root directory that contains the dataset +/// \param[in] usage of MNIST, can be "train", "test" or "all" +/// \param[in] sampler Sampler object used to choose samples from the dataset. +/// \param[in] cache Tensor cache to use. (default=nullptr which means no cache is used). 
+/// \return Shared pointer to the current MnistDataset +inline std::shared_ptr Mnist(const std::string &dataset_dir, const std::string &usage, + const std::reference_wrapper sampler, + const std::shared_ptr &cache = nullptr) { + return std::make_shared(StringToChar(dataset_dir), StringToChar(usage), sampler, cache); +} +} // namespace dataset +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATASETS_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/execute.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/execute.h new file mode 100644 index 0000000..73f91be --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/execute.h @@ -0,0 +1,139 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_ + +#include +#include +#include +#include +#include "include/api/context.h" +#include "include/api/types.h" +#include "include/dataset/constants.h" +#include "include/dataset/transforms.h" + +namespace mindspore { +namespace dataset { +class DeviceResource; +// class to run tensor operations in eager mode +class Execute { + public: + /// \brief Constructor. 
+ /// \param[in] op TensorOperation to be applied in Eager mode, it accepts operation in type of shared pointer. + /// \param[in] device_type Target device environment to perform operation, can be kCPU/kGPU/kAscend310 (default=kCPU). + /// \param[in] device_id Target device ID to perform operation, only valid when device_type=kAscend310 (default=0). + explicit Execute(std::shared_ptr op, MapTargetDevice device_type = MapTargetDevice::kCpu, + uint32_t device_id = 0); + + /// \brief Constructor. + /// \param[in] op TensorTransform to be applied in Eager mode, it accepts operation in type of shared pointer. + /// \param[in] device_type Target device environment to perform operation, can be kCPU/kGPU/kAscend310 (default=kCPU). + /// \param[in] device_id Target device ID to perform operation, only valid when device_type=kAscend310 (default=0). + explicit Execute(std::shared_ptr op, MapTargetDevice device_type = MapTargetDevice::kCpu, + uint32_t device_id = 0); + + /// \brief Constructor. + /// \param[in] op TensorTransform to be applied in Eager mode, it accepts operation in type of reference. + /// \param[in] device_type Target device environment to perform operation, can be kCPU/kGPU/kAscend310 (default=kCPU). + /// \param[in] device_id Target device ID to perform operation, only valid when device_type=kAscend310 (default=0). + explicit Execute(std::reference_wrapper op, MapTargetDevice device_type = MapTargetDevice::kCpu, + uint32_t device_id = 0); + + /// \brief Constructor. + /// \param[in] op TensorTransform to be applied in Eager mode, it accepts operation in type of raw pointer. + /// \param[in] device_type Target device environment to perform operation, can be kCPU/kGPU/kAscend310 (default=kCPU). + /// \param[in] device_id Target device ID to perform operation, only valid when device_type=kAscend310 (default=0). + explicit Execute(TensorTransform *op, MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0); + + /// \brief Constructor. 
+ /// \param[in] ops A vector of TensorOperations to be applied in Eager mode, it accepts operation + /// in type of shared pointer. + /// \param[in] device_type Target device environment to perform operation, can be kCPU/kGPU/kAscend310 (default=kCPU). + /// \param[in] device_id Target device ID to perform operation, only valid when device_type=kAscend310 (default=0). + explicit Execute(std::vector> ops, + MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0); + + /// \brief Constructor. + /// \param[in] ops A vector of TensorTransforms to be applied in Eager mode, it accepts operation + /// in type of shared pointer. + /// \param[in] device_type Target device environment to perform operation, can be kCPU/kGPU/kAscend310 (default=kCPU). + /// \param[in] device_id Target device ID to perform operation, only valid when device_type=kAscend310 (default=0). + explicit Execute(std::vector> ops, + MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0); + + /// \brief Constructor. + /// \param[in] ops A vector of TensorTransforms to be applied in Eager mode, it accepts operation + /// in type of raw pointer. + /// \param[in] device_type Target device environment to perform operation, can be kCPU/kGPU/kAscend310 (default=kCPU). + /// \param[in] device_id Target device ID to perform operation, only valid when device_type=kAscend310 (default=0). + explicit Execute(const std::vector> ops, + MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0); + + /// \brief Constructor. + /// \param[in] ops A vector of TensorTransforms to be applied in Eager mode, it accepts operation + /// in type of raw pointer. + /// \param[in] device_type Target device environment to perform operation, can be kCPU/kGPU/kAscend310 (default=kCPU). + /// \param[in] device_id Target device ID to perform operation, only valid when device_type=kAscend310 (default=0). 
+ explicit Execute(const std::vector &ops, MapTargetDevice device_type = MapTargetDevice::kCpu, + uint32_t device_id = 0); + + /// \brief Destructor. + ~Execute(); + + /// \brief Callable function to execute the TensorTransform in eager mode. + /// \param[in] input Tensor to be transformed. + /// \param[out] output Transformed tensor. + /// \return Status error code, returns OK if no error encountered. + Status operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output); + + /// \brief Callable function to execute the TensorTransform in eager mode. + /// \param[in] input_tensor_list List of Tensor to be transformed. + /// \param[out] out Result tensor after transform. + /// \return Status error code, returns OK if no error encountered. + Status operator()(const std::vector &input_tensor_list, std::vector *out); + + /// \brief Given a set of Executes, run them + static Status Run(const std::vector> &data_graph, + const std::vector &inputs, std::vector *outputs); + + /// \brief The function to release device memory on Ascend310. + Status DeviceMemoryRelease(); + + /// \brief The function to generate AIPP configuration. + std::string AippCfgGenerator(); + + private: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + Status ParseTransforms(); + + /// \brief The function to validate target device setting is valid or not. 
+ Status ValidateDevice(); + + /// \brief Initialize 310 resource + Status InitResource(MapTargetDevice device_type, uint32_t device_id); + + std::vector> transforms_; + std::vector> ops_; + MapTargetDevice device_type_; + std::shared_ptr device_resource_; + struct ExtraInfo; + std::shared_ptr info_; +}; + +} // namespace dataset +} // namespace mindspore +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/iterator.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/iterator.h new file mode 100644 index 0000000..3af62c1 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/iterator.h @@ -0,0 +1,167 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_ITERATOR_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_ITERATOR_H_ + +#include +#include +#include +#include +#include +#include "include/api/dual_abi_helper.h" +#include "include/api/status.h" +#include "include/api/types.h" + +namespace mindspore { +namespace dataset { + +// Forward declare +class ExecutionTree; +class DatasetOp; +class Tensor; + +class NativeRuntimeContext; +class IteratorConsumer; +class PullBasedIteratorConsumer; + +class Dataset; + +using MSTensorMap = std::unordered_map; +using MSTensorMapChar = std::map, mindspore::MSTensor>; +using MSTensorVec = std::vector; + +// Abstract class for iterating over the dataset. +class Iterator { + public: + /// \brief Constructor. + Iterator(); + + /// \brief Destructor. + ~Iterator(); + + /// \brief Method for building and launching the pipeline. + /// \param[in] ds The last DatasetOp in the dataset pipeline. + /// \param[in] num_epochs Number of epochs passed down to EpochCtrlNode (default=-1, which means infinite epochs). + /// \return Status error code, returns OK if no error encountered. + Status BuildAndLaunchTree(std::shared_ptr ds, int32_t num_epochs); + + /// \brief Function to get the next row from the data pipeline. + /// \note Type of return data is a unordered_map(with column name). + /// \param[out] row The output tensor row. + /// \return Status error code, returns OK if no error encountered. + Status GetNextRow(MSTensorMap *row) { + if (row == nullptr) { + return Status(kMDUnexpectedError, "Got nullptr when GetNext row."); + } + MSTensorMapChar row_; + row_.clear(); + row->clear(); + Status s = GetNextRowCharIF(&row_); + TensorMapCharToString(&row_, row); + return s; + } + + /// \brief Char interface(CharIF) of GetNextRow. + /// \note The reason for using this API is that std::string will be constrained by the + /// compiler option '_GLIBCXX_USE_CXX11_ABI' while char is free of this restriction. 
+ Status GetNextRowCharIF(MSTensorMapChar *row); + + /// \brief Function to get the next row from the data pipeline. + /// \note Type of return data is a vector(without column name). + /// \param[out] row The output tensor row. + /// \return Status error code, returns OK if no error encountered. + virtual Status GetNextRow(MSTensorVec *row); + + /// \brief Function to shut down the data pipeline. + void Stop(); + + /// \brief Inter class as iterator of Iterator. + class _Iterator { + public: + /// \brief Constructor + explicit _Iterator(Iterator *lt); + + /// \brief Destructor + ~_Iterator() { + if (cur_row_ != nullptr) { + delete cur_row_; + cur_row_ = nullptr; + } + } + + /// \brief prefix ++ overload + _Iterator &operator++(); + + /// \brief dereference operator + MSTensorMap &operator*() { return *cur_row_; } + + /// \brief dereference operator + MSTensorMap *operator->() { return cur_row_; } + + /// \brief bool operator + bool operator!=(const _Iterator &rhs) { return cur_row_ != rhs.cur_row_; } + + private: + int ind_; // the cur node our Iterator points to + Iterator *lt_; + MSTensorMap *cur_row_; + }; + + /// \brief Function to return the iterator points to the begin of Iterator. + _Iterator begin() { return _Iterator(this); } + + /// \brief Function to return the iterator points to the end of Iterator. + _Iterator end() { return _Iterator(nullptr); } + + private: + std::unique_ptr runtime_context_; + IteratorConsumer *consumer_; +}; + +class PullIterator : public Iterator { + public: + /// \brief Constructor. + PullIterator(); + + /// \brief Destructor. + ~PullIterator() = default; + + /// \brief Function to get next row from the data pipeline. + /// \note Type of return data is a vector(without column name). + /// \param[out] row The output tensor row. + /// \return Status error code, returns OK if no error encountered else false. + Status GetNextRow(MSTensorVec *const row) override; + + /// \brief Function to get specified rows from the data pipeline. 
+ /// \note Type of return data is a vector(without column name). This behavior is subject to change. + /// \param[in] num_rows The number of rows to fetch. + /// \param[out] row The output tensor row. + /// \return Status error code, returns OK if no error encountered else false. + Status GetRows(int32_t num_rows, std::vector *const row); + + /// \brief Method for building and launching the pipeline. + /// \note Consider making this function protected. + /// \param[in] ds The root node that calls the function. + /// \return Status error code, returns OK if no error encountered. + Status BuildAndLaunchTree(std::shared_ptr ds); + + private: + std::unique_ptr pull_consumer_; +}; +} // namespace dataset +} // namespace mindspore +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_ITERATOR_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/lite_cv/image_process.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/lite_cv/image_process.h new file mode 100644 index 0000000..32194e5 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/lite_cv/image_process.h @@ -0,0 +1,306 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef IMAGE_PROCESS_H_ +#define IMAGE_PROCESS_H_ + +#include +#include +#include +#include + +#include "lite_cv/lite_mat.h" + +namespace mindspore { +namespace dataset { + +#define CV_PI 3.1415926535897932384626433832795 +#define IM_TOOL_EXIF_ORIENTATION_0_DEG 1 +#define IM_TOOL_EXIF_ORIENTATION_0_DEG_MIRROR 2 +#define IM_TOOL_EXIF_ORIENTATION_180_DEG 3 +#define IM_TOOL_EXIF_ORIENTATION_180_DEG_MIRROR 4 +#define IM_TOOL_EXIF_ORIENTATION_90_DEG_MIRROR 5 +#define IM_TOOL_EXIF_ORIENTATION_90_DEG 6 +#define IM_TOOL_EXIF_ORIENTATION_270_DEG_MIRROR 7 +#define IM_TOOL_EXIF_ORIENTATION_270_DEG 8 +#define NUM_OF_RGB_CHANNELS 9 +#define IM_TOOL_DATA_TYPE_FLOAT (1) +#define IM_TOOL_DATA_TYPE_UINT8 (2) +#define IM_TOOL_RETURN_STATUS_SUCCESS (0) +#define IM_TOOL_RETURN_STATUS_INVALID_INPUT (1) +#define IM_TOOL_RETURN_STATUS_FAILED (2) + +#define INT16_CAST(X) \ + static_cast(::std::min(::std::max(static_cast(X + (X >= 0.f ? 0.5f : -0.5f)), -32768), 32767)); + +enum PaddBorderType { + PADD_BORDER_CONSTANT = 0, /**< Fills the border with constant values. */ + PADD_BORDER_REPLICATE = 1, /**< Fills the border with replicate mode. */ + PADD_BORDER_REFLECT_101 = 4, /**< Fills the border with reflect 101 mode. */ + PADD_BORDER_DEFAULT = PADD_BORDER_REFLECT_101 /**< Default pad mode, use reflect 101 mode. */ +}; + +struct BoxesConfig { + public: + std::vector img_shape; + std::vector num_default; + std::vector feature_size; + float min_scale; + float max_scale; + std::vector> aspect_rations; + std::vector steps; + std::vector prior_scaling; +}; + +/// \brief resizing image by bilinear algorithm, the data type of currently only supports is uint8, +/// the channel of currently supports is 3 and 1. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] dst_w The width of the output image. +/// \param[in] dst_h The length of the output image. 
+bool ResizeBilinear(const LiteMat &src, LiteMat &dst, int dst_w, int dst_h); + +/// \brief Init Lite Mat from pixel, the conversion of currently supports is rbgaTorgb and rgbaTobgr. +/// \note The length of the pointer must be the same as that of the multiplication of w and h. +/// \param[in] data Input image data. +/// \param[in] pixel_type The type of pixel_type. +/// \param[in] data_type The type of data_type. +/// \param[in] w The width of the output image. +/// \param[in] h The length of the output image. +/// \param[in] m Used to store image data. +bool InitFromPixel(const unsigned char *data, LPixelType pixel_type, LDataType data_type, int w, int h, LiteMat &m); + +/// \brief convert the data type, the conversion of currently supports is uint8 to float. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] scale Scale pixel value(default:1.0). +bool ConvertTo(const LiteMat &src, LiteMat &dst, double scale = 1.0); + +/// \brief crop image, the channel supports is 3 and 1. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] x The x coordinate value of the starting point of the screenshot. +/// \param[in] y The y coordinate value of the starting point of the screenshot. +/// \param[in] w The width of the screenshot. +/// \param[in] h The height of the screenshot. +bool Crop(const LiteMat &src, LiteMat &dst, int x, int y, int w, int h); + +/// \brief normalize image, currently the supports data type is float. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] mean Mean of the data set. +/// \param[in] std Norm of the data set. +bool SubStractMeanNormalize(const LiteMat &src, LiteMat &dst, const std::vector &mean, + const std::vector &std); + +/// \brief padd image, the channel supports is 3 and 1. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] top The length of top. 
+/// \param[in] bottom The length of bottom. +/// \param[in] left The length of left. +/// \param[in] right he length of right. +/// \param[in] pad_type The type of pad. +/// \param[in] fill_b_or_gray B or GRAY. +/// \param[in] fill_g G. +/// \param[in] fill_r R. +bool Pad(const LiteMat &src, LiteMat &dst, int top, int bottom, int left, int right, PaddBorderType pad_type, + uint8_t fill_b_or_gray = 0, uint8_t fill_g = 0, uint8_t fill_r = 0); + +/// \brief Extract image channel by index. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] col The serial number of the channel. +bool ExtractChannel(LiteMat &src, LiteMat &dst, int col); + +/// \brief Split image channels to single channel. +/// \param[in] src Input image data. +/// \param[in] mv Single channel data. +bool Split(const LiteMat &src, std::vector &mv); + +/// \brief Create a multi-channel image out of several single-channel arrays. +/// \param[in] mv Single channel data. +/// \param[in] dst Output image data. +bool Merge(const std::vector &mv, LiteMat &dst); + +/// \brief Apply affine transformation for 1 channel image. +/// \param[in] src Input image data. +/// \param[in] out_img Output image data. +/// \param[in] M[6] Affine transformation matrix. +/// \param[in] dsize The size of the output image. +/// \param[in] borderValue The pixel value is used for filing after the image is captured. +bool Affine(LiteMat &src, LiteMat &out_img, const double M[6], std::vector dsize, UINT8_C1 borderValue); + +/// \brief Apply affine transformation for 3 channel image. +/// \param[in] src Input image data. +/// \param[in] out_img Output image data. +/// \param[in] M[6] Affine transformation matrix. +/// \param[in] dsize The size of the output image. +/// \param[in] borderValue The pixel value is used for filing after the image is captured. 
+bool Affine(LiteMat &src, LiteMat &out_img, const double M[6], std::vector dsize, UINT8_C3 borderValue); + +/// \brief Get default anchor boxes for Faster R-CNN, SSD, YOLO etc. +/// \param[in] config Objects of BoxesConfig structure. +std::vector> GetDefaultBoxes(const BoxesConfig config); + +/// \brief Convert the prediction boxes to the actual boxes of (y, x, h, w). +/// \param[in] boxes Actual size box. +/// \param[in] default_boxes Default box. +/// \param[in] config Objects of BoxesConfig structure. +void ConvertBoxes(std::vector> &boxes, const std::vector> &default_boxes, + const BoxesConfig config); + +/// \brief Apply Non-Maximum Suppression. +/// \param[in] all_boxes All input boxes. +/// \param[in] all_scores Score after all boxes are executed through the network. +/// \param[in] thres Pre-value of IOU. +/// \param[in] max_boxes Maximum value of output box. +std::vector ApplyNms(const std::vector> &all_boxes, std::vector &all_scores, float thres, + int max_boxes); + +/// \brief affine image by linear. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] M Transformation matrix +/// \param[in] dst_w The width of the output image. +/// \param[in] dst_h The height of the output image. +/// \param[in] borderType Edge processing type. +/// \param[in] borderValue Boundary fill value. +bool WarpAffineBilinear(const LiteMat &src, LiteMat &dst, const LiteMat &M, int dst_w, int dst_h, + PaddBorderType borderType, std::vector &borderValue); + +/// \brief affine image by linear. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] M Transformation matrix +/// \param[in] dst_w The width of the output image. +/// \param[in] dst_h The height of the output image. +/// \param[in] borderType Edge processing type. +/// \param[in] borderValue Boundary fill value. 
+bool WarpPerspectiveBilinear(const LiteMat &src, LiteMat &dst, const LiteMat &M, int dst_w, int dst_h, + PaddBorderType borderType, std::vector &borderValue); + +/// \brief Matrix rotation. +/// \param[in] x The value of the x-axis of the coordinate rotation point. +/// \param[in] y The value of the y-axis of the coordinate rotation point. +/// \param[in] angle Rotation angle. +/// \param[in] scale Scaling ratio. +/// \param[in] M Output transformation matrix. +bool GetRotationMatrix2D(float x, float y, double angle, double scale, LiteMat &M); + +/// \brief Perspective transformation. +/// \param[in] src_point Input coordinate point. +/// \param[in] dst_point Output coordinate point. +/// \param[in] M Output matrix. +bool GetPerspectiveTransform(std::vector src_point, std::vector dst_point, LiteMat &M); + +/// \brief Affine transformation. +/// \param[in] src_point Input coordinate point. +/// \param[in] dst_point Output coordinate point. +/// \param[in] M Output matrix. +bool GetAffineTransform(std::vector src_point, std::vector dst_point, LiteMat &M); + +/// \brief Matrix transpose. +/// \param[in] src Input matrix. +/// \param[in] dst Output matrix. +bool Transpose(const LiteMat &src, LiteMat &dst); + +/// \brief Filter the image by a Gaussian kernel +/// \param[in] src LiteMat image to be processed. Only LiteMat of type UINT8 is supported now. +/// \param[in] dst LiteMat image after processing. +/// \param[in] ksize The size of Gaussian kernel. It should be a vector of size 2 as {kernel_x, kernel_y}, both value of +/// which should be positive and odd. +/// \param[in] sigmaX The Gaussian kernel standard deviation of width. It should be a positive value. +/// \param[in] sigmaY The Gaussian kernel standard deviation of height (default=0.f). It should be a positive value, +/// or will use the value of sigmaX. +/// \param[in] pad_type The padding type used while filtering (default=PaddBorderType::PADD_BORDER_DEFAULT). 
+bool GaussianBlur(const LiteMat &src, LiteMat &dst, const std::vector &ksize, double sigmaX, double sigmaY = 0.f, + PaddBorderType pad_type = PaddBorderType::PADD_BORDER_DEFAULT); + +/// \brief Detect edges in an image +/// \param[in] src LiteMat image to be processed. Only single channel LiteMat of type UINT8 is supported now. +/// \param[in] dst LiteMat image after processing. +/// \param[in] low_thresh The lower bound of the edge. Pixel with value below it will not be considered as a boundary. +/// It should be a nonnegative value. +//// \param[in] high_thresh The higher bound of the edge. Pixel with value over it will +/// be absolutely considered as a boundary. It should be a nonnegative value and no less than low_thresh. +/// \param[in] ksize The size of Sobel kernel (default=3). It can only be 3, 5 or 7. +/// \param[in] L2gradient Whether to use L2 distance while calculating gradient (default=false). +bool Canny(const LiteMat &src, LiteMat &dst, double low_thresh, double high_thresh, int ksize = 3, + bool L2gradient = false); + +/// \brief Apply a 2D convolution over the image. +/// \param[in] src LiteMat image to be processed. Only LiteMat of type UINT8 and FLOAT32 is supported now. +/// \param[in] kernel LiteMat 2D convolution kernel. Only LiteMat of type FLOAT32 is supported now. +/// \param[in] dst LiteMat image after processing. +/// \param[in] dst_type Output data type of dst. +/// \param[in] pad_type The padding type used while filtering (default=PaddBorderType::PADD_BORDER_DEFAULT). +bool Conv2D(const LiteMat &src, const LiteMat &kernel, LiteMat &dst, LDataType dst_type, + PaddBorderType pad_type = PaddBorderType::PADD_BORDER_DEFAULT); + +/// \brief Applies a separable linear convolution over the image +/// \param[in] src LiteMat image to be processed. Only LiteMat of type UINT8 and FLOAT32 is supported now. +/// \param[in] kx LiteMat 1D convolution kernel. Only LiteMat of type FLOAT32 is supported now. 
+/// \param[in] ky LiteMat 1D convolution kernel. Only LiteMat of type FLOAT32 is supported now. +/// \param[in] dst LiteMat image after processing. +/// \param[in] dst_type Output data type of dst. +/// \param[in] pad_type The padding type used while filtering (default=PaddBorderType::PADD_BORDER_DEFAULT). +bool ConvRowCol(const LiteMat &src, const LiteMat &kx, const LiteMat &ky, LiteMat &dst, LDataType dst_type, + PaddBorderType pad_type = PaddBorderType::PADD_BORDER_DEFAULT); + +/// \brief Filter the image by a Sobel kernel +/// \param[in] src LiteMat image to be processed. Only LiteMat of type UINT8 is supported now. +/// \param[in] dst LiteMat image after processing. +/// \param[in] flag_x Order of the derivative x. It should be a nonnegative value and can not be equal to 0 at the same +/// time with flag_y. +/// \param[in] flag_y Order of the derivative y. It should be a nonnegative value and can not be equal +/// to 0 at the same time with flag_x. +/// \param[in] ksize The size of Sobel kernel (default=3). It can only be 1, 3, 5 or 7. +/// \param[in] scale The scale factor for the computed derivative values (default=1.0). +/// \param[in] pad_type The padding type used while filtering (default=PaddBorderType::PADD_BORDER_DEFAULT). +bool Sobel(const LiteMat &src, LiteMat &dst, int flag_x, int flag_y, int ksize = 3, double scale = 1.0, + PaddBorderType pad_type = PaddBorderType::PADD_BORDER_DEFAULT); + +/// \brief Convert RGB image or color image to BGR image. +/// \param[in] src Input image data. +/// \param[in] data_type The type of data_type. +/// \param[in] w The width of output image. +/// \param[in] h The height of output image. +/// \param[in] mat Output image data. +bool ConvertRgbToBgr(const LiteMat &src, const LDataType &data_type, int w, int h, LiteMat &mat); + +/// \brief Convert RGB image or color image to grayscale image. +/// \param[in] src Input image data. +/// \param[in] data_type The type of data_type. 
+/// \param[in] w The width of output image. +/// \param[in] h The height of output image. +/// \param[in] mat Output image data. +bool ConvertRgbToGray(const LiteMat &src, LDataType data_type, int w, int h, LiteMat &mat); + +/// \brief Resize preserve AR with filler. +/// \param[in] src Input image data. +/// \param[in] dst Output image data. +/// \param[in] h The height of output image. +/// \param[in] w The width of output image. +/// \param[in] ratioShiftWShiftH Array that records the ratio, width shift, and height shift. +/// \param[in] invM Fixed direction array. +/// \param[in] img_orientation Way of export direction. +bool ResizePreserveARWithFiller(LiteMat &src, LiteMat &dst, int h, int w, float (*ratioShiftWShiftH)[3], + float (*invM)[2][3], int img_orientation); + +} // namespace dataset +} // namespace mindspore +#endif // IMAGE_PROCESS_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/lite_cv/lite_mat.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/lite_cv/lite_mat.h new file mode 100644 index 0000000..6788a71 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/lite_cv/lite_mat.h @@ -0,0 +1,377 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
#ifndef MINI_MAT_H_
#define MINI_MAT_H_

// NOTE(review): the two include targets were stripped during extraction;
// <cstdint>/<cstddef> are required for uint8_t/size_t used below, <memory>
// matches the upstream header — confirm against the original MindSpore source.
#include <cstddef>
#include <cstdint>
#include <memory>

namespace mindspore {
namespace dataset {

// Byte alignment used by AlignMalloc, and the maximum number of dimensions
// (height, width, channel) a LiteMat can describe.
#define ALIGN 16
#define MAX_DIMS 3

/// \brief Element holder for a 1-channel pixel.
template <typename T>
struct Chn1 {
  Chn1(T c1) : c1(c1) {}
  T c1;
};

/// \brief Element holder for a 2-channel pixel.
template <typename T>
struct Chn2 {
  Chn2(T c1, T c2) : c1(c1), c2(c2) {}
  T c1;
  T c2;
};

/// \brief Element holder for a 3-channel pixel.
template <typename T>
struct Chn3 {
  Chn3(T c1, T c2, T c3) : c1(c1), c2(c2), c3(c3) {}
  T c1;
  T c2;
  T c3;
};

/// \brief Element holder for a 4-channel pixel.
template <typename T>
struct Chn4 {
  Chn4(T c1, T c2, T c3, T c4) : c1(c1), c2(c2), c3(c3), c4(c4) {}
  T c1;
  T c2;
  T c3;
  T c4;
};

/// \brief A 2D point with float coordinates.
struct Point {
  float x;
  float y;
  Point() : x(0), y(0) {}
  Point(float _x, float _y) : x(_x), y(_y) {}
};

/// \brief Plain C view of an image buffer (width/height/stride/type + data).
typedef struct imageToolsImage {
  int w;
  int h;
  int stride;
  int dataType;
  void *image_buff;
} imageToolsImage_t;

// Per-type channel aliases. The template arguments below were stripped in the
// vendored copy and are restored here from the element-type naming convention.
using BOOL_C1 = Chn1<bool>;
using BOOL_C2 = Chn2<bool>;
using BOOL_C3 = Chn3<bool>;
using BOOL_C4 = Chn4<bool>;

using UINT8_C1 = Chn1<uint8_t>;
using UINT8_C2 = Chn2<uint8_t>;
using UINT8_C3 = Chn3<uint8_t>;
using UINT8_C4 = Chn4<uint8_t>;

using INT8_C1 = Chn1<int8_t>;
using INT8_C2 = Chn2<int8_t>;
using INT8_C3 = Chn3<int8_t>;
using INT8_C4 = Chn4<int8_t>;

using UINT16_C1 = Chn1<uint16_t>;
using UINT16_C2 = Chn2<uint16_t>;
using UINT16_C3 = Chn3<uint16_t>;
using UINT16_C4 = Chn4<uint16_t>;

using INT16_C1 = Chn1<int16_t>;
using INT16_C2 = Chn2<int16_t>;
using INT16_C3 = Chn3<int16_t>;
using INT16_C4 = Chn4<int16_t>;

using UINT32_C1 = Chn1<uint32_t>;
using UINT32_C2 = Chn2<uint32_t>;
using UINT32_C3 = Chn3<uint32_t>;
using UINT32_C4 = Chn4<uint32_t>;

using INT32_C1 = Chn1<int32_t>;
using INT32_C2 = Chn2<int32_t>;
using INT32_C3 = Chn3<int32_t>;
using INT32_C4 = Chn4<int32_t>;

using UINT64_C1 = Chn1<uint64_t>;
using UINT64_C2 = Chn2<uint64_t>;
using UINT64_C3 = Chn3<uint64_t>;
using UINT64_C4 = Chn4<uint64_t>;

using INT64_C1 = Chn1<int64_t>;
using INT64_C2 = Chn2<int64_t>;
using INT64_C3 = Chn3<int64_t>;
using INT64_C4 = Chn4<int64_t>;

using FLOAT32_C1 = Chn1<float>;
using FLOAT32_C2 = Chn2<float>;
using FLOAT32_C3 = Chn3<float>;
using FLOAT32_C4 = Chn4<float>;

using FLOAT64_C1 = Chn1<double>;
using FLOAT64_C2 = Chn2<double>;
using FLOAT64_C3 = Chn3<double>;
using FLOAT64_C4 = Chn4<double>;

/// \brief Pixel layouts and pixel-format conversion selectors.
enum LPixelType {
  BGR = 0,       /**< Pixel in BGR type. */
  RGB = 1,       /**< Pixel in RGB type. */
  RGBA = 2,      /**< Pixel in RGBA type. */
  RGBA2GRAY = 3, /**< Convert image from RGBA to GRAY. */
  RGBA2BGR = 4,  /**< Convert image from RGBA to BGR. */
  RGBA2RGB = 5,  /**< Convert image from RGBA to RGB. */
  NV212BGR = 6,  /**< Convert image from NV21 to BGR. */
  NV122BGR = 7,  /**< Convert image from NV12 to BGR. */
};

/// \brief Border handling mode for warp operations.
enum WARP_BORDER_MODE { WARP_BORDER_MODE_CONSTANT };

/// \brief Lightweight runtime data-type tag with a per-type element size.
class LDataType {
 public:
  enum Type : uint8_t {
    UNKNOWN = 0, /**< Unknown data type. */
    BOOL,        /**< BOOL data type. */
    INT8,        /**< INT8 data type. */
    UINT8,       /**< UINT8 data type. */
    INT16,       /**< INT16 data type. */
    UINT16,      /**< UINT16 data type. */
    INT32,       /**< INT32 data type. */
    UINT32,      /**< UINT32 data type. */
    INT64,       /**< INT64 data type. */
    UINT64,      /**< UINT64 data type. */
    FLOAT16,     /**< FLOAT16 data type. */
    FLOAT32,     /**< FLOAT32 data type. */
    FLOAT64,     /**< FLOAT64 data type. */
    DOUBLE,      /**< DOUBLE data type. */
    NUM_OF_TYPES /**< Number of types. */
  };

  /// \brief Default-construct as UNKNOWN.
  LDataType() : type_(UNKNOWN) {}

  /// \brief Implicit conversion from the Type enumerator (kept non-explicit
  /// so call sites may pass the enum directly, matching the vendored API).
  LDataType(Type d) : type_(d) {}

  ~LDataType() = default;

  /// \return The wrapped Type enumerator.
  inline Type Value() const { return type_; }

  inline bool operator==(const LDataType &ps) const { return this->type_ == ps.type_; }

  inline bool operator!=(const LDataType &ps) const { return this->type_ != ps.type_; }

  /// \return Element size in bytes for this type, or 0 for out-of-range values.
  uint8_t SizeInBytes() const {
    if (type_ < LDataType::NUM_OF_TYPES) {
      return SIZE_IN_BYTES[type_];
    }
    return 0;
  }

 public:
  // Indexed by Type; must stay in sync with the enum above.
  static inline const uint8_t SIZE_IN_BYTES[] = {
    0, /**< Unknown size. */
    1, /**< Size of BOOL. */
    1, /**< Size of INT8. */
    1, /**< Size of UINT8. */
    2, /**< Size of INT16. */
    2, /**< Size of UINT16. */
    4, /**< Size of INT32. */
    4, /**< Size of UINT32. */
    8, /**< Size of INT64. */
    8, /**< Size of UINT64. */
    2, /**< Size of FLOAT16. */
    4, /**< Size of FLOAT32. */
    8, /**< Size of FLOAT64. */
    8, /**< Size of DOUBLE. */
  };

  Type type_;
};

/// \brief Class that represents a lite Mat of an image: a reference-counted
/// view over a (possibly aligned) pixel buffer with up to MAX_DIMS dimensions.
class LiteMat {
 public:
  /// \brief Construct an empty LiteMat.
  LiteMat();

  /// \brief Construct a 1-D LiteMat.
  /// \param[in] width The width of the input object.
  /// \param[in] data_type The data type of the input object.
  explicit LiteMat(int width, LDataType data_type = LDataType::UINT8);

  /// \brief Construct a 2-D LiteMat.
  /// \param[in] width The width of the input object.
  /// \param[in] height The height of the input object.
  /// \param[in] data_type The data type of the input object.
  LiteMat(int width, int height, LDataType data_type = LDataType::UINT8);

  /// \brief Construct a 2-D LiteMat over an existing buffer (no copy).
  /// \param[in] width The width of the input object.
  /// \param[in] height The height of the input object.
  /// \param[in] p_data The pointer data of the input object.
  /// \param[in] data_type The data type of the input object.
  LiteMat(int width, int height, void *p_data, LDataType data_type = LDataType::UINT8);

  /// \brief Construct a 3-D (H x W x C) LiteMat.
  /// \param[in] width The width of the input object.
  /// \param[in] height The height of the input object.
  /// \param[in] channel The channel of the input object.
  /// \param[in] data_type The data type of the input object.
  LiteMat(int width, int height, int channel, LDataType data_type = LDataType::UINT8);

  /// \brief Construct a 3-D LiteMat over an existing buffer (no copy).
  /// \param[in] width The width of the input object.
  /// \param[in] height The height of the input object.
  /// \param[in] channel The channel of the input object.
  /// \param[in] p_data The pointer data of the input object.
  /// \param[in] data_type The data type of the input object.
  LiteMat(int width, int height, int channel, void *p_data, LDataType data_type = LDataType::UINT8);

  /// \brief Destructor; releases the buffer when the reference count drops to zero.
  ~LiteMat();

  /// \brief Shallow copy: shares the underlying buffer via reference counting.
  LiteMat(const LiteMat &m);

  /// \brief Initialize as a 1-D LiteMat.
  /// \param[in] width Set width for given LiteMat.
  /// \param[in] data_type Set data type for given LiteMat.
  void Init(int width, LDataType data_type = LDataType::UINT8);

  /// \brief Initialize as a 2-D LiteMat.
  /// \param[in] width Set width for given LiteMat.
  /// \param[in] height Set height for given LiteMat.
  /// \param[in] data_type Set data type for given LiteMat.
  void Init(int width, int height, LDataType data_type = LDataType::UINT8);

  /// \brief Initialize as a 2-D LiteMat over an existing buffer.
  /// \param[in] width Set width for given LiteMat.
  /// \param[in] height Set height for given LiteMat.
  /// \param[in] p_data Set pointer data for given LiteMat.
  /// \param[in] data_type Set data type for given LiteMat.
  void Init(int width, int height, void *p_data, LDataType data_type = LDataType::UINT8);

  /// \brief Initialize as a 3-D LiteMat.
  /// \param[in] width Set width for given LiteMat.
  /// \param[in] height Set height for given LiteMat.
  /// \param[in] channel Set channel for given LiteMat.
  /// \param[in] data_type Set data type for given LiteMat.
  /// \param[in] align_memory Whether malloc align memory or not, default is true,
  ///     which is better for doing acceleration.
  void Init(int width, int height, int channel, const LDataType &data_type = LDataType::UINT8,
            bool align_memory = true);

  /// \brief Initialize as a 3-D LiteMat over an existing buffer.
  /// \param[in] width Set width for given LiteMat.
  /// \param[in] height Set height for given LiteMat.
  /// \param[in] channel Set channel for given LiteMat.
  /// \param[in] p_data Set pointer data for given LiteMat.
  /// \param[in] data_type Set data type for given LiteMat.
  void Init(int width, int height, int channel, void *p_data, LDataType data_type = LDataType::UINT8);

  /// \brief Extract the rectangle (x, y, w, h) into dst.
  bool GetROI(int x, int y, int w, int h, LiteMat &dst);  // NOLINT

  /// \return true if no buffer is attached.
  bool IsEmpty() const;

  /// \brief Drop this reference; frees the buffer when it was the last one.
  void Release();

  /// \brief Shallow assignment: shares the underlying buffer.
  LiteMat &operator=(const LiteMat &m);

  /// \brief Reinterpret the raw buffer as a T pointer.
  template <typename T>
  operator T *() {
    return reinterpret_cast<T *>(data_ptr_);
  }

  template <typename T>
  operator const T *() const {
    return reinterpret_cast<const T *>(data_ptr_);
  }

  /// \brief Pointer to the start of row `w`, or nullptr when out of range or
  /// empty. NOTE(review): the parameter is a row index (compared to height_)
  /// despite its name.
  template <typename T>
  inline T *ptr(int w) const {
    if (w >= height_) {
      return nullptr;
    }
    if (IsEmpty()) {
      return nullptr;
    }
    return reinterpret_cast<T *>(reinterpret_cast<unsigned char *>(data_ptr_) + steps_[0] * w);
  }

 private:
  /// \brief Apply for memory alignment.
  /// \param[in] size The size of the requested memory alignment.
  void *AlignMalloc(unsigned int size);

  /// \brief Free memory obtained from AlignMalloc.
  /// \param[in] ptr Pointer to free memory.
  void AlignFree(void *ptr);

  /// \brief Initialize the element size of different types of data.
  /// \param[in] data_type Type of data.
  void InitElemSize(LDataType data_type);

  /// \brief Add value of reference count.
  /// \param[in] p The point of references count.
  /// \param[in] value The value of new added references.
  /// \return Reference count after the update.
  int addRef(int *p, int value);

  /// \brief Set the step size of the pixels in the LiteMat array.
  /// \param[in] c0 The number used to set the value of step[0].
  /// \param[in] c1 The number used to set the value of step[1].
  /// \param[in] c2 The number used to set the value of step[2].
  void setSteps(int c0, int c1, int c2);

  /// \brief Validate width/height/channel/type consistency.
  bool CheckLiteMat();

 public:
  void *data_ptr_ = nullptr;
  int elem_size_;
  int width_;
  int height_;
  int channel_;
  int c_step_;
  int dims_;
  size_t size_;
  LDataType data_type_;
  int *ref_count_;
  size_t steps_[MAX_DIMS];
  bool release_flag;
};

/// \brief Calculates the difference between the two images for each element.
bool Subtract(const LiteMat &src_a, const LiteMat &src_b, LiteMat *dst);

/// \brief Calculates the division between the two images for each element.
bool Divide(const LiteMat &src_a, const LiteMat &src_b, LiteMat *dst);

/// \brief Calculates the multiply between the two images for each element.
bool Multiply(const LiteMat &src_a, const LiteMat &src_b, LiteMat *dst);

#define RETURN_FALSE_IF_LITEMAT_EMPTY(_m) \
  do {                                    \
    if ((_m).IsEmpty()) {                 \
      return false;                       \
    }                                     \
  } while (false)

#define RETURN_IF_LITEMAT_EMPTY(_m) \
  do {                              \
    if ((_m).IsEmpty()) {           \
      return;                       \
    }                               \
  } while (false)

}  // namespace dataset
}  // namespace mindspore
#endif  // MINI_MAT_H_
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_SAMPLERS_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_SAMPLERS_H_

// NOTE(review): the include targets were stripped during extraction; <memory>
// and <vector> are what the declarations below require (<cstdint> added for
// the fixed-width integer types) — confirm against the original header.
#include <cstdint>
#include <memory>
#include <vector>

namespace mindspore {
namespace dataset {

// Forward declare the IR-level sampler produced by Parse().
class SamplerObj;

// Abstract class to represent a sampler in the data pipeline.
/// \class Sampler samplers.h
/// \brief An abstract base class to represent a sampler in the data pipeline.
/// Only the befriended Dataset classes and SelectSampler may call Parse().
class Sampler : std::enable_shared_from_this<Sampler> {
  friend class AlbumDataset;
  friend class CelebADataset;
  friend class Cifar10Dataset;
  friend class Cifar100Dataset;
  friend class CityscapesDataset;
  friend class CLUEDataset;
  friend class CocoDataset;
  friend class CSVDataset;
  friend class DIV2KDataset;
  friend class FlickrDataset;
  friend class ImageFolderDataset;
  friend class ManifestDataset;
  friend class MindDataDataset;
  friend class MnistDataset;
  friend class RandomDataDataset;
  friend class SBUDataset;
  friend class TextFileDataset;
  friend class TFRecordDataset;
  friend class USPSDataset;
  friend class VOCDataset;
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);

 public:
  /// \brief Constructor
  Sampler() {}

  /// \brief Destructor
  /// NOTE(review): non-virtual, as shipped with the prebuilt library; kept
  /// unchanged to preserve ABI compatibility with the vendored binary.
  ~Sampler() = default;

  /// \brief A virtual function to add a child sampler.
  /// \param[in] child The child sampler to be added as a children of this sampler.
  virtual void AddChild(std::shared_ptr<Sampler> child) { children_.push_back(child); }

 protected:
  /// \brief Pure virtual function to convert a Sampler class into an IR Sampler object.
  /// \return shared pointer to the newly created TensorOperation.
  virtual std::shared_ptr<SamplerObj> Parse() const = 0;

  std::vector<std::shared_ptr<Sampler>> children_;
};

/// \brief A class to represent a Distributed Sampler in the data pipeline.
/// \note A Sampler that accesses a shard of the dataset.
class DistributedSampler final : public Sampler {
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);

 public:
  /// \brief Constructor
  /// \param[in] num_shards Number of shards to divide the dataset into.
  /// \param[in] shard_id Shard ID of the current shard within num_shards.
  /// \param[in] shuffle If true, the indices are shuffled (default=true).
  /// \param[in] num_samples The number of samples to draw (default=0, return all samples).
  /// \param[in] seed The seed in use when shuffle is true (default=1).
  /// \param[in] offset The starting position where access to elements in the dataset begins (default=-1).
  /// \param[in] even_dist If true, each shard would return the same number of rows (default=true).
  ///     If false the total rows returned by all the shards would not have overlap.
  DistributedSampler(int64_t num_shards, int64_t shard_id, bool shuffle = true, int64_t num_samples = 0,
                     uint32_t seed = 1, int64_t offset = -1, bool even_dist = true);

  /// \brief Destructor.
  ~DistributedSampler() = default;

 protected:
  /// \brief The function to convert a Sampler into an IR SamplerObj.
  /// \return shared pointer to the newly created SamplerObj.
  std::shared_ptr<SamplerObj> Parse() const override;

 private:
  int64_t num_shards_;
  int64_t shard_id_;
  bool shuffle_;
  int64_t num_samples_;
  uint32_t seed_;
  int64_t offset_;
  bool even_dist_;
};

/// \brief A class to represent a PK Sampler in the data pipeline.
/// \note Samples K elements for each P class in the dataset.
///     This will sample all classes.
class PKSampler final : public Sampler {
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);

 public:
  /// \brief Constructor
  /// \param[in] num_val Number of elements to sample for each class.
  /// \param[in] shuffle If true, the class IDs are shuffled (default=false).
  /// \param[in] num_samples The number of samples to draw (default=0, return all samples).
  explicit PKSampler(int64_t num_val, bool shuffle = false, int64_t num_samples = 0);

  /// \brief Destructor.
  ~PKSampler() = default;

 protected:
  /// \brief The function to convert a Sampler into an IR SamplerObj.
  /// \return shared pointer to the newly created SamplerObj.
  std::shared_ptr<SamplerObj> Parse() const override;

 private:
  int64_t num_val_;
  bool shuffle_;
  int64_t num_samples_;
};

/// \brief A class to represent a Random Sampler in the data pipeline.
/// \note Samples the elements randomly.
class RandomSampler final : public Sampler {
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);

 public:
  /// \brief Constructor
  /// \param[in] replacement If true, put the sample ID back for the next draw (default=false).
  /// \param[in] num_samples The number of samples to draw (default=0, return all samples).
  explicit RandomSampler(bool replacement = false, int64_t num_samples = 0);

  /// \brief Destructor.
  ~RandomSampler() = default;

 protected:
  /// \brief The function to convert a Sampler into an IR SamplerObj.
  /// \return shared pointer to the newly created SamplerObj.
  std::shared_ptr<SamplerObj> Parse() const override;

 private:
  bool replacement_;
  int64_t num_samples_;
};

/// \brief A class to represent a Sequential Sampler in the data pipeline.
/// \note Samples the dataset elements sequentially, same as not having a sampler.
class SequentialSampler final : public Sampler {
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);

 public:
  /// \brief Constructor
  /// \param[in] start_index Index to start sampling at (default=0, start at first id).
  /// \param[in] num_samples The number of samples to draw (default=0, return all samples).
  explicit SequentialSampler(int64_t start_index = 0, int64_t num_samples = 0);

  /// \brief Destructor.
  ~SequentialSampler() = default;

 protected:
  /// \brief The function to convert a Sampler into an IR SamplerObj.
  /// \return shared pointer to the newly created SamplerObj.
  std::shared_ptr<SamplerObj> Parse() const override;

 private:
  int64_t start_index_;
  int64_t num_samples_;
};

/// \brief A class to represent a Subset Sampler in the data pipeline.
/// \note Samples the elements from a sequence of indices.
class SubsetSampler : public Sampler {
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);

 public:
  /// \brief Constructor
  /// \param[in] indices A vector sequence of indices.
  /// \param[in] num_samples The number of samples to draw (default=0, return all samples).
  explicit SubsetSampler(std::vector<int64_t> indices, int64_t num_samples = 0);

  /// \brief Destructor.
  ~SubsetSampler() = default;

 protected:
  /// \brief The function to convert a Sampler into an IR SamplerObj.
  /// \return shared pointer to the newly created SamplerObj.
  std::shared_ptr<SamplerObj> Parse() const override;

  // Protected (not private) so SubsetRandomSampler can reuse them.
  std::vector<int64_t> indices_;
  int64_t num_samples_;
};

/// \brief A class to represent a Subset Random Sampler in the data pipeline.
/// \note Samples the elements randomly from a sequence of indices.
class SubsetRandomSampler final : public SubsetSampler {
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);

 public:
  /// \brief Constructor
  /// \param[in] indices A vector sequence of indices.
  /// \param[in] num_samples The number of samples to draw (default=0, return all samples).
  explicit SubsetRandomSampler(std::vector<int64_t> indices, int64_t num_samples = 0);

  /// \brief Destructor.
  ~SubsetRandomSampler() = default;

 protected:
  /// \brief The function to convert a Sampler into an IR SamplerObj.
  /// \return shared pointer to the newly created SamplerObj.
  std::shared_ptr<SamplerObj> Parse() const override;
};

/// \brief A class to represent a Weighted Random Sampler in the data pipeline.
/// \note Samples the elements from [0, len(weights) - 1] randomly with the given
///     weights (probabilities).
class WeightedRandomSampler final : public Sampler {
  friend std::shared_ptr<SamplerObj> SelectSampler(int64_t, bool, int32_t, int32_t);

 public:
  /// \brief Constructor
  /// \param[in] weights A vector sequence of weights, not necessarily summing up to 1.
  /// \param[in] num_samples The number of samples to draw (default=0, return all samples).
  /// \param[in] replacement If true, put the sample ID back for the next draw (default=true).
  explicit WeightedRandomSampler(std::vector<double> weights, int64_t num_samples = 0, bool replacement = true);

  /// \brief Destructor.
  ~WeightedRandomSampler() = default;

 protected:
  /// \brief The function to convert a Sampler into an IR SamplerObj.
  /// \return shared pointer to the newly created SamplerObj.
  std::shared_ptr<SamplerObj> Parse() const override;

 private:
  std::vector<double> weights_;
  int64_t num_samples_;
  bool replacement_;
};

}  // namespace dataset
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_SAMPLERS_H_
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_TRANSFORMS_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_TRANSFORMS_H_ + +#include +#include +#include +#include + +#include "include/api/dual_abi_helper.h" +#include "include/api/status.h" +#include "include/api/types.h" +#include "include/dataset/constants.h" + +namespace mindspore { +namespace dataset { + +class TensorOperation; + +// We need the following two groups of forward declaration to friend the class in class TensorTransform. +namespace transforms { +class Compose; +class RandomApply; +class RandomChoice; +} // namespace transforms + +namespace vision { +class BoundingBoxAugment; +class RandomSelectSubpolicy; +class UniformAugment; +} // namespace vision + +// Abstract class to represent a tensor transform operation in the data pipeline. +/// \class TensorTransform transforms.h +/// \brief A base class to represent a tensor transform operation in the data pipeline. +class TensorTransform : public std::enable_shared_from_this { + friend class Dataset; + friend class Execute; + friend class transforms::Compose; + friend class transforms::RandomApply; + friend class transforms::RandomChoice; + friend class vision::BoundingBoxAugment; + friend class vision::RandomSelectSubpolicy; + friend class vision::UniformAugment; + + public: + /// \brief Constructor + TensorTransform() {} + + /// \brief Destructor + ~TensorTransform() = default; + + protected: + /// \brief Pure virtual function to convert a TensorTransform class into a IR TensorOperation object. + /// \return shared pointer to the newly created TensorOperation. + virtual std::shared_ptr Parse() = 0; + + /// \brief Virtual function to convert a TensorTransform class into a IR TensorOperation object. 
+ /// \param[in] env A string to determine the running environment + /// \return shared pointer to the newly created TensorOperation. + virtual std::shared_ptr Parse(const MapTargetDevice &env) { return nullptr; } +}; + +/// \brief Slice object used in SliceOption. +class Slice { + public: + /// \brief Constructor, with start, stop and step default to 0. + Slice() : start_(0), stop_(0), step_(0) {} + /// \brief Constructor. + /// \param[in] start Starting integer specifying where to start the slicing. + /// \param[in] stop Ending integer specifying where to stop the slicing. + /// \param[in] step An integer specifying the step of the slicing. + Slice(dsize_t start, dsize_t stop, dsize_t step) : start_(start), stop_(stop), step_(step) {} + /// \brief Constructor, with step=1 + /// \param[in] start Starting integer specifying where to start the slicing. + /// \param[in] stop Ending integer specifying where to stop the slicing. + Slice(dsize_t start, dsize_t stop) : start_(start), stop_(stop), step_(1) {} + /// \brief Constructor, with start=0 and step=1 + /// \param[in] stop Ending integer specifying where to stop the slicing. + explicit Slice(dsize_t stop) : start_(0), stop_(stop), step_(1) {} + Slice(Slice const &slice) = default; + + ~Slice() = default; + + bool valid() const { return step_ != 0; } + dsize_t start_; + dsize_t stop_; + dsize_t step_; +}; + +/// \brief SliceOption used in Slice TensorTransform. +class SliceOption { + public: + /// \param[in] all Slice the whole dimension + explicit SliceOption(bool all) : all_(all) {} + /// \param[in] indices Slice these indices along the dimension. Negative indices are supported. + explicit SliceOption(std::vector indices) : indices_(indices) {} + /// \param[in] slice Slice the generated indices from the slice object along the dimension. 
+ explicit SliceOption(Slice slice) : slice_(slice) {} + SliceOption(SliceOption const &slice) = default; + + ~SliceOption() = default; + + // only one of the following will be valid + // given indices to slice the Tensor. + std::vector indices_ = {}; + // Slice object. All start, stop and step are 0 if invalid. + Slice slice_; + bool all_ = false; +}; + +// Transform operations for performing data transformation. +namespace transforms { + +/// \brief Compose a list of transforms into a single transform. +class Compose final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] transforms A vector of raw pointers to TensorTransform objects to be applied. + explicit Compose(const std::vector &transforms); + /// \brief Constructor. + /// \param[in] transforms A vector of shared pointers to TensorTransform objects to be applied. + explicit Compose(const std::vector> &transforms); + /// \brief Constructor. + /// \param[in] transforms A vector of TensorTransform objects to be applied. + explicit Compose(const std::vector> &transforms); + + /// \brief Destructor + ~Compose() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Concatenate all tensors into a single tensor. +class Concatenate final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] axis Concatenate the tensors along given axis, only support 0 or -1 so far (default=0). + /// \param[in] prepend MSTensor to be prepended to the concatenated tensors (default={}). + /// \param[in] append MSTensor to be appended to the concatenated tensors (default={}). 
+ explicit Concatenate(int8_t axis = 0, const MSTensor &prepend = {}, const MSTensor &append = {}); + + /// \brief Destructor + ~Concatenate() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Duplicate the input tensor to a new output tensor. +/// The input tensor is carried over to the output list. +class Duplicate final : public TensorTransform { + public: + /// \brief Constructor. + Duplicate(); + + /// \brief Destructor + ~Duplicate() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; +}; + +/// \brief Fill all elements in the tensor with the specified value. +/// The output tensor will have the same shape and type as the input tensor. +class Fill final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] fill_value Scalar value to fill the tensor with. + /// It can only be MSTensor of the following types from mindspore::DataType: + /// String, Bool, Int8/16/32/64, UInt8/16/32/64, Float16/32/64. + explicit Fill(const MSTensor &fill_value); + + /// \brief Destructor + ~Fill() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Mask content of the input tensor with the given predicate. +/// Any element of the tensor that matches the predicate will be evaluated to True, otherwise False. +class Mask final : public TensorTransform { + public: + /// \brief Constructor. 
+ /// \param[in] op One of the relational operators: EQ, NE LT, GT, LE or GE. + /// \param[in] constant Constant to be compared to. It can only be MSTensor of the following types + /// from mindspore::DataType: String, Int, Float, Bool. + /// \param[in] de_type Type of the generated mask. It can only be numeric or boolean datatype. + /// (default=mindspore::DataType::kNumberTypeBool) + explicit Mask(RelationalOp op, const MSTensor &constant, + mindspore::DataType ms_type = mindspore::DataType(mindspore::DataType::kNumberTypeBool)); + + /// \brief Destructor + ~Mask() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Convert the labels into OneHot format. +class OneHot final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] num_classes number of classes. + explicit OneHot(int32_t num_classes); + + /// \brief Destructor + ~OneHot() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Pad input tensor according to pad_shape +class PadEnd final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] pad_shape List of integers representing the shape needed, need to have same rank with input tensor. + /// Dimensions that set to `-1` will not be padded (i.e., original dim will be used). + /// Shorter dimensions will truncate the values. + /// \param[in] pad_value Value used to pad (default={}). 
+ explicit PadEnd(const std::vector &pad_shape, const MSTensor &pad_value = {}); + + /// \brief Destructor + ~PadEnd() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Randomly perform a series of transforms with a given probability. +class RandomApply final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] transforms A vector of raw pointers to TensorTransform objects to be applied. + /// \param[in] prob The probability to apply the transformation list (default=0.5). + explicit RandomApply(const std::vector &transforms, double prob = 0.5); + /// \brief Constructor. + /// \param[in] transforms A vector of shared pointers to TensorTransform objects to be applied. + /// \param[in] prob The probability to apply the transformation list (default=0.5). + explicit RandomApply(const std::vector> &transforms, double prob = 0.5); + /// \brief Constructor. + /// \param[in] transforms A vector of TensorTransform objects to be applied. + /// \param[in] prob The probability to apply the transformation list (default=0.5). + explicit RandomApply(const std::vector> &transforms, double prob = 0.5); + + /// \brief Destructor + ~RandomApply() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Randomly select one transform from a list of transforms to perform on the input tensor. +class RandomChoice final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] transforms A vector of raw pointers to TensorTransform objects to be applied. 
+ explicit RandomChoice(const std::vector &transforms); + /// \brief Constructor. + /// \param[in] transforms A vector of shared pointers to TensorTransform objects to be applied. + explicit RandomChoice(const std::vector> &transforms); + /// \brief Constructor. + /// \param[in] transforms A vector of TensorTransform objects to be applied. + explicit RandomChoice(const std::vector> &transforms); + + /// \brief Destructor + ~RandomChoice() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Extract a tensor out using the given n slices. +/// The functionality of Slice is similar to the feature of indexing of NumPy. +/// (Currently only rank-1 tensors are supported). +class Slice final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] slice_input Vector of SliceOption + explicit Slice(const std::vector &slice_input); + + /// \brief Destructor + ~Slice() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Cast the MindSpore data type of a tensor to another. +class TypeCast final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] data_type mindspore::DataType to be cast to. + explicit TypeCast(mindspore::DataType data_type); + + /// \brief Destructor + ~TypeCast() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. 
+ std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Return an output tensor that contains all the unique elements of the input tensor in +/// the same order as they appear in the input tensor. +class Unique final : public TensorTransform { + public: + /// \brief Constructor. + Unique(); + + /// \brief Destructor + ~Unique() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; +}; +} // namespace transforms +} // namespace dataset +} // namespace mindspore +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_TRANSFORMS_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/vision_lite.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/vision_lite.h new file mode 100644 index 0000000..68e595b --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/dataset/vision_lite.h @@ -0,0 +1,371 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_LITE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_LITE_H_ + +#include +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/dataset/constants.h" +#include "include/dataset/transforms.h" + +namespace mindspore { +namespace dataset { + +// Transform operations for performing computer vision. +namespace vision { + +// Forward Declarations +class RotateOperation; + +/// \brief Apply affine transform on the input image. +class Affine final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] degrees The degrees to rotate the image. + /// \param[in] translation The values representing vertical and horizontal translation (default = {0.0, 0.0}). + /// The first value represents the x axis translation while the second represents the y axis translation. + /// \param[in] scale The scaling factor for the image (default = 0.0). + /// \param[in] shear A float vector of size 2, representing the shear degrees (default = {0.0, 0.0}). + /// \param[in] interpolation An enum for the mode of interpolation. + /// - InterpolationMode::kLinear, Interpolation method is blinear interpolation (Only supports this mode in Lite). + /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation. + /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation. + /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation. + /// \param[in] fill_value A vector representing the value to fill the area outside the transformation + /// in the output image. If 1 value is provided, it is used for all RGB channels. + /// If 3 values are provided, it is used to fill R, G, B channels respectively. 
+ explicit Affine(float_t degrees, const std::vector &translation = {0.0, 0.0}, float scale = 0.0, + const std::vector &shear = {0.0, 0.0}, + InterpolationMode interpolation = InterpolationMode::kNearestNeighbour, + const std::vector &fill_value = {0, 0, 0}); + + /// \brief Destructor. + ~Affine() = default; + + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Crop the input image at the center to the given size. +class CenterCrop final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] size A vector representing the output size of the cropped image. + /// If the size is a single value, a squared crop of size (size, size) is returned. + /// If the size has 2 values, it should be (height, width). + explicit CenterCrop(std::vector size); + + /// \brief Destructor. + ~CenterCrop() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + std::shared_ptr Parse(const MapTargetDevice &env) override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Crop an image based on location and crop size. +class Crop final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] coordinates Starting location of crop. Must be a vector of two values, in the form of {x_coor, y_coor}. + /// \param[in] size Size of the cropped area. + /// If the size is a single value, a squared crop of size (size, size) is returned. + /// If the size has 2 values, it should be (height, width). + Crop(std::vector coordinates, std::vector size); + + /// \brief Destructor. 
+ ~Crop() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Decode the input image in RGB mode. +class Decode final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] rgb A boolean indicating whether to decode the image in RGB mode or not. + explicit Decode(bool rgb = true); + + /// \brief Destructor. + ~Decode() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + std::shared_ptr Parse(const MapTargetDevice &env) override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Blur the input image with the specified Gaussian kernel. +class GaussianBlur final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] kernel_size A vector of Gaussian kernel size for width and height. The value must be positive and odd. + /// \param[in] sigma A vector of Gaussian kernel standard deviation sigma for width and height. The values must be + /// positive. Using default value 0 means to calculate the sigma according to the kernel size. + GaussianBlur(const std::vector &kernel_size, const std::vector &sigma = {0., 0.}); + + /// \brief Destructor. + ~GaussianBlur() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Normalize the input image with respect to mean and standard deviation. +class Normalize final : public TensorTransform { + public: + /// \brief Constructor. 
+ /// \param[in] mean A vector of mean values for each channel, with respect to channel order. + /// The mean values must be in range [0.0, 255.0]. + /// \param[in] std A vector of standard deviations for each channel, with respect to channel order. + /// The standard deviation values must be in range (0.0, 255.0]. + Normalize(std::vector mean, std::vector std); + + /// \brief Destructor. + ~Normalize() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + std::shared_ptr Parse(const MapTargetDevice &env) override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Apply a Random Affine transformation on the input image in RGB or Greyscale mode. +class RandomAffine final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] degrees A float vector of size 2, representing the starting and ending degree. + /// \param[in] translate_range A float vector of size 2 or 4, representing percentages of translation on x and y axes. + /// If the size is 2, (min_dx, max_dx, 0, 0). + /// If the size is 4, (min_dx, max_dx, min_dy, max_dy), + /// all values are in range [-1, 1]. + /// \param[in] scale_range A float vector of size 2, representing the starting and ending scales in the range. + /// \param[in] shear_ranges A float vector of size 2 or 4, representing the starting and ending shear degrees + /// vertically and horizontally. + /// If the size is 2, (min_shear_x, max_shear_x, 0, 0), + /// if the size is 4, (min_shear_x, max_shear_x, min_shear_y, max_shear_y). + /// \param[in] interpolation An enum for the mode of interpolation. + /// - InterpolationMode::kLinear, Interpolation method is blinear interpolation (Only supports this mode in Lite). + /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation. 
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation. + /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation. + /// \param[in] fill_value A vector representing the value to fill the area outside the transform + /// in the output image. If 1 value is provided, it is used for all RGB channels. + /// If 3 values are provided, it is used to fill R, G and B channels respectively. + explicit RandomAffine(const std::vector &degrees, + const std::vector &translate_range = {0.0, 0.0, 0.0, 0.0}, + const std::vector &scale_range = {1.0, 1.0}, + const std::vector &shear_ranges = {0.0, 0.0, 0.0, 0.0}, + InterpolationMode interpolation = InterpolationMode::kNearestNeighbour, + const std::vector &fill_value = {0, 0, 0}); + + /// \brief Destructor. + ~RandomAffine() = default; + + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Resize the input image to the given size. +class Resize final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] size A vector representing the output size of the resized image. + /// If the size is a single value, the image will be resized to this value with + /// the same image aspect ratio. If the size has 2 values, it should be (height, width). + /// \param[in] interpolation An enum for the mode of interpolation. + /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation (Only supports this mode in Lite). + /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation. + /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation. + /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation like implemented in pillow. + explicit Resize(std::vector size, InterpolationMode interpolation = InterpolationMode::kLinear); + + /// \brief Destructor. + ~Resize() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + std::shared_ptr Parse(const MapTargetDevice &env) override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Keep the original picture ratio and fills the rest. +class ResizePreserveAR final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] height The height of image output value after resizing. + /// \param[in] width The width of image output value after resizing. + /// \param[in] img_orientation optional rotation angle. + /// - img_orientation = 1, Rotate 0 degree. + /// - img_orientation = 2, Rotate 0 degree and apply horizontal flip. + /// - img_orientation = 3, Rotate 180 degree. + /// - img_orientation = 4, Rotate 180 degree and apply horizontal flip. + /// - img_orientation = 5, Rotate 90 degree and apply horizontal flip. + /// - img_orientation = 6, Rotate 90 degree. + /// - img_orientation = 7, Rotate 270 degree and apply horizontal flip. + /// - img_orientation = 8, Rotate 270 degree. + ResizePreserveAR(int32_t height, int32_t width, int32_t img_orientation = 0); + + /// \brief Destructor. + ~ResizePreserveAR() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief RGB2BGR TensorTransform. +/// \notes Convert the format of input image from RGB to BGR. 
+class RGB2BGR final : public TensorTransform { + public: + /// \brief Constructor. + RGB2BGR() = default; + + /// \brief Destructor. + ~RGB2BGR() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; +}; + +/// \brief RGB2GRAY TensorTransform. +/// \note Convert RGB image or color image to grayscale image. +/// \brief Convert a RGB image or color image to a grayscale one. +class RGB2GRAY final : public TensorTransform { + public: + /// \brief Constructor. + RGB2GRAY() = default; + + /// \brief Destructor. + ~RGB2GRAY() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; +}; + +/// \brief Rotate the input image according to parameters. +class Rotate final : public TensorTransform { + public: + /// \brief Constructor. + /// \note This api is only used in Lite, the interpolation mode is bilinear. + /// \param[in] angle_id The fix rotation angle. + /// - FixRotationAngle::k0Degree = 1, Rotate 0 degree. + /// - FixRotationAngle::k0DegreeAndMirror = 2, Rotate 0 degree and apply horizontal flip. + /// - FixRotationAngle::k180Degree = 3, Rotate 180 degree. + /// - FixRotationAngle::k180DegreeAndMirror = 4, Rotate 180 degree and apply horizontal flip. + /// - FixRotationAngle::k90DegreeAndMirror = 5, Rotate 90 degree and apply horizontal flip. + /// - FixRotationAngle::k90Degree = 6, Rotate 90 degree. + /// - FixRotationAngle::k270DegreeAndMirror = 7, Rotate 270 degree and apply horizontal flip. + /// - FixRotationAngle::k270Degree = 8, Rotate 270 degree. + explicit Rotate(FixRotationAngle angle_id = FixRotationAngle::k0Degree); + + /// \brief Constructor. + /// \param[in] degrees A float value, representing the rotation degrees. 
+ /// \param[in] resample An enum for the mode of interpolation. + /// - InterpolationMode::kLinear, Interpolation method is blinear interpolation. + /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation. + /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation. + /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation. + /// \param[in] expand A boolean representing whether the image is expanded after rotation. + /// \param[in] center A float vector of size 2 or empty, representing the x and y center of rotation + /// or the center of the image. + /// \param[in] fill_value A vector representing the value to fill the area outside the transform + /// in the output image. If 1 value is provided, it is used for all RGB channels. + /// If 3 values are provided, it is used to fill R, G, B channels respectively. + Rotate(float degrees, InterpolationMode resample = InterpolationMode::kNearestNeighbour, bool expand = false, + std::vector center = {}, std::vector fill_value = {0, 0, 0}); + + /// \brief Destructor. + ~Rotate() = default; + + protected: + /// \brief The function to convert a TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. 
+ std::shared_ptr Parse() override; + + private: + std::shared_ptr op_; + struct Data; + std::shared_ptr data_; +}; + +} // namespace vision +} // namespace dataset +} // namespace mindspore +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_LITE_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/errorcode.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/errorcode.h new file mode 100644 index 0000000..796aeea --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/errorcode.h @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ +#define MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ + +#include "include/lite_utils.h" + +namespace mindspore { +namespace lite { +/// \brief STATUS defined for holding error code in MindSpore Lite. +using STATUS = int; + +/* Success */ +constexpr int RET_OK = 0; /**< No error occurs. */ + +/* Common error code, range: [-1, -100) */ +constexpr int RET_ERROR = -1; /**< Common error code. */ +constexpr int RET_NULL_PTR = -2; /**< NULL pointer returned.*/ +constexpr int RET_PARAM_INVALID = -3; /**< Invalid parameter.*/ +constexpr int RET_NO_CHANGE = -4; /**< No change. */ +constexpr int RET_SUCCESS_EXIT = -5; /**< No error but exit. 
*/ +constexpr int RET_MEMORY_FAILED = -6; /**< Fail to create memory. */ +constexpr int RET_NOT_SUPPORT = -7; /**< Fail to support. */ +constexpr int RET_THREAD_POOL_ERROR = -8; /**< Error occurs in thread pool. */ + +/* Executor error code, range: [-100,-200) */ +constexpr int RET_OUT_OF_TENSOR_RANGE = -100; /**< Failed to check range. */ +constexpr int RET_INPUT_TENSOR_ERROR = -101; /**< Failed to check input tensor. */ +constexpr int RET_REENTRANT_ERROR = -102; /**< Exist executor running. */ + +/* Graph error code, range: [-200,-300) */ +constexpr int RET_GRAPH_FILE_ERR = -200; /**< Failed to verify graph file. */ + +/* Node error code, range: [-300,-400) */ +constexpr int RET_NOT_FIND_OP = -300; /**< Failed to find operator. */ +constexpr int RET_INVALID_OP_NAME = -301; /**< Invalid operator name. */ +constexpr int RET_INVALID_OP_ATTR = -302; /**< Invalid operator attr. */ +constexpr int RET_OP_EXECUTE_FAILURE = -303; /**< Failed to execute operator. */ + +/* Tensor error code, range: [-400,-500) */ +constexpr int RET_FORMAT_ERR = -400; /**< Failed to check tensor format. */ + +/* InferShape error code, range: [-500,-600) */ +constexpr int RET_INFER_ERR = -500; /**< Failed to infer shape. */ +constexpr int RET_INFER_INVALID = -501; /**< Invalid infer shape before runtime. */ + +/* User input param error code, range: [-600, -700) */ +constexpr int RET_INPUT_PARAM_INVALID = -600; /**< Invalid input param by user. */ + +/// \brief Print description of errorcode. +/// +/// \param[in] error_code define return status of procedure. +/// +/// \return String of errorcode info.
+String GetErrorInfo(STATUS error_code); + +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/ir/dtype/type_id.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/ir/dtype/type_id.h new file mode 100644 index 0000000..bb3a58c --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/ir/dtype/type_id.h @@ -0,0 +1,97 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). + * + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ +#define MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ + +namespace mindspore { +// +// Supported meta type +// +enum TypeId : int { + kTypeUnknown = 0, + kMetaTypeBegin = kTypeUnknown, + kMetaTypeType, // Type + kMetaTypeAnything, + kMetaTypeObject, + kMetaTypeTypeType, // TypeType + kMetaTypeProblem, + kMetaTypeExternal, + kMetaTypeNone, + kMetaTypeNull, + kMetaTypeEllipsis, + kMetaTypeEnd, + // + // Object types + // + kObjectTypeBegin = kMetaTypeEnd, + kObjectTypeNumber, + kObjectTypeString, + kObjectTypeList, + kObjectTypeTuple, + kObjectTypeSlice, + kObjectTypeKeyword, + kObjectTypeTensorType, + kObjectTypeRowTensorType, + kObjectTypeSparseTensorType, + kObjectTypeUndeterminedType, + kObjectTypeClass, + kObjectTypeDictionary, + kObjectTypeFunction, + kObjectTypeJTagged, + kObjectTypeSymbolicKeyType, + kObjectTypeEnvType, + kObjectTypeRefKey, + kObjectTypeRef, + kObjectTypeEnd, + // + // Number Types + // + kNumberTypeBegin = kObjectTypeEnd, + kNumberTypeBool, + kNumberTypeInt, + kNumberTypeInt8, + kNumberTypeInt16, + kNumberTypeInt32, + kNumberTypeInt64, + kNumberTypeUInt, + kNumberTypeUInt8, + kNumberTypeUInt16, + kNumberTypeUInt32, + kNumberTypeUInt64, + kNumberTypeFloat, + kNumberTypeFloat16, + kNumberTypeFloat32, + kNumberTypeFloat64, + kNumberTypeComplex64, + kNumberTypeComplex128, + kNumberTypeInt4, + kNumberTypeEnd, + // + // Monad Types + // + // Monad types is placed at the end of enum, + // in order to keep fit with the type of existing model on the lite side. 
+ kMonadTypeBegin = kNumberTypeEnd, + kObjectTypeMonad, + kObjectTypeUMonad, + kObjectTypeIOMonad, + kMonadTypeEnd +}; +} // namespace mindspore +#endif // MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/kernel_interface.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/kernel_interface.h new file mode 100644 index 0000000..c1d9e6e --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/kernel_interface.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_INCLUDE_KERNEL_INTERFACE_H_ +#define MINDSPORE_LITE_INCLUDE_KERNEL_INTERFACE_H_ + +#include +#include +#include "include/api/types.h" +#include "include/api/status.h" +#include "include/lite_utils.h" +#include "schema/model_generated.h" + +namespace mindspore { +namespace kernel { +/// \brief KernelInterface defined customized op's interface, such as infershape, and so on. +class MS_API KernelInterface { + public: + /// \brief Destructor of KernelInterface. + virtual ~KernelInterface() = default; + + /// \brief Method to infer customized op's output shape. + /// + /// \param[in] inputs Define the input tensors of op. + /// \param[in] outputs Define the output tensors of op. + /// \param[in] primitive Define the attributes of op. 
+ /// + /// \return Status as a status identification of inferring. + virtual Status Infer(std::vector *inputs, std::vector *outputs, + const schema::Primitive *primitive) { + return kSuccess; + } +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_LITE_INCLUDE_KERNEL_INTERFACE_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_session.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_session.h new file mode 100644 index 0000000..a33dbf8 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_session.h @@ -0,0 +1,236 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_LITE_SESSION_H +#define MINDSPORE_LITE_INCLUDE_LITE_SESSION_H + +#ifndef NOT_USE_STL +#include +#endif // NOT_USE_STL +#include +#include +#include "include/ms_tensor.h" +#include "include/model.h" +#include "include/context.h" +#include "include/errorcode.h" +#include "include/lite_types.h" + +namespace mindspore { +namespace lite { +class TrainCfg; +} + +namespace session { +/// \brief LiteSession defined session in MindSpore Lite for compiling Model and forwarding model. +class MS_API LiteSession { + public: + /// \brief Static method to create a LiteSession pointer. 
+ /// + /// \param[in] context Define the context of session to be created. + /// + /// \return Pointer of MindSpore Lite LiteSession. + static LiteSession *CreateSession(const lite::Context *context); + + /// \brief Static method to create a LiteSession pointer which has already compiled a model. + /// + /// \param[in] model_buf Define the buffer read from a model file. + /// \param[in] size Define bytes number of model buffer. + /// \param[in] context Define the context of session to be created. + /// + /// \return Pointer of MindSpore Lite LiteSession. + static LiteSession *CreateSession(const char *model_buf, size_t size, const lite::Context *context); + + /// \brief Destructor of MindSpore Lite LiteSession. + virtual ~LiteSession() = default; + + /// \brief Attempt to bind or unbind threads in the thread pool to or from the specified cpu core. + /// + /// \param[in] if_bind Define whether to bind or unbind threads. + virtual void BindThread(bool if_bind) = 0; + + /// \brief Compile MindSpore Lite model. + /// + /// \note CompileGraph should be called before RunGraph. + /// + /// \param[in] model Define the model to be compiled. + /// + /// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h. + virtual int CompileGraph(lite::Model *model) = 0; + + /// \brief Get input MindSpore Lite MSTensors of model. + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual Vector GetInputs() const = 0; + + /// \brief Get input MindSpore Lite MSTensors of model by tensor name. + /// + /// \param[in] node_name Define tensor name. + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const = 0; + + /// \brief Run session with callback. + /// + /// \param[in] before Define a call_back_function to be called before running each node. + /// \param[in] after Define a call_back_function called after running each node. 
+ /// + /// \note RunGraph should be called after CompileGraph. + /// + /// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h. + virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0; + + /// \brief Get output MindSpore Lite MSTensors of model by node name. + /// + /// \param[in] node_name Define node name. + /// + /// \note Deprecated, replace with GetOutputByTensorName + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual Vector GetOutputsByNodeName(const String &node_name) const = 0; + +#ifndef NOT_USE_STL + /// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name. + /// + /// \return The map of output tensor name and MindSpore Lite MSTensor. + virtual std::unordered_map GetOutputs() const = 0; +#endif + + /// \brief Get name of output tensors of model compiled by this session. + /// + /// \return The vector of string as output tensor names in order. + virtual Vector GetOutputTensorNames() const = 0; + + /// \brief Get output MindSpore Lite MSTensors of model by tensor name. + /// + /// \param[in] tensor_name Define tensor name. + /// + /// \return Pointer of MindSpore Lite MSTensor. + virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const = 0; + + /// \brief Resize inputs shape. + /// + /// \param[in] inputs Define the inputs of the model. + /// \param[in] dims Define the inputs new shape. + /// + /// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h. 
+ virtual int Resize(const Vector &inputs, const Vector> &dims) = 0; + + /// \brief Set model to train mode + /// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h + virtual int Train() { return mindspore::lite::RET_ERROR; } + + /// \brief Check mode of model + /// + /// \return boolean indication if model is in train mode + virtual bool IsTrain() { return false; } + + /// \brief Set model to eval mode + /// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h + virtual int Eval() { return mindspore::lite::RET_OK; } + + /// \brief Check mode of model + /// + /// \return boolean indication if model is in eval mode + virtual bool IsEval() { return true; } + + /// \brief Sets the Learning Rate of the training + /// + /// \param[in] learning_rate to set + /// + /// \return STATUS as an error code of the set operation, STATUS is defined in errorcode.h + virtual int SetLearningRate(float learning_rate) { return mindspore::lite::RET_ERROR; } + + /// \brief Gets the Learning Rate of the training + /// + /// \return learning rate. 0.0 if no optimizer was found + virtual float GetLearningRate() { return 0.0; } + + /// \brief Setup training with virtual batches + /// + /// \param[in] virtual_batch_multiplier - virtual batch multiplier, use any number < 1 to disable + /// \param[in] lr - learning rate to use for virtual batch, -1 for internal configuration + /// \param[in] momentum - batch norm momentum to use for virtual batch, -1 for internal configuration + + /// \return STATUS as an error code of the set operation, STATUS is defined in errorcode.h + virtual int SetupVirtualBatch(int virtual_batch_multiplier, float lr = -1.0f, float momentum = -1.0f) { + return mindspore::lite::RET_ERROR; + } + + /// \brief Get output MindSpore Lite MSTensors of Training model prediction + /// + /// \return a vector of output tensors (MindSpore Lite MSTensor). 
+  virtual std::vector GetPredictions() const {
+    std::vector outputs;
+    return outputs;
+  }
+
+  /// \brief Save model
+  /// \param[in] file_name pretrained model file name prefix. '.ms' extension is added if it does not exist
+  /// \param[in] model_type indication whether to save full model or only the inference part
+  /// \param[in] quant_type indication whether to quantize exported model
+  /// \param[in] format of exported file (currently only FT_FLATBUFFERS is supported)
+  /// \param[in] out_put_tensor_name of exported tensorname
+  /// \return STATUS as an error code of the set operation, STATUS is defined in errorcode.h
+  virtual int Export(const std::string &file_name, lite::ModelType model_type = lite::MT_TRAIN,
+                     lite::QuantizationType quant_type = lite::QT_DEFAULT, lite::FormatType = lite::FT_FLATBUFFERS,
+                     std::vector out_put_tensor_name = {}) {
+    return mindspore::lite::RET_ERROR;
+  }
+
+  /// \brief Get model featuremap MindSpore Lite MSTensors of Training model prediction
+  ///
+  /// \return a vector of output tensors (MindSpore Lite MSTensor).
+  virtual std::vector GetFeatureMaps() const {
+    std::vector features;
+    return features;
+  }
+
+  /// \brief update model featuremap save to update_ms_file
+  /// \param[in] features new featuremap
+  /// \return STATUS as an error code of the set operation, STATUS is defined in errorcode.h
+  virtual int UpdateFeatureMaps(const std::vector &features) { return mindspore::lite::RET_ERROR; }
+
+  /// \brief Get model gradient
+  ///
+  /// \return a vector of gradient tensors (MindSpore Lite MSTensor).
+  virtual std::vector GetGradients() const {
+    std::vector gradients;
+    return gradients;
+  }
+
+  /// \brief update model gradient
+  ///
+  /// \param[in] gradients new gradients
+  /// \return STATUS as an error code of the set operation, STATUS is defined in errorcode.h
+  virtual int ApplyGradients(const std::vector &gradients) { return mindspore::lite::RET_ERROR; }
+
+  /// \brief Get model optimizer params
+  ///
+  /// \return a vector of optimizer parameters (MindSpore Lite MSTensor).
+  virtual std::vector GetOptimizerParams() const {
+    std::vector params;
+    return params;
+  }
+
+  /// \brief set model optimizer params
+  ///
+  /// \param[in] params new optimizer params
+  /// \return STATUS as an error code of the set operation, STATUS is defined in errorcode.h
+  virtual int SetOptimizerParams(const std::vector &params) { return mindspore::lite::RET_ERROR; }
+};
+}  // namespace session
+}  // namespace mindspore
+#endif  // MINDSPORE_LITE_INCLUDE_LITE_SESSION_H
diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_types.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_types.h
new file mode 100644
index 0000000..7081146
--- /dev/null
+++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_types.h
@@ -0,0 +1,53 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_ +#define MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_ + +namespace mindspore::lite { +/// \brief CpuBindMode defined for holding bind cpu strategy argument. +typedef enum { + NO_BIND, /**< no bind */ + HIGHER_CPU, /**< bind higher cpu first */ + MID_CPU /**< bind middle cpu first */ +} CpuBindMode; + +/// \brief DeviceType defined for holding user's preferred backend. +typedef enum { + DT_CPU, /**< CPU device type */ + DT_GPU, /**< GPU device type */ + DT_NPU, /**< NPU device type */ + DT_ASCEND310 /**< ASCEND310 device type */ +} DeviceType; + +typedef enum { + FT_FLATBUFFERS, /**< Flatbuffers format */ + FT_PROTOBUF /**< Protobuf format */ +} FormatType; + +typedef enum { + QT_DEFAULT, /**< the quantization of the original model will apply */ + QT_NONE, /**< apply no quantization */ + QT_WEIGHT /**< apply weight quantization */ +} QuantizationType; + +typedef enum { + MT_TRAIN, /**< Both Train and Inference part of the compiled model are serialized */ + MT_INFERENCE /**< Only the Inference part of the compiled model is serialized */ +} ModelType; + +} // namespace mindspore::lite +#endif // MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_utils.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_utils.h new file mode 100644 index 0000000..673fa87 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/lite_utils.h @@ -0,0 +1,673 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ +#define MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ + +#ifndef NOT_USE_STL +#include +#include +#include +#include +#else +#include +#include +#include +#include +#include +#include +#include +#endif // NOT_USE_STL + +#ifndef MS_API +#ifdef _WIN32 +#define MS_API __declspec(dllexport) +#else +#define MS_API __attribute__((visibility("default"))) +#endif +#endif + +namespace mindspore { +namespace schema { +struct Tensor; +} // namespace schema + +namespace tensor { +class MSTensor; +} // namespace tensor + +namespace lite { +struct DeviceContext; +struct LiteQuantParam; +} // namespace lite + +#ifdef NOT_USE_STL +#define MS_C_EXCEPTION(...) 
exit(1) + +class String { + public: + String() { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + } + + String(size_t count, char ch) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * (count + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memset(buffer_, ch, count); + buffer_[count] = '\0'; + size_ = count; + } + + String(const char *s, size_t count) { + if (s == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return; + } + size_t size_s = strlen(s); + if (size_s <= count) { + size_ = size_s; + } else { + size_ = count; + } + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + strncpy(buffer_, s, size_); + buffer_[size_] = '\0'; + } + + explicit String(const char *s) { + if (s == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return; + } + size_ = strlen(s); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(buffer_, s, size_ + 1); + } + + String(const String &other) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * (other.size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = other.size_; + memcpy(buffer_, other.buffer_, size_ + 1); + } + + String(const String &other, size_t pos, size_t count = npos) { + if (pos >= other.size_) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + } else { + if (count == npos) { + count = other.size_ 
- pos; + } + if (pos + count > other.size_) { + size_ = other.size_ - pos; + } else { + size_ = count; + } + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + strncpy(buffer_, other.buffer_ + pos, size_); + buffer_[size_] = '\0'; + } + } + + ~String() { free(buffer_); } + + String &operator=(const String &str) { + if (this == &str) { + return *this; + } + free(buffer_); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (str.size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = str.size_; + memcpy(buffer_, str.buffer_, size_ + 1); + return *this; + } + + String &operator=(const char *str) { + free(buffer_); + if (str == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return *this; + } + size_t size_s = strlen(str); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_s + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = size_s; + memcpy(buffer_, str, size_ + 1); + return *this; + } + + char &at(size_t pos) { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return buffer_[pos]; + } + + const char &at(size_t pos) const { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return buffer_[pos]; + } + + inline char &operator[](size_t pos) { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return this->at(pos); + } + + inline const char &operator[](size_t pos) const { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return this->at(pos); + } + + char *data() noexcept { return buffer_; } + const char *data() const noexcept { return buffer_; } + const char *c_str() const noexcept { return buffer_; } + + // capacity + bool empty() const noexcept { return size_ == 0; } + size_t size() const noexcept { return size_; } 
+ size_t length() const noexcept { return size_; } + + // operations + void clear() noexcept { + free(buffer_); + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + } + + String &append(size_t count, const char ch) { + (*this) += ch; + return *this; + } + + String &append(const String &str) { + (*this) += str; + return *this; + } + + String &append(const char *str) { + if (str == nullptr) { + return *this; + } + (*this) += str; + return *this; + } + + String &operator+(const String &str) { + (*this) += str; + return *this; + } + + String &operator+=(const String &str) { + size_t new_size = size_ + str.size_; + char *tmp = reinterpret_cast(malloc(sizeof(char) * (new_size + 1))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + strncat(tmp, str.buffer_, str.size_); + tmp[new_size] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ = new_size; + return *this; + } + + String &operator+=(const char *str) { + if (str == nullptr) { + return *this; + } + size_t str_size = strlen(str); + size_t new_size = size_ + str_size; + char *tmp = reinterpret_cast(malloc(sizeof(char) * (new_size + 1))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + strncat(tmp, str, str_size); + tmp[new_size] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ = new_size; + return *this; + } + + String &operator+=(const char ch) { + char *tmp = reinterpret_cast(malloc(sizeof(char) * (size_ + 2))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + tmp[size_] = ch; + tmp[size_ + 1] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ += 1; + return *this; + } + + int compare(const String &str) const { return strcmp(buffer_, str.buffer_); } + int compare(const char *str) const { return strcmp(buffer_, str); } + + 
String substr(size_t pos = 0, size_t count = npos) const { return String(*this, pos, count); } + + static const size_t npos = -1; + + private: + size_t size_; + char *buffer_; +}; + +inline String operator+(const String &lhs, const char *rhs) { + String str = lhs; + str += rhs; + return str; +} + +inline String operator+(const char *lhs, const String &rhs) { + String str = rhs; + str += lhs; + return str; +} + +inline bool operator!=(const String &lhs, const String &rhs) { return lhs.compare(rhs) != 0; } +inline bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; } +inline bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; } +inline bool operator==(const char *lhs, const String &rhs) { return rhs.compare(lhs) == 0; } + +inline String to_String(int32_t value) { + char tmp[sizeof(int32_t) * 4]; + snprintf(tmp, sizeof(int32_t) * 4, "%d", value); + return String(tmp, strlen(tmp)); +} + +inline String to_String(float value) { + char tmp[FLT_MAX_10_EXP + 20]; + snprintf(tmp, FLT_MAX_10_EXP + 20, "%f", value); + return String(tmp, strlen(tmp)); +} + +#define DEFAULT_CAPACITY 4 +#define MIN(x, y) ((x < y) ? (x) : (y)) +template +class Vector { + public: + Vector() { + size_ = 0; + capacity_ = DEFAULT_CAPACITY; + elem_size_ = sizeof(T); + data_ = nullptr; + } + + explicit Vector(size_t size) { + size_ = size; + elem_size_ = sizeof(T); + capacity_ = (size == 0 ? DEFAULT_CAPACITY : size); + data_ = new (std::nothrow) T[capacity_]; + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + } + + Vector(size_t size, const T &value) { + size_ = size; + elem_size_ = sizeof(T); + capacity_ = (size == 0 ? 
DEFAULT_CAPACITY : size); + data_ = new (std::nothrow) T[capacity_]; + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + for (int i = 0; i < static_cast(size_); ++i) { + data_[i] = value; + } + } + + Vector(const Vector &vec) { + size_ = vec.size_; + elem_size_ = sizeof(T); + capacity_ = vec.capacity_; + data_ = new (std::nothrow) T[capacity_]; + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + for (int i = 0; i < static_cast(size_); ++i) { + data_[i] = vec.data_[i]; + } + } + + ~Vector() { + if (data_ != nullptr) { + delete[] data_; + } + } + + void clear() { + size_ = 0; + if (data_ != nullptr) { + delete[] data_; + data_ = nullptr; + } + } + + void push_back(const T &elem) { + if (data_ == nullptr) { + data_ = new (std::nothrow) T[capacity_]; + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + } else if (size_ == capacity_) { + resize(size_ + 1); + --size_; + } + data_[size_] = elem; + ++size_; + } + + void push_back(T &&elem) { + if (data_ == nullptr) { + data_ = new (std::nothrow) T[capacity_]; + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + } else if (size_ == capacity_) { + resize(size_ + 1); + --size_; + } + data_[size_] = elem; + ++size_; + } + + void pop_back() { + if (size_ > 0) { + --size_; + } else { + MS_C_EXCEPTION("Index is out of range!"); + } + } + + void insert(const T &elem, size_t index) { + if (index <= size_) { + ++size_; + if (size_ > capacity_) { + resize(size_); + } + if (index == size_ - 1) { + push_back(elem); + } else { + for (int i = static_cast(size_) - 1; i > static_cast(index); --i) { + data_[i + 1] = data_[i]; + } + data_[index] = elem; + } + } else { + MS_C_EXCEPTION("Input index is out of range!"); + } + } + + T *begin() { return data_; } + + const T *begin() const { return data_; } + + T *end() { return data_ + size_; } + + const T *end() const { return data_ + size_; } + + T &front() { + if (size_ > 0) { + return data_[0]; + } + 
MS_C_EXCEPTION("Index is out of range!"); + } + + const T &front() const { + if (size_ > 0) { + return data_[0]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + T &back() { + if (size_ > 0) { + return data_[size_ - 1]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + const T &back() const { + if (size_ > 0) { + return data_[size_ - 1]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + T &at(size_t index) { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + const T &at(size_t index) const { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + T &operator[](size_t index) { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + const T &operator[](size_t index) const { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + T *data() { return data_; } + + const T *data() const { return data_; } + + size_t size() const { return size_; } + + size_t capacity() const { return capacity_; } + + bool empty() const { return size_ == 0; } + + void erase(size_t index) { + if (index == size_ - 1) { + --size_; + } else if (index < size_) { + for (int i = index; i < static_cast(size_); ++i) { + data_[i] = data_[i + 1]; + } + --size_; + } else { + MS_C_EXCEPTION("Input index is out of range!"); + } + } + + void resize(size_t size) { + while (size > capacity_) { + capacity_ *= 2; + } + T *tmp = data_; + data_ = new (std::nothrow) T[capacity_]; + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + for (int i = 0; i < MIN(static_cast(size), static_cast(size_)); ++i) { + data_[i] = tmp[i]; + } + size_ = size; + delete[] tmp; + } + + void reserve(size_t capacity) { + if (capacity > capacity_) { + capacity_ = capacity; + } + } + + Vector &operator=(const Vector &vec) { + if (this == &vec) { + return *this; + } + size_ = vec.size_; + 
elem_size_ = sizeof(T); + capacity_ = vec.capacity_; + data_ = new (std::nothrow) T[capacity_]; + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + for (int i = 0; i < static_cast(size_); ++i) { + data_[i] = vec.data_[i]; + } + return *this; + } + + private: + size_t size_; + size_t elem_size_; + size_t capacity_; + T *data_; +}; +using TensorPtrVector = Vector; +using Uint32Vector = Vector; +class Allocator; +using AllocatorPtr = void *; +class Delegate; +using DelegatePtr = void *; +using DeviceContextVector = Vector; +using KernelCallBack = void (*)(void *, void *); +#else +/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically. +/// +/// \note List public class and interface for reference. +class Allocator; +using AllocatorPtr = std::shared_ptr; + +class Delegate; +using DelegatePtr = std::shared_ptr; + +using TensorPtrVector = std::vector; +using Uint32Vector = std::vector; +template +using Vector = std::vector; + +template +inline std::string to_string(T t) { + return std::to_string(t); +} + +namespace tensor { +using String = std::string; +} // namespace tensor + +namespace session { +using String = std::string; +} // namespace session + +/// \brief CallBackParam defined input arguments for callBack function. +struct CallBackParam { + session::String node_name; /**< node name argument */ + session::String node_type; /**< node type argument */ +}; + +struct GPUCallBackParam : CallBackParam { + double execute_time{-1.f}; +}; + +/// \brief KernelCallBack defined the function pointer for callBack. +using KernelCallBack = std::function inputs, Vector outputs, + const CallBackParam &opInfo)>; + +namespace lite { +using String = std::string; +using DeviceContextVector = std::vector; + +/// \brief Set data of MSTensor from string vector. +/// +/// \param[in] input string vector. +/// \param[out] MSTensor. +/// +/// \return STATUS as an error code of this interface, STATUS is defined in errorcode.h. 
+int MS_API StringsToMSTensor(const Vector &inputs, tensor::MSTensor *tensor); + +/// \brief Get string vector from MSTensor. +/// \param[in] MSTensor. +/// \return string vector. +Vector MS_API MSTensorToStrings(const tensor::MSTensor *tensor); +} // namespace lite +#endif // NOT_USE_STL +} // namespace mindspore +#endif // MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/format.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/format.h new file mode 100644 index 0000000..5f763da --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/format.h @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_MINDAPI_BASE_FORMAT_H_ +#define MINDSPORE_CORE_MINDAPI_BASE_FORMAT_H_ + +#include +#include +namespace mindspore { +enum Format : int64_t { + DEFAULT_FORMAT = -1, + NCHW = 0, + NHWC = 1, + NHWC4 = 2, + HWKC = 3, + HWCK = 4, + KCHW = 5, + CKHW = 6, + KHWC = 7, + CHWK = 8, + HW = 9, + HW4 = 10, + NC = 11, + NC4 = 12, + NC4HW4 = 13, + NUM_OF_FORMAT = 14, + NCDHW = 15, + NWC = 16, + NCW = 17, + NDHWC = 18, + NC8HW8 = 19 +}; +} // namespace mindspore +#endif // MINDSPORE_CORE_MINDAPI_BASE_FORMAT_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/type_id.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/type_id.h new file mode 100644 index 0000000..f8b5827 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/type_id.h @@ -0,0 +1,105 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_MINDAPI_BASE_TYPE_ID_H_ +#define MINDSPORE_CORE_MINDAPI_BASE_TYPE_ID_H_ + +namespace mindspore { +/// \brief TypeId defines data type identifiers. +enum TypeId : int { + kTypeUnknown = 0, + // + // Meta types. 
+ // + kMetaTypeBegin = kTypeUnknown, + kMetaTypeType, // Type + kMetaTypeAnything, + kMetaTypeObject, + kMetaTypeTypeType, // TypeType + kMetaTypeProblem, + kMetaTypeExternal, + kMetaTypeNone, + kMetaTypeNull, + kMetaTypeEllipsis, + kMetaTypeEnd, + // + // Object types + // + kObjectTypeBegin = kMetaTypeEnd, + kObjectTypeNumber, + kObjectTypeString, + kObjectTypeList, + kObjectTypeTuple, + kObjectTypeSlice, + kObjectTypeKeyword, + kObjectTypeTensorType, + kObjectTypeRowTensorType, + kObjectTypeCOOTensorType, + kObjectTypeUndeterminedType, + kObjectTypeClass, + kObjectTypeDictionary, + kObjectTypeFunction, + kObjectTypeJTagged, + kObjectTypeSymbolicKeyType, + kObjectTypeEnvType, + kObjectTypeRefKey, + kObjectTypeRef, + kObjectTypeEnd, + // + // Number Types + // + kNumberTypeBegin = kObjectTypeEnd, + kNumberTypeBool, + kNumberTypeInt, + kNumberTypeInt8, + kNumberTypeInt16, + kNumberTypeInt32, + kNumberTypeInt64, + kNumberTypeUInt, + kNumberTypeUInt8, + kNumberTypeUInt16, + kNumberTypeUInt32, + kNumberTypeUInt64, + kNumberTypeFloat, + kNumberTypeFloat16, + kNumberTypeFloat32, + kNumberTypeFloat64, + kNumberTypeComplex, + kNumberTypeComplex64, + kNumberTypeComplex128, + kNumberTypeInt4, + kNumberTypeGLUInt, + kNumberTypeEnd, + // + // Monad Types + // + kMonadTypeBegin = kNumberTypeEnd, + kObjectTypeMonad, + kObjectTypeUMonad, + kObjectTypeIOMonad, + kMonadTypeEnd, + // + // Sparse Types + // + // Sparse types is placed at the end of enum, + // in order to keep fit with the type of existing model on the lite side. 
+ kSparseTypeBegin = kMonadTypeEnd, + kObjectTypeCSRTensorType, + kObjectTypeSparseTensorType, + kSparseTypeEnd +}; +} // namespace mindspore +#endif // MINDSPORE_CORE_MINDAPI_BASE_TYPE_ID_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/types.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/types.h new file mode 100644 index 0000000..3548964 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/mindapi/base/types.h @@ -0,0 +1,124 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_MINDAPI_BASE_TYPES_H_ +#define MINDSPORE_CORE_MINDAPI_BASE_TYPES_H_ + +#include + +namespace mindspore { +enum CoordinateTransformMode : int64_t { + ASYMMETRIC = 0, + ALIGN_CORNERS = 1, + HALF_PIXEL = 2, + CROP_AND_RESIZE = 3, +}; + +enum class ResizeMethod : int64_t { + UNKNOWN = -1, + LINEAR = 0, + NEAREST = 1, + CUBIC = 2, +}; + +enum class NearestMode : int64_t { + NORMAL = 0, + ROUND_HALF_DOWN = 1, + ROUND_HALF_UP = 2, + FLOOR = 3, + CEIL = 4, +}; + +enum RoundMode : int64_t { + FLOOR = 0, + CEIL = 1, +}; + +enum ActivationType : int64_t { + NO_ACTIVATION = 0, + RELU = 1, + SIGMOID = 2, + RELU6 = 3, + ELU = 4, + LEAKY_RELU = 5, + ABS = 6, + RELU1 = 7, + SOFTSIGN = 8, + SOFTPLUS = 9, + TANH = 10, + SELU = 11, + HSWISH = 12, + HSIGMOID = 13, + THRESHOLDRELU = 14, + LINEAR = 15, + HARD_TANH = 16, + SIGN = 17, + SWISH = 18, + GELU = 19, + GLU = 20, + UNKNOWN = 21, +}; + +enum ReduceMode : int64_t { + Reduce_Mean = 0, + Reduce_Max = 1, + Reduce_Min = 2, + Reduce_Prod = 3, + Reduce_Sum = 4, + Reduce_Sum_Square = 5, + Reduce_ASum = 6, + Reduce_All = 7, + Reduce_L2 = 8 +}; + +enum EltwiseMode : int64_t { + PROD = 0, + SUM = 1, + MAXIMUM = 2, + ELTWISEMODE_UNKNOW = 3, +}; + +enum Reduction : int64_t { + REDUCTION_SUM = 0, + MEAN = 1, + NONE = 2, +}; + +enum PadMode : int64_t { + PAD = 0, + SAME = 1, + VALID = 2, +}; + +enum class LshProjectionType : int64_t { + UNKNOWN = 0, + SPARSE = 1, + DENSE = 2, +}; + +enum PaddingMode : int64_t { + CONSTANT = 0, + REFLECT = 1, + SYMMETRIC = 2, + MODE_RESERVED = 3, +}; + +enum PoolMode : int64_t { + MAX_POOLING = 0, + MEAN_POOLING = 1, +}; +} // namespace mindspore +#endif // MINDSPORE_CORE_MINDAPI_BASE_TYPES_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/model.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/model.h new file mode 100644 index 0000000..fe58efb --- /dev/null +++ 
b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/model.h @@ -0,0 +1,82 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_INCLUDE_MODEL_H_ +#define MINDSPORE_LITE_INCLUDE_MODEL_H_ + +#include "include/lite_utils.h" + +namespace mindspore::lite { +struct MS_API Model { + struct Node { + String name_; + int node_type_; + const void *primitive_ = nullptr; + Uint32Vector input_indices_; + Uint32Vector output_indices_; + int quant_type_; + int device_type_ = -1; + }; + using NodePtrVector = Vector; + struct SubGraph { + String name_; + Uint32Vector input_indices_; + Uint32Vector output_indices_; + Uint32Vector node_indices_; + Uint32Vector tensor_indices_; + }; + using SubGraphPtrVector = Vector; + String name_; + String version_; + Uint32Vector input_indices_; + Uint32Vector output_indices_; + TensorPtrVector all_tensors_; + NodePtrVector all_nodes_; + char *buf = nullptr; + SubGraphPtrVector sub_graphs_; +#ifdef ENABLE_MODEL_OBF + using NodeStatVector = Vector; + using PrimTypeVector = Vector; + using PrimVector = Vector; + PrimTypeVector all_prims_type_; + NodeStatVector all_nodes_stat_; + bool model_obfuscated_ = false; + PrimVector deobf_prims_; +#endif + + /// \brief Static method to create a Model pointer. + static Model *Import(const char *model_buf, size_t size); + + /// \brief Static method to create a Model pointer. 
+ static Model *Import(const char *filename); + + /// \brief method to export model to file. + static int Export(Model *model, const char *filename); + + /// \brief method to export model to buffer. + static int Export(Model *model, char *buf, size_t *size); + + /// \brief Free meta graph temporary buffer + virtual void Free() = 0; + + /// \brief Free all temporary buffer.EG: nodes in the model. + virtual void Destroy() = 0; + + /// \brief Model destruct, free all memory + virtual ~Model() = default; +}; +} // namespace mindspore::lite + +#endif // MINDSPORE_LITE_INCLUDE_MODEL_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/ms_tensor.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/ms_tensor.h new file mode 100644 index 0000000..3035422 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/ms_tensor.h @@ -0,0 +1,134 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ +#define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ + +#include +#include "include/lite_utils.h" +#include "ir/dtype/type_id.h" + +namespace mindspore { +enum Format : int64_t; + +namespace tensor { +/// \brief MSTensor defined tensor in MindSpore Lite. +class MS_API MSTensor { + public: + /// \brief Constructor of MindSpore Lite MSTensor. 
+ /// + /// \return Instance of MindSpore Lite MSTensor. + MSTensor() = default; + + /// \brief Destructor of MindSpore Lite Model. + virtual ~MSTensor() = default; + + /// \brief Create a MSTensor. + /// + /// \return Pointer to an instance of MindSpore Lite MSTensor. + static MSTensor *CreateTensor(const String &name, TypeId type, const Vector &shape, const void *data, + size_t data_len); + + /// \brief Set memory allocator for current MSTensor. + /// + /// \param[in] allocator Define memory allocator, which is shown in allocator.h. + virtual void set_allocator(AllocatorPtr allocator) = 0; + + /// \brief Get memory allocator of current MSTensor. + /// + /// \return Pointer of memory allocator class. + virtual AllocatorPtr allocator() const = 0; + + /// \brief Get data type of the MindSpore Lite MSTensor. + /// + /// \note TypeId is defined in mindspore/mindspore/include/api/type_id.h. Only number types in TypeId enum are + /// suitable for MSTensor. + /// + /// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor. + virtual TypeId data_type() const = 0; + + /// \brief Set data type of current MSTensor. + /// + /// \param[in] data_type Define data type, which is shown in type_id.h. + virtual void set_data_type(TypeId data_type) = 0; + + /// \brief Set format of current MSTensor. + /// + /// \param[in] format Define format of data, which is shown in format.h + virtual void set_format(mindspore::Format format) = 0; + + /// \brief Get format of current MSTensor. + /// + /// \return format, which is shown in format.h + virtual mindspore::Format format() const = 0; + + /// \brief Get shape of the MindSpore Lite MSTensor. + /// + /// \return A vector of int as the shape of the MindSpore Lite MSTensor. + virtual Vector shape() const = 0; + + /// \brief Set the shape of MSTensor. + virtual void set_shape(const Vector &shape) = 0; + + /// \brief Get number of element in MSTensor. + /// + /// \return Number of element in MSTensor. 
+ virtual int ElementsNum() const = 0; + + /// \brief Get byte size of data in MSTensor. + /// + /// \return Byte size of data in MSTensor. + virtual size_t Size() const = 0; + + /// \brief Get the name of MSTensor. + /// + /// \return the name of MSTensor. + virtual String tensor_name() const = 0; + + /// \brief Set the name of MSTensor. + virtual void set_tensor_name(const String &name) = 0; + + /// \brief Get the pointer of data in MSTensor. + /// + /// \note The data pointer can be used to both write and read data in MSTensor. The memory buffer will be + /// automatically allocated. + /// + /// \return the pointer points to data in MSTensor. + virtual void *MutableData() = 0; + + /// \brief Get the pointer of data in MSTensor. + /// + /// \note The data pointer can be used to both write and read data in MSTensor. No memory buffer will be + /// allocated. + /// + /// \return the pointer points to data in MSTensor. + virtual void *data() = 0; + + /// \brief Set the data of MSTensor. + virtual void set_data(void *data) = 0; + + virtual Vector quant_params() const = 0; + + virtual void set_quant_params(Vector) = 0; + + /// \brief Get whether the MSTensor data is const data + /// + /// \return Const flag of MSTensor + virtual bool IsConst() const = 0; +}; +} // namespace tensor +} // namespace mindspore +#endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/registry/register_kernel.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/registry/register_kernel.h new file mode 100644 index 0000000..a458f9e --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/registry/register_kernel.h @@ -0,0 +1,148 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_REGISTRY_REGISTER_KERNEL_H_ +#define MINDSPORE_LITE_INCLUDE_REGISTRY_REGISTER_KERNEL_H_ + +#include +#include +#include +#include +#include "schema/model_generated.h" +#include "include/api/context.h" +#include "include/api/types.h" +#include "include/api/kernel.h" +#include "include/api/data_type.h" +#include "include/api/status.h" + +namespace mindspore { +namespace registry { +/// \brief KernelDesc defined kernel's basic attribute. +struct KernelDesc { + DataType data_type; /**< kernel data type argument */ + int type; /**< op type argument */ + std::string arch; /**< deviceType argument */ + std::string provider; /**< user identification argument */ +}; + +/// \brief CreateKernel Defined a functor to create a kernel. +/// +/// \param[in] inputs Define input tensors of kernel. +/// \param[in] outputs Define output tensors of kernel. +/// \param[in] primitive Define attributes of op. +/// \param[in] ctx Define for holding environment variables during runtime. +/// +/// \return Smart Pointer of kernel. +using CreateKernel = std::function( + const std::vector &inputs, const std::vector &outputs, const schema::Primitive *primitive, + const mindspore::Context *ctx)>; + +/// \brief RegisterKernel Defined registration of kernel. +class MS_API RegisterKernel { + public: + /// \brief Static method to register kernel which is correspondng to an ordinary op. + /// + /// \param[in] arch Define deviceType, such as CPU. + /// \param[in] provider Define the identification of user. 
+ /// \param[in] data_type Define kernel's input data type. + /// \param[in] type Define the ordinary op type. + /// \param[in] creator Define a function pointer to create a kernel. + /// + /// \return Status as a status identification of registering. + static Status RegKernel(const std::string &arch, const std::string &provider, DataType data_type, int type, + const CreateKernel creator); + + /// \brief Static method to register kernel which is corresponding to custom op. + /// + /// \param[in] arch Define deviceType, such as CPU. + /// \param[in] provider Define the identification of user. + /// \param[in] data_type Define kernel's input data type. + /// \param[in] type Define the concrete type of a custom op. + /// \param[in] creator Define a function pointer to create a kernel. + /// + /// \return Status as a status identification of registering. + static Status RegCustomKernel(const std::string &arch, const std::string &provider, DataType data_type, + const std::string &type, const CreateKernel creator); + + /// \brief Static methon to get a kernel's create function. + /// + /// \param[in] desc Define kernel's basic attribute. + /// \param[in] primitive Define the primitive of kernel generated by flatbuffers. + /// + /// \return Function pointer to create a kernel. + static CreateKernel GetCreator(const schema::Primitive *primitive, KernelDesc *desc); +}; + +/// \brief KernelReg Defined registration class of kernel. +class MS_API KernelReg { + public: + /// \brief Destructor of KernelReg. + ~KernelReg() = default; + + /// \brief Method to register ordinary op. + /// + /// \param[in] arch Define deviceType, such as CPU. + /// \param[in] provider Define the identification of user. + /// \param[in] data_type Define kernel's input data type. + /// \param[in] op_type Define the ordinary op type. + /// \param[in] creator Define a function pointer to create a kernel. 
+ KernelReg(const std::string &arch, const std::string &provider, DataType data_type, int op_type, + const CreateKernel creator) { + RegisterKernel::RegKernel(arch, provider, data_type, op_type, creator); + } + + /// \brief Method to register customized op. + /// + /// \param[in] arch Define deviceType, such as CPU. + /// \param[in] provider Define the identification of user. + /// \param[in] data_type Define kernel's input data type. + /// \param[in] op_type Define the concrete type of a custom op. + /// \param[in] creator Define a function pointer to create a kernel. + KernelReg(const std::string &arch, const std::string &provider, DataType data_type, const std::string &op_type, + const CreateKernel creator) { + RegisterKernel::RegCustomKernel(arch, provider, data_type, op_type, creator); + } +}; + +/// \brief Defined registering macro to register ordinary op kernel, which called by user directly. +/// +/// \param[in] arch Define deviceType, such as CPU. +/// \param[in] provider Define the identification of user. +/// \param[in] data_type Define kernel's input data type. +/// \param[in] op_type Define the ordinary op type. +/// \param[in] creator Define a function pointer to create a kernel. +#define REGISTER_KERNEL(arch, provider, data_type, op_type, creator) \ + namespace { \ + static mindspore::registry::KernelReg g_##arch##provider##data_type##op_type##kernelReg(#arch, #provider, data_type, \ + op_type, creator); \ + } // namespace + +/// \brief Defined registering macro to register custom op kernel, which called by user directly. +/// +/// \param[in] arch Define deviceType, such as CPU. +/// \param[in] provider Define the identification of user. +/// \param[in] data_type Define kernel's input data type. +/// \param[in] op_type Define the concrete type of a custom op. +/// \param[in] creator Define a function pointer to create a kernel. 
+#define REGISTER_CUSTOM_KERNEL(arch, provider, data_type, op_type, creator) \ + namespace { \ + static mindspore::registry::KernelReg g_##arch##provider##data_type##op_type##kernelReg(#arch, #provider, data_type, \ + #op_type, creator); \ + } // namespace +} // namespace registry +} // namespace mindspore + +#endif // MINDSPORE_LITE_INCLUDE_REGISTRY_REGISTER_KERNEL_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/registry/register_kernel_interface.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/registry/register_kernel_interface.h new file mode 100644 index 0000000..0254229 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/registry/register_kernel_interface.h @@ -0,0 +1,109 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_REGISTRY_KERNEL_INTERFACE_H_ +#define MINDSPORE_LITE_INCLUDE_REGISTRY_KERNEL_INTERFACE_H_ + +#include +#include +#include +#include +#include "include/kernel_interface.h" +#include "schema/model_generated.h" + +namespace mindspore { +namespace registry { +/// \brief KernelInterfaceCreator defined a functor to create KernelInterface. +using KernelInterfaceCreator = std::function()>; + +/// \brief RegisterKernelInterface defined registration and acquisition of KernelInterface. 
+class MS_API RegisterKernelInterface { + public: + /// \brief Static method to register op whose primitive type is custom. + /// + /// \param[in] provider Define the identification of user. + /// \param[in] op_type Define the concrete type of a custom op. + /// \param[in] creator Define the KernelInterface create function. + /// + /// \return Status as a status identification of registering. + static Status CustomReg(const std::string &provider, const std::string &op_type, + const KernelInterfaceCreator creator); + + /// \brief Static method to register op whose primitive type is ordinary. + /// + /// \param[in] provider Define the identification of user. + /// \param[in] op_type Define the ordinary op type. + /// \param[in] creator Define the KernelInterface create function. + /// + /// \return Status as a status identification of registering. + static Status Reg(const std::string &provider, int op_type, const KernelInterfaceCreator creator); + + /// \brief Static method to get registration of a certain op. + /// + /// \param[in] provider Define the identification of user. + /// \param[in] primitive Define the attributes of a certain op. + /// + /// \return Boolean value to represent registration of a certain op is existing or not. + static std::shared_ptr GetKernelInterface(const std::string &provider, + const schema::Primitive *primitive); +}; + +/// \brief KernelInterfaceReg defined registration class of KernelInterface. +class MS_API KernelInterfaceReg { + public: + /// \brief Constructor of KernelInterfaceReg to register an ordinary op. + /// + /// \param[in] provider Define the identification of user. + /// \param[in] op_type Define the ordinary op type. + /// \param[in] creator Define the KernelInterface create function. + KernelInterfaceReg(const std::string &provider, int op_type, const KernelInterfaceCreator creator) { + RegisterKernelInterface::Reg(provider, op_type, creator); + } + + /// \brief Constructor of KernelInterfaceReg to register custom op. 
+ /// + /// \param[in] provider Define the identification of user. + /// \param[in] op_type Define the concrete type of a custom op. + /// \param[in] creator Define the KernelInterface create function. + KernelInterfaceReg(const std::string &provider, const std::string &op_type, const KernelInterfaceCreator creator) { + RegisterKernelInterface::CustomReg(provider, op_type, creator); + } +}; + +/// \brief Defined registering macro to register ordinary op, which called by user directly. +/// +/// \param[in] provider Define the identification of user. +/// \param[in] op_type Define the ordinary op type. +/// \param[in] creator Define the KernelInterface create function. +#define REGISTER_KERNEL_INTERFACE(provider, op_type, creator) \ + namespace { \ + static mindspore::registry::KernelInterfaceReg g_##provider##op_type##_inter_reg(#provider, op_type, creator); \ + } // namespace + +/// \brief Defined registering macro to register custom op, which called by user directly. +/// +/// \param[in] provider Define the identification of user. +/// \param[in] op_type Define the concrete type of a custom op. +/// \param[in] creator Define the KernelInterface create function. 
+#define REGISTER_CUSTOM_KERNEL_INTERFACE(provider, op_type, creator) \ + namespace { \ + static mindspore::registry::KernelInterfaceReg g_##provider##op_type##_custom_inter_reg(#provider, #op_type, \ + creator); \ + } // namespace +} // namespace registry +} // namespace mindspore + +#endif // MINDSPORE_LITE_INCLUDE_REGISTRY_KERNEL_INTERFACE_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/model_generated.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/model_generated.h new file mode 100644 index 0000000..e08e707 --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/model_generated.h @@ -0,0 +1,2364 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_MODEL_MINDSPORE_SCHEMA_H_ +#define FLATBUFFERS_GENERATED_MODEL_MINDSPORE_SCHEMA_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "ops_types_generated.h" +#include "ops_generated.h" + +namespace mindspore { +namespace schema { + +struct QuantParam; +struct QuantParamBuilder; + +struct Tensor; +struct TensorBuilder; + +struct Primitive; +struct PrimitiveBuilder; + +struct CNode; +struct CNodeBuilder; + +struct SubGraph; +struct SubGraphBuilder; + +struct MetaGraph; +struct MetaGraphBuilder; + +enum WeightQunatCompressType : int32_t { + WeightQunatCompressType_NONE = 0, + WeightQunatCompressType_INDEXING = 1, + WeightQunatCompressType_SPARSE = 2, + WeightQunatCompressType_FSE = 3, + WeightQunatCompressType_MIN = WeightQunatCompressType_NONE, + WeightQunatCompressType_MAX = WeightQunatCompressType_FSE +}; + +inline const WeightQunatCompressType (&EnumValuesWeightQunatCompressType())[4] { + static const WeightQunatCompressType values[] = { + WeightQunatCompressType_NONE, + WeightQunatCompressType_INDEXING, + WeightQunatCompressType_SPARSE, + WeightQunatCompressType_FSE + }; + return values; +} + 
+inline const char * const *EnumNamesWeightQunatCompressType() { + static const char * const names[5] = { + "NONE", + "INDEXING", + "SPARSE", + "FSE", + nullptr + }; + return names; +} + +inline const char *EnumNameWeightQunatCompressType(WeightQunatCompressType e) { + if (flatbuffers::IsOutRange(e, WeightQunatCompressType_NONE, WeightQunatCompressType_FSE)) return ""; + const size_t index = static_cast(e); + return EnumNamesWeightQunatCompressType()[index]; +} + +enum QuantType : int32_t { + QuantType_QUANT_NONE = 0, + QuantType_AwareTraining = 1, + QuantType_WeightQuant = 2, + QuantType_PostTraining = 3, + QuantType_QUANT_WEIGHT = 4, + QuantType_QUANT_ALL = 5, + QuantType_MIN = QuantType_QUANT_NONE, + QuantType_MAX = QuantType_QUANT_ALL +}; + +inline const QuantType (&EnumValuesQuantType())[6] { + static const QuantType values[] = { + QuantType_QUANT_NONE, + QuantType_AwareTraining, + QuantType_WeightQuant, + QuantType_PostTraining, + QuantType_QUANT_WEIGHT, + QuantType_QUANT_ALL + }; + return values; +} + +inline const char * const *EnumNamesQuantType() { + static const char * const names[7] = { + "QUANT_NONE", + "AwareTraining", + "WeightQuant", + "PostTraining", + "QUANT_WEIGHT", + "QUANT_ALL", + nullptr + }; + return names; +} + +inline const char *EnumNameQuantType(QuantType e) { + if (flatbuffers::IsOutRange(e, QuantType_QUANT_NONE, QuantType_QUANT_ALL)) return ""; + const size_t index = static_cast(e); + return EnumNamesQuantType()[index]; +} + +struct QuantParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantParamBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SCALE = 4, + VT_ZEROPOINT = 6, + VT_MIN = 8, + VT_MAX = 10, + VT_NARROWRANGE = 12, + VT_NUMBITS = 14, + VT_INITED = 16, + VT_VARCORR = 18, + VT_MEANCORR = 20, + VT_DSTDTYPE = 22, + VT_ROUNDTYPE = 24, + VT_MULTIPLIER = 26 + }; + double scale() const { + return GetField(VT_SCALE, 0.0); + } + int32_t zeroPoint() const { + return 
GetField(VT_ZEROPOINT, 0); + } + double min() const { + return GetField(VT_MIN, 0.0); + } + double max() const { + return GetField(VT_MAX, 0.0); + } + bool narrowRange() const { + return GetField(VT_NARROWRANGE, 1) != 0; + } + int32_t numBits() const { + return GetField(VT_NUMBITS, 8); + } + bool inited() const { + return GetField(VT_INITED, 0) != 0; + } + float varCorr() const { + return GetField(VT_VARCORR, 1.0f); + } + float meanCorr() const { + return GetField(VT_MEANCORR, 0.0f); + } + int32_t dstDtype() const { + return GetField(VT_DSTDTYPE, 32); + } + int32_t roundType() const { + return GetField(VT_ROUNDTYPE, 1); + } + int32_t multiplier() const { + return GetField(VT_MULTIPLIER, 1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SCALE) && + VerifyField(verifier, VT_ZEROPOINT) && + VerifyField(verifier, VT_MIN) && + VerifyField(verifier, VT_MAX) && + VerifyField(verifier, VT_NARROWRANGE) && + VerifyField(verifier, VT_NUMBITS) && + VerifyField(verifier, VT_INITED) && + VerifyField(verifier, VT_VARCORR) && + VerifyField(verifier, VT_MEANCORR) && + VerifyField(verifier, VT_DSTDTYPE) && + VerifyField(verifier, VT_ROUNDTYPE) && + VerifyField(verifier, VT_MULTIPLIER) && + verifier.EndTable(); + } +}; + +struct QuantParamBuilder { + typedef QuantParam Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_scale(double scale) { + fbb_.AddElement(QuantParam::VT_SCALE, scale, 0.0); + } + void add_zeroPoint(int32_t zeroPoint) { + fbb_.AddElement(QuantParam::VT_ZEROPOINT, zeroPoint, 0); + } + void add_min(double min) { + fbb_.AddElement(QuantParam::VT_MIN, min, 0.0); + } + void add_max(double max) { + fbb_.AddElement(QuantParam::VT_MAX, max, 0.0); + } + void add_narrowRange(bool narrowRange) { + fbb_.AddElement(QuantParam::VT_NARROWRANGE, static_cast(narrowRange), 1); + } + void add_numBits(int32_t numBits) { + fbb_.AddElement(QuantParam::VT_NUMBITS, 
numBits, 8); + } + void add_inited(bool inited) { + fbb_.AddElement(QuantParam::VT_INITED, static_cast(inited), 0); + } + void add_varCorr(float varCorr) { + fbb_.AddElement(QuantParam::VT_VARCORR, varCorr, 1.0f); + } + void add_meanCorr(float meanCorr) { + fbb_.AddElement(QuantParam::VT_MEANCORR, meanCorr, 0.0f); + } + void add_dstDtype(int32_t dstDtype) { + fbb_.AddElement(QuantParam::VT_DSTDTYPE, dstDtype, 32); + } + void add_roundType(int32_t roundType) { + fbb_.AddElement(QuantParam::VT_ROUNDTYPE, roundType, 1); + } + void add_multiplier(int32_t multiplier) { + fbb_.AddElement(QuantParam::VT_MULTIPLIER, multiplier, 1); + } + explicit QuantParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantParam( + flatbuffers::FlatBufferBuilder &_fbb, + double scale = 0.0, + int32_t zeroPoint = 0, + double min = 0.0, + double max = 0.0, + bool narrowRange = true, + int32_t numBits = 8, + bool inited = false, + float varCorr = 1.0f, + float meanCorr = 0.0f, + int32_t dstDtype = 32, + int32_t roundType = 1, + int32_t multiplier = 1) { + QuantParamBuilder builder_(_fbb); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_scale(scale); + builder_.add_multiplier(multiplier); + builder_.add_roundType(roundType); + builder_.add_dstDtype(dstDtype); + builder_.add_meanCorr(meanCorr); + builder_.add_varCorr(varCorr); + builder_.add_numBits(numBits); + builder_.add_zeroPoint(zeroPoint); + builder_.add_inited(inited); + builder_.add_narrowRange(narrowRange); + return builder_.Finish(); +} + +struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TensorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NODETYPE = 4, + VT_DATATYPE = 6, + VT_DIMS = 8, + VT_FORMAT = 10, + VT_REFCOUNT = 12, + 
VT_OFFSET = 14, + VT_DATA = 16, + VT_QUANTPARAMS = 18, + VT_QUANTCLUSTERS = 20, + VT_NAME = 22, + VT_ENABLEHUFFMANCODE = 24, + VT_WEIGHTQUNATCOMPRESSTYPE = 26 + }; + int32_t nodeType() const { + return GetField(VT_NODETYPE, 0); + } + int32_t dataType() const { + return GetField(VT_DATATYPE, 0); + } + const flatbuffers::Vector *dims() const { + return GetPointer *>(VT_DIMS); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t refCount() const { + return GetField(VT_REFCOUNT, 0); + } + int32_t offset() const { + return GetField(VT_OFFSET, 0); + } + const flatbuffers::Vector *data() const { + return GetPointer *>(VT_DATA); + } + const flatbuffers::Vector> *quantParams() const { + return GetPointer> *>(VT_QUANTPARAMS); + } + const flatbuffers::Vector *quantClusters() const { + return GetPointer *>(VT_QUANTCLUSTERS); + } + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool enableHuffmanCode() const { + return GetField(VT_ENABLEHUFFMANCODE, 0) != 0; + } + mindspore::schema::WeightQunatCompressType weightQunatCompressType() const { + return static_cast(GetField(VT_WEIGHTQUNATCOMPRESSTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NODETYPE) && + VerifyField(verifier, VT_DATATYPE) && + VerifyOffset(verifier, VT_DIMS) && + verifier.VerifyVector(dims()) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_REFCOUNT) && + VerifyField(verifier, VT_OFFSET) && + VerifyOffset(verifier, VT_DATA) && + verifier.VerifyVector(data()) && + VerifyOffset(verifier, VT_QUANTPARAMS) && + verifier.VerifyVector(quantParams()) && + verifier.VerifyVectorOfTables(quantParams()) && + VerifyOffset(verifier, VT_QUANTCLUSTERS) && + verifier.VerifyVector(quantClusters()) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_ENABLEHUFFMANCODE) && + 
VerifyField(verifier, VT_WEIGHTQUNATCOMPRESSTYPE) && + verifier.EndTable(); + } +}; + +struct TensorBuilder { + typedef Tensor Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_nodeType(int32_t nodeType) { + fbb_.AddElement(Tensor::VT_NODETYPE, nodeType, 0); + } + void add_dataType(int32_t dataType) { + fbb_.AddElement(Tensor::VT_DATATYPE, dataType, 0); + } + void add_dims(flatbuffers::Offset> dims) { + fbb_.AddOffset(Tensor::VT_DIMS, dims); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(Tensor::VT_FORMAT, static_cast(format), 0); + } + void add_refCount(int32_t refCount) { + fbb_.AddElement(Tensor::VT_REFCOUNT, refCount, 0); + } + void add_offset(int32_t offset) { + fbb_.AddElement(Tensor::VT_OFFSET, offset, 0); + } + void add_data(flatbuffers::Offset> data) { + fbb_.AddOffset(Tensor::VT_DATA, data); + } + void add_quantParams(flatbuffers::Offset>> quantParams) { + fbb_.AddOffset(Tensor::VT_QUANTPARAMS, quantParams); + } + void add_quantClusters(flatbuffers::Offset> quantClusters) { + fbb_.AddOffset(Tensor::VT_QUANTCLUSTERS, quantClusters); + } + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(Tensor::VT_NAME, name); + } + void add_enableHuffmanCode(bool enableHuffmanCode) { + fbb_.AddElement(Tensor::VT_ENABLEHUFFMANCODE, static_cast(enableHuffmanCode), 0); + } + void add_weightQunatCompressType(mindspore::schema::WeightQunatCompressType weightQunatCompressType) { + fbb_.AddElement(Tensor::VT_WEIGHTQUNATCOMPRESSTYPE, static_cast(weightQunatCompressType), 0); + } + explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensor( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t nodeType = 0, + int32_t dataType = 0, + flatbuffers::Offset> dims = 0, + 
mindspore::schema::Format format = mindspore::schema::Format_NCHW, + int32_t refCount = 0, + int32_t offset = 0, + flatbuffers::Offset> data = 0, + flatbuffers::Offset>> quantParams = 0, + flatbuffers::Offset> quantClusters = 0, + flatbuffers::Offset name = 0, + bool enableHuffmanCode = false, + mindspore::schema::WeightQunatCompressType weightQunatCompressType = mindspore::schema::WeightQunatCompressType_NONE) { + TensorBuilder builder_(_fbb); + builder_.add_weightQunatCompressType(weightQunatCompressType); + builder_.add_name(name); + builder_.add_quantClusters(quantClusters); + builder_.add_quantParams(quantParams); + builder_.add_data(data); + builder_.add_offset(offset); + builder_.add_refCount(refCount); + builder_.add_format(format); + builder_.add_dims(dims); + builder_.add_dataType(dataType); + builder_.add_nodeType(nodeType); + builder_.add_enableHuffmanCode(enableHuffmanCode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateTensorDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t nodeType = 0, + int32_t dataType = 0, + const std::vector *dims = nullptr, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + int32_t refCount = 0, + int32_t offset = 0, + const std::vector *data = nullptr, + const std::vector> *quantParams = nullptr, + const std::vector *quantClusters = nullptr, + const char *name = nullptr, + bool enableHuffmanCode = false, + mindspore::schema::WeightQunatCompressType weightQunatCompressType = mindspore::schema::WeightQunatCompressType_NONE) { + auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; + auto data__ = data ? _fbb.CreateVector(*data) : 0; + auto quantParams__ = quantParams ? _fbb.CreateVector>(*quantParams) : 0; + auto quantClusters__ = quantClusters ? _fbb.CreateVector(*quantClusters) : 0; + auto name__ = name ? 
_fbb.CreateString(name) : 0; + return mindspore::schema::CreateTensor( + _fbb, + nodeType, + dataType, + dims__, + format, + refCount, + offset, + data__, + quantParams__, + quantClusters__, + name__, + enableHuffmanCode, + weightQunatCompressType); +} + +struct Primitive FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PrimitiveBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VALUE_TYPE = 4, + VT_VALUE = 6 + }; + mindspore::schema::PrimitiveType value_type() const { + return static_cast(GetField(VT_VALUE_TYPE, 0)); + } + const void *value() const { + return GetPointer(VT_VALUE); + } + template const T *value_as() const; + const mindspore::schema::Abs *value_as_Abs() const { + return value_type() == mindspore::schema::PrimitiveType_Abs ? static_cast(value()) : nullptr; + } + const mindspore::schema::Activation *value_as_Activation() const { + return value_type() == mindspore::schema::PrimitiveType_Activation ? static_cast(value()) : nullptr; + } + const mindspore::schema::ActivationGrad *value_as_ActivationGrad() const { + return value_type() == mindspore::schema::PrimitiveType_ActivationGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Adam *value_as_Adam() const { + return value_type() == mindspore::schema::PrimitiveType_Adam ? static_cast(value()) : nullptr; + } + const mindspore::schema::AddFusion *value_as_AddFusion() const { + return value_type() == mindspore::schema::PrimitiveType_AddFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::AdderFusion *value_as_AdderFusion() const { + return value_type() == mindspore::schema::PrimitiveType_AdderFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::AddGrad *value_as_AddGrad() const { + return value_type() == mindspore::schema::PrimitiveType_AddGrad ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::AddN *value_as_AddN() const { + return value_type() == mindspore::schema::PrimitiveType_AddN ? static_cast(value()) : nullptr; + } + const mindspore::schema::All *value_as_All() const { + return value_type() == mindspore::schema::PrimitiveType_All ? static_cast(value()) : nullptr; + } + const mindspore::schema::ApplyMomentum *value_as_ApplyMomentum() const { + return value_type() == mindspore::schema::PrimitiveType_ApplyMomentum ? static_cast(value()) : nullptr; + } + const mindspore::schema::ArgMaxFusion *value_as_ArgMaxFusion() const { + return value_type() == mindspore::schema::PrimitiveType_ArgMaxFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::ArgMinFusion *value_as_ArgMinFusion() const { + return value_type() == mindspore::schema::PrimitiveType_ArgMinFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::Assert *value_as_Assert() const { + return value_type() == mindspore::schema::PrimitiveType_Assert ? static_cast(value()) : nullptr; + } + const mindspore::schema::Assign *value_as_Assign() const { + return value_type() == mindspore::schema::PrimitiveType_Assign ? static_cast(value()) : nullptr; + } + const mindspore::schema::AssignAdd *value_as_AssignAdd() const { + return value_type() == mindspore::schema::PrimitiveType_AssignAdd ? static_cast(value()) : nullptr; + } + const mindspore::schema::AudioSpectrogram *value_as_AudioSpectrogram() const { + return value_type() == mindspore::schema::PrimitiveType_AudioSpectrogram ? static_cast(value()) : nullptr; + } + const mindspore::schema::AvgPoolFusion *value_as_AvgPoolFusion() const { + return value_type() == mindspore::schema::PrimitiveType_AvgPoolFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::AvgPoolGrad *value_as_AvgPoolGrad() const { + return value_type() == mindspore::schema::PrimitiveType_AvgPoolGrad ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::BatchNorm *value_as_BatchNorm() const { + return value_type() == mindspore::schema::PrimitiveType_BatchNorm ? static_cast(value()) : nullptr; + } + const mindspore::schema::BatchNormGrad *value_as_BatchNormGrad() const { + return value_type() == mindspore::schema::PrimitiveType_BatchNormGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::BatchToSpace *value_as_BatchToSpace() const { + return value_type() == mindspore::schema::PrimitiveType_BatchToSpace ? static_cast(value()) : nullptr; + } + const mindspore::schema::BatchToSpaceND *value_as_BatchToSpaceND() const { + return value_type() == mindspore::schema::PrimitiveType_BatchToSpaceND ? static_cast(value()) : nullptr; + } + const mindspore::schema::BiasAdd *value_as_BiasAdd() const { + return value_type() == mindspore::schema::PrimitiveType_BiasAdd ? static_cast(value()) : nullptr; + } + const mindspore::schema::BinaryCrossEntropy *value_as_BinaryCrossEntropy() const { + return value_type() == mindspore::schema::PrimitiveType_BinaryCrossEntropy ? static_cast(value()) : nullptr; + } + const mindspore::schema::BinaryCrossEntropyGrad *value_as_BinaryCrossEntropyGrad() const { + return value_type() == mindspore::schema::PrimitiveType_BinaryCrossEntropyGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::BiasAddGrad *value_as_BiasAddGrad() const { + return value_type() == mindspore::schema::PrimitiveType_BiasAddGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::BroadcastTo *value_as_BroadcastTo() const { + return value_type() == mindspore::schema::PrimitiveType_BroadcastTo ? static_cast(value()) : nullptr; + } + const mindspore::schema::Cast *value_as_Cast() const { + return value_type() == mindspore::schema::PrimitiveType_Cast ? static_cast(value()) : nullptr; + } + const mindspore::schema::Ceil *value_as_Ceil() const { + return value_type() == mindspore::schema::PrimitiveType_Ceil ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::Clip *value_as_Clip() const { + return value_type() == mindspore::schema::PrimitiveType_Clip ? static_cast(value()) : nullptr; + } + const mindspore::schema::Concat *value_as_Concat() const { + return value_type() == mindspore::schema::PrimitiveType_Concat ? static_cast(value()) : nullptr; + } + const mindspore::schema::Attention *value_as_Attention() const { + return value_type() == mindspore::schema::PrimitiveType_Attention ? static_cast(value()) : nullptr; + } + const mindspore::schema::Conv2DBackpropFilterFusion *value_as_Conv2DBackpropFilterFusion() const { + return value_type() == mindspore::schema::PrimitiveType_Conv2DBackpropFilterFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::Conv2DBackpropInputFusion *value_as_Conv2DBackpropInputFusion() const { + return value_type() == mindspore::schema::PrimitiveType_Conv2DBackpropInputFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::Conv2DFusion *value_as_Conv2DFusion() const { + return value_type() == mindspore::schema::PrimitiveType_Conv2DFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::Conv2dTransposeFusion *value_as_Conv2dTransposeFusion() const { + return value_type() == mindspore::schema::PrimitiveType_Conv2dTransposeFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::Cos *value_as_Cos() const { + return value_type() == mindspore::schema::PrimitiveType_Cos ? static_cast(value()) : nullptr; + } + const mindspore::schema::ConstantOfShape *value_as_ConstantOfShape() const { + return value_type() == mindspore::schema::PrimitiveType_ConstantOfShape ? static_cast(value()) : nullptr; + } + const mindspore::schema::Crop *value_as_Crop() const { + return value_type() == mindspore::schema::PrimitiveType_Crop ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::CustomExtractFeatures *value_as_CustomExtractFeatures() const { + return value_type() == mindspore::schema::PrimitiveType_CustomExtractFeatures ? static_cast(value()) : nullptr; + } + const mindspore::schema::CustomNormalize *value_as_CustomNormalize() const { + return value_type() == mindspore::schema::PrimitiveType_CustomNormalize ? static_cast(value()) : nullptr; + } + const mindspore::schema::CustomPredict *value_as_CustomPredict() const { + return value_type() == mindspore::schema::PrimitiveType_CustomPredict ? static_cast(value()) : nullptr; + } + const mindspore::schema::DeConv2DGradFilter *value_as_DeConv2DGradFilter() const { + return value_type() == mindspore::schema::PrimitiveType_DeConv2DGradFilter ? static_cast(value()) : nullptr; + } + const mindspore::schema::Depend *value_as_Depend() const { + return value_type() == mindspore::schema::PrimitiveType_Depend ? static_cast(value()) : nullptr; + } + const mindspore::schema::DepthToSpace *value_as_DepthToSpace() const { + return value_type() == mindspore::schema::PrimitiveType_DepthToSpace ? static_cast(value()) : nullptr; + } + const mindspore::schema::DetectionPostProcess *value_as_DetectionPostProcess() const { + return value_type() == mindspore::schema::PrimitiveType_DetectionPostProcess ? static_cast(value()) : nullptr; + } + const mindspore::schema::DivFusion *value_as_DivFusion() const { + return value_type() == mindspore::schema::PrimitiveType_DivFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::DivGrad *value_as_DivGrad() const { + return value_type() == mindspore::schema::PrimitiveType_DivGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Dropout *value_as_Dropout() const { + return value_type() == mindspore::schema::PrimitiveType_Dropout ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::DropoutGrad *value_as_DropoutGrad() const { + return value_type() == mindspore::schema::PrimitiveType_DropoutGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Elu *value_as_Elu() const { + return value_type() == mindspore::schema::PrimitiveType_Elu ? static_cast(value()) : nullptr; + } + const mindspore::schema::Eltwise *value_as_Eltwise() const { + return value_type() == mindspore::schema::PrimitiveType_Eltwise ? static_cast(value()) : nullptr; + } + const mindspore::schema::Equal *value_as_Equal() const { + return value_type() == mindspore::schema::PrimitiveType_Equal ? static_cast(value()) : nullptr; + } + const mindspore::schema::EmbeddingLookupFusion *value_as_EmbeddingLookupFusion() const { + return value_type() == mindspore::schema::PrimitiveType_EmbeddingLookupFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::ExpFusion *value_as_ExpFusion() const { + return value_type() == mindspore::schema::PrimitiveType_ExpFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::ExpandDims *value_as_ExpandDims() const { + return value_type() == mindspore::schema::PrimitiveType_ExpandDims ? static_cast(value()) : nullptr; + } + const mindspore::schema::FakeQuantWithMinMaxVars *value_as_FakeQuantWithMinMaxVars() const { + return value_type() == mindspore::schema::PrimitiveType_FakeQuantWithMinMaxVars ? static_cast(value()) : nullptr; + } + const mindspore::schema::FakeQuantWithMinMaxVarsPerChannel *value_as_FakeQuantWithMinMaxVarsPerChannel() const { + return value_type() == mindspore::schema::PrimitiveType_FakeQuantWithMinMaxVarsPerChannel ? static_cast(value()) : nullptr; + } + const mindspore::schema::FftReal *value_as_FftReal() const { + return value_type() == mindspore::schema::PrimitiveType_FftReal ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::FftImag *value_as_FftImag() const { + return value_type() == mindspore::schema::PrimitiveType_FftImag ? static_cast(value()) : nullptr; + } + const mindspore::schema::Flatten *value_as_Flatten() const { + return value_type() == mindspore::schema::PrimitiveType_Flatten ? static_cast(value()) : nullptr; + } + const mindspore::schema::FlattenGrad *value_as_FlattenGrad() const { + return value_type() == mindspore::schema::PrimitiveType_FlattenGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Floor *value_as_Floor() const { + return value_type() == mindspore::schema::PrimitiveType_Floor ? static_cast(value()) : nullptr; + } + const mindspore::schema::FloorDiv *value_as_FloorDiv() const { + return value_type() == mindspore::schema::PrimitiveType_FloorDiv ? static_cast(value()) : nullptr; + } + const mindspore::schema::FloorMod *value_as_FloorMod() const { + return value_type() == mindspore::schema::PrimitiveType_FloorMod ? static_cast(value()) : nullptr; + } + const mindspore::schema::Fill *value_as_Fill() const { + return value_type() == mindspore::schema::PrimitiveType_Fill ? static_cast(value()) : nullptr; + } + const mindspore::schema::FullConnection *value_as_FullConnection() const { + return value_type() == mindspore::schema::PrimitiveType_FullConnection ? static_cast(value()) : nullptr; + } + const mindspore::schema::FusedBatchNorm *value_as_FusedBatchNorm() const { + return value_type() == mindspore::schema::PrimitiveType_FusedBatchNorm ? static_cast(value()) : nullptr; + } + const mindspore::schema::Gather *value_as_Gather() const { + return value_type() == mindspore::schema::PrimitiveType_Gather ? static_cast(value()) : nullptr; + } + const mindspore::schema::GatherNd *value_as_GatherNd() const { + return value_type() == mindspore::schema::PrimitiveType_GatherNd ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::Greater *value_as_Greater() const { + return value_type() == mindspore::schema::PrimitiveType_Greater ? static_cast(value()) : nullptr; + } + const mindspore::schema::GreaterEqual *value_as_GreaterEqual() const { + return value_type() == mindspore::schema::PrimitiveType_GreaterEqual ? static_cast(value()) : nullptr; + } + const mindspore::schema::HashtableLookup *value_as_HashtableLookup() const { + return value_type() == mindspore::schema::PrimitiveType_HashtableLookup ? static_cast(value()) : nullptr; + } + const mindspore::schema::InstanceNorm *value_as_InstanceNorm() const { + return value_type() == mindspore::schema::PrimitiveType_InstanceNorm ? static_cast(value()) : nullptr; + } + const mindspore::schema::LayerNormFusion *value_as_LayerNormFusion() const { + return value_type() == mindspore::schema::PrimitiveType_LayerNormFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::LeakyRelu *value_as_LeakyRelu() const { + return value_type() == mindspore::schema::PrimitiveType_LeakyRelu ? static_cast(value()) : nullptr; + } + const mindspore::schema::Less *value_as_Less() const { + return value_type() == mindspore::schema::PrimitiveType_Less ? static_cast(value()) : nullptr; + } + const mindspore::schema::LessEqual *value_as_LessEqual() const { + return value_type() == mindspore::schema::PrimitiveType_LessEqual ? static_cast(value()) : nullptr; + } + const mindspore::schema::Log *value_as_Log() const { + return value_type() == mindspore::schema::PrimitiveType_Log ? static_cast(value()) : nullptr; + } + const mindspore::schema::LogGrad *value_as_LogGrad() const { + return value_type() == mindspore::schema::PrimitiveType_LogGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::LogicalAnd *value_as_LogicalAnd() const { + return value_type() == mindspore::schema::PrimitiveType_LogicalAnd ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::LogicalNot *value_as_LogicalNot() const { + return value_type() == mindspore::schema::PrimitiveType_LogicalNot ? static_cast(value()) : nullptr; + } + const mindspore::schema::LogicalOr *value_as_LogicalOr() const { + return value_type() == mindspore::schema::PrimitiveType_LogicalOr ? static_cast(value()) : nullptr; + } + const mindspore::schema::LpNormalization *value_as_LpNormalization() const { + return value_type() == mindspore::schema::PrimitiveType_LpNormalization ? static_cast(value()) : nullptr; + } + const mindspore::schema::LRN *value_as_LRN() const { + return value_type() == mindspore::schema::PrimitiveType_LRN ? static_cast(value()) : nullptr; + } + const mindspore::schema::LshProjection *value_as_LshProjection() const { + return value_type() == mindspore::schema::PrimitiveType_LshProjection ? static_cast(value()) : nullptr; + } + const mindspore::schema::LSTM *value_as_LSTM() const { + return value_type() == mindspore::schema::PrimitiveType_LSTM ? static_cast(value()) : nullptr; + } + const mindspore::schema::L2NormalizeFusion *value_as_L2NormalizeFusion() const { + return value_type() == mindspore::schema::PrimitiveType_L2NormalizeFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::MatMul *value_as_MatMul() const { + return value_type() == mindspore::schema::PrimitiveType_MatMul ? static_cast(value()) : nullptr; + } + const mindspore::schema::Maximum *value_as_Maximum() const { + return value_type() == mindspore::schema::PrimitiveType_Maximum ? static_cast(value()) : nullptr; + } + const mindspore::schema::MaximumGrad *value_as_MaximumGrad() const { + return value_type() == mindspore::schema::PrimitiveType_MaximumGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::MaxPoolFusion *value_as_MaxPoolFusion() const { + return value_type() == mindspore::schema::PrimitiveType_MaxPoolFusion ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::MaxPoolGrad *value_as_MaxPoolGrad() const { + return value_type() == mindspore::schema::PrimitiveType_MaxPoolGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Merge *value_as_Merge() const { + return value_type() == mindspore::schema::PrimitiveType_Merge ? static_cast(value()) : nullptr; + } + const mindspore::schema::Mfcc *value_as_Mfcc() const { + return value_type() == mindspore::schema::PrimitiveType_Mfcc ? static_cast(value()) : nullptr; + } + const mindspore::schema::Minimum *value_as_Minimum() const { + return value_type() == mindspore::schema::PrimitiveType_Minimum ? static_cast(value()) : nullptr; + } + const mindspore::schema::MinimumGrad *value_as_MinimumGrad() const { + return value_type() == mindspore::schema::PrimitiveType_MinimumGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Mod *value_as_Mod() const { + return value_type() == mindspore::schema::PrimitiveType_Mod ? static_cast(value()) : nullptr; + } + const mindspore::schema::MulFusion *value_as_MulFusion() const { + return value_type() == mindspore::schema::PrimitiveType_MulFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::MulGrad *value_as_MulGrad() const { + return value_type() == mindspore::schema::PrimitiveType_MulGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Neg *value_as_Neg() const { + return value_type() == mindspore::schema::PrimitiveType_Neg ? static_cast(value()) : nullptr; + } + const mindspore::schema::NegGrad *value_as_NegGrad() const { + return value_type() == mindspore::schema::PrimitiveType_NegGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::NotEqual *value_as_NotEqual() const { + return value_type() == mindspore::schema::PrimitiveType_NotEqual ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::NonMaxSuppression *value_as_NonMaxSuppression() const { + return value_type() == mindspore::schema::PrimitiveType_NonMaxSuppression ? static_cast(value()) : nullptr; + } + const mindspore::schema::OneHot *value_as_OneHot() const { + return value_type() == mindspore::schema::PrimitiveType_OneHot ? static_cast(value()) : nullptr; + } + const mindspore::schema::OnesLike *value_as_OnesLike() const { + return value_type() == mindspore::schema::PrimitiveType_OnesLike ? static_cast(value()) : nullptr; + } + const mindspore::schema::PadFusion *value_as_PadFusion() const { + return value_type() == mindspore::schema::PrimitiveType_PadFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::PartialFusion *value_as_PartialFusion() const { + return value_type() == mindspore::schema::PrimitiveType_PartialFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::PowerGrad *value_as_PowerGrad() const { + return value_type() == mindspore::schema::PrimitiveType_PowerGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::PowFusion *value_as_PowFusion() const { + return value_type() == mindspore::schema::PrimitiveType_PowFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::PriorBox *value_as_PriorBox() const { + return value_type() == mindspore::schema::PrimitiveType_PriorBox ? static_cast(value()) : nullptr; + } + const mindspore::schema::PReLUFusion *value_as_PReLUFusion() const { + return value_type() == mindspore::schema::PrimitiveType_PReLUFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::QuantDTypeCast *value_as_QuantDTypeCast() const { + return value_type() == mindspore::schema::PrimitiveType_QuantDTypeCast ? static_cast(value()) : nullptr; + } + const mindspore::schema::Rank *value_as_Rank() const { + return value_type() == mindspore::schema::PrimitiveType_Rank ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::Range *value_as_Range() const { + return value_type() == mindspore::schema::PrimitiveType_Range ? static_cast(value()) : nullptr; + } + const mindspore::schema::Reciprocal *value_as_Reciprocal() const { + return value_type() == mindspore::schema::PrimitiveType_Reciprocal ? static_cast(value()) : nullptr; + } + const mindspore::schema::RealDiv *value_as_RealDiv() const { + return value_type() == mindspore::schema::PrimitiveType_RealDiv ? static_cast(value()) : nullptr; + } + const mindspore::schema::ReduceFusion *value_as_ReduceFusion() const { + return value_type() == mindspore::schema::PrimitiveType_ReduceFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::Reshape *value_as_Reshape() const { + return value_type() == mindspore::schema::PrimitiveType_Reshape ? static_cast(value()) : nullptr; + } + const mindspore::schema::Resize *value_as_Resize() const { + return value_type() == mindspore::schema::PrimitiveType_Resize ? static_cast(value()) : nullptr; + } + const mindspore::schema::ReverseSequence *value_as_ReverseSequence() const { + return value_type() == mindspore::schema::PrimitiveType_ReverseSequence ? static_cast(value()) : nullptr; + } + const mindspore::schema::ReverseV2 *value_as_ReverseV2() const { + return value_type() == mindspore::schema::PrimitiveType_ReverseV2 ? static_cast(value()) : nullptr; + } + const mindspore::schema::Rfft *value_as_Rfft() const { + return value_type() == mindspore::schema::PrimitiveType_Rfft ? static_cast(value()) : nullptr; + } + const mindspore::schema::ROIPooling *value_as_ROIPooling() const { + return value_type() == mindspore::schema::PrimitiveType_ROIPooling ? static_cast(value()) : nullptr; + } + const mindspore::schema::Round *value_as_Round() const { + return value_type() == mindspore::schema::PrimitiveType_Round ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::Rsqrt *value_as_Rsqrt() const { + return value_type() == mindspore::schema::PrimitiveType_Rsqrt ? static_cast(value()) : nullptr; + } + const mindspore::schema::ScaleFusion *value_as_ScaleFusion() const { + return value_type() == mindspore::schema::PrimitiveType_ScaleFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::ScatterNd *value_as_ScatterNd() const { + return value_type() == mindspore::schema::PrimitiveType_ScatterNd ? static_cast(value()) : nullptr; + } + const mindspore::schema::SGD *value_as_SGD() const { + return value_type() == mindspore::schema::PrimitiveType_SGD ? static_cast(value()) : nullptr; + } + const mindspore::schema::Shape *value_as_Shape() const { + return value_type() == mindspore::schema::PrimitiveType_Shape ? static_cast(value()) : nullptr; + } + const mindspore::schema::SigmoidCrossEntropyWithLogits *value_as_SigmoidCrossEntropyWithLogits() const { + return value_type() == mindspore::schema::PrimitiveType_SigmoidCrossEntropyWithLogits ? static_cast(value()) : nullptr; + } + const mindspore::schema::SigmoidCrossEntropyWithLogitsGrad *value_as_SigmoidCrossEntropyWithLogitsGrad() const { + return value_type() == mindspore::schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Sin *value_as_Sin() const { + return value_type() == mindspore::schema::PrimitiveType_Sin ? static_cast(value()) : nullptr; + } + const mindspore::schema::SkipGram *value_as_SkipGram() const { + return value_type() == mindspore::schema::PrimitiveType_SkipGram ? static_cast(value()) : nullptr; + } + const mindspore::schema::SliceFusion *value_as_SliceFusion() const { + return value_type() == mindspore::schema::PrimitiveType_SliceFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::SmoothL1Loss *value_as_SmoothL1Loss() const { + return value_type() == mindspore::schema::PrimitiveType_SmoothL1Loss ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::SmoothL1LossGrad *value_as_SmoothL1LossGrad() const { + return value_type() == mindspore::schema::PrimitiveType_SmoothL1LossGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Softmax *value_as_Softmax() const { + return value_type() == mindspore::schema::PrimitiveType_Softmax ? static_cast(value()) : nullptr; + } + const mindspore::schema::SoftmaxCrossEntropyWithLogits *value_as_SoftmaxCrossEntropyWithLogits() const { + return value_type() == mindspore::schema::PrimitiveType_SoftmaxCrossEntropyWithLogits ? static_cast(value()) : nullptr; + } + const mindspore::schema::SpaceToBatch *value_as_SpaceToBatch() const { + return value_type() == mindspore::schema::PrimitiveType_SpaceToBatch ? static_cast(value()) : nullptr; + } + const mindspore::schema::SpaceToBatchND *value_as_SpaceToBatchND() const { + return value_type() == mindspore::schema::PrimitiveType_SpaceToBatchND ? static_cast(value()) : nullptr; + } + const mindspore::schema::SpaceToDepth *value_as_SpaceToDepth() const { + return value_type() == mindspore::schema::PrimitiveType_SpaceToDepth ? static_cast(value()) : nullptr; + } + const mindspore::schema::SparseSoftmaxCrossEntropyWithLogits *value_as_SparseSoftmaxCrossEntropyWithLogits() const { + return value_type() == mindspore::schema::PrimitiveType_SparseSoftmaxCrossEntropyWithLogits ? static_cast(value()) : nullptr; + } + const mindspore::schema::SparseToDense *value_as_SparseToDense() const { + return value_type() == mindspore::schema::PrimitiveType_SparseToDense ? static_cast(value()) : nullptr; + } + const mindspore::schema::Split *value_as_Split() const { + return value_type() == mindspore::schema::PrimitiveType_Split ? static_cast(value()) : nullptr; + } + const mindspore::schema::Sqrt *value_as_Sqrt() const { + return value_type() == mindspore::schema::PrimitiveType_Sqrt ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::Squeeze *value_as_Squeeze() const { + return value_type() == mindspore::schema::PrimitiveType_Squeeze ? static_cast(value()) : nullptr; + } + const mindspore::schema::Square *value_as_Square() const { + return value_type() == mindspore::schema::PrimitiveType_Square ? static_cast(value()) : nullptr; + } + const mindspore::schema::SquaredDifference *value_as_SquaredDifference() const { + return value_type() == mindspore::schema::PrimitiveType_SquaredDifference ? static_cast(value()) : nullptr; + } + const mindspore::schema::Stack *value_as_Stack() const { + return value_type() == mindspore::schema::PrimitiveType_Stack ? static_cast(value()) : nullptr; + } + const mindspore::schema::StridedSlice *value_as_StridedSlice() const { + return value_type() == mindspore::schema::PrimitiveType_StridedSlice ? static_cast(value()) : nullptr; + } + const mindspore::schema::SubFusion *value_as_SubFusion() const { + return value_type() == mindspore::schema::PrimitiveType_SubFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::SubGrad *value_as_SubGrad() const { + return value_type() == mindspore::schema::PrimitiveType_SubGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Switch *value_as_Switch() const { + return value_type() == mindspore::schema::PrimitiveType_Switch ? static_cast(value()) : nullptr; + } + const mindspore::schema::TensorListFromTensor *value_as_TensorListFromTensor() const { + return value_type() == mindspore::schema::PrimitiveType_TensorListFromTensor ? static_cast(value()) : nullptr; + } + const mindspore::schema::TensorListGetItem *value_as_TensorListGetItem() const { + return value_type() == mindspore::schema::PrimitiveType_TensorListGetItem ? static_cast(value()) : nullptr; + } + const mindspore::schema::TensorListReserve *value_as_TensorListReserve() const { + return value_type() == mindspore::schema::PrimitiveType_TensorListReserve ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::TensorListSetItem *value_as_TensorListSetItem() const { + return value_type() == mindspore::schema::PrimitiveType_TensorListSetItem ? static_cast(value()) : nullptr; + } + const mindspore::schema::TensorListStack *value_as_TensorListStack() const { + return value_type() == mindspore::schema::PrimitiveType_TensorListStack ? static_cast(value()) : nullptr; + } + const mindspore::schema::TileFusion *value_as_TileFusion() const { + return value_type() == mindspore::schema::PrimitiveType_TileFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::TopKFusion *value_as_TopKFusion() const { + return value_type() == mindspore::schema::PrimitiveType_TopKFusion ? static_cast(value()) : nullptr; + } + const mindspore::schema::Transpose *value_as_Transpose() const { + return value_type() == mindspore::schema::PrimitiveType_Transpose ? static_cast(value()) : nullptr; + } + const mindspore::schema::Unique *value_as_Unique() const { + return value_type() == mindspore::schema::PrimitiveType_Unique ? static_cast(value()) : nullptr; + } + const mindspore::schema::UnsortedSegmentSum *value_as_UnsortedSegmentSum() const { + return value_type() == mindspore::schema::PrimitiveType_UnsortedSegmentSum ? static_cast(value()) : nullptr; + } + const mindspore::schema::Unsqueeze *value_as_Unsqueeze() const { + return value_type() == mindspore::schema::PrimitiveType_Unsqueeze ? static_cast(value()) : nullptr; + } + const mindspore::schema::Unstack *value_as_Unstack() const { + return value_type() == mindspore::schema::PrimitiveType_Unstack ? static_cast(value()) : nullptr; + } + const mindspore::schema::LSTMGrad *value_as_LSTMGrad() const { + return value_type() == mindspore::schema::PrimitiveType_LSTMGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Where *value_as_Where() const { + return value_type() == mindspore::schema::PrimitiveType_Where ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::ZerosLike *value_as_ZerosLike() const { + return value_type() == mindspore::schema::PrimitiveType_ZerosLike ? static_cast(value()) : nullptr; + } + const mindspore::schema::Select *value_as_Select() const { + return value_type() == mindspore::schema::PrimitiveType_Select ? static_cast(value()) : nullptr; + } + const mindspore::schema::ScatterNdUpdate *value_as_ScatterNdUpdate() const { + return value_type() == mindspore::schema::PrimitiveType_ScatterNdUpdate ? static_cast(value()) : nullptr; + } + const mindspore::schema::GRU *value_as_GRU() const { + return value_type() == mindspore::schema::PrimitiveType_GRU ? static_cast(value()) : nullptr; + } + const mindspore::schema::NonZero *value_as_NonZero() const { + return value_type() == mindspore::schema::PrimitiveType_NonZero ? static_cast(value()) : nullptr; + } + const mindspore::schema::InvertPermutation *value_as_InvertPermutation() const { + return value_type() == mindspore::schema::PrimitiveType_InvertPermutation ? static_cast(value()) : nullptr; + } + const mindspore::schema::Size *value_as_Size() const { + return value_type() == mindspore::schema::PrimitiveType_Size ? static_cast(value()) : nullptr; + } + const mindspore::schema::RandomStandardNormal *value_as_RandomStandardNormal() const { + return value_type() == mindspore::schema::PrimitiveType_RandomStandardNormal ? static_cast(value()) : nullptr; + } + const mindspore::schema::CropAndResize *value_as_CropAndResize() const { + return value_type() == mindspore::schema::PrimitiveType_CropAndResize ? static_cast(value()) : nullptr; + } + const mindspore::schema::Erf *value_as_Erf() const { + return value_type() == mindspore::schema::PrimitiveType_Erf ? static_cast(value()) : nullptr; + } + const mindspore::schema::StridedSliceGrad *value_as_StridedSliceGrad() const { + return value_type() == mindspore::schema::PrimitiveType_StridedSliceGrad ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::IsFinite *value_as_IsFinite() const { + return value_type() == mindspore::schema::PrimitiveType_IsFinite ? static_cast(value()) : nullptr; + } + const mindspore::schema::LinSpace *value_as_LinSpace() const { + return value_type() == mindspore::schema::PrimitiveType_LinSpace ? static_cast(value()) : nullptr; + } + const mindspore::schema::UniformReal *value_as_UniformReal() const { + return value_type() == mindspore::schema::PrimitiveType_UniformReal ? static_cast(value()) : nullptr; + } + const mindspore::schema::AbsGrad *value_as_AbsGrad() const { + return value_type() == mindspore::schema::PrimitiveType_AbsGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::RsqrtGrad *value_as_RsqrtGrad() const { + return value_type() == mindspore::schema::PrimitiveType_RsqrtGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::SqrtGrad *value_as_SqrtGrad() const { + return value_type() == mindspore::schema::PrimitiveType_SqrtGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::LayerNormGrad *value_as_LayerNormGrad() const { + return value_type() == mindspore::schema::PrimitiveType_LayerNormGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::ResizeGrad *value_as_ResizeGrad() const { + return value_type() == mindspore::schema::PrimitiveType_ResizeGrad ? static_cast(value()) : nullptr; + } + const mindspore::schema::Splice *value_as_Splice() const { + return value_type() == mindspore::schema::PrimitiveType_Splice ? static_cast(value()) : nullptr; + } + const mindspore::schema::LogSoftmax *value_as_LogSoftmax() const { + return value_type() == mindspore::schema::PrimitiveType_LogSoftmax ? static_cast(value()) : nullptr; + } + const mindspore::schema::Call *value_as_Call() const { + return value_type() == mindspore::schema::PrimitiveType_Call ? 
static_cast(value()) : nullptr; + } + const mindspore::schema::Custom *value_as_Custom() const { + return value_type() == mindspore::schema::PrimitiveType_Custom ? static_cast(value()) : nullptr; + } + const mindspore::schema::CumSum *value_as_CumSum() const { + return value_type() == mindspore::schema::PrimitiveType_CumSum ? static_cast(value()) : nullptr; + } + const mindspore::schema::SplitWithOverlap *value_as_SplitWithOverlap() const { + return value_type() == mindspore::schema::PrimitiveType_SplitWithOverlap ? static_cast(value()) : nullptr; + } + const mindspore::schema::GenOP *value_as_GenOP() const { + return value_type() == mindspore::schema::PrimitiveType_GenOP ? static_cast(value()) : nullptr; + } + const mindspore::schema::RaggedRange *value_as_RaggedRange() const { + return value_type() == mindspore::schema::PrimitiveType_RaggedRange ? static_cast(value()) : nullptr; + } + const mindspore::schema::GLU *value_as_GLU() const { + return value_type() == mindspore::schema::PrimitiveType_GLU ? static_cast(value()) : nullptr; + } + const mindspore::schema::TensorArray *value_as_TensorArray() const { + return value_type() == mindspore::schema::PrimitiveType_TensorArray ? static_cast(value()) : nullptr; + } + const mindspore::schema::TensorArrayRead *value_as_TensorArrayRead() const { + return value_type() == mindspore::schema::PrimitiveType_TensorArrayRead ? static_cast(value()) : nullptr; + } + const mindspore::schema::TensorArrayWrite *value_as_TensorArrayWrite() const { + return value_type() == mindspore::schema::PrimitiveType_TensorArrayWrite ? static_cast(value()) : nullptr; + } + const mindspore::schema::Affine *value_as_Affine() const { + return value_type() == mindspore::schema::PrimitiveType_Affine ? 
static_cast(value()) : nullptr; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_VALUE_TYPE) && + VerifyOffset(verifier, VT_VALUE) && + VerifyPrimitiveType(verifier, value(), value_type()) && + verifier.EndTable(); + } +}; + +template<> inline const mindspore::schema::Abs *Primitive::value_as() const { + return value_as_Abs(); +} + +template<> inline const mindspore::schema::Activation *Primitive::value_as() const { + return value_as_Activation(); +} + +template<> inline const mindspore::schema::ActivationGrad *Primitive::value_as() const { + return value_as_ActivationGrad(); +} + +template<> inline const mindspore::schema::Adam *Primitive::value_as() const { + return value_as_Adam(); +} + +template<> inline const mindspore::schema::AddFusion *Primitive::value_as() const { + return value_as_AddFusion(); +} + +template<> inline const mindspore::schema::AdderFusion *Primitive::value_as() const { + return value_as_AdderFusion(); +} + +template<> inline const mindspore::schema::AddGrad *Primitive::value_as() const { + return value_as_AddGrad(); +} + +template<> inline const mindspore::schema::AddN *Primitive::value_as() const { + return value_as_AddN(); +} + +template<> inline const mindspore::schema::All *Primitive::value_as() const { + return value_as_All(); +} + +template<> inline const mindspore::schema::ApplyMomentum *Primitive::value_as() const { + return value_as_ApplyMomentum(); +} + +template<> inline const mindspore::schema::ArgMaxFusion *Primitive::value_as() const { + return value_as_ArgMaxFusion(); +} + +template<> inline const mindspore::schema::ArgMinFusion *Primitive::value_as() const { + return value_as_ArgMinFusion(); +} + +template<> inline const mindspore::schema::Assert *Primitive::value_as() const { + return value_as_Assert(); +} + +template<> inline const mindspore::schema::Assign *Primitive::value_as() const { + return value_as_Assign(); +} + +template<> inline const 
mindspore::schema::AssignAdd *Primitive::value_as() const { + return value_as_AssignAdd(); +} + +template<> inline const mindspore::schema::AudioSpectrogram *Primitive::value_as() const { + return value_as_AudioSpectrogram(); +} + +template<> inline const mindspore::schema::AvgPoolFusion *Primitive::value_as() const { + return value_as_AvgPoolFusion(); +} + +template<> inline const mindspore::schema::AvgPoolGrad *Primitive::value_as() const { + return value_as_AvgPoolGrad(); +} + +template<> inline const mindspore::schema::BatchNorm *Primitive::value_as() const { + return value_as_BatchNorm(); +} + +template<> inline const mindspore::schema::BatchNormGrad *Primitive::value_as() const { + return value_as_BatchNormGrad(); +} + +template<> inline const mindspore::schema::BatchToSpace *Primitive::value_as() const { + return value_as_BatchToSpace(); +} + +template<> inline const mindspore::schema::BatchToSpaceND *Primitive::value_as() const { + return value_as_BatchToSpaceND(); +} + +template<> inline const mindspore::schema::BiasAdd *Primitive::value_as() const { + return value_as_BiasAdd(); +} + +template<> inline const mindspore::schema::BinaryCrossEntropy *Primitive::value_as() const { + return value_as_BinaryCrossEntropy(); +} + +template<> inline const mindspore::schema::BinaryCrossEntropyGrad *Primitive::value_as() const { + return value_as_BinaryCrossEntropyGrad(); +} + +template<> inline const mindspore::schema::BiasAddGrad *Primitive::value_as() const { + return value_as_BiasAddGrad(); +} + +template<> inline const mindspore::schema::BroadcastTo *Primitive::value_as() const { + return value_as_BroadcastTo(); +} + +template<> inline const mindspore::schema::Cast *Primitive::value_as() const { + return value_as_Cast(); +} + +template<> inline const mindspore::schema::Ceil *Primitive::value_as() const { + return value_as_Ceil(); +} + +template<> inline const mindspore::schema::Clip *Primitive::value_as() const { + return value_as_Clip(); +} + +template<> inline 
const mindspore::schema::Concat *Primitive::value_as() const { + return value_as_Concat(); +} + +template<> inline const mindspore::schema::Attention *Primitive::value_as() const { + return value_as_Attention(); +} + +template<> inline const mindspore::schema::Conv2DBackpropFilterFusion *Primitive::value_as() const { + return value_as_Conv2DBackpropFilterFusion(); +} + +template<> inline const mindspore::schema::Conv2DBackpropInputFusion *Primitive::value_as() const { + return value_as_Conv2DBackpropInputFusion(); +} + +template<> inline const mindspore::schema::Conv2DFusion *Primitive::value_as() const { + return value_as_Conv2DFusion(); +} + +template<> inline const mindspore::schema::Conv2dTransposeFusion *Primitive::value_as() const { + return value_as_Conv2dTransposeFusion(); +} + +template<> inline const mindspore::schema::Cos *Primitive::value_as() const { + return value_as_Cos(); +} + +template<> inline const mindspore::schema::ConstantOfShape *Primitive::value_as() const { + return value_as_ConstantOfShape(); +} + +template<> inline const mindspore::schema::Crop *Primitive::value_as() const { + return value_as_Crop(); +} + +template<> inline const mindspore::schema::CustomExtractFeatures *Primitive::value_as() const { + return value_as_CustomExtractFeatures(); +} + +template<> inline const mindspore::schema::CustomNormalize *Primitive::value_as() const { + return value_as_CustomNormalize(); +} + +template<> inline const mindspore::schema::CustomPredict *Primitive::value_as() const { + return value_as_CustomPredict(); +} + +template<> inline const mindspore::schema::DeConv2DGradFilter *Primitive::value_as() const { + return value_as_DeConv2DGradFilter(); +} + +template<> inline const mindspore::schema::Depend *Primitive::value_as() const { + return value_as_Depend(); +} + +template<> inline const mindspore::schema::DepthToSpace *Primitive::value_as() const { + return value_as_DepthToSpace(); +} + +template<> inline const 
mindspore::schema::DetectionPostProcess *Primitive::value_as() const { + return value_as_DetectionPostProcess(); +} + +template<> inline const mindspore::schema::DivFusion *Primitive::value_as() const { + return value_as_DivFusion(); +} + +template<> inline const mindspore::schema::DivGrad *Primitive::value_as() const { + return value_as_DivGrad(); +} + +template<> inline const mindspore::schema::Dropout *Primitive::value_as() const { + return value_as_Dropout(); +} + +template<> inline const mindspore::schema::DropoutGrad *Primitive::value_as() const { + return value_as_DropoutGrad(); +} + +template<> inline const mindspore::schema::Elu *Primitive::value_as() const { + return value_as_Elu(); +} + +template<> inline const mindspore::schema::Eltwise *Primitive::value_as() const { + return value_as_Eltwise(); +} + +template<> inline const mindspore::schema::Equal *Primitive::value_as() const { + return value_as_Equal(); +} + +template<> inline const mindspore::schema::EmbeddingLookupFusion *Primitive::value_as() const { + return value_as_EmbeddingLookupFusion(); +} + +template<> inline const mindspore::schema::ExpFusion *Primitive::value_as() const { + return value_as_ExpFusion(); +} + +template<> inline const mindspore::schema::ExpandDims *Primitive::value_as() const { + return value_as_ExpandDims(); +} + +template<> inline const mindspore::schema::FakeQuantWithMinMaxVars *Primitive::value_as() const { + return value_as_FakeQuantWithMinMaxVars(); +} + +template<> inline const mindspore::schema::FakeQuantWithMinMaxVarsPerChannel *Primitive::value_as() const { + return value_as_FakeQuantWithMinMaxVarsPerChannel(); +} + +template<> inline const mindspore::schema::FftReal *Primitive::value_as() const { + return value_as_FftReal(); +} + +template<> inline const mindspore::schema::FftImag *Primitive::value_as() const { + return value_as_FftImag(); +} + +template<> inline const mindspore::schema::Flatten *Primitive::value_as() const { + return value_as_Flatten(); +} + 
+template<> inline const mindspore::schema::FlattenGrad *Primitive::value_as() const { + return value_as_FlattenGrad(); +} + +template<> inline const mindspore::schema::Floor *Primitive::value_as() const { + return value_as_Floor(); +} + +template<> inline const mindspore::schema::FloorDiv *Primitive::value_as() const { + return value_as_FloorDiv(); +} + +template<> inline const mindspore::schema::FloorMod *Primitive::value_as() const { + return value_as_FloorMod(); +} + +template<> inline const mindspore::schema::Fill *Primitive::value_as() const { + return value_as_Fill(); +} + +template<> inline const mindspore::schema::FullConnection *Primitive::value_as() const { + return value_as_FullConnection(); +} + +template<> inline const mindspore::schema::FusedBatchNorm *Primitive::value_as() const { + return value_as_FusedBatchNorm(); +} + +template<> inline const mindspore::schema::Gather *Primitive::value_as() const { + return value_as_Gather(); +} + +template<> inline const mindspore::schema::GatherNd *Primitive::value_as() const { + return value_as_GatherNd(); +} + +template<> inline const mindspore::schema::Greater *Primitive::value_as() const { + return value_as_Greater(); +} + +template<> inline const mindspore::schema::GreaterEqual *Primitive::value_as() const { + return value_as_GreaterEqual(); +} + +template<> inline const mindspore::schema::HashtableLookup *Primitive::value_as() const { + return value_as_HashtableLookup(); +} + +template<> inline const mindspore::schema::InstanceNorm *Primitive::value_as() const { + return value_as_InstanceNorm(); +} + +template<> inline const mindspore::schema::LayerNormFusion *Primitive::value_as() const { + return value_as_LayerNormFusion(); +} + +template<> inline const mindspore::schema::LeakyRelu *Primitive::value_as() const { + return value_as_LeakyRelu(); +} + +template<> inline const mindspore::schema::Less *Primitive::value_as() const { + return value_as_Less(); +} + +template<> inline const 
mindspore::schema::LessEqual *Primitive::value_as() const { + return value_as_LessEqual(); +} + +template<> inline const mindspore::schema::Log *Primitive::value_as() const { + return value_as_Log(); +} + +template<> inline const mindspore::schema::LogGrad *Primitive::value_as() const { + return value_as_LogGrad(); +} + +template<> inline const mindspore::schema::LogicalAnd *Primitive::value_as() const { + return value_as_LogicalAnd(); +} + +template<> inline const mindspore::schema::LogicalNot *Primitive::value_as() const { + return value_as_LogicalNot(); +} + +template<> inline const mindspore::schema::LogicalOr *Primitive::value_as() const { + return value_as_LogicalOr(); +} + +template<> inline const mindspore::schema::LpNormalization *Primitive::value_as() const { + return value_as_LpNormalization(); +} + +template<> inline const mindspore::schema::LRN *Primitive::value_as() const { + return value_as_LRN(); +} + +template<> inline const mindspore::schema::LshProjection *Primitive::value_as() const { + return value_as_LshProjection(); +} + +template<> inline const mindspore::schema::LSTM *Primitive::value_as() const { + return value_as_LSTM(); +} + +template<> inline const mindspore::schema::L2NormalizeFusion *Primitive::value_as() const { + return value_as_L2NormalizeFusion(); +} + +template<> inline const mindspore::schema::MatMul *Primitive::value_as() const { + return value_as_MatMul(); +} + +template<> inline const mindspore::schema::Maximum *Primitive::value_as() const { + return value_as_Maximum(); +} + +template<> inline const mindspore::schema::MaximumGrad *Primitive::value_as() const { + return value_as_MaximumGrad(); +} + +template<> inline const mindspore::schema::MaxPoolFusion *Primitive::value_as() const { + return value_as_MaxPoolFusion(); +} + +template<> inline const mindspore::schema::MaxPoolGrad *Primitive::value_as() const { + return value_as_MaxPoolGrad(); +} + +template<> inline const mindspore::schema::Merge *Primitive::value_as() const { 
+ return value_as_Merge(); +} + +template<> inline const mindspore::schema::Mfcc *Primitive::value_as() const { + return value_as_Mfcc(); +} + +template<> inline const mindspore::schema::Minimum *Primitive::value_as() const { + return value_as_Minimum(); +} + +template<> inline const mindspore::schema::MinimumGrad *Primitive::value_as() const { + return value_as_MinimumGrad(); +} + +template<> inline const mindspore::schema::Mod *Primitive::value_as() const { + return value_as_Mod(); +} + +template<> inline const mindspore::schema::MulFusion *Primitive::value_as() const { + return value_as_MulFusion(); +} + +template<> inline const mindspore::schema::MulGrad *Primitive::value_as() const { + return value_as_MulGrad(); +} + +template<> inline const mindspore::schema::Neg *Primitive::value_as() const { + return value_as_Neg(); +} + +template<> inline const mindspore::schema::NegGrad *Primitive::value_as() const { + return value_as_NegGrad(); +} + +template<> inline const mindspore::schema::NotEqual *Primitive::value_as() const { + return value_as_NotEqual(); +} + +template<> inline const mindspore::schema::NonMaxSuppression *Primitive::value_as() const { + return value_as_NonMaxSuppression(); +} + +template<> inline const mindspore::schema::OneHot *Primitive::value_as() const { + return value_as_OneHot(); +} + +template<> inline const mindspore::schema::OnesLike *Primitive::value_as() const { + return value_as_OnesLike(); +} + +template<> inline const mindspore::schema::PadFusion *Primitive::value_as() const { + return value_as_PadFusion(); +} + +template<> inline const mindspore::schema::PartialFusion *Primitive::value_as() const { + return value_as_PartialFusion(); +} + +template<> inline const mindspore::schema::PowerGrad *Primitive::value_as() const { + return value_as_PowerGrad(); +} + +template<> inline const mindspore::schema::PowFusion *Primitive::value_as() const { + return value_as_PowFusion(); +} + +template<> inline const mindspore::schema::PriorBox 
*Primitive::value_as() const { + return value_as_PriorBox(); +} + +template<> inline const mindspore::schema::PReLUFusion *Primitive::value_as() const { + return value_as_PReLUFusion(); +} + +template<> inline const mindspore::schema::QuantDTypeCast *Primitive::value_as() const { + return value_as_QuantDTypeCast(); +} + +template<> inline const mindspore::schema::Rank *Primitive::value_as() const { + return value_as_Rank(); +} + +template<> inline const mindspore::schema::Range *Primitive::value_as() const { + return value_as_Range(); +} + +template<> inline const mindspore::schema::Reciprocal *Primitive::value_as() const { + return value_as_Reciprocal(); +} + +template<> inline const mindspore::schema::RealDiv *Primitive::value_as() const { + return value_as_RealDiv(); +} + +template<> inline const mindspore::schema::ReduceFusion *Primitive::value_as() const { + return value_as_ReduceFusion(); +} + +template<> inline const mindspore::schema::Reshape *Primitive::value_as() const { + return value_as_Reshape(); +} + +template<> inline const mindspore::schema::Resize *Primitive::value_as() const { + return value_as_Resize(); +} + +template<> inline const mindspore::schema::ReverseSequence *Primitive::value_as() const { + return value_as_ReverseSequence(); +} + +template<> inline const mindspore::schema::ReverseV2 *Primitive::value_as() const { + return value_as_ReverseV2(); +} + +template<> inline const mindspore::schema::Rfft *Primitive::value_as() const { + return value_as_Rfft(); +} + +template<> inline const mindspore::schema::ROIPooling *Primitive::value_as() const { + return value_as_ROIPooling(); +} + +template<> inline const mindspore::schema::Round *Primitive::value_as() const { + return value_as_Round(); +} + +template<> inline const mindspore::schema::Rsqrt *Primitive::value_as() const { + return value_as_Rsqrt(); +} + +template<> inline const mindspore::schema::ScaleFusion *Primitive::value_as() const { + return value_as_ScaleFusion(); +} + +template<> 
inline const mindspore::schema::ScatterNd *Primitive::value_as() const { + return value_as_ScatterNd(); +} + +template<> inline const mindspore::schema::SGD *Primitive::value_as() const { + return value_as_SGD(); +} + +template<> inline const mindspore::schema::Shape *Primitive::value_as() const { + return value_as_Shape(); +} + +template<> inline const mindspore::schema::SigmoidCrossEntropyWithLogits *Primitive::value_as() const { + return value_as_SigmoidCrossEntropyWithLogits(); +} + +template<> inline const mindspore::schema::SigmoidCrossEntropyWithLogitsGrad *Primitive::value_as() const { + return value_as_SigmoidCrossEntropyWithLogitsGrad(); +} + +template<> inline const mindspore::schema::Sin *Primitive::value_as() const { + return value_as_Sin(); +} + +template<> inline const mindspore::schema::SkipGram *Primitive::value_as() const { + return value_as_SkipGram(); +} + +template<> inline const mindspore::schema::SliceFusion *Primitive::value_as() const { + return value_as_SliceFusion(); +} + +template<> inline const mindspore::schema::SmoothL1Loss *Primitive::value_as() const { + return value_as_SmoothL1Loss(); +} + +template<> inline const mindspore::schema::SmoothL1LossGrad *Primitive::value_as() const { + return value_as_SmoothL1LossGrad(); +} + +template<> inline const mindspore::schema::Softmax *Primitive::value_as() const { + return value_as_Softmax(); +} + +template<> inline const mindspore::schema::SoftmaxCrossEntropyWithLogits *Primitive::value_as() const { + return value_as_SoftmaxCrossEntropyWithLogits(); +} + +template<> inline const mindspore::schema::SpaceToBatch *Primitive::value_as() const { + return value_as_SpaceToBatch(); +} + +template<> inline const mindspore::schema::SpaceToBatchND *Primitive::value_as() const { + return value_as_SpaceToBatchND(); +} + +template<> inline const mindspore::schema::SpaceToDepth *Primitive::value_as() const { + return value_as_SpaceToDepth(); +} + +template<> inline const 
mindspore::schema::SparseSoftmaxCrossEntropyWithLogits *Primitive::value_as() const { + return value_as_SparseSoftmaxCrossEntropyWithLogits(); +} + +template<> inline const mindspore::schema::SparseToDense *Primitive::value_as() const { + return value_as_SparseToDense(); +} + +template<> inline const mindspore::schema::Split *Primitive::value_as() const { + return value_as_Split(); +} + +template<> inline const mindspore::schema::Sqrt *Primitive::value_as() const { + return value_as_Sqrt(); +} + +template<> inline const mindspore::schema::Squeeze *Primitive::value_as() const { + return value_as_Squeeze(); +} + +template<> inline const mindspore::schema::Square *Primitive::value_as() const { + return value_as_Square(); +} + +template<> inline const mindspore::schema::SquaredDifference *Primitive::value_as() const { + return value_as_SquaredDifference(); +} + +template<> inline const mindspore::schema::Stack *Primitive::value_as() const { + return value_as_Stack(); +} + +template<> inline const mindspore::schema::StridedSlice *Primitive::value_as() const { + return value_as_StridedSlice(); +} + +template<> inline const mindspore::schema::SubFusion *Primitive::value_as() const { + return value_as_SubFusion(); +} + +template<> inline const mindspore::schema::SubGrad *Primitive::value_as() const { + return value_as_SubGrad(); +} + +template<> inline const mindspore::schema::Switch *Primitive::value_as() const { + return value_as_Switch(); +} + +template<> inline const mindspore::schema::TensorListFromTensor *Primitive::value_as() const { + return value_as_TensorListFromTensor(); +} + +template<> inline const mindspore::schema::TensorListGetItem *Primitive::value_as() const { + return value_as_TensorListGetItem(); +} + +template<> inline const mindspore::schema::TensorListReserve *Primitive::value_as() const { + return value_as_TensorListReserve(); +} + +template<> inline const mindspore::schema::TensorListSetItem *Primitive::value_as() const { + return 
value_as_TensorListSetItem(); +} + +template<> inline const mindspore::schema::TensorListStack *Primitive::value_as() const { + return value_as_TensorListStack(); +} + +template<> inline const mindspore::schema::TileFusion *Primitive::value_as() const { + return value_as_TileFusion(); +} + +template<> inline const mindspore::schema::TopKFusion *Primitive::value_as() const { + return value_as_TopKFusion(); +} + +template<> inline const mindspore::schema::Transpose *Primitive::value_as() const { + return value_as_Transpose(); +} + +template<> inline const mindspore::schema::Unique *Primitive::value_as() const { + return value_as_Unique(); +} + +template<> inline const mindspore::schema::UnsortedSegmentSum *Primitive::value_as() const { + return value_as_UnsortedSegmentSum(); +} + +template<> inline const mindspore::schema::Unsqueeze *Primitive::value_as() const { + return value_as_Unsqueeze(); +} + +template<> inline const mindspore::schema::Unstack *Primitive::value_as() const { + return value_as_Unstack(); +} + +template<> inline const mindspore::schema::LSTMGrad *Primitive::value_as() const { + return value_as_LSTMGrad(); +} + +template<> inline const mindspore::schema::Where *Primitive::value_as() const { + return value_as_Where(); +} + +template<> inline const mindspore::schema::ZerosLike *Primitive::value_as() const { + return value_as_ZerosLike(); +} + +template<> inline const mindspore::schema::Select *Primitive::value_as() const { + return value_as_Select(); +} + +template<> inline const mindspore::schema::ScatterNdUpdate *Primitive::value_as() const { + return value_as_ScatterNdUpdate(); +} + +template<> inline const mindspore::schema::GRU *Primitive::value_as() const { + return value_as_GRU(); +} + +template<> inline const mindspore::schema::NonZero *Primitive::value_as() const { + return value_as_NonZero(); +} + +template<> inline const mindspore::schema::InvertPermutation *Primitive::value_as() const { + return value_as_InvertPermutation(); +} + 
+template<> inline const mindspore::schema::Size *Primitive::value_as() const { + return value_as_Size(); +} + +template<> inline const mindspore::schema::RandomStandardNormal *Primitive::value_as() const { + return value_as_RandomStandardNormal(); +} + +template<> inline const mindspore::schema::CropAndResize *Primitive::value_as() const { + return value_as_CropAndResize(); +} + +template<> inline const mindspore::schema::Erf *Primitive::value_as() const { + return value_as_Erf(); +} + +template<> inline const mindspore::schema::StridedSliceGrad *Primitive::value_as() const { + return value_as_StridedSliceGrad(); +} + +template<> inline const mindspore::schema::IsFinite *Primitive::value_as() const { + return value_as_IsFinite(); +} + +template<> inline const mindspore::schema::LinSpace *Primitive::value_as() const { + return value_as_LinSpace(); +} + +template<> inline const mindspore::schema::UniformReal *Primitive::value_as() const { + return value_as_UniformReal(); +} + +template<> inline const mindspore::schema::AbsGrad *Primitive::value_as() const { + return value_as_AbsGrad(); +} + +template<> inline const mindspore::schema::RsqrtGrad *Primitive::value_as() const { + return value_as_RsqrtGrad(); +} + +template<> inline const mindspore::schema::SqrtGrad *Primitive::value_as() const { + return value_as_SqrtGrad(); +} + +template<> inline const mindspore::schema::LayerNormGrad *Primitive::value_as() const { + return value_as_LayerNormGrad(); +} + +template<> inline const mindspore::schema::ResizeGrad *Primitive::value_as() const { + return value_as_ResizeGrad(); +} + +template<> inline const mindspore::schema::Splice *Primitive::value_as() const { + return value_as_Splice(); +} + +template<> inline const mindspore::schema::LogSoftmax *Primitive::value_as() const { + return value_as_LogSoftmax(); +} + +template<> inline const mindspore::schema::Call *Primitive::value_as() const { + return value_as_Call(); +} + +template<> inline const mindspore::schema::Custom 
*Primitive::value_as() const { + return value_as_Custom(); +} + +template<> inline const mindspore::schema::CumSum *Primitive::value_as() const { + return value_as_CumSum(); +} + +template<> inline const mindspore::schema::SplitWithOverlap *Primitive::value_as() const { + return value_as_SplitWithOverlap(); +} + +template<> inline const mindspore::schema::GenOP *Primitive::value_as() const { + return value_as_GenOP(); +} + +template<> inline const mindspore::schema::RaggedRange *Primitive::value_as() const { + return value_as_RaggedRange(); +} + +template<> inline const mindspore::schema::GLU *Primitive::value_as() const { + return value_as_GLU(); +} + +template<> inline const mindspore::schema::TensorArray *Primitive::value_as() const { + return value_as_TensorArray(); +} + +template<> inline const mindspore::schema::TensorArrayRead *Primitive::value_as() const { + return value_as_TensorArrayRead(); +} + +template<> inline const mindspore::schema::TensorArrayWrite *Primitive::value_as() const { + return value_as_TensorArrayWrite(); +} + +template<> inline const mindspore::schema::Affine *Primitive::value_as() const { + return value_as_Affine(); +} + +struct PrimitiveBuilder { + typedef Primitive Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_value_type(mindspore::schema::PrimitiveType value_type) { + fbb_.AddElement(Primitive::VT_VALUE_TYPE, static_cast(value_type), 0); + } + void add_value(flatbuffers::Offset value) { + fbb_.AddOffset(Primitive::VT_VALUE, value); + } + explicit PrimitiveBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePrimitive( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::PrimitiveType value_type = mindspore::schema::PrimitiveType_NONE, + flatbuffers::Offset value = 0) { + 
PrimitiveBuilder builder_(_fbb); + builder_.add_value(value); + builder_.add_value_type(value_type); + return builder_.Finish(); +} + +struct CNode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CNodeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_PRIMITIVE = 8, + VT_INPUTINDEX = 10, + VT_OUTPUTINDEX = 12, + VT_QUANTTYPE = 14, + VT_DEVICETYPE = 16 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + const mindspore::schema::Primitive *primitive() const { + return GetPointer(VT_PRIMITIVE); + } + const flatbuffers::Vector *inputIndex() const { + return GetPointer *>(VT_INPUTINDEX); + } + const flatbuffers::Vector *outputIndex() const { + return GetPointer *>(VT_OUTPUTINDEX); + } + mindspore::schema::QuantType quantType() const { + return static_cast(GetField(VT_QUANTTYPE, 0)); + } + int32_t deviceType() const { + return GetField(VT_DEVICETYPE, -1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_PRIMITIVE) && + verifier.VerifyTable(primitive()) && + VerifyOffset(verifier, VT_INPUTINDEX) && + verifier.VerifyVector(inputIndex()) && + VerifyOffset(verifier, VT_OUTPUTINDEX) && + verifier.VerifyVector(outputIndex()) && + VerifyField(verifier, VT_QUANTTYPE) && + VerifyField(verifier, VT_DEVICETYPE) && + verifier.EndTable(); + } +}; + +struct CNodeBuilder { + typedef CNode Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(CNode::VT_NAME, name); + } + void add_primitive(flatbuffers::Offset primitive) { + fbb_.AddOffset(CNode::VT_PRIMITIVE, primitive); + } + void add_inputIndex(flatbuffers::Offset> inputIndex) { + fbb_.AddOffset(CNode::VT_INPUTINDEX, inputIndex); + } + void add_outputIndex(flatbuffers::Offset> outputIndex) { + 
fbb_.AddOffset(CNode::VT_OUTPUTINDEX, outputIndex); + } + void add_quantType(mindspore::schema::QuantType quantType) { + fbb_.AddElement(CNode::VT_QUANTTYPE, static_cast(quantType), 0); + } + void add_deviceType(int32_t deviceType) { + fbb_.AddElement(CNode::VT_DEVICETYPE, deviceType, -1); + } + explicit CNodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCNode( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + flatbuffers::Offset primitive = 0, + flatbuffers::Offset> inputIndex = 0, + flatbuffers::Offset> outputIndex = 0, + mindspore::schema::QuantType quantType = mindspore::schema::QuantType_QUANT_NONE, + int32_t deviceType = -1) { + CNodeBuilder builder_(_fbb); + builder_.add_deviceType(deviceType); + builder_.add_quantType(quantType); + builder_.add_outputIndex(outputIndex); + builder_.add_inputIndex(inputIndex); + builder_.add_primitive(primitive); + builder_.add_name(name); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateCNodeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + flatbuffers::Offset primitive = 0, + const std::vector *inputIndex = nullptr, + const std::vector *outputIndex = nullptr, + mindspore::schema::QuantType quantType = mindspore::schema::QuantType_QUANT_NONE, + int32_t deviceType = -1) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto inputIndex__ = inputIndex ? _fbb.CreateVector(*inputIndex) : 0; + auto outputIndex__ = outputIndex ? 
_fbb.CreateVector(*outputIndex) : 0; + return mindspore::schema::CreateCNode( + _fbb, + name__, + primitive, + inputIndex__, + outputIndex__, + quantType, + deviceType); +} + +struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SubGraphBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_INPUTINDICES = 6, + VT_OUTPUTINDICES = 8, + VT_NODEINDICES = 10, + VT_TENSORINDICES = 12 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + const flatbuffers::Vector *inputIndices() const { + return GetPointer *>(VT_INPUTINDICES); + } + const flatbuffers::Vector *outputIndices() const { + return GetPointer *>(VT_OUTPUTINDICES); + } + const flatbuffers::Vector *nodeIndices() const { + return GetPointer *>(VT_NODEINDICES); + } + const flatbuffers::Vector *tensorIndices() const { + return GetPointer *>(VT_TENSORINDICES); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_INPUTINDICES) && + verifier.VerifyVector(inputIndices()) && + VerifyOffset(verifier, VT_OUTPUTINDICES) && + verifier.VerifyVector(outputIndices()) && + VerifyOffset(verifier, VT_NODEINDICES) && + verifier.VerifyVector(nodeIndices()) && + VerifyOffset(verifier, VT_TENSORINDICES) && + verifier.VerifyVector(tensorIndices()) && + verifier.EndTable(); + } +}; + +struct SubGraphBuilder { + typedef SubGraph Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(SubGraph::VT_NAME, name); + } + void add_inputIndices(flatbuffers::Offset> inputIndices) { + fbb_.AddOffset(SubGraph::VT_INPUTINDICES, inputIndices); + } + void add_outputIndices(flatbuffers::Offset> outputIndices) { + fbb_.AddOffset(SubGraph::VT_OUTPUTINDICES, outputIndices); + } + void add_nodeIndices(flatbuffers::Offset> 
nodeIndices) { + fbb_.AddOffset(SubGraph::VT_NODEINDICES, nodeIndices); + } + void add_tensorIndices(flatbuffers::Offset> tensorIndices) { + fbb_.AddOffset(SubGraph::VT_TENSORINDICES, tensorIndices); + } + explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSubGraph( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + flatbuffers::Offset> inputIndices = 0, + flatbuffers::Offset> outputIndices = 0, + flatbuffers::Offset> nodeIndices = 0, + flatbuffers::Offset> tensorIndices = 0) { + SubGraphBuilder builder_(_fbb); + builder_.add_tensorIndices(tensorIndices); + builder_.add_nodeIndices(nodeIndices); + builder_.add_outputIndices(outputIndices); + builder_.add_inputIndices(inputIndices); + builder_.add_name(name); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSubGraphDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const std::vector *inputIndices = nullptr, + const std::vector *outputIndices = nullptr, + const std::vector *nodeIndices = nullptr, + const std::vector *tensorIndices = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto inputIndices__ = inputIndices ? _fbb.CreateVector(*inputIndices) : 0; + auto outputIndices__ = outputIndices ? _fbb.CreateVector(*outputIndices) : 0; + auto nodeIndices__ = nodeIndices ? _fbb.CreateVector(*nodeIndices) : 0; + auto tensorIndices__ = tensorIndices ? 
_fbb.CreateVector(*tensorIndices) : 0; + return mindspore::schema::CreateSubGraph( + _fbb, + name__, + inputIndices__, + outputIndices__, + nodeIndices__, + tensorIndices__); +} + +struct MetaGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MetaGraphBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_VERSION = 6, + VT_FMKTYPE = 8, + VT_INPUTINDEX = 10, + VT_OUTPUTINDEX = 12, + VT_MEMPOOLSIZE = 14, + VT_NODES = 16, + VT_ALLTENSORS = 18, + VT_SUBGRAPH = 20, + VT_OBFUSCATE = 22, + VT_OBFMETADATA = 24 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + const flatbuffers::String *version() const { + return GetPointer(VT_VERSION); + } + int32_t fmkType() const { + return GetField(VT_FMKTYPE, 0); + } + const flatbuffers::Vector *inputIndex() const { + return GetPointer *>(VT_INPUTINDEX); + } + const flatbuffers::Vector *outputIndex() const { + return GetPointer *>(VT_OUTPUTINDEX); + } + uint32_t mempoolSize() const { + return GetField(VT_MEMPOOLSIZE, 0); + } + const flatbuffers::Vector> *nodes() const { + return GetPointer> *>(VT_NODES); + } + const flatbuffers::Vector> *allTensors() const { + return GetPointer> *>(VT_ALLTENSORS); + } + const flatbuffers::Vector> *subGraph() const { + return GetPointer> *>(VT_SUBGRAPH); + } + bool obfuscate() const { + return GetField(VT_OBFUSCATE, 0) != 0; + } + const flatbuffers::Vector *obfMetaData() const { + return GetPointer *>(VT_OBFMETADATA); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_VERSION) && + verifier.VerifyString(version()) && + VerifyField(verifier, VT_FMKTYPE) && + VerifyOffset(verifier, VT_INPUTINDEX) && + verifier.VerifyVector(inputIndex()) && + VerifyOffset(verifier, VT_OUTPUTINDEX) && + verifier.VerifyVector(outputIndex()) && + VerifyField(verifier, 
VT_MEMPOOLSIZE) && + VerifyOffset(verifier, VT_NODES) && + verifier.VerifyVector(nodes()) && + verifier.VerifyVectorOfTables(nodes()) && + VerifyOffset(verifier, VT_ALLTENSORS) && + verifier.VerifyVector(allTensors()) && + verifier.VerifyVectorOfTables(allTensors()) && + VerifyOffset(verifier, VT_SUBGRAPH) && + verifier.VerifyVector(subGraph()) && + verifier.VerifyVectorOfTables(subGraph()) && + VerifyField(verifier, VT_OBFUSCATE) && + VerifyOffset(verifier, VT_OBFMETADATA) && + verifier.VerifyVector(obfMetaData()) && + verifier.EndTable(); + } +}; + +struct MetaGraphBuilder { + typedef MetaGraph Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(MetaGraph::VT_NAME, name); + } + void add_version(flatbuffers::Offset version) { + fbb_.AddOffset(MetaGraph::VT_VERSION, version); + } + void add_fmkType(int32_t fmkType) { + fbb_.AddElement(MetaGraph::VT_FMKTYPE, fmkType, 0); + } + void add_inputIndex(flatbuffers::Offset> inputIndex) { + fbb_.AddOffset(MetaGraph::VT_INPUTINDEX, inputIndex); + } + void add_outputIndex(flatbuffers::Offset> outputIndex) { + fbb_.AddOffset(MetaGraph::VT_OUTPUTINDEX, outputIndex); + } + void add_mempoolSize(uint32_t mempoolSize) { + fbb_.AddElement(MetaGraph::VT_MEMPOOLSIZE, mempoolSize, 0); + } + void add_nodes(flatbuffers::Offset>> nodes) { + fbb_.AddOffset(MetaGraph::VT_NODES, nodes); + } + void add_allTensors(flatbuffers::Offset>> allTensors) { + fbb_.AddOffset(MetaGraph::VT_ALLTENSORS, allTensors); + } + void add_subGraph(flatbuffers::Offset>> subGraph) { + fbb_.AddOffset(MetaGraph::VT_SUBGRAPH, subGraph); + } + void add_obfuscate(bool obfuscate) { + fbb_.AddElement(MetaGraph::VT_OBFUSCATE, static_cast(obfuscate), 0); + } + void add_obfMetaData(flatbuffers::Offset> obfMetaData) { + fbb_.AddOffset(MetaGraph::VT_OBFMETADATA, obfMetaData); + } + explicit MetaGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = 
fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMetaGraph( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + flatbuffers::Offset version = 0, + int32_t fmkType = 0, + flatbuffers::Offset> inputIndex = 0, + flatbuffers::Offset> outputIndex = 0, + uint32_t mempoolSize = 0, + flatbuffers::Offset>> nodes = 0, + flatbuffers::Offset>> allTensors = 0, + flatbuffers::Offset>> subGraph = 0, + bool obfuscate = false, + flatbuffers::Offset> obfMetaData = 0) { + MetaGraphBuilder builder_(_fbb); + builder_.add_obfMetaData(obfMetaData); + builder_.add_subGraph(subGraph); + builder_.add_allTensors(allTensors); + builder_.add_nodes(nodes); + builder_.add_mempoolSize(mempoolSize); + builder_.add_outputIndex(outputIndex); + builder_.add_inputIndex(inputIndex); + builder_.add_fmkType(fmkType); + builder_.add_version(version); + builder_.add_name(name); + builder_.add_obfuscate(obfuscate); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateMetaGraphDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const char *version = nullptr, + int32_t fmkType = 0, + const std::vector *inputIndex = nullptr, + const std::vector *outputIndex = nullptr, + uint32_t mempoolSize = 0, + const std::vector> *nodes = nullptr, + const std::vector> *allTensors = nullptr, + const std::vector> *subGraph = nullptr, + bool obfuscate = false, + const std::vector *obfMetaData = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto version__ = version ? _fbb.CreateString(version) : 0; + auto inputIndex__ = inputIndex ? _fbb.CreateVector(*inputIndex) : 0; + auto outputIndex__ = outputIndex ? _fbb.CreateVector(*outputIndex) : 0; + auto nodes__ = nodes ? _fbb.CreateVector>(*nodes) : 0; + auto allTensors__ = allTensors ? _fbb.CreateVector>(*allTensors) : 0; + auto subGraph__ = subGraph ? 
_fbb.CreateVector>(*subGraph) : 0; + auto obfMetaData__ = obfMetaData ? _fbb.CreateVector(*obfMetaData) : 0; + return mindspore::schema::CreateMetaGraph( + _fbb, + name__, + version__, + fmkType, + inputIndex__, + outputIndex__, + mempoolSize, + nodes__, + allTensors__, + subGraph__, + obfuscate, + obfMetaData__); +} + +inline const mindspore::schema::MetaGraph *GetMetaGraph(const void *buf) { + return flatbuffers::GetRoot(buf); +} + +inline const mindspore::schema::MetaGraph *GetSizePrefixedMetaGraph(const void *buf) { + return flatbuffers::GetSizePrefixedRoot(buf); +} + +inline const char *MetaGraphIdentifier() { + return "MSL2"; +} + +inline bool MetaGraphBufferHasIdentifier(const void *buf) { + return flatbuffers::BufferHasIdentifier( + buf, MetaGraphIdentifier()); +} + +inline bool VerifyMetaGraphBuffer( + flatbuffers::Verifier &verifier) { + return verifier.VerifyBuffer(MetaGraphIdentifier()); +} + +inline bool VerifySizePrefixedMetaGraphBuffer( + flatbuffers::Verifier &verifier) { + return verifier.VerifySizePrefixedBuffer(MetaGraphIdentifier()); +} + +inline const char *MetaGraphExtension() { + return "ms"; +} + +inline void FinishMetaGraphBuffer( + flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset root) { + fbb.Finish(root, MetaGraphIdentifier()); +} + +inline void FinishSizePrefixedMetaGraphBuffer( + flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset root) { + fbb.FinishSizePrefixed(root, MetaGraphIdentifier()); +} + +} // namespace schema +} // namespace mindspore + +#endif // FLATBUFFERS_GENERATED_MODEL_MINDSPORE_SCHEMA_H_ diff --git a/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/ops_generated.h b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/ops_generated.h new file mode 100644 index 0000000..19ecf9a --- /dev/null +++ b/application_example/android/classification/entry/src/main/cpp/minddata-lite/include/schema/ops_generated.h @@ -0,0 +1,13470 @@ 
+// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_OPS_MINDSPORE_SCHEMA_H_ +#define FLATBUFFERS_GENERATED_OPS_MINDSPORE_SCHEMA_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "ops_types_generated.h" + +namespace mindspore { +namespace schema { + +struct Abs; +struct AbsBuilder; + +struct Activation; +struct ActivationBuilder; + +struct ActivationGrad; +struct ActivationGradBuilder; + +struct Adam; +struct AdamBuilder; + +struct AddFusion; +struct AddFusionBuilder; + +struct AdderFusion; +struct AdderFusionBuilder; + +struct AddGrad; +struct AddGradBuilder; + +struct AddN; +struct AddNBuilder; + +struct All; +struct AllBuilder; + +struct ApplyMomentum; +struct ApplyMomentumBuilder; + +struct ArgMaxFusion; +struct ArgMaxFusionBuilder; + +struct ArgMinFusion; +struct ArgMinFusionBuilder; + +struct Assert; +struct AssertBuilder; + +struct Assign; +struct AssignBuilder; + +struct AssignAdd; +struct AssignAddBuilder; + +struct AudioSpectrogram; +struct AudioSpectrogramBuilder; + +struct AvgPoolFusion; +struct AvgPoolFusionBuilder; + +struct AvgPoolGrad; +struct AvgPoolGradBuilder; + +struct BatchNorm; +struct BatchNormBuilder; + +struct BatchNormGrad; +struct BatchNormGradBuilder; + +struct BatchToSpace; +struct BatchToSpaceBuilder; + +struct BatchToSpaceND; +struct BatchToSpaceNDBuilder; + +struct BiasAdd; +struct BiasAddBuilder; + +struct BinaryCrossEntropy; +struct BinaryCrossEntropyBuilder; + +struct BinaryCrossEntropyGrad; +struct BinaryCrossEntropyGradBuilder; + +struct BiasAddGrad; +struct BiasAddGradBuilder; + +struct BroadcastTo; +struct BroadcastToBuilder; + +struct Cast; +struct CastBuilder; + +struct Ceil; +struct CeilBuilder; + +struct Clip; +struct ClipBuilder; + +struct Concat; +struct ConcatBuilder; + +struct Attention; +struct AttentionBuilder; + +struct Conv2DBackpropFilterFusion; +struct Conv2DBackpropFilterFusionBuilder; + +struct Conv2DBackpropInputFusion; +struct 
Conv2DBackpropInputFusionBuilder; + +struct Conv2DFusion; +struct Conv2DFusionBuilder; + +struct Conv2dTransposeFusion; +struct Conv2dTransposeFusionBuilder; + +struct Cos; +struct CosBuilder; + +struct ConstantOfShape; +struct ConstantOfShapeBuilder; + +struct Crop; +struct CropBuilder; + +struct CustomExtractFeatures; +struct CustomExtractFeaturesBuilder; + +struct CustomNormalize; +struct CustomNormalizeBuilder; + +struct CustomPredict; +struct CustomPredictBuilder; + +struct DeConv2DGradFilter; +struct DeConv2DGradFilterBuilder; + +struct Depend; +struct DependBuilder; + +struct DepthToSpace; +struct DepthToSpaceBuilder; + +struct DetectionPostProcess; +struct DetectionPostProcessBuilder; + +struct DivFusion; +struct DivFusionBuilder; + +struct DivGrad; +struct DivGradBuilder; + +struct Dropout; +struct DropoutBuilder; + +struct DropoutGrad; +struct DropoutGradBuilder; + +struct Elu; +struct EluBuilder; + +struct Eltwise; +struct EltwiseBuilder; + +struct Equal; +struct EqualBuilder; + +struct EmbeddingLookupFusion; +struct EmbeddingLookupFusionBuilder; + +struct ExpFusion; +struct ExpFusionBuilder; + +struct ExpandDims; +struct ExpandDimsBuilder; + +struct FakeQuantWithMinMaxVars; +struct FakeQuantWithMinMaxVarsBuilder; + +struct FakeQuantWithMinMaxVarsPerChannel; +struct FakeQuantWithMinMaxVarsPerChannelBuilder; + +struct FftReal; +struct FftRealBuilder; + +struct FftImag; +struct FftImagBuilder; + +struct Flatten; +struct FlattenBuilder; + +struct FlattenGrad; +struct FlattenGradBuilder; + +struct Floor; +struct FloorBuilder; + +struct FloorDiv; +struct FloorDivBuilder; + +struct FloorMod; +struct FloorModBuilder; + +struct Fill; +struct FillBuilder; + +struct FullConnection; +struct FullConnectionBuilder; + +struct FusedBatchNorm; +struct FusedBatchNormBuilder; + +struct Gather; +struct GatherBuilder; + +struct GatherNd; +struct GatherNdBuilder; + +struct Greater; +struct GreaterBuilder; + +struct GreaterEqual; +struct GreaterEqualBuilder; + +struct 
HashtableLookup; +struct HashtableLookupBuilder; + +struct InstanceNorm; +struct InstanceNormBuilder; + +struct LayerNormFusion; +struct LayerNormFusionBuilder; + +struct LeakyRelu; +struct LeakyReluBuilder; + +struct Less; +struct LessBuilder; + +struct LessEqual; +struct LessEqualBuilder; + +struct Log; +struct LogBuilder; + +struct LogGrad; +struct LogGradBuilder; + +struct LogicalAnd; +struct LogicalAndBuilder; + +struct LogicalNot; +struct LogicalNotBuilder; + +struct LogicalOr; +struct LogicalOrBuilder; + +struct LpNormalization; +struct LpNormalizationBuilder; + +struct LRN; +struct LRNBuilder; + +struct LshProjection; +struct LshProjectionBuilder; + +struct LSTM; +struct LSTMBuilder; + +struct LSTMGrad; +struct LSTMGradBuilder; + +struct L2NormalizeFusion; +struct L2NormalizeFusionBuilder; + +struct MatMul; +struct MatMulBuilder; + +struct Maximum; +struct MaximumBuilder; + +struct MaximumGrad; +struct MaximumGradBuilder; + +struct MaxPoolFusion; +struct MaxPoolFusionBuilder; + +struct MaxPoolGrad; +struct MaxPoolGradBuilder; + +struct Merge; +struct MergeBuilder; + +struct Mfcc; +struct MfccBuilder; + +struct Minimum; +struct MinimumBuilder; + +struct MinimumGrad; +struct MinimumGradBuilder; + +struct Mod; +struct ModBuilder; + +struct MulFusion; +struct MulFusionBuilder; + +struct MulGrad; +struct MulGradBuilder; + +struct Neg; +struct NegBuilder; + +struct NegGrad; +struct NegGradBuilder; + +struct NotEqual; +struct NotEqualBuilder; + +struct NonMaxSuppression; +struct NonMaxSuppressionBuilder; + +struct OneHot; +struct OneHotBuilder; + +struct OnesLike; +struct OnesLikeBuilder; + +struct PadFusion; +struct PadFusionBuilder; + +struct PartialFusion; +struct PartialFusionBuilder; + +struct PowerGrad; +struct PowerGradBuilder; + +struct PowFusion; +struct PowFusionBuilder; + +struct PriorBox; +struct PriorBoxBuilder; + +struct PReLUFusion; +struct PReLUFusionBuilder; + +struct Rank; +struct RankBuilder; + +struct Range; +struct RangeBuilder; + +struct 
Reciprocal; +struct ReciprocalBuilder; + +struct RealDiv; +struct RealDivBuilder; + +struct ReduceFusion; +struct ReduceFusionBuilder; + +struct Reshape; +struct ReshapeBuilder; + +struct Resize; +struct ResizeBuilder; + +struct ReverseSequence; +struct ReverseSequenceBuilder; + +struct ReverseV2; +struct ReverseV2Builder; + +struct Rfft; +struct RfftBuilder; + +struct ROIPooling; +struct ROIPoolingBuilder; + +struct Round; +struct RoundBuilder; + +struct Rsqrt; +struct RsqrtBuilder; + +struct QuantDTypeCast; +struct QuantDTypeCastBuilder; + +struct ScaleFusion; +struct ScaleFusionBuilder; + +struct ScatterNd; +struct ScatterNdBuilder; + +struct SGD; +struct SGDBuilder; + +struct Shape; +struct ShapeBuilder; + +struct SigmoidCrossEntropyWithLogits; +struct SigmoidCrossEntropyWithLogitsBuilder; + +struct SigmoidCrossEntropyWithLogitsGrad; +struct SigmoidCrossEntropyWithLogitsGradBuilder; + +struct Sin; +struct SinBuilder; + +struct SkipGram; +struct SkipGramBuilder; + +struct SliceFusion; +struct SliceFusionBuilder; + +struct SmoothL1Loss; +struct SmoothL1LossBuilder; + +struct SmoothL1LossGrad; +struct SmoothL1LossGradBuilder; + +struct Softmax; +struct SoftmaxBuilder; + +struct SoftmaxCrossEntropyWithLogits; +struct SoftmaxCrossEntropyWithLogitsBuilder; + +struct SpaceToBatch; +struct SpaceToBatchBuilder; + +struct SpaceToBatchND; +struct SpaceToBatchNDBuilder; + +struct SpaceToDepth; +struct SpaceToDepthBuilder; + +struct SparseSoftmaxCrossEntropyWithLogits; +struct SparseSoftmaxCrossEntropyWithLogitsBuilder; + +struct SparseToDense; +struct SparseToDenseBuilder; + +struct Split; +struct SplitBuilder; + +struct Sqrt; +struct SqrtBuilder; + +struct Squeeze; +struct SqueezeBuilder; + +struct Square; +struct SquareBuilder; + +struct SquaredDifference; +struct SquaredDifferenceBuilder; + +struct Stack; +struct StackBuilder; + +struct StridedSlice; +struct StridedSliceBuilder; + +struct SubFusion; +struct SubFusionBuilder; + +struct SubGrad; +struct SubGradBuilder; + 
+struct Switch; +struct SwitchBuilder; + +struct TensorListFromTensor; +struct TensorListFromTensorBuilder; + +struct TensorListGetItem; +struct TensorListGetItemBuilder; + +struct TensorListReserve; +struct TensorListReserveBuilder; + +struct TensorListSetItem; +struct TensorListSetItemBuilder; + +struct TensorListStack; +struct TensorListStackBuilder; + +struct TileFusion; +struct TileFusionBuilder; + +struct TopKFusion; +struct TopKFusionBuilder; + +struct Transpose; +struct TransposeBuilder; + +struct Unique; +struct UniqueBuilder; + +struct UnsortedSegmentSum; +struct UnsortedSegmentSumBuilder; + +struct Unsqueeze; +struct UnsqueezeBuilder; + +struct Unstack; +struct UnstackBuilder; + +struct Where; +struct WhereBuilder; + +struct ZerosLike; +struct ZerosLikeBuilder; + +struct Select; +struct SelectBuilder; + +struct GRU; +struct GRUBuilder; + +struct NonZero; +struct NonZeroBuilder; + +struct InvertPermutation; +struct InvertPermutationBuilder; + +struct Size; +struct SizeBuilder; + +struct RandomStandardNormal; +struct RandomStandardNormalBuilder; + +struct CropAndResize; +struct CropAndResizeBuilder; + +struct Erf; +struct ErfBuilder; + +struct StridedSliceGrad; +struct StridedSliceGradBuilder; + +struct IsFinite; +struct IsFiniteBuilder; + +struct LinSpace; +struct LinSpaceBuilder; + +struct UniformReal; +struct UniformRealBuilder; + +struct AbsGrad; +struct AbsGradBuilder; + +struct RsqrtGrad; +struct RsqrtGradBuilder; + +struct SqrtGrad; +struct SqrtGradBuilder; + +struct LayerNormGrad; +struct LayerNormGradBuilder; + +struct ResizeGrad; +struct ResizeGradBuilder; + +struct Splice; +struct SpliceBuilder; + +struct LogSoftmax; +struct LogSoftmaxBuilder; + +struct Call; +struct CallBuilder; + +struct CumSum; +struct CumSumBuilder; + +struct Custom; +struct CustomBuilder; + +struct SplitWithOverlap; +struct SplitWithOverlapBuilder; + +struct GenOP; +struct GenOPBuilder; + +struct RaggedRange; +struct RaggedRangeBuilder; + +struct GLU; +struct GLUBuilder; + 
+struct TensorArray; +struct TensorArrayBuilder; + +struct TensorArrayRead; +struct TensorArrayReadBuilder; + +struct TensorArrayWrite; +struct TensorArrayWriteBuilder; + +struct Affine; +struct AffineBuilder; + +struct ScatterNdUpdate; +struct ScatterNdUpdateBuilder; + +enum PrimitiveType : uint8_t { + PrimitiveType_NONE = 0, + PrimitiveType_Abs = 1, + PrimitiveType_Activation = 2, + PrimitiveType_ActivationGrad = 3, + PrimitiveType_Adam = 4, + PrimitiveType_AddFusion = 5, + PrimitiveType_AdderFusion = 6, + PrimitiveType_AddGrad = 7, + PrimitiveType_AddN = 8, + PrimitiveType_All = 9, + PrimitiveType_ApplyMomentum = 10, + PrimitiveType_ArgMaxFusion = 11, + PrimitiveType_ArgMinFusion = 12, + PrimitiveType_Assert = 13, + PrimitiveType_Assign = 14, + PrimitiveType_AssignAdd = 15, + PrimitiveType_AudioSpectrogram = 16, + PrimitiveType_AvgPoolFusion = 17, + PrimitiveType_AvgPoolGrad = 18, + PrimitiveType_BatchNorm = 19, + PrimitiveType_BatchNormGrad = 20, + PrimitiveType_BatchToSpace = 21, + PrimitiveType_BatchToSpaceND = 22, + PrimitiveType_BiasAdd = 23, + PrimitiveType_BinaryCrossEntropy = 24, + PrimitiveType_BinaryCrossEntropyGrad = 25, + PrimitiveType_BiasAddGrad = 26, + PrimitiveType_BroadcastTo = 27, + PrimitiveType_Cast = 28, + PrimitiveType_Ceil = 29, + PrimitiveType_Clip = 30, + PrimitiveType_Concat = 31, + PrimitiveType_Attention = 32, + PrimitiveType_Conv2DBackpropFilterFusion = 33, + PrimitiveType_Conv2DBackpropInputFusion = 34, + PrimitiveType_Conv2DFusion = 35, + PrimitiveType_Conv2dTransposeFusion = 36, + PrimitiveType_Cos = 37, + PrimitiveType_ConstantOfShape = 38, + PrimitiveType_Crop = 39, + PrimitiveType_CustomExtractFeatures = 40, + PrimitiveType_CustomNormalize = 41, + PrimitiveType_CustomPredict = 42, + PrimitiveType_DeConv2DGradFilter = 43, + PrimitiveType_Depend = 44, + PrimitiveType_DepthToSpace = 45, + PrimitiveType_DetectionPostProcess = 46, + PrimitiveType_DivFusion = 47, + PrimitiveType_DivGrad = 48, + PrimitiveType_Dropout = 49, + 
PrimitiveType_DropoutGrad = 50, + PrimitiveType_Elu = 51, + PrimitiveType_Eltwise = 52, + PrimitiveType_Equal = 53, + PrimitiveType_EmbeddingLookupFusion = 54, + PrimitiveType_ExpFusion = 55, + PrimitiveType_ExpandDims = 56, + PrimitiveType_FakeQuantWithMinMaxVars = 57, + PrimitiveType_FakeQuantWithMinMaxVarsPerChannel = 58, + PrimitiveType_FftReal = 59, + PrimitiveType_FftImag = 60, + PrimitiveType_Flatten = 61, + PrimitiveType_FlattenGrad = 62, + PrimitiveType_Floor = 63, + PrimitiveType_FloorDiv = 64, + PrimitiveType_FloorMod = 65, + PrimitiveType_Fill = 66, + PrimitiveType_FullConnection = 67, + PrimitiveType_FusedBatchNorm = 68, + PrimitiveType_Gather = 69, + PrimitiveType_GatherNd = 70, + PrimitiveType_Greater = 71, + PrimitiveType_GreaterEqual = 72, + PrimitiveType_HashtableLookup = 73, + PrimitiveType_InstanceNorm = 74, + PrimitiveType_LayerNormFusion = 75, + PrimitiveType_LeakyRelu = 76, + PrimitiveType_Less = 77, + PrimitiveType_LessEqual = 78, + PrimitiveType_Log = 79, + PrimitiveType_LogGrad = 80, + PrimitiveType_LogicalAnd = 81, + PrimitiveType_LogicalNot = 82, + PrimitiveType_LogicalOr = 83, + PrimitiveType_LpNormalization = 84, + PrimitiveType_LRN = 85, + PrimitiveType_LshProjection = 86, + PrimitiveType_LSTM = 87, + PrimitiveType_L2NormalizeFusion = 88, + PrimitiveType_MatMul = 89, + PrimitiveType_Maximum = 90, + PrimitiveType_MaximumGrad = 91, + PrimitiveType_MaxPoolFusion = 92, + PrimitiveType_MaxPoolGrad = 93, + PrimitiveType_Merge = 94, + PrimitiveType_Mfcc = 95, + PrimitiveType_Minimum = 96, + PrimitiveType_MinimumGrad = 97, + PrimitiveType_Mod = 98, + PrimitiveType_MulFusion = 99, + PrimitiveType_MulGrad = 100, + PrimitiveType_Neg = 101, + PrimitiveType_NegGrad = 102, + PrimitiveType_NotEqual = 103, + PrimitiveType_NonMaxSuppression = 104, + PrimitiveType_OneHot = 105, + PrimitiveType_OnesLike = 106, + PrimitiveType_PadFusion = 107, + PrimitiveType_PartialFusion = 108, + PrimitiveType_PowerGrad = 109, + PrimitiveType_PowFusion = 110, + 
PrimitiveType_PriorBox = 111, + PrimitiveType_PReLUFusion = 112, + PrimitiveType_QuantDTypeCast = 113, + PrimitiveType_Rank = 114, + PrimitiveType_Range = 115, + PrimitiveType_Reciprocal = 116, + PrimitiveType_RealDiv = 117, + PrimitiveType_ReduceFusion = 118, + PrimitiveType_Reshape = 119, + PrimitiveType_Resize = 120, + PrimitiveType_ReverseSequence = 121, + PrimitiveType_ReverseV2 = 122, + PrimitiveType_Rfft = 123, + PrimitiveType_ROIPooling = 124, + PrimitiveType_Round = 125, + PrimitiveType_Rsqrt = 126, + PrimitiveType_ScaleFusion = 127, + PrimitiveType_ScatterNd = 128, + PrimitiveType_SGD = 129, + PrimitiveType_Shape = 130, + PrimitiveType_SigmoidCrossEntropyWithLogits = 131, + PrimitiveType_SigmoidCrossEntropyWithLogitsGrad = 132, + PrimitiveType_Sin = 133, + PrimitiveType_SkipGram = 134, + PrimitiveType_SliceFusion = 135, + PrimitiveType_SmoothL1Loss = 136, + PrimitiveType_SmoothL1LossGrad = 137, + PrimitiveType_Softmax = 138, + PrimitiveType_SoftmaxCrossEntropyWithLogits = 139, + PrimitiveType_SpaceToBatch = 140, + PrimitiveType_SpaceToBatchND = 141, + PrimitiveType_SpaceToDepth = 142, + PrimitiveType_SparseSoftmaxCrossEntropyWithLogits = 143, + PrimitiveType_SparseToDense = 144, + PrimitiveType_Split = 145, + PrimitiveType_Sqrt = 146, + PrimitiveType_Squeeze = 147, + PrimitiveType_Square = 148, + PrimitiveType_SquaredDifference = 149, + PrimitiveType_Stack = 150, + PrimitiveType_StridedSlice = 151, + PrimitiveType_SubFusion = 152, + PrimitiveType_SubGrad = 153, + PrimitiveType_Switch = 154, + PrimitiveType_TensorListFromTensor = 155, + PrimitiveType_TensorListGetItem = 156, + PrimitiveType_TensorListReserve = 157, + PrimitiveType_TensorListSetItem = 158, + PrimitiveType_TensorListStack = 159, + PrimitiveType_TileFusion = 160, + PrimitiveType_TopKFusion = 161, + PrimitiveType_Transpose = 162, + PrimitiveType_Unique = 163, + PrimitiveType_UnsortedSegmentSum = 164, + PrimitiveType_Unsqueeze = 165, + PrimitiveType_Unstack = 166, + PrimitiveType_LSTMGrad = 
167, + PrimitiveType_Where = 168, + PrimitiveType_ZerosLike = 169, + PrimitiveType_Select = 170, + PrimitiveType_ScatterNdUpdate = 171, + PrimitiveType_GRU = 172, + PrimitiveType_NonZero = 173, + PrimitiveType_InvertPermutation = 174, + PrimitiveType_Size = 175, + PrimitiveType_RandomStandardNormal = 176, + PrimitiveType_CropAndResize = 177, + PrimitiveType_Erf = 178, + PrimitiveType_StridedSliceGrad = 179, + PrimitiveType_IsFinite = 180, + PrimitiveType_LinSpace = 181, + PrimitiveType_UniformReal = 182, + PrimitiveType_AbsGrad = 183, + PrimitiveType_RsqrtGrad = 184, + PrimitiveType_SqrtGrad = 185, + PrimitiveType_LayerNormGrad = 186, + PrimitiveType_ResizeGrad = 187, + PrimitiveType_Splice = 188, + PrimitiveType_LogSoftmax = 189, + PrimitiveType_Call = 190, + PrimitiveType_Custom = 191, + PrimitiveType_CumSum = 192, + PrimitiveType_SplitWithOverlap = 193, + PrimitiveType_GenOP = 194, + PrimitiveType_RaggedRange = 195, + PrimitiveType_GLU = 196, + PrimitiveType_TensorArray = 197, + PrimitiveType_TensorArrayRead = 198, + PrimitiveType_TensorArrayWrite = 199, + PrimitiveType_Affine = 200, + PrimitiveType_MIN = PrimitiveType_NONE, + PrimitiveType_MAX = PrimitiveType_Affine +}; + +inline const PrimitiveType (&EnumValuesPrimitiveType())[201] { + static const PrimitiveType values[] = { + PrimitiveType_NONE, + PrimitiveType_Abs, + PrimitiveType_Activation, + PrimitiveType_ActivationGrad, + PrimitiveType_Adam, + PrimitiveType_AddFusion, + PrimitiveType_AdderFusion, + PrimitiveType_AddGrad, + PrimitiveType_AddN, + PrimitiveType_All, + PrimitiveType_ApplyMomentum, + PrimitiveType_ArgMaxFusion, + PrimitiveType_ArgMinFusion, + PrimitiveType_Assert, + PrimitiveType_Assign, + PrimitiveType_AssignAdd, + PrimitiveType_AudioSpectrogram, + PrimitiveType_AvgPoolFusion, + PrimitiveType_AvgPoolGrad, + PrimitiveType_BatchNorm, + PrimitiveType_BatchNormGrad, + PrimitiveType_BatchToSpace, + PrimitiveType_BatchToSpaceND, + PrimitiveType_BiasAdd, + PrimitiveType_BinaryCrossEntropy, + 
PrimitiveType_BinaryCrossEntropyGrad, + PrimitiveType_BiasAddGrad, + PrimitiveType_BroadcastTo, + PrimitiveType_Cast, + PrimitiveType_Ceil, + PrimitiveType_Clip, + PrimitiveType_Concat, + PrimitiveType_Attention, + PrimitiveType_Conv2DBackpropFilterFusion, + PrimitiveType_Conv2DBackpropInputFusion, + PrimitiveType_Conv2DFusion, + PrimitiveType_Conv2dTransposeFusion, + PrimitiveType_Cos, + PrimitiveType_ConstantOfShape, + PrimitiveType_Crop, + PrimitiveType_CustomExtractFeatures, + PrimitiveType_CustomNormalize, + PrimitiveType_CustomPredict, + PrimitiveType_DeConv2DGradFilter, + PrimitiveType_Depend, + PrimitiveType_DepthToSpace, + PrimitiveType_DetectionPostProcess, + PrimitiveType_DivFusion, + PrimitiveType_DivGrad, + PrimitiveType_Dropout, + PrimitiveType_DropoutGrad, + PrimitiveType_Elu, + PrimitiveType_Eltwise, + PrimitiveType_Equal, + PrimitiveType_EmbeddingLookupFusion, + PrimitiveType_ExpFusion, + PrimitiveType_ExpandDims, + PrimitiveType_FakeQuantWithMinMaxVars, + PrimitiveType_FakeQuantWithMinMaxVarsPerChannel, + PrimitiveType_FftReal, + PrimitiveType_FftImag, + PrimitiveType_Flatten, + PrimitiveType_FlattenGrad, + PrimitiveType_Floor, + PrimitiveType_FloorDiv, + PrimitiveType_FloorMod, + PrimitiveType_Fill, + PrimitiveType_FullConnection, + PrimitiveType_FusedBatchNorm, + PrimitiveType_Gather, + PrimitiveType_GatherNd, + PrimitiveType_Greater, + PrimitiveType_GreaterEqual, + PrimitiveType_HashtableLookup, + PrimitiveType_InstanceNorm, + PrimitiveType_LayerNormFusion, + PrimitiveType_LeakyRelu, + PrimitiveType_Less, + PrimitiveType_LessEqual, + PrimitiveType_Log, + PrimitiveType_LogGrad, + PrimitiveType_LogicalAnd, + PrimitiveType_LogicalNot, + PrimitiveType_LogicalOr, + PrimitiveType_LpNormalization, + PrimitiveType_LRN, + PrimitiveType_LshProjection, + PrimitiveType_LSTM, + PrimitiveType_L2NormalizeFusion, + PrimitiveType_MatMul, + PrimitiveType_Maximum, + PrimitiveType_MaximumGrad, + PrimitiveType_MaxPoolFusion, + PrimitiveType_MaxPoolGrad, + 
PrimitiveType_Merge, + PrimitiveType_Mfcc, + PrimitiveType_Minimum, + PrimitiveType_MinimumGrad, + PrimitiveType_Mod, + PrimitiveType_MulFusion, + PrimitiveType_MulGrad, + PrimitiveType_Neg, + PrimitiveType_NegGrad, + PrimitiveType_NotEqual, + PrimitiveType_NonMaxSuppression, + PrimitiveType_OneHot, + PrimitiveType_OnesLike, + PrimitiveType_PadFusion, + PrimitiveType_PartialFusion, + PrimitiveType_PowerGrad, + PrimitiveType_PowFusion, + PrimitiveType_PriorBox, + PrimitiveType_PReLUFusion, + PrimitiveType_QuantDTypeCast, + PrimitiveType_Rank, + PrimitiveType_Range, + PrimitiveType_Reciprocal, + PrimitiveType_RealDiv, + PrimitiveType_ReduceFusion, + PrimitiveType_Reshape, + PrimitiveType_Resize, + PrimitiveType_ReverseSequence, + PrimitiveType_ReverseV2, + PrimitiveType_Rfft, + PrimitiveType_ROIPooling, + PrimitiveType_Round, + PrimitiveType_Rsqrt, + PrimitiveType_ScaleFusion, + PrimitiveType_ScatterNd, + PrimitiveType_SGD, + PrimitiveType_Shape, + PrimitiveType_SigmoidCrossEntropyWithLogits, + PrimitiveType_SigmoidCrossEntropyWithLogitsGrad, + PrimitiveType_Sin, + PrimitiveType_SkipGram, + PrimitiveType_SliceFusion, + PrimitiveType_SmoothL1Loss, + PrimitiveType_SmoothL1LossGrad, + PrimitiveType_Softmax, + PrimitiveType_SoftmaxCrossEntropyWithLogits, + PrimitiveType_SpaceToBatch, + PrimitiveType_SpaceToBatchND, + PrimitiveType_SpaceToDepth, + PrimitiveType_SparseSoftmaxCrossEntropyWithLogits, + PrimitiveType_SparseToDense, + PrimitiveType_Split, + PrimitiveType_Sqrt, + PrimitiveType_Squeeze, + PrimitiveType_Square, + PrimitiveType_SquaredDifference, + PrimitiveType_Stack, + PrimitiveType_StridedSlice, + PrimitiveType_SubFusion, + PrimitiveType_SubGrad, + PrimitiveType_Switch, + PrimitiveType_TensorListFromTensor, + PrimitiveType_TensorListGetItem, + PrimitiveType_TensorListReserve, + PrimitiveType_TensorListSetItem, + PrimitiveType_TensorListStack, + PrimitiveType_TileFusion, + PrimitiveType_TopKFusion, + PrimitiveType_Transpose, + PrimitiveType_Unique, + 
PrimitiveType_UnsortedSegmentSum, + PrimitiveType_Unsqueeze, + PrimitiveType_Unstack, + PrimitiveType_LSTMGrad, + PrimitiveType_Where, + PrimitiveType_ZerosLike, + PrimitiveType_Select, + PrimitiveType_ScatterNdUpdate, + PrimitiveType_GRU, + PrimitiveType_NonZero, + PrimitiveType_InvertPermutation, + PrimitiveType_Size, + PrimitiveType_RandomStandardNormal, + PrimitiveType_CropAndResize, + PrimitiveType_Erf, + PrimitiveType_StridedSliceGrad, + PrimitiveType_IsFinite, + PrimitiveType_LinSpace, + PrimitiveType_UniformReal, + PrimitiveType_AbsGrad, + PrimitiveType_RsqrtGrad, + PrimitiveType_SqrtGrad, + PrimitiveType_LayerNormGrad, + PrimitiveType_ResizeGrad, + PrimitiveType_Splice, + PrimitiveType_LogSoftmax, + PrimitiveType_Call, + PrimitiveType_Custom, + PrimitiveType_CumSum, + PrimitiveType_SplitWithOverlap, + PrimitiveType_GenOP, + PrimitiveType_RaggedRange, + PrimitiveType_GLU, + PrimitiveType_TensorArray, + PrimitiveType_TensorArrayRead, + PrimitiveType_TensorArrayWrite, + PrimitiveType_Affine + }; + return values; +} + +inline const char * const *EnumNamesPrimitiveType() { + static const char * const names[202] = { + "NONE", + "Abs", + "Activation", + "ActivationGrad", + "Adam", + "AddFusion", + "AdderFusion", + "AddGrad", + "AddN", + "All", + "ApplyMomentum", + "ArgMaxFusion", + "ArgMinFusion", + "Assert", + "Assign", + "AssignAdd", + "AudioSpectrogram", + "AvgPoolFusion", + "AvgPoolGrad", + "BatchNorm", + "BatchNormGrad", + "BatchToSpace", + "BatchToSpaceND", + "BiasAdd", + "BinaryCrossEntropy", + "BinaryCrossEntropyGrad", + "BiasAddGrad", + "BroadcastTo", + "Cast", + "Ceil", + "Clip", + "Concat", + "Attention", + "Conv2DBackpropFilterFusion", + "Conv2DBackpropInputFusion", + "Conv2DFusion", + "Conv2dTransposeFusion", + "Cos", + "ConstantOfShape", + "Crop", + "CustomExtractFeatures", + "CustomNormalize", + "CustomPredict", + "DeConv2DGradFilter", + "Depend", + "DepthToSpace", + "DetectionPostProcess", + "DivFusion", + "DivGrad", + "Dropout", + "DropoutGrad", 
+ "Elu", + "Eltwise", + "Equal", + "EmbeddingLookupFusion", + "ExpFusion", + "ExpandDims", + "FakeQuantWithMinMaxVars", + "FakeQuantWithMinMaxVarsPerChannel", + "FftReal", + "FftImag", + "Flatten", + "FlattenGrad", + "Floor", + "FloorDiv", + "FloorMod", + "Fill", + "FullConnection", + "FusedBatchNorm", + "Gather", + "GatherNd", + "Greater", + "GreaterEqual", + "HashtableLookup", + "InstanceNorm", + "LayerNormFusion", + "LeakyRelu", + "Less", + "LessEqual", + "Log", + "LogGrad", + "LogicalAnd", + "LogicalNot", + "LogicalOr", + "LpNormalization", + "LRN", + "LshProjection", + "LSTM", + "L2NormalizeFusion", + "MatMul", + "Maximum", + "MaximumGrad", + "MaxPoolFusion", + "MaxPoolGrad", + "Merge", + "Mfcc", + "Minimum", + "MinimumGrad", + "Mod", + "MulFusion", + "MulGrad", + "Neg", + "NegGrad", + "NotEqual", + "NonMaxSuppression", + "OneHot", + "OnesLike", + "PadFusion", + "PartialFusion", + "PowerGrad", + "PowFusion", + "PriorBox", + "PReLUFusion", + "QuantDTypeCast", + "Rank", + "Range", + "Reciprocal", + "RealDiv", + "ReduceFusion", + "Reshape", + "Resize", + "ReverseSequence", + "ReverseV2", + "Rfft", + "ROIPooling", + "Round", + "Rsqrt", + "ScaleFusion", + "ScatterNd", + "SGD", + "Shape", + "SigmoidCrossEntropyWithLogits", + "SigmoidCrossEntropyWithLogitsGrad", + "Sin", + "SkipGram", + "SliceFusion", + "SmoothL1Loss", + "SmoothL1LossGrad", + "Softmax", + "SoftmaxCrossEntropyWithLogits", + "SpaceToBatch", + "SpaceToBatchND", + "SpaceToDepth", + "SparseSoftmaxCrossEntropyWithLogits", + "SparseToDense", + "Split", + "Sqrt", + "Squeeze", + "Square", + "SquaredDifference", + "Stack", + "StridedSlice", + "SubFusion", + "SubGrad", + "Switch", + "TensorListFromTensor", + "TensorListGetItem", + "TensorListReserve", + "TensorListSetItem", + "TensorListStack", + "TileFusion", + "TopKFusion", + "Transpose", + "Unique", + "UnsortedSegmentSum", + "Unsqueeze", + "Unstack", + "LSTMGrad", + "Where", + "ZerosLike", + "Select", + "ScatterNdUpdate", + "GRU", + "NonZero", + 
"InvertPermutation", + "Size", + "RandomStandardNormal", + "CropAndResize", + "Erf", + "StridedSliceGrad", + "IsFinite", + "LinSpace", + "UniformReal", + "AbsGrad", + "RsqrtGrad", + "SqrtGrad", + "LayerNormGrad", + "ResizeGrad", + "Splice", + "LogSoftmax", + "Call", + "Custom", + "CumSum", + "SplitWithOverlap", + "GenOP", + "RaggedRange", + "GLU", + "TensorArray", + "TensorArrayRead", + "TensorArrayWrite", + "Affine", + nullptr + }; + return names; +} + +inline const char *EnumNamePrimitiveType(PrimitiveType e) { + if (flatbuffers::IsOutRange(e, PrimitiveType_NONE, PrimitiveType_Affine)) return ""; + const size_t index = static_cast(e); + return EnumNamesPrimitiveType()[index]; +} + +template struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_NONE; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Abs; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Activation; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ActivationGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Adam; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AddFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AdderFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AddGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AddN; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_All; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ApplyMomentum; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType 
enum_value = PrimitiveType_ArgMaxFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ArgMinFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Assert; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Assign; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AssignAdd; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AudioSpectrogram; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AvgPoolFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AvgPoolGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BatchNorm; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BatchNormGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BatchToSpace; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BatchToSpaceND; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BiasAdd; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BinaryCrossEntropy; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BinaryCrossEntropyGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BiasAddGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BroadcastTo; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = 
PrimitiveType_Cast; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Ceil; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Clip; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Concat; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Attention; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Conv2DBackpropFilterFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Conv2DBackpropInputFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Conv2DFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Conv2dTransposeFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Cos; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ConstantOfShape; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Crop; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_CustomExtractFeatures; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_CustomNormalize; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_CustomPredict; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DeConv2DGradFilter; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Depend; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = 
PrimitiveType_DepthToSpace; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DetectionPostProcess; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DivFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DivGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Dropout; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DropoutGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Elu; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Eltwise; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Equal; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_EmbeddingLookupFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ExpFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ExpandDims; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FakeQuantWithMinMaxVars; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FakeQuantWithMinMaxVarsPerChannel; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FftReal; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FftImag; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Flatten; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = 
PrimitiveType_FlattenGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Floor; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FloorDiv; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FloorMod; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Fill; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FullConnection; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FusedBatchNorm; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Gather; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_GatherNd; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Greater; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_GreaterEqual; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_HashtableLookup; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_InstanceNorm; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LayerNormFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LeakyRelu; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Less; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LessEqual; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Log; +}; + +template<> struct 
PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LogGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LogicalAnd; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LogicalNot; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LogicalOr; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LpNormalization; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LRN; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LshProjection; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LSTM; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_L2NormalizeFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_MatMul; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Maximum; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_MaximumGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_MaxPoolFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_MaxPoolGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Merge; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Mfcc; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Minimum; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value 
= PrimitiveType_MinimumGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Mod; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_MulFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_MulGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Neg; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_NegGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_NotEqual; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_NonMaxSuppression; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_OneHot; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_OnesLike; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_PadFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_PartialFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_PowerGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_PowFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_PriorBox; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_PReLUFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_QuantDTypeCast; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Rank; +}; + +template<> struct 
PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Range; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Reciprocal; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_RealDiv; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ReduceFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Reshape; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Resize; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ReverseSequence; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ReverseV2; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Rfft; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ROIPooling; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Round; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Rsqrt; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ScaleFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ScatterNd; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SGD; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Shape; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SigmoidCrossEntropyWithLogits; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value 
= PrimitiveType_SigmoidCrossEntropyWithLogitsGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Sin; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SkipGram; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SliceFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SmoothL1Loss; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SmoothL1LossGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Softmax; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SoftmaxCrossEntropyWithLogits; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SpaceToBatch; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SpaceToBatchND; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SpaceToDepth; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SparseSoftmaxCrossEntropyWithLogits; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SparseToDense; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Split; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Sqrt; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Squeeze; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Square; +}; + +template<> struct PrimitiveTypeTraits { + static const 
PrimitiveType enum_value = PrimitiveType_SquaredDifference; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Stack; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_StridedSlice; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SubFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SubGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Switch; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TensorListFromTensor; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TensorListGetItem; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TensorListReserve; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TensorListSetItem; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TensorListStack; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TileFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TopKFusion; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Transpose; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Unique; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_UnsortedSegmentSum; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Unsqueeze; +}; + +template<> struct PrimitiveTypeTraits { + static const 
PrimitiveType enum_value = PrimitiveType_Unstack; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LSTMGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Where; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ZerosLike; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Select; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ScatterNdUpdate; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_GRU; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_NonZero; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_InvertPermutation; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Size; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_RandomStandardNormal; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_CropAndResize; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Erf; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_StridedSliceGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_IsFinite; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LinSpace; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_UniformReal; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AbsGrad; +}; 
+ +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_RsqrtGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SqrtGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LayerNormGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ResizeGrad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Splice; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LogSoftmax; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Call; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Custom; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_CumSum; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SplitWithOverlap; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_GenOP; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_RaggedRange; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_GLU; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TensorArray; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TensorArrayRead; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TensorArrayWrite; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Affine; +}; + +bool VerifyPrimitiveType(flatbuffers::Verifier 
&verifier, const void *obj, PrimitiveType type); +bool VerifyPrimitiveTypeVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); + +struct Abs FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AbsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AbsBuilder { + typedef Abs Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AbsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAbs( + flatbuffers::FlatBufferBuilder &_fbb) { + AbsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Activation FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ActivationBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACTIVATION_TYPE = 4, + VT_ALPHA = 6, + VT_MIN_VAL = 8, + VT_MAX_VAL = 10, + VT_APPROXIMATE = 12 + }; + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + float min_val() const { + return GetField(VT_MIN_VAL, 0.0f); + } + float max_val() const { + return GetField(VT_MAX_VAL, 0.0f); + } + bool approximate() const { + return GetField(VT_APPROXIMATE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + VerifyField(verifier, VT_ALPHA) && + VerifyField(verifier, VT_MIN_VAL) && + VerifyField(verifier, VT_MAX_VAL) && + VerifyField(verifier, VT_APPROXIMATE) && + verifier.EndTable(); + } +}; + +struct ActivationBuilder { + typedef Activation Table; + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(Activation::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + void add_alpha(float alpha) { + fbb_.AddElement(Activation::VT_ALPHA, alpha, 0.0f); + } + void add_min_val(float min_val) { + fbb_.AddElement(Activation::VT_MIN_VAL, min_val, 0.0f); + } + void add_max_val(float max_val) { + fbb_.AddElement(Activation::VT_MAX_VAL, max_val, 0.0f); + } + void add_approximate(bool approximate) { + fbb_.AddElement(Activation::VT_APPROXIMATE, static_cast(approximate), 0); + } + explicit ActivationBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateActivation( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION, + float alpha = 0.0f, + float min_val = 0.0f, + float max_val = 0.0f, + bool approximate = false) { + ActivationBuilder builder_(_fbb); + builder_.add_max_val(max_val); + builder_.add_min_val(min_val); + builder_.add_alpha(alpha); + builder_.add_approximate(approximate); + builder_.add_activation_type(activation_type); + return builder_.Finish(); +} + +struct ActivationGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ActivationGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACTIVATION_TYPE = 4, + VT_ALPHA = 6 + }; + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACTIVATION_TYPE) 
&& + VerifyField(verifier, VT_ALPHA) && + verifier.EndTable(); + } +}; + +struct ActivationGradBuilder { + typedef ActivationGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(ActivationGrad::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + void add_alpha(float alpha) { + fbb_.AddElement(ActivationGrad::VT_ALPHA, alpha, 0.0f); + } + explicit ActivationGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateActivationGrad( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION, + float alpha = 0.0f) { + ActivationGradBuilder builder_(_fbb); + builder_.add_alpha(alpha); + builder_.add_activation_type(activation_type); + return builder_.Finish(); +} + +struct Adam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AdamBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_USE_LOCKING = 4, + VT_USE_NESTEROV = 6 + }; + bool use_locking() const { + return GetField(VT_USE_LOCKING, 0) != 0; + } + bool use_nesterov() const { + return GetField(VT_USE_NESTEROV, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_USE_LOCKING) && + VerifyField(verifier, VT_USE_NESTEROV) && + verifier.EndTable(); + } +}; + +struct AdamBuilder { + typedef Adam Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_use_locking(bool use_locking) { + fbb_.AddElement(Adam::VT_USE_LOCKING, static_cast(use_locking), 0); + } + void add_use_nesterov(bool use_nesterov) { + 
fbb_.AddElement(Adam::VT_USE_NESTEROV, static_cast(use_nesterov), 0); + } + explicit AdamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAdam( + flatbuffers::FlatBufferBuilder &_fbb, + bool use_locking = false, + bool use_nesterov = false) { + AdamBuilder builder_(_fbb); + builder_.add_use_nesterov(use_nesterov); + builder_.add_use_locking(use_locking); + return builder_.Finish(); +} + +struct AddFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AddFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACTIVATION_TYPE = 4 + }; + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct AddFusionBuilder { + typedef AddFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(AddFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit AddFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAddFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + AddFusionBuilder builder_(_fbb); + builder_.add_activation_type(activation_type); + return builder_.Finish(); +} + +struct AdderFusion 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AdderFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_KERNEL_SIZE = 6, + VT_STRIDE = 8, + VT_DILATION = 10, + VT_PAD_MODE = 12, + VT_PAD_LIST = 14, + VT_GROUP = 16, + VT_IN_CHANNEL = 18, + VT_OUT_CHANNEL = 20, + VT_ACTIVATION_TYPE = 22 + }; + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *stride() const { + return GetPointer *>(VT_STRIDE); + } + const flatbuffers::Vector *dilation() const { + return GetPointer *>(VT_DILATION); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + const flatbuffers::Vector *pad_list() const { + return GetPointer *>(VT_PAD_LIST); + } + int64_t group() const { + return GetField(VT_GROUP, 0); + } + int64_t in_channel() const { + return GetField(VT_IN_CHANNEL, 0); + } + int64_t out_channel() const { + return GetField(VT_OUT_CHANNEL, 0); + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDE) && + verifier.VerifyVector(stride()) && + VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyOffset(verifier, VT_PAD_LIST) && + verifier.VerifyVector(pad_list()) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_IN_CHANNEL) && + VerifyField(verifier, VT_OUT_CHANNEL) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct AdderFusionBuilder { + typedef AdderFusion 
Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(AdderFusion::VT_FORMAT, static_cast(format), 0); + } + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(AdderFusion::VT_KERNEL_SIZE, kernel_size); + } + void add_stride(flatbuffers::Offset> stride) { + fbb_.AddOffset(AdderFusion::VT_STRIDE, stride); + } + void add_dilation(flatbuffers::Offset> dilation) { + fbb_.AddOffset(AdderFusion::VT_DILATION, dilation); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(AdderFusion::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_pad_list(flatbuffers::Offset> pad_list) { + fbb_.AddOffset(AdderFusion::VT_PAD_LIST, pad_list); + } + void add_group(int64_t group) { + fbb_.AddElement(AdderFusion::VT_GROUP, group, 0); + } + void add_in_channel(int64_t in_channel) { + fbb_.AddElement(AdderFusion::VT_IN_CHANNEL, in_channel, 0); + } + void add_out_channel(int64_t out_channel) { + fbb_.AddElement(AdderFusion::VT_OUT_CHANNEL, out_channel, 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(AdderFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit AdderFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAdderFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> stride = 0, + flatbuffers::Offset> dilation = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + flatbuffers::Offset> pad_list = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + 
mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + AdderFusionBuilder builder_(_fbb); + builder_.add_out_channel(out_channel); + builder_.add_in_channel(in_channel); + builder_.add_group(group); + builder_.add_pad_list(pad_list); + builder_.add_dilation(dilation); + builder_.add_stride(stride); + builder_.add_kernel_size(kernel_size); + builder_.add_format(format); + builder_.add_activation_type(activation_type); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateAdderFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + const std::vector *kernel_size = nullptr, + const std::vector *stride = nullptr, + const std::vector *dilation = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + const std::vector *pad_list = nullptr, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? _fbb.CreateVector(*dilation) : 0; + auto pad_list__ = pad_list ? 
_fbb.CreateVector(*pad_list) : 0; + return mindspore::schema::CreateAdderFusion( + _fbb, + format, + kernel_size__, + stride__, + dilation__, + pad_mode, + pad_list__, + group, + in_channel, + out_channel, + activation_type); +} + +struct AddGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AddGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AddGradBuilder { + typedef AddGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AddGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAddGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + AddGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct AddN FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AddNBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AddNBuilder { + typedef AddN Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AddNBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAddN( + flatbuffers::FlatBufferBuilder &_fbb) { + AddNBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct All FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AllBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEEP_DIMS = 4 + }; + int64_t keep_dims() const { + return GetField(VT_KEEP_DIMS, 0); + } + bool 
Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KEEP_DIMS) && + verifier.EndTable(); + } +}; + +struct AllBuilder { + typedef All Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_keep_dims(int64_t keep_dims) { + fbb_.AddElement(All::VT_KEEP_DIMS, keep_dims, 0); + } + explicit AllBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAll( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t keep_dims = 0) { + AllBuilder builder_(_fbb); + builder_.add_keep_dims(keep_dims); + return builder_.Finish(); +} + +struct ApplyMomentum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ApplyMomentumBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_USE_NESTEROV = 4, + VT_USE_LOCKING = 6, + VT_GRADIENT_SCALE = 8 + }; + bool use_nesterov() const { + return GetField(VT_USE_NESTEROV, 0) != 0; + } + bool use_locking() const { + return GetField(VT_USE_LOCKING, 0) != 0; + } + float gradient_scale() const { + return GetField(VT_GRADIENT_SCALE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_USE_NESTEROV) && + VerifyField(verifier, VT_USE_LOCKING) && + VerifyField(verifier, VT_GRADIENT_SCALE) && + verifier.EndTable(); + } +}; + +struct ApplyMomentumBuilder { + typedef ApplyMomentum Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_use_nesterov(bool use_nesterov) { + fbb_.AddElement(ApplyMomentum::VT_USE_NESTEROV, static_cast(use_nesterov), 0); + } + void add_use_locking(bool use_locking) { + fbb_.AddElement(ApplyMomentum::VT_USE_LOCKING, static_cast(use_locking), 0); + } + void 
add_gradient_scale(float gradient_scale) { + fbb_.AddElement(ApplyMomentum::VT_GRADIENT_SCALE, gradient_scale, 0.0f); + } + explicit ApplyMomentumBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateApplyMomentum( + flatbuffers::FlatBufferBuilder &_fbb, + bool use_nesterov = false, + bool use_locking = false, + float gradient_scale = 0.0f) { + ApplyMomentumBuilder builder_(_fbb); + builder_.add_gradient_scale(gradient_scale); + builder_.add_use_locking(use_locking); + builder_.add_use_nesterov(use_nesterov); + return builder_.Finish(); +} + +struct ArgMaxFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ArgMaxFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_TOP_K = 6, + VT_KEEP_DIMS = 8, + VT_OUT_MAX_VALUE = 10 + }; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + int64_t top_k() const { + return GetField(VT_TOP_K, 1LL); + } + bool keep_dims() const { + return GetField(VT_KEEP_DIMS, 0) != 0; + } + bool out_max_value() const { + return GetField(VT_OUT_MAX_VALUE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_TOP_K) && + VerifyField(verifier, VT_KEEP_DIMS) && + VerifyField(verifier, VT_OUT_MAX_VALUE) && + verifier.EndTable(); + } +}; + +struct ArgMaxFusionBuilder { + typedef ArgMaxFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(ArgMaxFusion::VT_AXIS, axis, 0); + } + void add_top_k(int64_t top_k) { + fbb_.AddElement(ArgMaxFusion::VT_TOP_K, top_k, 1LL); + } + void add_keep_dims(bool keep_dims) { + fbb_.AddElement(ArgMaxFusion::VT_KEEP_DIMS, 
static_cast(keep_dims), 0); + } + void add_out_max_value(bool out_max_value) { + fbb_.AddElement(ArgMaxFusion::VT_OUT_MAX_VALUE, static_cast(out_max_value), 0); + } + explicit ArgMaxFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateArgMaxFusion( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0, + int64_t top_k = 1LL, + bool keep_dims = false, + bool out_max_value = false) { + ArgMaxFusionBuilder builder_(_fbb); + builder_.add_top_k(top_k); + builder_.add_axis(axis); + builder_.add_out_max_value(out_max_value); + builder_.add_keep_dims(keep_dims); + return builder_.Finish(); +} + +struct ArgMinFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ArgMinFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_TOP_K = 6, + VT_KEEP_DIMS = 8, + VT_OUT_MAX_VALUE = 10 + }; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + int64_t top_k() const { + return GetField(VT_TOP_K, 0); + } + bool keep_dims() const { + return GetField(VT_KEEP_DIMS, 0) != 0; + } + bool out_max_value() const { + return GetField(VT_OUT_MAX_VALUE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_TOP_K) && + VerifyField(verifier, VT_KEEP_DIMS) && + VerifyField(verifier, VT_OUT_MAX_VALUE) && + verifier.EndTable(); + } +}; + +struct ArgMinFusionBuilder { + typedef ArgMinFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(ArgMinFusion::VT_AXIS, axis, 0); + } + void add_top_k(int64_t top_k) { + fbb_.AddElement(ArgMinFusion::VT_TOP_K, top_k, 0); + } + void add_keep_dims(bool keep_dims) { + 
fbb_.AddElement(ArgMinFusion::VT_KEEP_DIMS, static_cast(keep_dims), 0); + } + void add_out_max_value(bool out_max_value) { + fbb_.AddElement(ArgMinFusion::VT_OUT_MAX_VALUE, static_cast(out_max_value), 0); + } + explicit ArgMinFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateArgMinFusion( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0, + int64_t top_k = 0, + bool keep_dims = false, + bool out_max_value = false) { + ArgMinFusionBuilder builder_(_fbb); + builder_.add_top_k(top_k); + builder_.add_axis(axis); + builder_.add_out_max_value(out_max_value); + builder_.add_keep_dims(keep_dims); + return builder_.Finish(); +} + +struct Assert FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AssertBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SUMMARIZE = 4 + }; + int64_t summarize() const { + return GetField(VT_SUMMARIZE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SUMMARIZE) && + verifier.EndTable(); + } +}; + +struct AssertBuilder { + typedef Assert Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_summarize(int64_t summarize) { + fbb_.AddElement(Assert::VT_SUMMARIZE, summarize, 0); + } + explicit AssertBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAssert( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t summarize = 0) { + AssertBuilder builder_(_fbb); + builder_.add_summarize(summarize); + return builder_.Finish(); +} + +struct Assign 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AssignBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AssignBuilder { + typedef Assign Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AssignBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAssign( + flatbuffers::FlatBufferBuilder &_fbb) { + AssignBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct AssignAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AssignAddBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AssignAddBuilder { + typedef AssignAdd Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AssignAddBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAssignAdd( + flatbuffers::FlatBufferBuilder &_fbb) { + AssignAddBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct AudioSpectrogram FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AudioSpectrogramBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_WINDOW_SIZE = 4, + VT_STRIDE = 6, + VT_MAG_SQUARE = 8 + }; + int64_t window_size() const { + return GetField(VT_WINDOW_SIZE, 0); + } + int64_t stride() const { + return GetField(VT_STRIDE, 0); + } + bool mag_square() const { + return GetField(VT_MAG_SQUARE, 0) != 0; + } + bool 
Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_WINDOW_SIZE) && + VerifyField(verifier, VT_STRIDE) && + VerifyField(verifier, VT_MAG_SQUARE) && + verifier.EndTable(); + } +}; + +struct AudioSpectrogramBuilder { + typedef AudioSpectrogram Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_window_size(int64_t window_size) { + fbb_.AddElement(AudioSpectrogram::VT_WINDOW_SIZE, window_size, 0); + } + void add_stride(int64_t stride) { + fbb_.AddElement(AudioSpectrogram::VT_STRIDE, stride, 0); + } + void add_mag_square(bool mag_square) { + fbb_.AddElement(AudioSpectrogram::VT_MAG_SQUARE, static_cast(mag_square), 0); + } + explicit AudioSpectrogramBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAudioSpectrogram( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t window_size = 0, + int64_t stride = 0, + bool mag_square = false) { + AudioSpectrogramBuilder builder_(_fbb); + builder_.add_stride(stride); + builder_.add_window_size(window_size); + builder_.add_mag_square(mag_square); + return builder_.Finish(); +} + +struct AvgPoolFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AvgPoolFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KERNEL_SIZE = 4, + VT_STRIDES = 6, + VT_PAD = 8, + VT_PAD_MODE = 10, + VT_ROUND_MODE = 12, + VT_FORMAT = 14, + VT_GLOBAL = 16, + VT_ACTIVATION_TYPE = 18 + }; + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + const flatbuffers::Vector *pad() const { + return GetPointer *>(VT_PAD); + } + mindspore::schema::PadMode pad_mode() const { + 
return static_cast(GetField(VT_PAD_MODE, 0)); + } + mindspore::schema::RoundMode round_mode() const { + return static_cast(GetField(VT_ROUND_MODE, 0)); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool global() const { + return GetField(VT_GLOBAL, 0) != 0; + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + VerifyOffset(verifier, VT_PAD) && + verifier.VerifyVector(pad()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyField(verifier, VT_ROUND_MODE) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_GLOBAL) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct AvgPoolFusionBuilder { + typedef AvgPoolFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(AvgPoolFusion::VT_KERNEL_SIZE, kernel_size); + } + void add_strides(flatbuffers::Offset> strides) { + fbb_.AddOffset(AvgPoolFusion::VT_STRIDES, strides); + } + void add_pad(flatbuffers::Offset> pad) { + fbb_.AddOffset(AvgPoolFusion::VT_PAD, pad); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(AvgPoolFusion::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_round_mode(mindspore::schema::RoundMode round_mode) { + fbb_.AddElement(AvgPoolFusion::VT_ROUND_MODE, static_cast(round_mode), 0); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(AvgPoolFusion::VT_FORMAT, static_cast(format), 0); + } + void add_global(bool global) { + fbb_.AddElement(AvgPoolFusion::VT_GLOBAL, static_cast(global), 0); + } + void 
add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(AvgPoolFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit AvgPoolFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAvgPoolFusion( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> strides = 0, + flatbuffers::Offset> pad = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + mindspore::schema::RoundMode round_mode = mindspore::schema::RoundMode_FLOOR, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + bool global = false, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + AvgPoolFusionBuilder builder_(_fbb); + builder_.add_format(format); + builder_.add_pad(pad); + builder_.add_strides(strides); + builder_.add_kernel_size(kernel_size); + builder_.add_activation_type(activation_type); + builder_.add_global(global); + builder_.add_round_mode(round_mode); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateAvgPoolFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *kernel_size = nullptr, + const std::vector *strides = nullptr, + const std::vector *pad = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + mindspore::schema::RoundMode round_mode = mindspore::schema::RoundMode_FLOOR, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + bool global = false, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto strides__ = strides ? 
_fbb.CreateVector(*strides) : 0; + auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; + return mindspore::schema::CreateAvgPoolFusion( + _fbb, + kernel_size__, + strides__, + pad__, + pad_mode, + round_mode, + format, + global, + activation_type); +} + +struct AvgPoolGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AvgPoolGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KERNEL_SIZE = 4, + VT_STRIDES = 6, + VT_PAD_MODE = 8, + VT_FORMAT = 10 + }; + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyField(verifier, VT_FORMAT) && + verifier.EndTable(); + } +}; + +struct AvgPoolGradBuilder { + typedef AvgPoolGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(AvgPoolGrad::VT_KERNEL_SIZE, kernel_size); + } + void add_strides(flatbuffers::Offset> strides) { + fbb_.AddOffset(AvgPoolGrad::VT_STRIDES, strides); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(AvgPoolGrad::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(AvgPoolGrad::VT_FORMAT, static_cast(format), 0); + } + explicit AvgPoolGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + 
flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAvgPoolGrad( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> strides = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + mindspore::schema::Format format = mindspore::schema::Format_NCHW) { + AvgPoolGradBuilder builder_(_fbb); + builder_.add_format(format); + builder_.add_strides(strides); + builder_.add_kernel_size(kernel_size); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateAvgPoolGradDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *kernel_size = nullptr, + const std::vector *strides = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + mindspore::schema::Format format = mindspore::schema::Format_NCHW) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto strides__ = strides ? 
_fbb.CreateVector(*strides) : 0; + return mindspore::schema::CreateAvgPoolGrad( + _fbb, + kernel_size__, + strides__, + pad_mode, + format); +} + +struct BatchNorm FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BatchNormBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EPSILON = 4, + VT_FORMAT = 6, + VT_IS_TRAINING = 8 + }; + float epsilon() const { + return GetField(VT_EPSILON, 0.0f); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool is_training() const { + return GetField(VT_IS_TRAINING, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EPSILON) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_IS_TRAINING) && + verifier.EndTable(); + } +}; + +struct BatchNormBuilder { + typedef BatchNorm Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_epsilon(float epsilon) { + fbb_.AddElement(BatchNorm::VT_EPSILON, epsilon, 0.0f); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(BatchNorm::VT_FORMAT, static_cast(format), 0); + } + void add_is_training(bool is_training) { + fbb_.AddElement(BatchNorm::VT_IS_TRAINING, static_cast(is_training), 0); + } + explicit BatchNormBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBatchNorm( + flatbuffers::FlatBufferBuilder &_fbb, + float epsilon = 0.0f, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + bool is_training = false) { + BatchNormBuilder builder_(_fbb); + builder_.add_format(format); + builder_.add_epsilon(epsilon); + builder_.add_is_training(is_training); + return builder_.Finish(); +} + +struct BatchNormGrad 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BatchNormGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EPSILON = 4 + }; + float epsilon() const { + return GetField(VT_EPSILON, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EPSILON) && + verifier.EndTable(); + } +}; + +struct BatchNormGradBuilder { + typedef BatchNormGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_epsilon(float epsilon) { + fbb_.AddElement(BatchNormGrad::VT_EPSILON, epsilon, 0.0f); + } + explicit BatchNormGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBatchNormGrad( + flatbuffers::FlatBufferBuilder &_fbb, + float epsilon = 0.0f) { + BatchNormGradBuilder builder_(_fbb); + builder_.add_epsilon(epsilon); + return builder_.Finish(); +} + +struct BatchToSpace FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BatchToSpaceBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4, + VT_CROPS = 6 + }; + const flatbuffers::Vector *block_size() const { + return GetPointer *>(VT_BLOCK_SIZE); + } + const mindspore::schema::Vec2D *crops() const { + return GetPointer(VT_CROPS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BLOCK_SIZE) && + verifier.VerifyVector(block_size()) && + VerifyOffset(verifier, VT_CROPS) && + verifier.VerifyTable(crops()) && + verifier.EndTable(); + } +}; + +struct BatchToSpaceBuilder { + typedef BatchToSpace Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(flatbuffers::Offset> 
block_size) { + fbb_.AddOffset(BatchToSpace::VT_BLOCK_SIZE, block_size); + } + void add_crops(flatbuffers::Offset crops) { + fbb_.AddOffset(BatchToSpace::VT_CROPS, crops); + } + explicit BatchToSpaceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBatchToSpace( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> block_size = 0, + flatbuffers::Offset crops = 0) { + BatchToSpaceBuilder builder_(_fbb); + builder_.add_crops(crops); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBatchToSpaceDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *block_size = nullptr, + flatbuffers::Offset crops = 0) { + auto block_size__ = block_size ? _fbb.CreateVector(*block_size) : 0; + return mindspore::schema::CreateBatchToSpace( + _fbb, + block_size__, + crops); +} + +struct BatchToSpaceND FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BatchToSpaceNDBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SHAPE = 4, + VT_CROPS = 6 + }; + const flatbuffers::Vector *block_shape() const { + return GetPointer *>(VT_BLOCK_SHAPE); + } + const mindspore::schema::Vec2D *crops() const { + return GetPointer(VT_CROPS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BLOCK_SHAPE) && + verifier.VerifyVector(block_shape()) && + VerifyOffset(verifier, VT_CROPS) && + verifier.VerifyTable(crops()) && + verifier.EndTable(); + } +}; + +struct BatchToSpaceNDBuilder { + typedef BatchToSpaceND Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_shape(flatbuffers::Offset> block_shape) { + 
fbb_.AddOffset(BatchToSpaceND::VT_BLOCK_SHAPE, block_shape); + } + void add_crops(flatbuffers::Offset crops) { + fbb_.AddOffset(BatchToSpaceND::VT_CROPS, crops); + } + explicit BatchToSpaceNDBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBatchToSpaceND( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> block_shape = 0, + flatbuffers::Offset crops = 0) { + BatchToSpaceNDBuilder builder_(_fbb); + builder_.add_crops(crops); + builder_.add_block_shape(block_shape); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBatchToSpaceNDDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *block_shape = nullptr, + flatbuffers::Offset crops = 0) { + auto block_shape__ = block_shape ? _fbb.CreateVector(*block_shape) : 0; + return mindspore::schema::CreateBatchToSpaceND( + _fbb, + block_shape__, + crops); +} + +struct BiasAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BiasAddBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4 + }; + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + verifier.EndTable(); + } +}; + +struct BiasAddBuilder { + typedef BiasAdd Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(BiasAdd::VT_FORMAT, static_cast(format), 0); + } + explicit BiasAddBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + 
return o; + } +}; + +inline flatbuffers::Offset CreateBiasAdd( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW) { + BiasAddBuilder builder_(_fbb); + builder_.add_format(format); + return builder_.Finish(); +} + +struct BinaryCrossEntropy FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BinaryCrossEntropyBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_REDUCTION = 4 + }; + mindspore::schema::Reduction reduction() const { + return static_cast(GetField(VT_REDUCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_REDUCTION) && + verifier.EndTable(); + } +}; + +struct BinaryCrossEntropyBuilder { + typedef BinaryCrossEntropy Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_reduction(mindspore::schema::Reduction reduction) { + fbb_.AddElement(BinaryCrossEntropy::VT_REDUCTION, static_cast(reduction), 0); + } + explicit BinaryCrossEntropyBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBinaryCrossEntropy( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Reduction reduction = mindspore::schema::Reduction_REDUCTION_SUM) { + BinaryCrossEntropyBuilder builder_(_fbb); + builder_.add_reduction(reduction); + return builder_.Finish(); +} + +struct BinaryCrossEntropyGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BinaryCrossEntropyGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_REDUCTION = 4 + }; + mindspore::schema::Reduction reduction() const { + return static_cast(GetField(VT_REDUCTION, 1)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + 
return VerifyTableStart(verifier) && + VerifyField(verifier, VT_REDUCTION) && + verifier.EndTable(); + } +}; + +struct BinaryCrossEntropyGradBuilder { + typedef BinaryCrossEntropyGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_reduction(mindspore::schema::Reduction reduction) { + fbb_.AddElement(BinaryCrossEntropyGrad::VT_REDUCTION, static_cast(reduction), 1); + } + explicit BinaryCrossEntropyGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBinaryCrossEntropyGrad( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Reduction reduction = mindspore::schema::Reduction_MEAN) { + BinaryCrossEntropyGradBuilder builder_(_fbb); + builder_.add_reduction(reduction); + return builder_.Finish(); +} + +struct BiasAddGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BiasAddGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct BiasAddGradBuilder { + typedef BiasAddGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit BiasAddGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBiasAddGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + BiasAddGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct BroadcastTo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BroadcastToBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SHAPE = 4 + }; + const flatbuffers::Vector *shape() 
const { + return GetPointer *>(VT_SHAPE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SHAPE) && + verifier.VerifyVector(shape()) && + verifier.EndTable(); + } +}; + +struct BroadcastToBuilder { + typedef BroadcastTo Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_shape(flatbuffers::Offset> shape) { + fbb_.AddOffset(BroadcastTo::VT_SHAPE, shape); + } + explicit BroadcastToBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBroadcastTo( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> shape = 0) { + BroadcastToBuilder builder_(_fbb); + builder_.add_shape(shape); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBroadcastToDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *shape = nullptr) { + auto shape__ = shape ? 
_fbb.CreateVector(*shape) : 0; + return mindspore::schema::CreateBroadcastTo( + _fbb, + shape__); +} + +struct Cast FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CastBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct CastBuilder { + typedef Cast Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CastBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCast( + flatbuffers::FlatBufferBuilder &_fbb) { + CastBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Ceil FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CeilBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct CeilBuilder { + typedef Ceil Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CeilBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCeil( + flatbuffers::FlatBufferBuilder &_fbb) { + CeilBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Clip FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ClipBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MAX = 4, + VT_MIN = 6 + }; + float max() const { + return GetField(VT_MAX, 0.0f); + } + float min() const { + return GetField(VT_MIN, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + 
VerifyField(verifier, VT_MAX) && + VerifyField(verifier, VT_MIN) && + verifier.EndTable(); + } +}; + +struct ClipBuilder { + typedef Clip Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_max(float max) { + fbb_.AddElement(Clip::VT_MAX, max, 0.0f); + } + void add_min(float min) { + fbb_.AddElement(Clip::VT_MIN, min, 0.0f); + } + explicit ClipBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateClip( + flatbuffers::FlatBufferBuilder &_fbb, + float max = 0.0f, + float min = 0.0f) { + ClipBuilder builder_(_fbb); + builder_.add_min(min); + builder_.add_max(max); + return builder_.Finish(); +} + +struct Concat FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ConcatBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct ConcatBuilder { + typedef Concat Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(Concat::VT_AXIS, axis, 0); + } + explicit ConcatBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConcat( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0) { + ConcatBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct Attention FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef 
AttentionBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AttentionBuilder { + typedef Attention Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AttentionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAttention( + flatbuffers::FlatBufferBuilder &_fbb) { + AttentionBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Conv2DBackpropFilterFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Conv2DBackpropFilterFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_KERNEL_SIZE = 6, + VT_STRIDE = 8, + VT_DILATION = 10, + VT_PAD_MODE = 12, + VT_PAD_LIST = 14, + VT_MODE = 16, + VT_GROUP = 18, + VT_IN_CHANNEL = 20, + VT_OUT_CHANNEL = 22, + VT_ACTIVATION_TYPE = 24 + }; + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *stride() const { + return GetPointer *>(VT_STRIDE); + } + const flatbuffers::Vector *dilation() const { + return GetPointer *>(VT_DILATION); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + const flatbuffers::Vector *pad_list() const { + return GetPointer *>(VT_PAD_LIST); + } + int64_t mode() const { + return GetField(VT_MODE, 0); + } + int64_t group() const { + return GetField(VT_GROUP, 0); + } + int64_t in_channel() const { + return GetField(VT_IN_CHANNEL, 0); + } + int64_t out_channel() const { + return GetField(VT_OUT_CHANNEL, 0); + } + mindspore::schema::ActivationType activation_type() 
const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDE) && + verifier.VerifyVector(stride()) && + VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyOffset(verifier, VT_PAD_LIST) && + verifier.VerifyVector(pad_list()) && + VerifyField(verifier, VT_MODE) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_IN_CHANNEL) && + VerifyField(verifier, VT_OUT_CHANNEL) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct Conv2DBackpropFilterFusionBuilder { + typedef Conv2DBackpropFilterFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(Conv2DBackpropFilterFusion::VT_FORMAT, static_cast(format), 0); + } + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(Conv2DBackpropFilterFusion::VT_KERNEL_SIZE, kernel_size); + } + void add_stride(flatbuffers::Offset> stride) { + fbb_.AddOffset(Conv2DBackpropFilterFusion::VT_STRIDE, stride); + } + void add_dilation(flatbuffers::Offset> dilation) { + fbb_.AddOffset(Conv2DBackpropFilterFusion::VT_DILATION, dilation); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(Conv2DBackpropFilterFusion::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_pad_list(flatbuffers::Offset> pad_list) { + fbb_.AddOffset(Conv2DBackpropFilterFusion::VT_PAD_LIST, pad_list); + } + void add_mode(int64_t mode) { + fbb_.AddElement(Conv2DBackpropFilterFusion::VT_MODE, mode, 0); + } + void add_group(int64_t group) { + fbb_.AddElement(Conv2DBackpropFilterFusion::VT_GROUP, group, 0); + } + void add_in_channel(int64_t 
in_channel) { + fbb_.AddElement(Conv2DBackpropFilterFusion::VT_IN_CHANNEL, in_channel, 0); + } + void add_out_channel(int64_t out_channel) { + fbb_.AddElement(Conv2DBackpropFilterFusion::VT_OUT_CHANNEL, out_channel, 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(Conv2DBackpropFilterFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit Conv2DBackpropFilterFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv2DBackpropFilterFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> stride = 0, + flatbuffers::Offset> dilation = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + flatbuffers::Offset> pad_list = 0, + int64_t mode = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + Conv2DBackpropFilterFusionBuilder builder_(_fbb); + builder_.add_out_channel(out_channel); + builder_.add_in_channel(in_channel); + builder_.add_group(group); + builder_.add_mode(mode); + builder_.add_pad_list(pad_list); + builder_.add_dilation(dilation); + builder_.add_stride(stride); + builder_.add_kernel_size(kernel_size); + builder_.add_format(format); + builder_.add_activation_type(activation_type); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateConv2DBackpropFilterFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + const std::vector *kernel_size = nullptr, + const std::vector *stride = 
nullptr, + const std::vector *dilation = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + const std::vector *pad_list = nullptr, + int64_t mode = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? _fbb.CreateVector(*dilation) : 0; + auto pad_list__ = pad_list ? _fbb.CreateVector(*pad_list) : 0; + return mindspore::schema::CreateConv2DBackpropFilterFusion( + _fbb, + format, + kernel_size__, + stride__, + dilation__, + pad_mode, + pad_list__, + mode, + group, + in_channel, + out_channel, + activation_type); +} + +struct Conv2DBackpropInputFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Conv2DBackpropInputFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_KERNEL_SIZE = 6, + VT_STRIDE = 8, + VT_DILATION = 10, + VT_PAD_MODE = 12, + VT_PAD = 14, + VT_PAD_LIST = 16, + VT_MODE = 18, + VT_GROUP = 20, + VT_IN_CHANNEL = 22, + VT_OUT_CHANNEL = 24, + VT_ACTIVATION_TYPE = 26 + }; + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *stride() const { + return GetPointer *>(VT_STRIDE); + } + const flatbuffers::Vector *dilation() const { + return GetPointer *>(VT_DILATION); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + const flatbuffers::Vector *pad() const { + return GetPointer *>(VT_PAD); + } + const flatbuffers::Vector *pad_list() const { + return GetPointer *>(VT_PAD_LIST); + } + int64_t mode() const { + return GetField(VT_MODE, 0); + } + int64_t 
group() const { + return GetField(VT_GROUP, 0); + } + int64_t in_channel() const { + return GetField(VT_IN_CHANNEL, 0); + } + int64_t out_channel() const { + return GetField(VT_OUT_CHANNEL, 0); + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDE) && + verifier.VerifyVector(stride()) && + VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyOffset(verifier, VT_PAD) && + verifier.VerifyVector(pad()) && + VerifyOffset(verifier, VT_PAD_LIST) && + verifier.VerifyVector(pad_list()) && + VerifyField(verifier, VT_MODE) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_IN_CHANNEL) && + VerifyField(verifier, VT_OUT_CHANNEL) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct Conv2DBackpropInputFusionBuilder { + typedef Conv2DBackpropInputFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(Conv2DBackpropInputFusion::VT_FORMAT, static_cast(format), 0); + } + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(Conv2DBackpropInputFusion::VT_KERNEL_SIZE, kernel_size); + } + void add_stride(flatbuffers::Offset> stride) { + fbb_.AddOffset(Conv2DBackpropInputFusion::VT_STRIDE, stride); + } + void add_dilation(flatbuffers::Offset> dilation) { + fbb_.AddOffset(Conv2DBackpropInputFusion::VT_DILATION, dilation); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(Conv2DBackpropInputFusion::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_pad(flatbuffers::Offset> pad) { + 
fbb_.AddOffset(Conv2DBackpropInputFusion::VT_PAD, pad); + } + void add_pad_list(flatbuffers::Offset> pad_list) { + fbb_.AddOffset(Conv2DBackpropInputFusion::VT_PAD_LIST, pad_list); + } + void add_mode(int64_t mode) { + fbb_.AddElement(Conv2DBackpropInputFusion::VT_MODE, mode, 0); + } + void add_group(int64_t group) { + fbb_.AddElement(Conv2DBackpropInputFusion::VT_GROUP, group, 0); + } + void add_in_channel(int64_t in_channel) { + fbb_.AddElement(Conv2DBackpropInputFusion::VT_IN_CHANNEL, in_channel, 0); + } + void add_out_channel(int64_t out_channel) { + fbb_.AddElement(Conv2DBackpropInputFusion::VT_OUT_CHANNEL, out_channel, 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(Conv2DBackpropInputFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit Conv2DBackpropInputFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv2DBackpropInputFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> stride = 0, + flatbuffers::Offset> dilation = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + flatbuffers::Offset> pad = 0, + flatbuffers::Offset> pad_list = 0, + int64_t mode = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + Conv2DBackpropInputFusionBuilder builder_(_fbb); + builder_.add_out_channel(out_channel); + builder_.add_in_channel(in_channel); + builder_.add_group(group); + builder_.add_mode(mode); + builder_.add_pad_list(pad_list); + builder_.add_pad(pad); + builder_.add_dilation(dilation); + 
builder_.add_stride(stride); + builder_.add_kernel_size(kernel_size); + builder_.add_format(format); + builder_.add_activation_type(activation_type); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateConv2DBackpropInputFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + const std::vector *kernel_size = nullptr, + const std::vector *stride = nullptr, + const std::vector *dilation = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + const std::vector *pad = nullptr, + const std::vector *pad_list = nullptr, + int64_t mode = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? _fbb.CreateVector(*dilation) : 0; + auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; + auto pad_list__ = pad_list ? 
_fbb.CreateVector(*pad_list) : 0; + return mindspore::schema::CreateConv2DBackpropInputFusion( + _fbb, + format, + kernel_size__, + stride__, + dilation__, + pad_mode, + pad__, + pad_list__, + mode, + group, + in_channel, + out_channel, + activation_type); +} + +struct Conv2DFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Conv2DFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_KERNEL_SIZE = 6, + VT_STRIDE = 8, + VT_DILATION = 10, + VT_PAD_MODE = 12, + VT_PAD_LIST = 14, + VT_MODE = 16, + VT_GROUP = 18, + VT_IN_CHANNEL = 20, + VT_OUT_CHANNEL = 22, + VT_ACTIVATION_TYPE = 24 + }; + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *stride() const { + return GetPointer *>(VT_STRIDE); + } + const flatbuffers::Vector *dilation() const { + return GetPointer *>(VT_DILATION); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + const flatbuffers::Vector *pad_list() const { + return GetPointer *>(VT_PAD_LIST); + } + int64_t mode() const { + return GetField(VT_MODE, 0); + } + int64_t group() const { + return GetField(VT_GROUP, 0); + } + int64_t in_channel() const { + return GetField(VT_IN_CHANNEL, 0); + } + int64_t out_channel() const { + return GetField(VT_OUT_CHANNEL, 0); + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDE) && + verifier.VerifyVector(stride()) && + VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && + 
VerifyField(verifier, VT_PAD_MODE) && + VerifyOffset(verifier, VT_PAD_LIST) && + verifier.VerifyVector(pad_list()) && + VerifyField(verifier, VT_MODE) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_IN_CHANNEL) && + VerifyField(verifier, VT_OUT_CHANNEL) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct Conv2DFusionBuilder { + typedef Conv2DFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(Conv2DFusion::VT_FORMAT, static_cast(format), 0); + } + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(Conv2DFusion::VT_KERNEL_SIZE, kernel_size); + } + void add_stride(flatbuffers::Offset> stride) { + fbb_.AddOffset(Conv2DFusion::VT_STRIDE, stride); + } + void add_dilation(flatbuffers::Offset> dilation) { + fbb_.AddOffset(Conv2DFusion::VT_DILATION, dilation); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(Conv2DFusion::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_pad_list(flatbuffers::Offset> pad_list) { + fbb_.AddOffset(Conv2DFusion::VT_PAD_LIST, pad_list); + } + void add_mode(int64_t mode) { + fbb_.AddElement(Conv2DFusion::VT_MODE, mode, 0); + } + void add_group(int64_t group) { + fbb_.AddElement(Conv2DFusion::VT_GROUP, group, 0); + } + void add_in_channel(int64_t in_channel) { + fbb_.AddElement(Conv2DFusion::VT_IN_CHANNEL, in_channel, 0); + } + void add_out_channel(int64_t out_channel) { + fbb_.AddElement(Conv2DFusion::VT_OUT_CHANNEL, out_channel, 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(Conv2DFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit Conv2DFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv2DFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> stride = 0, + flatbuffers::Offset> dilation = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + flatbuffers::Offset> pad_list = 0, + int64_t mode = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + Conv2DFusionBuilder builder_(_fbb); + builder_.add_out_channel(out_channel); + builder_.add_in_channel(in_channel); + builder_.add_group(group); + builder_.add_mode(mode); + builder_.add_pad_list(pad_list); + builder_.add_dilation(dilation); + builder_.add_stride(stride); + builder_.add_kernel_size(kernel_size); + builder_.add_format(format); + builder_.add_activation_type(activation_type); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateConv2DFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + const std::vector *kernel_size = nullptr, + const std::vector *stride = nullptr, + const std::vector *dilation = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + const std::vector *pad_list = nullptr, + int64_t mode = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? _fbb.CreateVector(*dilation) : 0; + auto pad_list__ = pad_list ? 
_fbb.CreateVector(*pad_list) : 0; + return mindspore::schema::CreateConv2DFusion( + _fbb, + format, + kernel_size__, + stride__, + dilation__, + pad_mode, + pad_list__, + mode, + group, + in_channel, + out_channel, + activation_type); +} + +struct Conv2dTransposeFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Conv2dTransposeFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_KERNEL_SIZE = 6, + VT_STRIDE = 8, + VT_DILATION = 10, + VT_PAD_MODE = 12, + VT_PAD = 14, + VT_PAD_LIST = 16, + VT_MODE = 18, + VT_GROUP = 20, + VT_IN_CHANNEL = 22, + VT_OUT_CHANNEL = 24, + VT_ACTIVATION_TYPE = 26, + VT_OUTPUT_PADDINGS = 28 + }; + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *stride() const { + return GetPointer *>(VT_STRIDE); + } + const flatbuffers::Vector *dilation() const { + return GetPointer *>(VT_DILATION); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + const flatbuffers::Vector *pad() const { + return GetPointer *>(VT_PAD); + } + const flatbuffers::Vector *pad_list() const { + return GetPointer *>(VT_PAD_LIST); + } + int64_t mode() const { + return GetField(VT_MODE, 0); + } + int64_t group() const { + return GetField(VT_GROUP, 0); + } + int64_t in_channel() const { + return GetField(VT_IN_CHANNEL, 0); + } + int64_t out_channel() const { + return GetField(VT_OUT_CHANNEL, 0); + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + const flatbuffers::Vector *output_paddings() const { + return GetPointer *>(VT_OUTPUT_PADDINGS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyOffset(verifier, 
VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDE) && + verifier.VerifyVector(stride()) && + VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyOffset(verifier, VT_PAD) && + verifier.VerifyVector(pad()) && + VerifyOffset(verifier, VT_PAD_LIST) && + verifier.VerifyVector(pad_list()) && + VerifyField(verifier, VT_MODE) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_IN_CHANNEL) && + VerifyField(verifier, VT_OUT_CHANNEL) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + VerifyOffset(verifier, VT_OUTPUT_PADDINGS) && + verifier.VerifyVector(output_paddings()) && + verifier.EndTable(); + } +}; + +struct Conv2dTransposeFusionBuilder { + typedef Conv2dTransposeFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(Conv2dTransposeFusion::VT_FORMAT, static_cast(format), 0); + } + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(Conv2dTransposeFusion::VT_KERNEL_SIZE, kernel_size); + } + void add_stride(flatbuffers::Offset> stride) { + fbb_.AddOffset(Conv2dTransposeFusion::VT_STRIDE, stride); + } + void add_dilation(flatbuffers::Offset> dilation) { + fbb_.AddOffset(Conv2dTransposeFusion::VT_DILATION, dilation); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(Conv2dTransposeFusion::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_pad(flatbuffers::Offset> pad) { + fbb_.AddOffset(Conv2dTransposeFusion::VT_PAD, pad); + } + void add_pad_list(flatbuffers::Offset> pad_list) { + fbb_.AddOffset(Conv2dTransposeFusion::VT_PAD_LIST, pad_list); + } + void add_mode(int64_t mode) { + fbb_.AddElement(Conv2dTransposeFusion::VT_MODE, mode, 0); + } + void add_group(int64_t group) { + fbb_.AddElement(Conv2dTransposeFusion::VT_GROUP, group, 0); + } + void add_in_channel(int64_t in_channel) { + 
fbb_.AddElement(Conv2dTransposeFusion::VT_IN_CHANNEL, in_channel, 0); + } + void add_out_channel(int64_t out_channel) { + fbb_.AddElement(Conv2dTransposeFusion::VT_OUT_CHANNEL, out_channel, 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(Conv2dTransposeFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + void add_output_paddings(flatbuffers::Offset> output_paddings) { + fbb_.AddOffset(Conv2dTransposeFusion::VT_OUTPUT_PADDINGS, output_paddings); + } + explicit Conv2dTransposeFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv2dTransposeFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> stride = 0, + flatbuffers::Offset> dilation = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + flatbuffers::Offset> pad = 0, + flatbuffers::Offset> pad_list = 0, + int64_t mode = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION, + flatbuffers::Offset> output_paddings = 0) { + Conv2dTransposeFusionBuilder builder_(_fbb); + builder_.add_out_channel(out_channel); + builder_.add_in_channel(in_channel); + builder_.add_group(group); + builder_.add_mode(mode); + builder_.add_output_paddings(output_paddings); + builder_.add_pad_list(pad_list); + builder_.add_pad(pad); + builder_.add_dilation(dilation); + builder_.add_stride(stride); + builder_.add_kernel_size(kernel_size); + builder_.add_format(format); + builder_.add_activation_type(activation_type); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + 
+inline flatbuffers::Offset CreateConv2dTransposeFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + const std::vector *kernel_size = nullptr, + const std::vector *stride = nullptr, + const std::vector *dilation = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + const std::vector *pad = nullptr, + const std::vector *pad_list = nullptr, + int64_t mode = 0, + int64_t group = 0, + int64_t in_channel = 0, + int64_t out_channel = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION, + const std::vector *output_paddings = nullptr) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? _fbb.CreateVector(*dilation) : 0; + auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; + auto pad_list__ = pad_list ? _fbb.CreateVector(*pad_list) : 0; + auto output_paddings__ = output_paddings ? 
_fbb.CreateVector(*output_paddings) : 0; + return mindspore::schema::CreateConv2dTransposeFusion( + _fbb, + format, + kernel_size__, + stride__, + dilation__, + pad_mode, + pad__, + pad_list__, + mode, + group, + in_channel, + out_channel, + activation_type, + output_paddings__); +} + +struct Cos FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CosBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct CosBuilder { + typedef Cos Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CosBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCos( + flatbuffers::FlatBufferBuilder &_fbb) { + CosBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ConstantOfShape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ConstantOfShapeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATA_TYPE = 4, + VT_VALUE = 6 + }; + int64_t data_type() const { + return GetField(VT_DATA_TYPE, 0); + } + const flatbuffers::Vector *value() const { + return GetPointer *>(VT_VALUE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DATA_TYPE) && + VerifyOffset(verifier, VT_VALUE) && + verifier.VerifyVector(value()) && + verifier.EndTable(); + } +}; + +struct ConstantOfShapeBuilder { + typedef ConstantOfShape Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_data_type(int64_t data_type) { + fbb_.AddElement(ConstantOfShape::VT_DATA_TYPE, data_type, 0); + } + void add_value(flatbuffers::Offset> value) { + fbb_.AddOffset(ConstantOfShape::VT_VALUE, value); + } + explicit 
ConstantOfShapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConstantOfShape( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t data_type = 0, + flatbuffers::Offset> value = 0) { + ConstantOfShapeBuilder builder_(_fbb); + builder_.add_data_type(data_type); + builder_.add_value(value); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateConstantOfShapeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t data_type = 0, + const std::vector *value = nullptr) { + auto value__ = value ? _fbb.CreateVector(*value) : 0; + return mindspore::schema::CreateConstantOfShape( + _fbb, + data_type, + value__); +} + +struct Crop FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CropBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_OFFSETS = 6 + }; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + const flatbuffers::Vector *offsets() const { + return GetPointer *>(VT_OFFSETS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyOffset(verifier, VT_OFFSETS) && + verifier.VerifyVector(offsets()) && + verifier.EndTable(); + } +}; + +struct CropBuilder { + typedef Crop Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(Crop::VT_AXIS, axis, 0); + } + void add_offsets(flatbuffers::Offset> offsets) { + fbb_.AddOffset(Crop::VT_OFFSETS, offsets); + } + explicit CropBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline 
flatbuffers::Offset CreateCrop( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0, + flatbuffers::Offset> offsets = 0) { + CropBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_offsets(offsets); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateCropDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0, + const std::vector *offsets = nullptr) { + auto offsets__ = offsets ? _fbb.CreateVector(*offsets) : 0; + return mindspore::schema::CreateCrop( + _fbb, + axis, + offsets__); +} + +struct CustomExtractFeatures FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CustomExtractFeaturesBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct CustomExtractFeaturesBuilder { + typedef CustomExtractFeatures Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CustomExtractFeaturesBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCustomExtractFeatures( + flatbuffers::FlatBufferBuilder &_fbb) { + CustomExtractFeaturesBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct CustomNormalize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CustomNormalizeBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct CustomNormalizeBuilder { + typedef CustomNormalize Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CustomNormalizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCustomNormalize( + flatbuffers::FlatBufferBuilder &_fbb) { + CustomNormalizeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct CustomPredict FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CustomPredictBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT_NUM = 4, + VT_WEIGHT_THRESHOLD = 6 + }; + int64_t output_num() const { + return GetField(VT_OUTPUT_NUM, 0); + } + float weight_threshold() const { + return GetField(VT_WEIGHT_THRESHOLD, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUT_NUM) && + VerifyField(verifier, VT_WEIGHT_THRESHOLD) && + verifier.EndTable(); + } +}; + +struct CustomPredictBuilder { + typedef CustomPredict Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output_num(int64_t output_num) { + fbb_.AddElement(CustomPredict::VT_OUTPUT_NUM, output_num, 0); + } + void add_weight_threshold(float weight_threshold) { + fbb_.AddElement(CustomPredict::VT_WEIGHT_THRESHOLD, weight_threshold, 0.0f); + } + explicit CustomPredictBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCustomPredict( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t output_num = 0, + float weight_threshold = 0.0f) { + CustomPredictBuilder builder_(_fbb); + builder_.add_output_num(output_num); + builder_.add_weight_threshold(weight_threshold); + return builder_.Finish(); +} + +struct DeConv2DGradFilter FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DeConv2DGradFilterBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_IN_CHANNEL = 4, + 
VT_OUT_CHANNEL = 6, + VT_KERNEL_SIZE = 8, + VT_PAD_MODE = 10, + VT_PAD_LIST = 12, + VT_STRIDE = 14, + VT_DILATION = 16, + VT_GROUP = 18, + VT_FORMAT = 20, + VT_ACTIVATION_TYPE = 22 + }; + int64_t in_channel() const { + return GetField(VT_IN_CHANNEL, 0); + } + int64_t out_channel() const { + return GetField(VT_OUT_CHANNEL, 0); + } + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + const flatbuffers::Vector *pad_list() const { + return GetPointer *>(VT_PAD_LIST); + } + const flatbuffers::Vector *stride() const { + return GetPointer *>(VT_STRIDE); + } + const flatbuffers::Vector *dilation() const { + return GetPointer *>(VT_DILATION); + } + int64_t group() const { + return GetField(VT_GROUP, 0); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_IN_CHANNEL) && + VerifyField(verifier, VT_OUT_CHANNEL) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyOffset(verifier, VT_PAD_LIST) && + verifier.VerifyVector(pad_list()) && + VerifyOffset(verifier, VT_STRIDE) && + verifier.VerifyVector(stride()) && + VerifyOffset(verifier, VT_DILATION) && + verifier.VerifyVector(dilation()) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct DeConv2DGradFilterBuilder { + typedef DeConv2DGradFilter Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_in_channel(int64_t in_channel) { + 
fbb_.AddElement(DeConv2DGradFilter::VT_IN_CHANNEL, in_channel, 0); + } + void add_out_channel(int64_t out_channel) { + fbb_.AddElement(DeConv2DGradFilter::VT_OUT_CHANNEL, out_channel, 0); + } + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(DeConv2DGradFilter::VT_KERNEL_SIZE, kernel_size); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(DeConv2DGradFilter::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_pad_list(flatbuffers::Offset> pad_list) { + fbb_.AddOffset(DeConv2DGradFilter::VT_PAD_LIST, pad_list); + } + void add_stride(flatbuffers::Offset> stride) { + fbb_.AddOffset(DeConv2DGradFilter::VT_STRIDE, stride); + } + void add_dilation(flatbuffers::Offset> dilation) { + fbb_.AddOffset(DeConv2DGradFilter::VT_DILATION, dilation); + } + void add_group(int64_t group) { + fbb_.AddElement(DeConv2DGradFilter::VT_GROUP, group, 0); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(DeConv2DGradFilter::VT_FORMAT, static_cast(format), 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(DeConv2DGradFilter::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit DeConv2DGradFilterBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDeConv2DGradFilter( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t in_channel = 0, + int64_t out_channel = 0, + flatbuffers::Offset> kernel_size = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + flatbuffers::Offset> pad_list = 0, + flatbuffers::Offset> stride = 0, + flatbuffers::Offset> dilation = 0, + int64_t group = 0, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + mindspore::schema::ActivationType activation_type = 
mindspore::schema::ActivationType_NO_ACTIVATION) { + DeConv2DGradFilterBuilder builder_(_fbb); + builder_.add_group(group); + builder_.add_out_channel(out_channel); + builder_.add_in_channel(in_channel); + builder_.add_format(format); + builder_.add_dilation(dilation); + builder_.add_stride(stride); + builder_.add_pad_list(pad_list); + builder_.add_kernel_size(kernel_size); + builder_.add_activation_type(activation_type); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateDeConv2DGradFilterDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t in_channel = 0, + int64_t out_channel = 0, + const std::vector *kernel_size = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + const std::vector *pad_list = nullptr, + const std::vector *stride = nullptr, + const std::vector *dilation = nullptr, + int64_t group = 0, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto pad_list__ = pad_list ? _fbb.CreateVector(*pad_list) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto dilation__ = dilation ? 
_fbb.CreateVector(*dilation) : 0; + return mindspore::schema::CreateDeConv2DGradFilter( + _fbb, + in_channel, + out_channel, + kernel_size__, + pad_mode, + pad_list__, + stride__, + dilation__, + group, + format, + activation_type); +} + +struct Depend FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DependBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct DependBuilder { + typedef Depend Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit DependBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDepend( + flatbuffers::FlatBufferBuilder &_fbb) { + DependBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct DepthToSpace FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DepthToSpaceBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4, + VT_FORMAT = 6 + }; + int64_t block_size() const { + return GetField(VT_BLOCK_SIZE, 0); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCK_SIZE) && + VerifyField(verifier, VT_FORMAT) && + verifier.EndTable(); + } +}; + +struct DepthToSpaceBuilder { + typedef DepthToSpace Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int64_t block_size) { + fbb_.AddElement(DepthToSpace::VT_BLOCK_SIZE, block_size, 0); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(DepthToSpace::VT_FORMAT, static_cast(format), 0); + } + explicit 
DepthToSpaceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDepthToSpace( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t block_size = 0, + mindspore::schema::Format format = mindspore::schema::Format_NCHW) { + DepthToSpaceBuilder builder_(_fbb); + builder_.add_block_size(block_size); + builder_.add_format(format); + return builder_.Finish(); +} + +struct DetectionPostProcess FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DetectionPostProcessBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_INPUT_SIZE = 6, + VT_SCALE = 8, + VT_NMS_IOU_THRESHOLD = 10, + VT_NMS_SCORE_THRESHOLD = 12, + VT_MAX_DETECTIONS = 14, + VT_DETECTIONS_PER_CLASS = 16, + VT_MAX_CLASSES_PER_DETECTION = 18, + VT_NUM_CLASSES = 20, + VT_USE_REGULAR_NMS = 22, + VT_OUT_QUANTIZED = 24 + }; + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int64_t input_size() const { + return GetField(VT_INPUT_SIZE, 0); + } + const flatbuffers::Vector *scale() const { + return GetPointer *>(VT_SCALE); + } + float nms_iou_threshold() const { + return GetField(VT_NMS_IOU_THRESHOLD, 0.0f); + } + float nms_score_threshold() const { + return GetField(VT_NMS_SCORE_THRESHOLD, 0.0f); + } + int64_t max_detections() const { + return GetField(VT_MAX_DETECTIONS, 0); + } + int64_t detections_per_class() const { + return GetField(VT_DETECTIONS_PER_CLASS, 0); + } + int64_t max_classes_per_detection() const { + return GetField(VT_MAX_CLASSES_PER_DETECTION, 0); + } + int64_t num_classes() const { + return GetField(VT_NUM_CLASSES, 0); + } + bool use_regular_nms() const { + return GetField(VT_USE_REGULAR_NMS, 0) != 0; + } + bool out_quantized() const { + return GetField(VT_OUT_QUANTIZED, 0) != 0; + } + 
bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_INPUT_SIZE) && + VerifyOffset(verifier, VT_SCALE) && + verifier.VerifyVector(scale()) && + VerifyField(verifier, VT_NMS_IOU_THRESHOLD) && + VerifyField(verifier, VT_NMS_SCORE_THRESHOLD) && + VerifyField(verifier, VT_MAX_DETECTIONS) && + VerifyField(verifier, VT_DETECTIONS_PER_CLASS) && + VerifyField(verifier, VT_MAX_CLASSES_PER_DETECTION) && + VerifyField(verifier, VT_NUM_CLASSES) && + VerifyField(verifier, VT_USE_REGULAR_NMS) && + VerifyField(verifier, VT_OUT_QUANTIZED) && + verifier.EndTable(); + } +}; + +struct DetectionPostProcessBuilder { + typedef DetectionPostProcess Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(DetectionPostProcess::VT_FORMAT, static_cast(format), 0); + } + void add_input_size(int64_t input_size) { + fbb_.AddElement(DetectionPostProcess::VT_INPUT_SIZE, input_size, 0); + } + void add_scale(flatbuffers::Offset> scale) { + fbb_.AddOffset(DetectionPostProcess::VT_SCALE, scale); + } + void add_nms_iou_threshold(float nms_iou_threshold) { + fbb_.AddElement(DetectionPostProcess::VT_NMS_IOU_THRESHOLD, nms_iou_threshold, 0.0f); + } + void add_nms_score_threshold(float nms_score_threshold) { + fbb_.AddElement(DetectionPostProcess::VT_NMS_SCORE_THRESHOLD, nms_score_threshold, 0.0f); + } + void add_max_detections(int64_t max_detections) { + fbb_.AddElement(DetectionPostProcess::VT_MAX_DETECTIONS, max_detections, 0); + } + void add_detections_per_class(int64_t detections_per_class) { + fbb_.AddElement(DetectionPostProcess::VT_DETECTIONS_PER_CLASS, detections_per_class, 0); + } + void add_max_classes_per_detection(int64_t max_classes_per_detection) { + fbb_.AddElement(DetectionPostProcess::VT_MAX_CLASSES_PER_DETECTION, max_classes_per_detection, 0); + } + void add_num_classes(int64_t num_classes) 
{ + fbb_.AddElement(DetectionPostProcess::VT_NUM_CLASSES, num_classes, 0); + } + void add_use_regular_nms(bool use_regular_nms) { + fbb_.AddElement(DetectionPostProcess::VT_USE_REGULAR_NMS, static_cast(use_regular_nms), 0); + } + void add_out_quantized(bool out_quantized) { + fbb_.AddElement(DetectionPostProcess::VT_OUT_QUANTIZED, static_cast(out_quantized), 0); + } + explicit DetectionPostProcessBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDetectionPostProcess( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + int64_t input_size = 0, + flatbuffers::Offset> scale = 0, + float nms_iou_threshold = 0.0f, + float nms_score_threshold = 0.0f, + int64_t max_detections = 0, + int64_t detections_per_class = 0, + int64_t max_classes_per_detection = 0, + int64_t num_classes = 0, + bool use_regular_nms = false, + bool out_quantized = false) { + DetectionPostProcessBuilder builder_(_fbb); + builder_.add_num_classes(num_classes); + builder_.add_max_classes_per_detection(max_classes_per_detection); + builder_.add_detections_per_class(detections_per_class); + builder_.add_max_detections(max_detections); + builder_.add_input_size(input_size); + builder_.add_nms_score_threshold(nms_score_threshold); + builder_.add_nms_iou_threshold(nms_iou_threshold); + builder_.add_scale(scale); + builder_.add_format(format); + builder_.add_out_quantized(out_quantized); + builder_.add_use_regular_nms(use_regular_nms); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateDetectionPostProcessDirect( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + int64_t input_size = 0, + const std::vector *scale = nullptr, + float nms_iou_threshold = 0.0f, + float 
nms_score_threshold = 0.0f, + int64_t max_detections = 0, + int64_t detections_per_class = 0, + int64_t max_classes_per_detection = 0, + int64_t num_classes = 0, + bool use_regular_nms = false, + bool out_quantized = false) { + auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; + return mindspore::schema::CreateDetectionPostProcess( + _fbb, + format, + input_size, + scale__, + nms_iou_threshold, + nms_score_threshold, + max_detections, + detections_per_class, + max_classes_per_detection, + num_classes, + use_regular_nms, + out_quantized); +} + +struct DivFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DivFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACTIVATION_TYPE = 4 + }; + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct DivFusionBuilder { + typedef DivFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(DivFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit DivFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDivFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + DivFusionBuilder builder_(_fbb); + builder_.add_activation_type(activation_type); + return builder_.Finish(); +} + +struct DivGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef 
DivGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct DivGradBuilder { + typedef DivGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit DivGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDivGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + DivGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Dropout FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DropoutBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEEP_PROB = 4 + }; + float keep_prob() const { + return GetField(VT_KEEP_PROB, 0.5f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KEEP_PROB) && + verifier.EndTable(); + } +}; + +struct DropoutBuilder { + typedef Dropout Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_keep_prob(float keep_prob) { + fbb_.AddElement(Dropout::VT_KEEP_PROB, keep_prob, 0.5f); + } + explicit DropoutBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDropout( + flatbuffers::FlatBufferBuilder &_fbb, + float keep_prob = 0.5f) { + DropoutBuilder builder_(_fbb); + builder_.add_keep_prob(keep_prob); + return builder_.Finish(); +} + +struct DropoutGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DropoutGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEEP_PROB = 4 
+ }; + float keep_prob() const { + return GetField(VT_KEEP_PROB, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KEEP_PROB) && + verifier.EndTable(); + } +}; + +struct DropoutGradBuilder { + typedef DropoutGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_keep_prob(float keep_prob) { + fbb_.AddElement(DropoutGrad::VT_KEEP_PROB, keep_prob, 0.0f); + } + explicit DropoutGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDropoutGrad( + flatbuffers::FlatBufferBuilder &_fbb, + float keep_prob = 0.0f) { + DropoutGradBuilder builder_(_fbb); + builder_.add_keep_prob(keep_prob); + return builder_.Finish(); +} + +struct Elu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef EluBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALPHA = 4 + }; + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALPHA) && + verifier.EndTable(); + } +}; + +struct EluBuilder { + typedef Elu Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_alpha(float alpha) { + fbb_.AddElement(Elu::VT_ALPHA, alpha, 0.0f); + } + explicit EluBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateElu( + flatbuffers::FlatBufferBuilder &_fbb, + float alpha = 0.0f) { + EluBuilder builder_(_fbb); + builder_.add_alpha(alpha); + return 
builder_.Finish(); +} + +struct Eltwise FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef EltwiseBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MODE = 4 + }; + mindspore::schema::EltwiseMode mode() const { + return static_cast(GetField(VT_MODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MODE) && + verifier.EndTable(); + } +}; + +struct EltwiseBuilder { + typedef Eltwise Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_mode(mindspore::schema::EltwiseMode mode) { + fbb_.AddElement(Eltwise::VT_MODE, static_cast(mode), 0); + } + explicit EltwiseBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEltwise( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::EltwiseMode mode = mindspore::schema::EltwiseMode_PROD) { + EltwiseBuilder builder_(_fbb); + builder_.add_mode(mode); + return builder_.Finish(); +} + +struct Equal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef EqualBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct EqualBuilder { + typedef Equal Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit EqualBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEqual( + flatbuffers::FlatBufferBuilder &_fbb) { + EqualBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct EmbeddingLookupFusion 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef EmbeddingLookupFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MAX_NORM = 4 + }; + float max_norm() const { + return GetField(VT_MAX_NORM, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MAX_NORM) && + verifier.EndTable(); + } +}; + +struct EmbeddingLookupFusionBuilder { + typedef EmbeddingLookupFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_max_norm(float max_norm) { + fbb_.AddElement(EmbeddingLookupFusion::VT_MAX_NORM, max_norm, 0.0f); + } + explicit EmbeddingLookupFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEmbeddingLookupFusion( + flatbuffers::FlatBufferBuilder &_fbb, + float max_norm = 0.0f) { + EmbeddingLookupFusionBuilder builder_(_fbb); + builder_.add_max_norm(max_norm); + return builder_.Finish(); +} + +struct ExpFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ExpFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BASE = 4, + VT_SCALE = 6, + VT_SHIFT = 8 + }; + float base() const { + return GetField(VT_BASE, -1.0f); + } + float scale() const { + return GetField(VT_SCALE, 1.0f); + } + float shift() const { + return GetField(VT_SHIFT, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BASE) && + VerifyField(verifier, VT_SCALE) && + VerifyField(verifier, VT_SHIFT) && + verifier.EndTable(); + } +}; + +struct ExpFusionBuilder { + typedef ExpFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_base(float 
base) { + fbb_.AddElement(ExpFusion::VT_BASE, base, -1.0f); + } + void add_scale(float scale) { + fbb_.AddElement(ExpFusion::VT_SCALE, scale, 1.0f); + } + void add_shift(float shift) { + fbb_.AddElement(ExpFusion::VT_SHIFT, shift, 0.0f); + } + explicit ExpFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateExpFusion( + flatbuffers::FlatBufferBuilder &_fbb, + float base = -1.0f, + float scale = 1.0f, + float shift = 0.0f) { + ExpFusionBuilder builder_(_fbb); + builder_.add_shift(shift); + builder_.add_scale(scale); + builder_.add_base(base); + return builder_.Finish(); +} + +struct ExpandDims FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ExpandDimsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ExpandDimsBuilder { + typedef ExpandDims Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ExpandDimsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateExpandDims( + flatbuffers::FlatBufferBuilder &_fbb) { + ExpandDimsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FakeQuantWithMinMaxVars FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FakeQuantWithMinMaxVarsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM_BITS = 4, + VT_NARROW_RANGE = 6 + }; + int64_t num_bits() const { + return GetField(VT_NUM_BITS, 0); + } + bool narrow_range() const { + return GetField(VT_NARROW_RANGE, 0) != 0; + } + bool 
Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM_BITS) && + VerifyField(verifier, VT_NARROW_RANGE) && + verifier.EndTable(); + } +}; + +struct FakeQuantWithMinMaxVarsBuilder { + typedef FakeQuantWithMinMaxVars Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_bits(int64_t num_bits) { + fbb_.AddElement(FakeQuantWithMinMaxVars::VT_NUM_BITS, num_bits, 0); + } + void add_narrow_range(bool narrow_range) { + fbb_.AddElement(FakeQuantWithMinMaxVars::VT_NARROW_RANGE, static_cast(narrow_range), 0); + } + explicit FakeQuantWithMinMaxVarsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFakeQuantWithMinMaxVars( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t num_bits = 0, + bool narrow_range = false) { + FakeQuantWithMinMaxVarsBuilder builder_(_fbb); + builder_.add_num_bits(num_bits); + builder_.add_narrow_range(narrow_range); + return builder_.Finish(); +} + +struct FakeQuantWithMinMaxVarsPerChannel FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FakeQuantWithMinMaxVarsPerChannelBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM_BITS = 4, + VT_NARROW_RANGE = 6 + }; + int64_t num_bits() const { + return GetField(VT_NUM_BITS, 0); + } + bool narrow_range() const { + return GetField(VT_NARROW_RANGE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM_BITS) && + VerifyField(verifier, VT_NARROW_RANGE) && + verifier.EndTable(); + } +}; + +struct FakeQuantWithMinMaxVarsPerChannelBuilder { + typedef FakeQuantWithMinMaxVarsPerChannel Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + 
void add_num_bits(int64_t num_bits) { + fbb_.AddElement(FakeQuantWithMinMaxVarsPerChannel::VT_NUM_BITS, num_bits, 0); + } + void add_narrow_range(bool narrow_range) { + fbb_.AddElement(FakeQuantWithMinMaxVarsPerChannel::VT_NARROW_RANGE, static_cast(narrow_range), 0); + } + explicit FakeQuantWithMinMaxVarsPerChannelBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFakeQuantWithMinMaxVarsPerChannel( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t num_bits = 0, + bool narrow_range = false) { + FakeQuantWithMinMaxVarsPerChannelBuilder builder_(_fbb); + builder_.add_num_bits(num_bits); + builder_.add_narrow_range(narrow_range); + return builder_.Finish(); +} + +struct FftReal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FftRealBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FftRealBuilder { + typedef FftReal Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FftRealBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFftReal( + flatbuffers::FlatBufferBuilder &_fbb) { + FftRealBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FftImag FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FftImagBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FftImagBuilder { + typedef FftImag Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + 
explicit FftImagBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFftImag( + flatbuffers::FlatBufferBuilder &_fbb) { + FftImagBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Flatten FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FlattenBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FlattenBuilder { + typedef Flatten Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FlattenBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFlatten( + flatbuffers::FlatBufferBuilder &_fbb) { + FlattenBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FlattenGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FlattenGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FlattenGradBuilder { + typedef FlattenGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FlattenGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFlattenGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + FlattenGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Floor FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table { + typedef FloorBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FloorBuilder { + typedef Floor Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFloor( + flatbuffers::FlatBufferBuilder &_fbb) { + FloorBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FloorDiv FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FloorDivBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FloorDivBuilder { + typedef FloorDiv Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorDivBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFloorDiv( + flatbuffers::FlatBufferBuilder &_fbb) { + FloorDivBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FloorMod FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FloorModBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FloorModBuilder { + typedef FloorMod Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorModBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFloorMod( + flatbuffers::FlatBufferBuilder &_fbb) { + FloorModBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Fill FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FillBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FillBuilder { + typedef Fill Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FillBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFill( + flatbuffers::FlatBufferBuilder &_fbb) { + FillBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FullConnection FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FullConnectionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_HAS_BIAS = 4, + VT_USE_AXIS = 6, + VT_AXIS = 8, + VT_ACTIVATION_TYPE = 10 + }; + bool has_bias() const { + return GetField(VT_HAS_BIAS, 0) != 0; + } + bool use_axis() const { + return GetField(VT_USE_AXIS, 0) != 0; + } + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_HAS_BIAS) && + VerifyField(verifier, VT_USE_AXIS) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct FullConnectionBuilder { + typedef FullConnection Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_has_bias(bool has_bias) { + fbb_.AddElement(FullConnection::VT_HAS_BIAS, static_cast(has_bias), 0); + } + void add_use_axis(bool use_axis) { + fbb_.AddElement(FullConnection::VT_USE_AXIS, static_cast(use_axis), 0); + } + void add_axis(int64_t axis) { + fbb_.AddElement(FullConnection::VT_AXIS, axis, 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(FullConnection::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit FullConnectionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFullConnection( + flatbuffers::FlatBufferBuilder &_fbb, + bool has_bias = false, + bool use_axis = false, + int64_t axis = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + FullConnectionBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_activation_type(activation_type); + builder_.add_use_axis(use_axis); + builder_.add_has_bias(has_bias); + return builder_.Finish(); +} + +struct FusedBatchNorm FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FusedBatchNormBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EPSILON = 4, + VT_MOMENTUM = 6, + VT_MODE = 8 + }; + float epsilon() const { + return GetField(VT_EPSILON, 0.0001f); + } + float momentum() const { + return GetField(VT_MOMENTUM, 0.9f); + } + int64_t mode() const { + return GetField(VT_MODE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EPSILON) && + VerifyField(verifier, VT_MOMENTUM) && + VerifyField(verifier, VT_MODE) && + verifier.EndTable(); + } +}; + +struct FusedBatchNormBuilder { + typedef FusedBatchNorm Table; + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_epsilon(float epsilon) { + fbb_.AddElement(FusedBatchNorm::VT_EPSILON, epsilon, 0.0001f); + } + void add_momentum(float momentum) { + fbb_.AddElement(FusedBatchNorm::VT_MOMENTUM, momentum, 0.9f); + } + void add_mode(int64_t mode) { + fbb_.AddElement(FusedBatchNorm::VT_MODE, mode, 0); + } + explicit FusedBatchNormBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFusedBatchNorm( + flatbuffers::FlatBufferBuilder &_fbb, + float epsilon = 0.0001f, + float momentum = 0.9f, + int64_t mode = 0) { + FusedBatchNormBuilder builder_(_fbb); + builder_.add_mode(mode); + builder_.add_momentum(momentum); + builder_.add_epsilon(epsilon); + return builder_.Finish(); +} + +struct Gather FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GatherBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct GatherBuilder { + typedef Gather Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GatherBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGather( + flatbuffers::FlatBufferBuilder &_fbb) { + GatherBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct GatherNd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GatherNdBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct GatherNdBuilder { + typedef GatherNd Table; + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GatherNdBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGatherNd( + flatbuffers::FlatBufferBuilder &_fbb) { + GatherNdBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Greater FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GreaterBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct GreaterBuilder { + typedef Greater Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGreater( + flatbuffers::FlatBufferBuilder &_fbb) { + GreaterBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct GreaterEqual FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GreaterEqualBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct GreaterEqualBuilder { + typedef GreaterEqual Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterEqualBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGreaterEqual( + flatbuffers::FlatBufferBuilder &_fbb) { + GreaterEqualBuilder builder_(_fbb); + return 
builder_.Finish(); +} + +struct HashtableLookup FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HashtableLookupBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct HashtableLookupBuilder { + typedef HashtableLookup Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HashtableLookupBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableLookup( + flatbuffers::FlatBufferBuilder &_fbb) { + HashtableLookupBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct InstanceNorm FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef InstanceNormBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EPSILON = 4 + }; + float epsilon() const { + return GetField(VT_EPSILON, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EPSILON) && + verifier.EndTable(); + } +}; + +struct InstanceNormBuilder { + typedef InstanceNorm Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_epsilon(float epsilon) { + fbb_.AddElement(InstanceNorm::VT_EPSILON, epsilon, 0.0f); + } + explicit InstanceNormBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateInstanceNorm( + flatbuffers::FlatBufferBuilder &_fbb, + float epsilon = 0.0f) { + InstanceNormBuilder builder_(_fbb); + builder_.add_epsilon(epsilon); + return builder_.Finish(); +} + +struct 
LayerNormFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LayerNormFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BEGIN_NORM_AXIS = 4, + VT_EPSILON = 6, + VT_ELEMENTWISE_AFFINE = 8, + VT_BEGIN_PARAMS_AXIS = 10 + }; + int64_t begin_norm_axis() const { + return GetField(VT_BEGIN_NORM_AXIS, 0); + } + float epsilon() const { + return GetField(VT_EPSILON, 0.00001f); + } + bool elementwise_affine() const { + return GetField(VT_ELEMENTWISE_AFFINE, 0) != 0; + } + int64_t begin_params_axis() const { + return GetField(VT_BEGIN_PARAMS_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BEGIN_NORM_AXIS) && + VerifyField(verifier, VT_EPSILON) && + VerifyField(verifier, VT_ELEMENTWISE_AFFINE) && + VerifyField(verifier, VT_BEGIN_PARAMS_AXIS) && + verifier.EndTable(); + } +}; + +struct LayerNormFusionBuilder { + typedef LayerNormFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_begin_norm_axis(int64_t begin_norm_axis) { + fbb_.AddElement(LayerNormFusion::VT_BEGIN_NORM_AXIS, begin_norm_axis, 0); + } + void add_epsilon(float epsilon) { + fbb_.AddElement(LayerNormFusion::VT_EPSILON, epsilon, 0.00001f); + } + void add_elementwise_affine(bool elementwise_affine) { + fbb_.AddElement(LayerNormFusion::VT_ELEMENTWISE_AFFINE, static_cast(elementwise_affine), 0); + } + void add_begin_params_axis(int64_t begin_params_axis) { + fbb_.AddElement(LayerNormFusion::VT_BEGIN_PARAMS_AXIS, begin_params_axis, 0); + } + explicit LayerNormFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLayerNormFusion( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t begin_norm_axis = 0, + float 
epsilon = 0.00001f, + bool elementwise_affine = false, + int64_t begin_params_axis = 0) { + LayerNormFusionBuilder builder_(_fbb); + builder_.add_begin_params_axis(begin_params_axis); + builder_.add_begin_norm_axis(begin_norm_axis); + builder_.add_epsilon(epsilon); + builder_.add_elementwise_affine(elementwise_affine); + return builder_.Finish(); +} + +struct LeakyRelu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LeakyReluBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NEGATIVE_SLOPE = 4 + }; + float negative_slope() const { + return GetField(VT_NEGATIVE_SLOPE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NEGATIVE_SLOPE) && + verifier.EndTable(); + } +}; + +struct LeakyReluBuilder { + typedef LeakyRelu Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_negative_slope(float negative_slope) { + fbb_.AddElement(LeakyRelu::VT_NEGATIVE_SLOPE, negative_slope, 0.0f); + } + explicit LeakyReluBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLeakyRelu( + flatbuffers::FlatBufferBuilder &_fbb, + float negative_slope = 0.0f) { + LeakyReluBuilder builder_(_fbb); + builder_.add_negative_slope(negative_slope); + return builder_.Finish(); +} + +struct Less FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LessBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LessBuilder { + typedef Less Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = 
fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLess( + flatbuffers::FlatBufferBuilder &_fbb) { + LessBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LessEqual FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LessEqualBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LessEqualBuilder { + typedef LessEqual Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessEqualBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLessEqual( + flatbuffers::FlatBufferBuilder &_fbb) { + LessEqualBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Log FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LogBuilder { + typedef Log Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLog( + flatbuffers::FlatBufferBuilder &_fbb) { + LogBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + 
verifier.EndTable(); + } +}; + +struct LogGradBuilder { + typedef LogGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLogGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + LogGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalAnd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogicalAndBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LogicalAndBuilder { + typedef LogicalAnd Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalAndBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLogicalAnd( + flatbuffers::FlatBufferBuilder &_fbb) { + LogicalAndBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalNot FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogicalNotBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LogicalNotBuilder { + typedef LogicalNot Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalNotBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLogicalNot( + 
flatbuffers::FlatBufferBuilder &_fbb) { + LogicalNotBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalOr FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogicalOrBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LogicalOrBuilder { + typedef LogicalOr Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalOrBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLogicalOr( + flatbuffers::FlatBufferBuilder &_fbb) { + LogicalOrBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LpNormalization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LpNormalizationBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_P = 6 + }; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + int64_t p() const { + return GetField(VT_P, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_P) && + verifier.EndTable(); + } +}; + +struct LpNormalizationBuilder { + typedef LpNormalization Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(LpNormalization::VT_AXIS, axis, 0); + } + void add_p(int64_t p) { + fbb_.AddElement(LpNormalization::VT_P, p, 0); + } + explicit LpNormalizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset 
CreateLpNormalization( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0, + int64_t p = 0) { + LpNormalizationBuilder builder_(_fbb); + builder_.add_p(p); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct LRN FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LRNBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DEPTH_RADIUS = 4, + VT_BIAS = 6, + VT_ALPHA = 8, + VT_BETA = 10, + VT_NORM_REGION = 12 + }; + int64_t depth_radius() const { + return GetField(VT_DEPTH_RADIUS, 0); + } + float bias() const { + return GetField(VT_BIAS, 0.0f); + } + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + float beta() const { + return GetField(VT_BETA, 0.0f); + } + const flatbuffers::String *norm_region() const { + return GetPointer(VT_NORM_REGION); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DEPTH_RADIUS) && + VerifyField(verifier, VT_BIAS) && + VerifyField(verifier, VT_ALPHA) && + VerifyField(verifier, VT_BETA) && + VerifyOffset(verifier, VT_NORM_REGION) && + verifier.VerifyString(norm_region()) && + verifier.EndTable(); + } +}; + +struct LRNBuilder { + typedef LRN Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_depth_radius(int64_t depth_radius) { + fbb_.AddElement(LRN::VT_DEPTH_RADIUS, depth_radius, 0); + } + void add_bias(float bias) { + fbb_.AddElement(LRN::VT_BIAS, bias, 0.0f); + } + void add_alpha(float alpha) { + fbb_.AddElement(LRN::VT_ALPHA, alpha, 0.0f); + } + void add_beta(float beta) { + fbb_.AddElement(LRN::VT_BETA, beta, 0.0f); + } + void add_norm_region(flatbuffers::Offset norm_region) { + fbb_.AddOffset(LRN::VT_NORM_REGION, norm_region); + } + explicit LRNBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLRN( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t depth_radius = 0, + float bias = 0.0f, + float alpha = 0.0f, + float beta = 0.0f, + flatbuffers::Offset norm_region = 0) { + LRNBuilder builder_(_fbb); + builder_.add_depth_radius(depth_radius); + builder_.add_norm_region(norm_region); + builder_.add_beta(beta); + builder_.add_alpha(alpha); + builder_.add_bias(bias); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateLRNDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t depth_radius = 0, + float bias = 0.0f, + float alpha = 0.0f, + float beta = 0.0f, + const char *norm_region = nullptr) { + auto norm_region__ = norm_region ? _fbb.CreateString(norm_region) : 0; + return mindspore::schema::CreateLRN( + _fbb, + depth_radius, + bias, + alpha, + beta, + norm_region__); +} + +struct LshProjection FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LshProjectionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4 + }; + mindspore::schema::LshProjectionType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE) && + verifier.EndTable(); + } +}; + +struct LshProjectionBuilder { + typedef LshProjection Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(mindspore::schema::LshProjectionType type) { + fbb_.AddElement(LshProjection::VT_TYPE, static_cast(type), 0); + } + explicit LshProjectionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLshProjection( + flatbuffers::FlatBufferBuilder &_fbb, + 
mindspore::schema::LshProjectionType type = mindspore::schema::LshProjectionType_UNKNOWN) { + LshProjectionBuilder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +struct LSTM FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LSTMBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BIDIRECTIONAL = 4, + VT_HAS_BIAS = 6, + VT_INPUT_SIZE = 8, + VT_HIDDEN_SIZE = 10, + VT_NUM_LAYERS = 12, + VT_NUM_DIRECTIONS = 14, + VT_DROPOUT = 16, + VT_ZONEOUT_CELL = 18, + VT_ZONEOUT_HIDDEN = 20 + }; + bool bidirectional() const { + return GetField(VT_BIDIRECTIONAL, 0) != 0; + } + bool has_bias() const { + return GetField(VT_HAS_BIAS, 0) != 0; + } + int64_t input_size() const { + return GetField(VT_INPUT_SIZE, 0); + } + int64_t hidden_size() const { + return GetField(VT_HIDDEN_SIZE, 0); + } + int64_t num_layers() const { + return GetField(VT_NUM_LAYERS, 0); + } + int64_t num_directions() const { + return GetField(VT_NUM_DIRECTIONS, 0); + } + float dropout() const { + return GetField(VT_DROPOUT, 0.0f); + } + float zoneout_cell() const { + return GetField(VT_ZONEOUT_CELL, 0.0f); + } + float zoneout_hidden() const { + return GetField(VT_ZONEOUT_HIDDEN, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BIDIRECTIONAL) && + VerifyField(verifier, VT_HAS_BIAS) && + VerifyField(verifier, VT_INPUT_SIZE) && + VerifyField(verifier, VT_HIDDEN_SIZE) && + VerifyField(verifier, VT_NUM_LAYERS) && + VerifyField(verifier, VT_NUM_DIRECTIONS) && + VerifyField(verifier, VT_DROPOUT) && + VerifyField(verifier, VT_ZONEOUT_CELL) && + VerifyField(verifier, VT_ZONEOUT_HIDDEN) && + verifier.EndTable(); + } +}; + +struct LSTMBuilder { + typedef LSTM Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_bidirectional(bool bidirectional) { + fbb_.AddElement(LSTM::VT_BIDIRECTIONAL, static_cast(bidirectional), 0); + } + 
void add_has_bias(bool has_bias) { + fbb_.AddElement(LSTM::VT_HAS_BIAS, static_cast(has_bias), 0); + } + void add_input_size(int64_t input_size) { + fbb_.AddElement(LSTM::VT_INPUT_SIZE, input_size, 0); + } + void add_hidden_size(int64_t hidden_size) { + fbb_.AddElement(LSTM::VT_HIDDEN_SIZE, hidden_size, 0); + } + void add_num_layers(int64_t num_layers) { + fbb_.AddElement(LSTM::VT_NUM_LAYERS, num_layers, 0); + } + void add_num_directions(int64_t num_directions) { + fbb_.AddElement(LSTM::VT_NUM_DIRECTIONS, num_directions, 0); + } + void add_dropout(float dropout) { + fbb_.AddElement(LSTM::VT_DROPOUT, dropout, 0.0f); + } + void add_zoneout_cell(float zoneout_cell) { + fbb_.AddElement(LSTM::VT_ZONEOUT_CELL, zoneout_cell, 0.0f); + } + void add_zoneout_hidden(float zoneout_hidden) { + fbb_.AddElement(LSTM::VT_ZONEOUT_HIDDEN, zoneout_hidden, 0.0f); + } + explicit LSTMBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLSTM( + flatbuffers::FlatBufferBuilder &_fbb, + bool bidirectional = false, + bool has_bias = false, + int64_t input_size = 0, + int64_t hidden_size = 0, + int64_t num_layers = 0, + int64_t num_directions = 0, + float dropout = 0.0f, + float zoneout_cell = 0.0f, + float zoneout_hidden = 0.0f) { + LSTMBuilder builder_(_fbb); + builder_.add_num_directions(num_directions); + builder_.add_num_layers(num_layers); + builder_.add_hidden_size(hidden_size); + builder_.add_input_size(input_size); + builder_.add_zoneout_hidden(zoneout_hidden); + builder_.add_zoneout_cell(zoneout_cell); + builder_.add_dropout(dropout); + builder_.add_has_bias(has_bias); + builder_.add_bidirectional(bidirectional); + return builder_.Finish(); +} + +struct LSTMGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LSTMGradBuilder Builder; + enum 
FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BIDIRECTIONAL = 4, + VT_HAS_BIAS = 6, + VT_INPUT_SIZE = 8, + VT_HIDDEN_SIZE = 10, + VT_NUM_LAYERS = 12, + VT_NUM_DIRECTIONS = 14, + VT_DROPOUT = 16, + VT_ZONEOUT_CELL = 18, + VT_ZONEOUT_HIDDEN = 20 + }; + bool bidirectional() const { + return GetField(VT_BIDIRECTIONAL, 0) != 0; + } + bool has_bias() const { + return GetField(VT_HAS_BIAS, 0) != 0; + } + int64_t input_size() const { + return GetField(VT_INPUT_SIZE, 0); + } + int64_t hidden_size() const { + return GetField(VT_HIDDEN_SIZE, 0); + } + int64_t num_layers() const { + return GetField(VT_NUM_LAYERS, 0); + } + int64_t num_directions() const { + return GetField(VT_NUM_DIRECTIONS, 0); + } + float dropout() const { + return GetField(VT_DROPOUT, 0.0f); + } + float zoneout_cell() const { + return GetField(VT_ZONEOUT_CELL, 0.0f); + } + float zoneout_hidden() const { + return GetField(VT_ZONEOUT_HIDDEN, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BIDIRECTIONAL) && + VerifyField(verifier, VT_HAS_BIAS) && + VerifyField(verifier, VT_INPUT_SIZE) && + VerifyField(verifier, VT_HIDDEN_SIZE) && + VerifyField(verifier, VT_NUM_LAYERS) && + VerifyField(verifier, VT_NUM_DIRECTIONS) && + VerifyField(verifier, VT_DROPOUT) && + VerifyField(verifier, VT_ZONEOUT_CELL) && + VerifyField(verifier, VT_ZONEOUT_HIDDEN) && + verifier.EndTable(); + } +}; + +struct LSTMGradBuilder { + typedef LSTMGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_bidirectional(bool bidirectional) { + fbb_.AddElement(LSTMGrad::VT_BIDIRECTIONAL, static_cast(bidirectional), 0); + } + void add_has_bias(bool has_bias) { + fbb_.AddElement(LSTMGrad::VT_HAS_BIAS, static_cast(has_bias), 0); + } + void add_input_size(int64_t input_size) { + fbb_.AddElement(LSTMGrad::VT_INPUT_SIZE, input_size, 0); + } + void add_hidden_size(int64_t hidden_size) { + 
fbb_.AddElement(LSTMGrad::VT_HIDDEN_SIZE, hidden_size, 0); + } + void add_num_layers(int64_t num_layers) { + fbb_.AddElement(LSTMGrad::VT_NUM_LAYERS, num_layers, 0); + } + void add_num_directions(int64_t num_directions) { + fbb_.AddElement(LSTMGrad::VT_NUM_DIRECTIONS, num_directions, 0); + } + void add_dropout(float dropout) { + fbb_.AddElement(LSTMGrad::VT_DROPOUT, dropout, 0.0f); + } + void add_zoneout_cell(float zoneout_cell) { + fbb_.AddElement(LSTMGrad::VT_ZONEOUT_CELL, zoneout_cell, 0.0f); + } + void add_zoneout_hidden(float zoneout_hidden) { + fbb_.AddElement(LSTMGrad::VT_ZONEOUT_HIDDEN, zoneout_hidden, 0.0f); + } + explicit LSTMGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLSTMGrad( + flatbuffers::FlatBufferBuilder &_fbb, + bool bidirectional = false, + bool has_bias = false, + int64_t input_size = 0, + int64_t hidden_size = 0, + int64_t num_layers = 0, + int64_t num_directions = 0, + float dropout = 0.0f, + float zoneout_cell = 0.0f, + float zoneout_hidden = 0.0f) { + LSTMGradBuilder builder_(_fbb); + builder_.add_num_directions(num_directions); + builder_.add_num_layers(num_layers); + builder_.add_hidden_size(hidden_size); + builder_.add_input_size(input_size); + builder_.add_zoneout_hidden(zoneout_hidden); + builder_.add_zoneout_cell(zoneout_cell); + builder_.add_dropout(dropout); + builder_.add_has_bias(has_bias); + builder_.add_bidirectional(bidirectional); + return builder_.Finish(); +} + +struct L2NormalizeFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef L2NormalizeFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_EPSILON = 6, + VT_ACTIVATION_TYPE = 8 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + float 
epsilon() const { + return GetField(VT_EPSILON, 0.0f); + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + VerifyField(verifier, VT_EPSILON) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct L2NormalizeFusionBuilder { + typedef L2NormalizeFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(L2NormalizeFusion::VT_AXIS, axis); + } + void add_epsilon(float epsilon) { + fbb_.AddElement(L2NormalizeFusion::VT_EPSILON, epsilon, 0.0f); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(L2NormalizeFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit L2NormalizeFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateL2NormalizeFusion( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0, + float epsilon = 0.0f, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + L2NormalizeFusionBuilder builder_(_fbb); + builder_.add_epsilon(epsilon); + builder_.add_axis(axis); + builder_.add_activation_type(activation_type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateL2NormalizeFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr, + float epsilon = 0.0f, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + auto axis__ = axis ? 
_fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateL2NormalizeFusion( + _fbb, + axis__, + epsilon, + activation_type); +} + +struct MatMul FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MatMulBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TRANSPOSE_A = 4, + VT_TRANSPOSE_B = 6 + }; + bool transpose_a() const { + return GetField(VT_TRANSPOSE_A, 0) != 0; + } + bool transpose_b() const { + return GetField(VT_TRANSPOSE_B, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TRANSPOSE_A) && + VerifyField(verifier, VT_TRANSPOSE_B) && + verifier.EndTable(); + } +}; + +struct MatMulBuilder { + typedef MatMul Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_transpose_a(bool transpose_a) { + fbb_.AddElement(MatMul::VT_TRANSPOSE_A, static_cast(transpose_a), 0); + } + void add_transpose_b(bool transpose_b) { + fbb_.AddElement(MatMul::VT_TRANSPOSE_B, static_cast(transpose_b), 0); + } + explicit MatMulBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMatMul( + flatbuffers::FlatBufferBuilder &_fbb, + bool transpose_a = false, + bool transpose_b = false) { + MatMulBuilder builder_(_fbb); + builder_.add_transpose_b(transpose_b); + builder_.add_transpose_a(transpose_a); + return builder_.Finish(); +} + +struct Maximum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MaximumBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MaximumBuilder { + typedef Maximum Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit 
MaximumBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMaximum( + flatbuffers::FlatBufferBuilder &_fbb) { + MaximumBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MaximumGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MaximumGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_GRAD_X = 4, + VT_GRAD_Y = 6 + }; + bool grad_x() const { + return GetField(VT_GRAD_X, 0) != 0; + } + bool grad_y() const { + return GetField(VT_GRAD_Y, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_GRAD_X) && + VerifyField(verifier, VT_GRAD_Y) && + verifier.EndTable(); + } +}; + +struct MaximumGradBuilder { + typedef MaximumGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_grad_x(bool grad_x) { + fbb_.AddElement(MaximumGrad::VT_GRAD_X, static_cast(grad_x), 0); + } + void add_grad_y(bool grad_y) { + fbb_.AddElement(MaximumGrad::VT_GRAD_Y, static_cast(grad_y), 0); + } + explicit MaximumGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMaximumGrad( + flatbuffers::FlatBufferBuilder &_fbb, + bool grad_x = false, + bool grad_y = false) { + MaximumGradBuilder builder_(_fbb); + builder_.add_grad_y(grad_y); + builder_.add_grad_x(grad_x); + return builder_.Finish(); +} + +struct MaxPoolFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MaxPoolFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + 
VT_KERNEL_SIZE = 4, + VT_STRIDES = 6, + VT_PAD = 8, + VT_PAD_MODE = 10, + VT_ROUND_MODE = 12, + VT_FORMAT = 14, + VT_GLOBAL = 16, + VT_ACTIVATION_TYPE = 18 + }; + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + const flatbuffers::Vector *pad() const { + return GetPointer *>(VT_PAD); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + mindspore::schema::RoundMode round_mode() const { + return static_cast(GetField(VT_ROUND_MODE, 0)); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool global() const { + return GetField(VT_GLOBAL, 0) != 0; + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + VerifyOffset(verifier, VT_PAD) && + verifier.VerifyVector(pad()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyField(verifier, VT_ROUND_MODE) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_GLOBAL) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct MaxPoolFusionBuilder { + typedef MaxPoolFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(MaxPoolFusion::VT_KERNEL_SIZE, kernel_size); + } + void add_strides(flatbuffers::Offset> strides) { + fbb_.AddOffset(MaxPoolFusion::VT_STRIDES, strides); + } + void add_pad(flatbuffers::Offset> pad) { + fbb_.AddOffset(MaxPoolFusion::VT_PAD, pad); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + 
fbb_.AddElement(MaxPoolFusion::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_round_mode(mindspore::schema::RoundMode round_mode) { + fbb_.AddElement(MaxPoolFusion::VT_ROUND_MODE, static_cast(round_mode), 0); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(MaxPoolFusion::VT_FORMAT, static_cast(format), 0); + } + void add_global(bool global) { + fbb_.AddElement(MaxPoolFusion::VT_GLOBAL, static_cast(global), 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(MaxPoolFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit MaxPoolFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMaxPoolFusion( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> strides = 0, + flatbuffers::Offset> pad = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + mindspore::schema::RoundMode round_mode = mindspore::schema::RoundMode_FLOOR, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + bool global = false, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + MaxPoolFusionBuilder builder_(_fbb); + builder_.add_format(format); + builder_.add_pad(pad); + builder_.add_strides(strides); + builder_.add_kernel_size(kernel_size); + builder_.add_activation_type(activation_type); + builder_.add_global(global); + builder_.add_round_mode(round_mode); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateMaxPoolFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *kernel_size = nullptr, + const std::vector *strides = nullptr, + const std::vector *pad = nullptr, + 
mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + mindspore::schema::RoundMode round_mode = mindspore::schema::RoundMode_FLOOR, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + bool global = false, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto strides__ = strides ? _fbb.CreateVector(*strides) : 0; + auto pad__ = pad ? _fbb.CreateVector(*pad) : 0; + return mindspore::schema::CreateMaxPoolFusion( + _fbb, + kernel_size__, + strides__, + pad__, + pad_mode, + round_mode, + format, + global, + activation_type); +} + +struct MaxPoolGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MaxPoolGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KERNEL_SIZE = 4, + VT_STRIDES = 6, + VT_PAD_MODE = 8, + VT_FORMAT = 10 + }; + const flatbuffers::Vector *kernel_size() const { + return GetPointer *>(VT_KERNEL_SIZE); + } + const flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + mindspore::schema::PadMode pad_mode() const { + return static_cast(GetField(VT_PAD_MODE, 0)); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_KERNEL_SIZE) && + verifier.VerifyVector(kernel_size()) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + VerifyField(verifier, VT_PAD_MODE) && + VerifyField(verifier, VT_FORMAT) && + verifier.EndTable(); + } +}; + +struct MaxPoolGradBuilder { + typedef MaxPoolGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_kernel_size(flatbuffers::Offset> kernel_size) { + fbb_.AddOffset(MaxPoolGrad::VT_KERNEL_SIZE, kernel_size); + } + void 
add_strides(flatbuffers::Offset> strides) { + fbb_.AddOffset(MaxPoolGrad::VT_STRIDES, strides); + } + void add_pad_mode(mindspore::schema::PadMode pad_mode) { + fbb_.AddElement(MaxPoolGrad::VT_PAD_MODE, static_cast(pad_mode), 0); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(MaxPoolGrad::VT_FORMAT, static_cast(format), 0); + } + explicit MaxPoolGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMaxPoolGrad( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> kernel_size = 0, + flatbuffers::Offset> strides = 0, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + mindspore::schema::Format format = mindspore::schema::Format_NCHW) { + MaxPoolGradBuilder builder_(_fbb); + builder_.add_format(format); + builder_.add_strides(strides); + builder_.add_kernel_size(kernel_size); + builder_.add_pad_mode(pad_mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateMaxPoolGradDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *kernel_size = nullptr, + const std::vector *strides = nullptr, + mindspore::schema::PadMode pad_mode = mindspore::schema::PadMode_PAD, + mindspore::schema::Format format = mindspore::schema::Format_NCHW) { + auto kernel_size__ = kernel_size ? _fbb.CreateVector(*kernel_size) : 0; + auto strides__ = strides ? 
_fbb.CreateVector(*strides) : 0; + return mindspore::schema::CreateMaxPoolGrad( + _fbb, + kernel_size__, + strides__, + pad_mode, + format); +} + +struct Merge FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MergeBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MergeBuilder { + typedef Merge Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MergeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMerge( + flatbuffers::FlatBufferBuilder &_fbb) { + MergeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Mfcc FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MfccBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FREQ_UPPER_LIMIT = 4, + VT_FREQ_LOWER_LIMIT = 6, + VT_FILTER_BANK_CHANNEL_NUM = 8, + VT_DCT_COEFF_NUM = 10 + }; + float freq_upper_limit() const { + return GetField(VT_FREQ_UPPER_LIMIT, 0.0f); + } + float freq_lower_limit() const { + return GetField(VT_FREQ_LOWER_LIMIT, 0.0f); + } + int64_t filter_bank_channel_num() const { + return GetField(VT_FILTER_BANK_CHANNEL_NUM, 0); + } + int64_t dct_coeff_num() const { + return GetField(VT_DCT_COEFF_NUM, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FREQ_UPPER_LIMIT) && + VerifyField(verifier, VT_FREQ_LOWER_LIMIT) && + VerifyField(verifier, VT_FILTER_BANK_CHANNEL_NUM) && + VerifyField(verifier, VT_DCT_COEFF_NUM) && + verifier.EndTable(); + } +}; + +struct MfccBuilder { + typedef Mfcc Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_freq_upper_limit(float 
freq_upper_limit) { + fbb_.AddElement(Mfcc::VT_FREQ_UPPER_LIMIT, freq_upper_limit, 0.0f); + } + void add_freq_lower_limit(float freq_lower_limit) { + fbb_.AddElement(Mfcc::VT_FREQ_LOWER_LIMIT, freq_lower_limit, 0.0f); + } + void add_filter_bank_channel_num(int64_t filter_bank_channel_num) { + fbb_.AddElement(Mfcc::VT_FILTER_BANK_CHANNEL_NUM, filter_bank_channel_num, 0); + } + void add_dct_coeff_num(int64_t dct_coeff_num) { + fbb_.AddElement(Mfcc::VT_DCT_COEFF_NUM, dct_coeff_num, 0); + } + explicit MfccBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMfcc( + flatbuffers::FlatBufferBuilder &_fbb, + float freq_upper_limit = 0.0f, + float freq_lower_limit = 0.0f, + int64_t filter_bank_channel_num = 0, + int64_t dct_coeff_num = 0) { + MfccBuilder builder_(_fbb); + builder_.add_dct_coeff_num(dct_coeff_num); + builder_.add_filter_bank_channel_num(filter_bank_channel_num); + builder_.add_freq_lower_limit(freq_lower_limit); + builder_.add_freq_upper_limit(freq_upper_limit); + return builder_.Finish(); +} + +struct Minimum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MinimumBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MinimumBuilder { + typedef Minimum Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MinimumBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMinimum( + flatbuffers::FlatBufferBuilder &_fbb) { + MinimumBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct 
MinimumGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MinimumGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_GRAD_X = 4, + VT_GRAD_Y = 6 + }; + bool grad_x() const { + return GetField(VT_GRAD_X, 0) != 0; + } + bool grad_y() const { + return GetField(VT_GRAD_Y, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_GRAD_X) && + VerifyField(verifier, VT_GRAD_Y) && + verifier.EndTable(); + } +}; + +struct MinimumGradBuilder { + typedef MinimumGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_grad_x(bool grad_x) { + fbb_.AddElement(MinimumGrad::VT_GRAD_X, static_cast(grad_x), 0); + } + void add_grad_y(bool grad_y) { + fbb_.AddElement(MinimumGrad::VT_GRAD_Y, static_cast(grad_y), 0); + } + explicit MinimumGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMinimumGrad( + flatbuffers::FlatBufferBuilder &_fbb, + bool grad_x = false, + bool grad_y = false) { + MinimumGradBuilder builder_(_fbb); + builder_.add_grad_y(grad_y); + builder_.add_grad_x(grad_x); + return builder_.Finish(); +} + +struct Mod FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ModBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ModBuilder { + typedef Mod Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ModBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline 
flatbuffers::Offset CreateMod( + flatbuffers::FlatBufferBuilder &_fbb) { + ModBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MulFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MulFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACTIVATION_TYPE = 4 + }; + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct MulFusionBuilder { + typedef MulFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(MulFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit MulFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMulFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + MulFusionBuilder builder_(_fbb); + builder_.add_activation_type(activation_type); + return builder_.Finish(); +} + +struct MulGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MulGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MulGradBuilder { + typedef MulGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MulGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const 
auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMulGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + MulGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Neg FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NegBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct NegBuilder { + typedef Neg Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NegBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNeg( + flatbuffers::FlatBufferBuilder &_fbb) { + NegBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NegGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NegGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct NegGradBuilder { + typedef NegGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NegGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNegGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + NegGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NotEqual FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NotEqualBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct NotEqualBuilder { + typedef 
NotEqual Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NotEqualBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNotEqual( + flatbuffers::FlatBufferBuilder &_fbb) { + NotEqualBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NonMaxSuppression FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NonMaxSuppressionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CENTER_POINT_BOX = 4 + }; + int64_t center_point_box() const { + return GetField(VT_CENTER_POINT_BOX, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_CENTER_POINT_BOX) && + verifier.EndTable(); + } +}; + +struct NonMaxSuppressionBuilder { + typedef NonMaxSuppression Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_center_point_box(int64_t center_point_box) { + fbb_.AddElement(NonMaxSuppression::VT_CENTER_POINT_BOX, center_point_box, 0); + } + explicit NonMaxSuppressionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNonMaxSuppression( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t center_point_box = 0) { + NonMaxSuppressionBuilder builder_(_fbb); + builder_.add_center_point_box(center_point_box); + return builder_.Finish(); +} + +struct OneHot FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OneHotBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + int64_t axis() const { + return 
GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct OneHotBuilder { + typedef OneHot Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(OneHot::VT_AXIS, axis, 0); + } + explicit OneHotBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateOneHot( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0) { + OneHotBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct OnesLike FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OnesLikeBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct OnesLikeBuilder { + typedef OnesLike Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit OnesLikeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateOnesLike( + flatbuffers::FlatBufferBuilder &_fbb) { + OnesLikeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct PadFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PadFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDINGS = 4, + VT_PADDING_MODE = 6, + VT_CONSTANT_VALUE = 8 + }; + const mindspore::schema::Vec2D *paddings() const { + return GetPointer(VT_PADDINGS); + } + mindspore::schema::PaddingMode padding_mode() const { + 
return static_cast(GetField(VT_PADDING_MODE, 0)); + } + float constant_value() const { + return GetField(VT_CONSTANT_VALUE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_PADDINGS) && + verifier.VerifyTable(paddings()) && + VerifyField(verifier, VT_PADDING_MODE) && + VerifyField(verifier, VT_CONSTANT_VALUE) && + verifier.EndTable(); + } +}; + +struct PadFusionBuilder { + typedef PadFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_paddings(flatbuffers::Offset paddings) { + fbb_.AddOffset(PadFusion::VT_PADDINGS, paddings); + } + void add_padding_mode(mindspore::schema::PaddingMode padding_mode) { + fbb_.AddElement(PadFusion::VT_PADDING_MODE, static_cast(padding_mode), 0); + } + void add_constant_value(float constant_value) { + fbb_.AddElement(PadFusion::VT_CONSTANT_VALUE, constant_value, 0.0f); + } + explicit PadFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePadFusion( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset paddings = 0, + mindspore::schema::PaddingMode padding_mode = mindspore::schema::PaddingMode_CONSTANT, + float constant_value = 0.0f) { + PadFusionBuilder builder_(_fbb); + builder_.add_constant_value(constant_value); + builder_.add_paddings(paddings); + builder_.add_padding_mode(padding_mode); + return builder_.Finish(); +} + +struct PartialFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PartialFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SUB_GRAPH_INDEX = 4 + }; + int64_t sub_graph_index() const { + return GetField(VT_SUB_GRAPH_INDEX, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return 
VerifyTableStart(verifier) && + VerifyField(verifier, VT_SUB_GRAPH_INDEX) && + verifier.EndTable(); + } +}; + +struct PartialFusionBuilder { + typedef PartialFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_sub_graph_index(int64_t sub_graph_index) { + fbb_.AddElement(PartialFusion::VT_SUB_GRAPH_INDEX, sub_graph_index, 0); + } + explicit PartialFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePartialFusion( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t sub_graph_index = 0) { + PartialFusionBuilder builder_(_fbb); + builder_.add_sub_graph_index(sub_graph_index); + return builder_.Finish(); +} + +struct PowerGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PowerGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_POWER = 4, + VT_SCALE = 6, + VT_SHIFT = 8 + }; + float power() const { + return GetField(VT_POWER, 0.0f); + } + float scale() const { + return GetField(VT_SCALE, 0.0f); + } + float shift() const { + return GetField(VT_SHIFT, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_POWER) && + VerifyField(verifier, VT_SCALE) && + VerifyField(verifier, VT_SHIFT) && + verifier.EndTable(); + } +}; + +struct PowerGradBuilder { + typedef PowerGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_power(float power) { + fbb_.AddElement(PowerGrad::VT_POWER, power, 0.0f); + } + void add_scale(float scale) { + fbb_.AddElement(PowerGrad::VT_SCALE, scale, 0.0f); + } + void add_shift(float shift) { + fbb_.AddElement(PowerGrad::VT_SHIFT, shift, 0.0f); + } + explicit PowerGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : 
fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePowerGrad( + flatbuffers::FlatBufferBuilder &_fbb, + float power = 0.0f, + float scale = 0.0f, + float shift = 0.0f) { + PowerGradBuilder builder_(_fbb); + builder_.add_shift(shift); + builder_.add_scale(scale); + builder_.add_power(power); + return builder_.Finish(); +} + +struct PowFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PowFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SCALE = 4, + VT_SHIFT = 6 + }; + float scale() const { + return GetField(VT_SCALE, 1.0f); + } + float shift() const { + return GetField(VT_SHIFT, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SCALE) && + VerifyField(verifier, VT_SHIFT) && + verifier.EndTable(); + } +}; + +struct PowFusionBuilder { + typedef PowFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_scale(float scale) { + fbb_.AddElement(PowFusion::VT_SCALE, scale, 1.0f); + } + void add_shift(float shift) { + fbb_.AddElement(PowFusion::VT_SHIFT, shift, 0.0f); + } + explicit PowFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePowFusion( + flatbuffers::FlatBufferBuilder &_fbb, + float scale = 1.0f, + float shift = 0.0f) { + PowFusionBuilder builder_(_fbb); + builder_.add_shift(shift); + builder_.add_scale(scale); + return builder_.Finish(); +} + +struct PriorBox FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PriorBoxBuilder Builder; + enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MIN_SIZES = 4, + VT_MAX_SIZES = 6, + VT_ASPECT_RATIOS = 8, + VT_VARIANCES = 10, + VT_IMAGE_SIZE_W = 12, + VT_IMAGE_SIZE_H = 14, + VT_STEP_W = 16, + VT_STEP_H = 18, + VT_CLIP = 20, + VT_FLIP = 22, + VT_OFFSET = 24 + }; + const flatbuffers::Vector *min_sizes() const { + return GetPointer *>(VT_MIN_SIZES); + } + const flatbuffers::Vector *max_sizes() const { + return GetPointer *>(VT_MAX_SIZES); + } + const flatbuffers::Vector *aspect_ratios() const { + return GetPointer *>(VT_ASPECT_RATIOS); + } + const flatbuffers::Vector *variances() const { + return GetPointer *>(VT_VARIANCES); + } + int64_t image_size_w() const { + return GetField(VT_IMAGE_SIZE_W, 0); + } + int64_t image_size_h() const { + return GetField(VT_IMAGE_SIZE_H, 0); + } + float step_w() const { + return GetField(VT_STEP_W, 0.0f); + } + float step_h() const { + return GetField(VT_STEP_H, 0.0f); + } + bool clip() const { + return GetField(VT_CLIP, 0) != 0; + } + bool flip() const { + return GetField(VT_FLIP, 0) != 0; + } + float offset() const { + return GetField(VT_OFFSET, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_MIN_SIZES) && + verifier.VerifyVector(min_sizes()) && + VerifyOffset(verifier, VT_MAX_SIZES) && + verifier.VerifyVector(max_sizes()) && + VerifyOffset(verifier, VT_ASPECT_RATIOS) && + verifier.VerifyVector(aspect_ratios()) && + VerifyOffset(verifier, VT_VARIANCES) && + verifier.VerifyVector(variances()) && + VerifyField(verifier, VT_IMAGE_SIZE_W) && + VerifyField(verifier, VT_IMAGE_SIZE_H) && + VerifyField(verifier, VT_STEP_W) && + VerifyField(verifier, VT_STEP_H) && + VerifyField(verifier, VT_CLIP) && + VerifyField(verifier, VT_FLIP) && + VerifyField(verifier, VT_OFFSET) && + verifier.EndTable(); + } +}; + +struct PriorBoxBuilder { + typedef PriorBox Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_min_sizes(flatbuffers::Offset> min_sizes) { + fbb_.AddOffset(PriorBox::VT_MIN_SIZES, min_sizes); + } + void add_max_sizes(flatbuffers::Offset> max_sizes) { + fbb_.AddOffset(PriorBox::VT_MAX_SIZES, max_sizes); + } + void add_aspect_ratios(flatbuffers::Offset> aspect_ratios) { + fbb_.AddOffset(PriorBox::VT_ASPECT_RATIOS, aspect_ratios); + } + void add_variances(flatbuffers::Offset> variances) { + fbb_.AddOffset(PriorBox::VT_VARIANCES, variances); + } + void add_image_size_w(int64_t image_size_w) { + fbb_.AddElement(PriorBox::VT_IMAGE_SIZE_W, image_size_w, 0); + } + void add_image_size_h(int64_t image_size_h) { + fbb_.AddElement(PriorBox::VT_IMAGE_SIZE_H, image_size_h, 0); + } + void add_step_w(float step_w) { + fbb_.AddElement(PriorBox::VT_STEP_W, step_w, 0.0f); + } + void add_step_h(float step_h) { + fbb_.AddElement(PriorBox::VT_STEP_H, step_h, 0.0f); + } + void add_clip(bool clip) { + fbb_.AddElement(PriorBox::VT_CLIP, static_cast(clip), 0); + } + void add_flip(bool flip) { + fbb_.AddElement(PriorBox::VT_FLIP, static_cast(flip), 0); + } + void add_offset(float offset) { + fbb_.AddElement(PriorBox::VT_OFFSET, offset, 0.0f); + } + explicit PriorBoxBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePriorBox( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> min_sizes = 0, + flatbuffers::Offset> max_sizes = 0, + flatbuffers::Offset> aspect_ratios = 0, + flatbuffers::Offset> variances = 0, + int64_t image_size_w = 0, + int64_t image_size_h = 0, + float step_w = 0.0f, + float step_h = 0.0f, + bool clip = false, + bool flip = false, + float offset = 0.0f) { + PriorBoxBuilder builder_(_fbb); + builder_.add_image_size_h(image_size_h); + builder_.add_image_size_w(image_size_w); + builder_.add_offset(offset); + builder_.add_step_h(step_h); + 
builder_.add_step_w(step_w); + builder_.add_variances(variances); + builder_.add_aspect_ratios(aspect_ratios); + builder_.add_max_sizes(max_sizes); + builder_.add_min_sizes(min_sizes); + builder_.add_flip(flip); + builder_.add_clip(clip); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePriorBoxDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *min_sizes = nullptr, + const std::vector *max_sizes = nullptr, + const std::vector *aspect_ratios = nullptr, + const std::vector *variances = nullptr, + int64_t image_size_w = 0, + int64_t image_size_h = 0, + float step_w = 0.0f, + float step_h = 0.0f, + bool clip = false, + bool flip = false, + float offset = 0.0f) { + auto min_sizes__ = min_sizes ? _fbb.CreateVector(*min_sizes) : 0; + auto max_sizes__ = max_sizes ? _fbb.CreateVector(*max_sizes) : 0; + auto aspect_ratios__ = aspect_ratios ? _fbb.CreateVector(*aspect_ratios) : 0; + auto variances__ = variances ? _fbb.CreateVector(*variances) : 0; + return mindspore::schema::CreatePriorBox( + _fbb, + min_sizes__, + max_sizes__, + aspect_ratios__, + variances__, + image_size_w, + image_size_h, + step_w, + step_h, + clip, + flip, + offset); +} + +struct PReLUFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PReLUFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CHANNEL_SHARED = 4 + }; + bool channel_shared() const { + return GetField(VT_CHANNEL_SHARED, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_CHANNEL_SHARED) && + verifier.EndTable(); + } +}; + +struct PReLUFusionBuilder { + typedef PReLUFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_channel_shared(bool channel_shared) { + fbb_.AddElement(PReLUFusion::VT_CHANNEL_SHARED, static_cast(channel_shared), 0); + } + explicit PReLUFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : 
fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePReLUFusion( + flatbuffers::FlatBufferBuilder &_fbb, + bool channel_shared = false) { + PReLUFusionBuilder builder_(_fbb); + builder_.add_channel_shared(channel_shared); + return builder_.Finish(); +} + +struct Rank FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RankBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct RankBuilder { + typedef Rank Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RankBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRank( + flatbuffers::FlatBufferBuilder &_fbb) { + RankBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Range FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RangeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_D_TYPE = 4, + VT_START = 6, + VT_LIMIT = 8, + VT_DELTA = 10 + }; + int64_t d_type() const { + return GetField(VT_D_TYPE, 0); + } + int64_t start() const { + return GetField(VT_START, 0); + } + int64_t limit() const { + return GetField(VT_LIMIT, 0); + } + int64_t delta() const { + return GetField(VT_DELTA, 1LL); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_D_TYPE) && + VerifyField(verifier, VT_START) && + VerifyField(verifier, VT_LIMIT) && + VerifyField(verifier, VT_DELTA) && + verifier.EndTable(); + } +}; + +struct RangeBuilder { + typedef Range Table; + flatbuffers::FlatBufferBuilder 
&fbb_; + flatbuffers::uoffset_t start_; + void add_d_type(int64_t d_type) { + fbb_.AddElement(Range::VT_D_TYPE, d_type, 0); + } + void add_start(int64_t start) { + fbb_.AddElement(Range::VT_START, start, 0); + } + void add_limit(int64_t limit) { + fbb_.AddElement(Range::VT_LIMIT, limit, 0); + } + void add_delta(int64_t delta) { + fbb_.AddElement(Range::VT_DELTA, delta, 1LL); + } + explicit RangeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRange( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t d_type = 0, + int64_t start = 0, + int64_t limit = 0, + int64_t delta = 1LL) { + RangeBuilder builder_(_fbb); + builder_.add_delta(delta); + builder_.add_limit(limit); + builder_.add_start(start); + builder_.add_d_type(d_type); + return builder_.Finish(); +} + +struct Reciprocal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReciprocalBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ReciprocalBuilder { + typedef Reciprocal Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ReciprocalBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReciprocal( + flatbuffers::FlatBufferBuilder &_fbb) { + ReciprocalBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct RealDiv FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RealDivBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct 
RealDivBuilder { + typedef RealDiv Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RealDivBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRealDiv( + flatbuffers::FlatBufferBuilder &_fbb) { + RealDivBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ReduceFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReduceFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEEP_DIMS = 4, + VT_MODE = 6, + VT_REDUCE_TO_END = 8, + VT_COEFF = 10 + }; + bool keep_dims() const { + return GetField(VT_KEEP_DIMS, 0) != 0; + } + mindspore::schema::ReduceMode mode() const { + return static_cast(GetField(VT_MODE, 0)); + } + bool reduce_to_end() const { + return GetField(VT_REDUCE_TO_END, 0) != 0; + } + float coeff() const { + return GetField(VT_COEFF, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KEEP_DIMS) && + VerifyField(verifier, VT_MODE) && + VerifyField(verifier, VT_REDUCE_TO_END) && + VerifyField(verifier, VT_COEFF) && + verifier.EndTable(); + } +}; + +struct ReduceFusionBuilder { + typedef ReduceFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_keep_dims(bool keep_dims) { + fbb_.AddElement(ReduceFusion::VT_KEEP_DIMS, static_cast(keep_dims), 0); + } + void add_mode(mindspore::schema::ReduceMode mode) { + fbb_.AddElement(ReduceFusion::VT_MODE, static_cast(mode), 0); + } + void add_reduce_to_end(bool reduce_to_end) { + fbb_.AddElement(ReduceFusion::VT_REDUCE_TO_END, static_cast(reduce_to_end), 0); + } + void add_coeff(float coeff) { + fbb_.AddElement(ReduceFusion::VT_COEFF, coeff, 0.0f); + } + explicit 
ReduceFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReduceFusion( + flatbuffers::FlatBufferBuilder &_fbb, + bool keep_dims = false, + mindspore::schema::ReduceMode mode = mindspore::schema::ReduceMode_ReduceMean, + bool reduce_to_end = false, + float coeff = 0.0f) { + ReduceFusionBuilder builder_(_fbb); + builder_.add_coeff(coeff); + builder_.add_reduce_to_end(reduce_to_end); + builder_.add_mode(mode); + builder_.add_keep_dims(keep_dims); + return builder_.Finish(); +} + +struct Reshape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReshapeBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ReshapeBuilder { + typedef Reshape Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ReshapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReshape( + flatbuffers::FlatBufferBuilder &_fbb) { + ReshapeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Resize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ResizeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_METHOD = 6, + VT_NEW_HEIGHT = 8, + VT_NEW_WIDTH = 10, + VT_PRESERVE_ASPECT_RATIO = 12, + VT_COORDINATE_TRANSFORM_MODE = 14, + VT_CUBIC_COEFF = 16, + VT_EXCLUDE_OUTSIDE = 18, + VT_EXTRAPOLATION_VALUE = 20, + VT_NEAREST_MODE = 22 + }; + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + 
mindspore::schema::ResizeMethod method() const { + return static_cast(GetField(VT_METHOD, 0)); + } + int64_t new_height() const { + return GetField(VT_NEW_HEIGHT, 0); + } + int64_t new_width() const { + return GetField(VT_NEW_WIDTH, 0); + } + bool preserve_aspect_ratio() const { + return GetField(VT_PRESERVE_ASPECT_RATIO, 0) != 0; + } + mindspore::schema::CoordinateTransformMode coordinate_transform_mode() const { + return static_cast(GetField(VT_COORDINATE_TRANSFORM_MODE, 0)); + } + float cubic_coeff() const { + return GetField(VT_CUBIC_COEFF, 0.0f); + } + int64_t exclude_outside() const { + return GetField(VT_EXCLUDE_OUTSIDE, 0); + } + float extrapolation_value() const { + return GetField(VT_EXTRAPOLATION_VALUE, 0.0f); + } + mindspore::schema::NearestMode nearest_mode() const { + return static_cast(GetField(VT_NEAREST_MODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_METHOD) && + VerifyField(verifier, VT_NEW_HEIGHT) && + VerifyField(verifier, VT_NEW_WIDTH) && + VerifyField(verifier, VT_PRESERVE_ASPECT_RATIO) && + VerifyField(verifier, VT_COORDINATE_TRANSFORM_MODE) && + VerifyField(verifier, VT_CUBIC_COEFF) && + VerifyField(verifier, VT_EXCLUDE_OUTSIDE) && + VerifyField(verifier, VT_EXTRAPOLATION_VALUE) && + VerifyField(verifier, VT_NEAREST_MODE) && + verifier.EndTable(); + } +}; + +struct ResizeBuilder { + typedef Resize Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(Resize::VT_FORMAT, static_cast(format), 0); + } + void add_method(mindspore::schema::ResizeMethod method) { + fbb_.AddElement(Resize::VT_METHOD, static_cast(method), 0); + } + void add_new_height(int64_t new_height) { + fbb_.AddElement(Resize::VT_NEW_HEIGHT, new_height, 0); + } + void add_new_width(int64_t new_width) { + fbb_.AddElement(Resize::VT_NEW_WIDTH, new_width, 0); + 
} + void add_preserve_aspect_ratio(bool preserve_aspect_ratio) { + fbb_.AddElement(Resize::VT_PRESERVE_ASPECT_RATIO, static_cast(preserve_aspect_ratio), 0); + } + void add_coordinate_transform_mode(mindspore::schema::CoordinateTransformMode coordinate_transform_mode) { + fbb_.AddElement(Resize::VT_COORDINATE_TRANSFORM_MODE, static_cast(coordinate_transform_mode), 0); + } + void add_cubic_coeff(float cubic_coeff) { + fbb_.AddElement(Resize::VT_CUBIC_COEFF, cubic_coeff, 0.0f); + } + void add_exclude_outside(int64_t exclude_outside) { + fbb_.AddElement(Resize::VT_EXCLUDE_OUTSIDE, exclude_outside, 0); + } + void add_extrapolation_value(float extrapolation_value) { + fbb_.AddElement(Resize::VT_EXTRAPOLATION_VALUE, extrapolation_value, 0.0f); + } + void add_nearest_mode(mindspore::schema::NearestMode nearest_mode) { + fbb_.AddElement(Resize::VT_NEAREST_MODE, static_cast(nearest_mode), 0); + } + explicit ResizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateResize( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::Format format = mindspore::schema::Format_NCHW, + mindspore::schema::ResizeMethod method = mindspore::schema::ResizeMethod_LINEAR, + int64_t new_height = 0, + int64_t new_width = 0, + bool preserve_aspect_ratio = false, + mindspore::schema::CoordinateTransformMode coordinate_transform_mode = mindspore::schema::CoordinateTransformMode_ASYMMETRIC, + float cubic_coeff = 0.0f, + int64_t exclude_outside = 0, + float extrapolation_value = 0.0f, + mindspore::schema::NearestMode nearest_mode = mindspore::schema::NearestMode_NORMAL) { + ResizeBuilder builder_(_fbb); + builder_.add_exclude_outside(exclude_outside); + builder_.add_new_width(new_width); + builder_.add_new_height(new_height); + builder_.add_extrapolation_value(extrapolation_value); + 
builder_.add_cubic_coeff(cubic_coeff); + builder_.add_format(format); + builder_.add_nearest_mode(nearest_mode); + builder_.add_coordinate_transform_mode(coordinate_transform_mode); + builder_.add_preserve_aspect_ratio(preserve_aspect_ratio); + builder_.add_method(method); + return builder_.Finish(); +} + +struct ReverseSequence FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReverseSequenceBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SEQ_DIM = 4, + VT_BATCH_DIM = 6 + }; + int64_t seq_dim() const { + return GetField(VT_SEQ_DIM, 0); + } + int64_t batch_dim() const { + return GetField(VT_BATCH_DIM, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SEQ_DIM) && + VerifyField(verifier, VT_BATCH_DIM) && + verifier.EndTable(); + } +}; + +struct ReverseSequenceBuilder { + typedef ReverseSequence Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_seq_dim(int64_t seq_dim) { + fbb_.AddElement(ReverseSequence::VT_SEQ_DIM, seq_dim, 0); + } + void add_batch_dim(int64_t batch_dim) { + fbb_.AddElement(ReverseSequence::VT_BATCH_DIM, batch_dim, 0); + } + explicit ReverseSequenceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReverseSequence( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t seq_dim = 0, + int64_t batch_dim = 0) { + ReverseSequenceBuilder builder_(_fbb); + builder_.add_batch_dim(batch_dim); + builder_.add_seq_dim(seq_dim); + return builder_.Finish(); +} + +struct ReverseV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReverseV2Builder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector 
*axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct ReverseV2Builder { + typedef ReverseV2 Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(ReverseV2::VT_AXIS, axis); + } + explicit ReverseV2Builder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReverseV2( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + ReverseV2Builder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateReverseV2Direct( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? 
_fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateReverseV2( + _fbb, + axis__); +} + +struct Rfft FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RfftBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FFT_LENGTH = 4 + }; + int64_t fft_length() const { + return GetField(VT_FFT_LENGTH, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FFT_LENGTH) && + verifier.EndTable(); + } +}; + +struct RfftBuilder { + typedef Rfft Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fft_length(int64_t fft_length) { + fbb_.AddElement(Rfft::VT_FFT_LENGTH, fft_length, 0); + } + explicit RfftBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRfft( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t fft_length = 0) { + RfftBuilder builder_(_fbb); + builder_.add_fft_length(fft_length); + return builder_.Finish(); +} + +struct ROIPooling FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ROIPoolingBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_POOLED_H = 4, + VT_POOLED_W = 6, + VT_SCALE = 8 + }; + int64_t pooled_h() const { + return GetField(VT_POOLED_H, 0); + } + int64_t pooled_w() const { + return GetField(VT_POOLED_W, 0); + } + float scale() const { + return GetField(VT_SCALE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_POOLED_H) && + VerifyField(verifier, VT_POOLED_W) && + VerifyField(verifier, VT_SCALE) && + verifier.EndTable(); + } +}; + +struct ROIPoolingBuilder { + typedef ROIPooling Table; + flatbuffers::FlatBufferBuilder &fbb_; + 
flatbuffers::uoffset_t start_; + void add_pooled_h(int64_t pooled_h) { + fbb_.AddElement(ROIPooling::VT_POOLED_H, pooled_h, 0); + } + void add_pooled_w(int64_t pooled_w) { + fbb_.AddElement(ROIPooling::VT_POOLED_W, pooled_w, 0); + } + void add_scale(float scale) { + fbb_.AddElement(ROIPooling::VT_SCALE, scale, 0.0f); + } + explicit ROIPoolingBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateROIPooling( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t pooled_h = 0, + int64_t pooled_w = 0, + float scale = 0.0f) { + ROIPoolingBuilder builder_(_fbb); + builder_.add_pooled_w(pooled_w); + builder_.add_pooled_h(pooled_h); + builder_.add_scale(scale); + return builder_.Finish(); +} + +struct Round FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RoundBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct RoundBuilder { + typedef Round Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RoundBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRound( + flatbuffers::FlatBufferBuilder &_fbb) { + RoundBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Rsqrt FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RsqrtBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct RsqrtBuilder { + typedef Rsqrt Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit 
RsqrtBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRsqrt( + flatbuffers::FlatBufferBuilder &_fbb) { + RsqrtBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct QuantDTypeCast FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantDTypeCastBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SRC_T = 4, + VT_DST_T = 6 + }; + int64_t src_t() const { + return GetField(VT_SRC_T, 0); + } + int64_t dst_t() const { + return GetField(VT_DST_T, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SRC_T) && + VerifyField(verifier, VT_DST_T) && + verifier.EndTable(); + } +}; + +struct QuantDTypeCastBuilder { + typedef QuantDTypeCast Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_src_t(int64_t src_t) { + fbb_.AddElement(QuantDTypeCast::VT_SRC_T, src_t, 0); + } + void add_dst_t(int64_t dst_t) { + fbb_.AddElement(QuantDTypeCast::VT_DST_T, dst_t, 0); + } + explicit QuantDTypeCastBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantDTypeCast( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t src_t = 0, + int64_t dst_t = 0) { + QuantDTypeCastBuilder builder_(_fbb); + builder_.add_dst_t(dst_t); + builder_.add_src_t(src_t); + return builder_.Finish(); +} + +struct ScaleFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ScaleFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_ACTIVATION_TYPE = 6 + 
}; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct ScaleFusionBuilder { + typedef ScaleFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(ScaleFusion::VT_AXIS, axis, 0); + } + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(ScaleFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit ScaleFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateScaleFusion( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + ScaleFusionBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_activation_type(activation_type); + return builder_.Finish(); +} + +struct ScatterNd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ScatterNdBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ScatterNdBuilder { + typedef ScatterNd Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ScatterNdBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } 
+}; + +inline flatbuffers::Offset CreateScatterNd( + flatbuffers::FlatBufferBuilder &_fbb) { + ScatterNdBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SGD FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SGDBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NESTEROV = 4, + VT_DAMPENING = 6, + VT_WEIGHT_DECAY = 8 + }; + bool nesterov() const { + return GetField(VT_NESTEROV, 0) != 0; + } + float dampening() const { + return GetField(VT_DAMPENING, 0.0f); + } + float weight_decay() const { + return GetField(VT_WEIGHT_DECAY, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NESTEROV) && + VerifyField(verifier, VT_DAMPENING) && + VerifyField(verifier, VT_WEIGHT_DECAY) && + verifier.EndTable(); + } +}; + +struct SGDBuilder { + typedef SGD Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_nesterov(bool nesterov) { + fbb_.AddElement(SGD::VT_NESTEROV, static_cast(nesterov), 0); + } + void add_dampening(float dampening) { + fbb_.AddElement(SGD::VT_DAMPENING, dampening, 0.0f); + } + void add_weight_decay(float weight_decay) { + fbb_.AddElement(SGD::VT_WEIGHT_DECAY, weight_decay, 0.0f); + } + explicit SGDBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSGD( + flatbuffers::FlatBufferBuilder &_fbb, + bool nesterov = false, + float dampening = 0.0f, + float weight_decay = 0.0f) { + SGDBuilder builder_(_fbb); + builder_.add_weight_decay(weight_decay); + builder_.add_dampening(dampening); + builder_.add_nesterov(nesterov); + return builder_.Finish(); +} + +struct Shape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ShapeBuilder Builder; + bool 
Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ShapeBuilder { + typedef Shape Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ShapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateShape( + flatbuffers::FlatBufferBuilder &_fbb) { + ShapeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SigmoidCrossEntropyWithLogits FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SigmoidCrossEntropyWithLogitsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SigmoidCrossEntropyWithLogitsBuilder { + typedef SigmoidCrossEntropyWithLogits Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SigmoidCrossEntropyWithLogitsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSigmoidCrossEntropyWithLogits( + flatbuffers::FlatBufferBuilder &_fbb) { + SigmoidCrossEntropyWithLogitsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SigmoidCrossEntropyWithLogitsGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SigmoidCrossEntropyWithLogitsGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SigmoidCrossEntropyWithLogitsGradBuilder { + typedef SigmoidCrossEntropyWithLogitsGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t 
start_; + explicit SigmoidCrossEntropyWithLogitsGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSigmoidCrossEntropyWithLogitsGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + SigmoidCrossEntropyWithLogitsGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Sin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SinBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SinBuilder { + typedef Sin Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SinBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSin( + flatbuffers::FlatBufferBuilder &_fbb) { + SinBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SkipGram FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SkipGramBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INCLUDE_ALL_GRAMS = 4, + VT_MAX_SKIP_SIZE = 6, + VT_NGRAM_SIZE = 8 + }; + bool include_all_grams() const { + return GetField(VT_INCLUDE_ALL_GRAMS, 0) != 0; + } + int64_t max_skip_size() const { + return GetField(VT_MAX_SKIP_SIZE, 0); + } + int64_t ngram_size() const { + return GetField(VT_NGRAM_SIZE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_INCLUDE_ALL_GRAMS) && + VerifyField(verifier, VT_MAX_SKIP_SIZE) && + VerifyField(verifier, VT_NGRAM_SIZE) && + verifier.EndTable(); + } +}; + +struct SkipGramBuilder { + 
typedef SkipGram Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_include_all_grams(bool include_all_grams) { + fbb_.AddElement(SkipGram::VT_INCLUDE_ALL_GRAMS, static_cast(include_all_grams), 0); + } + void add_max_skip_size(int64_t max_skip_size) { + fbb_.AddElement(SkipGram::VT_MAX_SKIP_SIZE, max_skip_size, 0); + } + void add_ngram_size(int64_t ngram_size) { + fbb_.AddElement(SkipGram::VT_NGRAM_SIZE, ngram_size, 0); + } + explicit SkipGramBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSkipGram( + flatbuffers::FlatBufferBuilder &_fbb, + bool include_all_grams = false, + int64_t max_skip_size = 0, + int64_t ngram_size = 0) { + SkipGramBuilder builder_(_fbb); + builder_.add_ngram_size(ngram_size); + builder_.add_max_skip_size(max_skip_size); + builder_.add_include_all_grams(include_all_grams); + return builder_.Finish(); +} + +struct SliceFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SliceFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXES = 4 + }; + const flatbuffers::Vector *axes() const { + return GetPointer *>(VT_AXES); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXES) && + verifier.VerifyVector(axes()) && + verifier.EndTable(); + } +}; + +struct SliceFusionBuilder { + typedef SliceFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axes(flatbuffers::Offset> axes) { + fbb_.AddOffset(SliceFusion::VT_AXES, axes); + } + explicit SliceFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + 
auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSliceFusion( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axes = 0) { + SliceFusionBuilder builder_(_fbb); + builder_.add_axes(axes); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSliceFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axes = nullptr) { + auto axes__ = axes ? _fbb.CreateVector(*axes) : 0; + return mindspore::schema::CreateSliceFusion( + _fbb, + axes__); +} + +struct SmoothL1Loss FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SmoothL1LossBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BETA = 4 + }; + float beta() const { + return GetField(VT_BETA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BETA) && + verifier.EndTable(); + } +}; + +struct SmoothL1LossBuilder { + typedef SmoothL1Loss Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_beta(float beta) { + fbb_.AddElement(SmoothL1Loss::VT_BETA, beta, 0.0f); + } + explicit SmoothL1LossBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSmoothL1Loss( + flatbuffers::FlatBufferBuilder &_fbb, + float beta = 0.0f) { + SmoothL1LossBuilder builder_(_fbb); + builder_.add_beta(beta); + return builder_.Finish(); +} + +struct SmoothL1LossGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SmoothL1LossGradBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BETA = 4 + }; + float beta() const { + return GetField(VT_BETA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return 
VerifyTableStart(verifier) && + VerifyField(verifier, VT_BETA) && + verifier.EndTable(); + } +}; + +struct SmoothL1LossGradBuilder { + typedef SmoothL1LossGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_beta(float beta) { + fbb_.AddElement(SmoothL1LossGrad::VT_BETA, beta, 0.0f); + } + explicit SmoothL1LossGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSmoothL1LossGrad( + flatbuffers::FlatBufferBuilder &_fbb, + float beta = 0.0f) { + SmoothL1LossGradBuilder builder_(_fbb); + builder_.add_beta(beta); + return builder_.Finish(); +} + +struct Softmax FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SoftmaxBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct SoftmaxBuilder { + typedef Softmax Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(Softmax::VT_AXIS, axis); + } + explicit SoftmaxBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSoftmax( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + SoftmaxBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset 
CreateSoftmaxDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? _fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateSoftmax( + _fbb, + axis__); +} + +struct SoftmaxCrossEntropyWithLogits FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SoftmaxCrossEntropyWithLogitsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SoftmaxCrossEntropyWithLogitsBuilder { + typedef SoftmaxCrossEntropyWithLogits Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SoftmaxCrossEntropyWithLogitsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSoftmaxCrossEntropyWithLogits( + flatbuffers::FlatBufferBuilder &_fbb) { + SoftmaxCrossEntropyWithLogitsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SpaceToBatch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SpaceToBatchBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4, + VT_PADDINGS = 6 + }; + const flatbuffers::Vector *block_size() const { + return GetPointer *>(VT_BLOCK_SIZE); + } + const mindspore::schema::Vec2D *paddings() const { + return GetPointer(VT_PADDINGS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BLOCK_SIZE) && + verifier.VerifyVector(block_size()) && + VerifyOffset(verifier, VT_PADDINGS) && + verifier.VerifyTable(paddings()) && + verifier.EndTable(); + } +}; + +struct SpaceToBatchBuilder { + typedef SpaceToBatch Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_block_size(flatbuffers::Offset> block_size) { + fbb_.AddOffset(SpaceToBatch::VT_BLOCK_SIZE, block_size); + } + void add_paddings(flatbuffers::Offset paddings) { + fbb_.AddOffset(SpaceToBatch::VT_PADDINGS, paddings); + } + explicit SpaceToBatchBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSpaceToBatch( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> block_size = 0, + flatbuffers::Offset paddings = 0) { + SpaceToBatchBuilder builder_(_fbb); + builder_.add_paddings(paddings); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSpaceToBatchDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *block_size = nullptr, + flatbuffers::Offset paddings = 0) { + auto block_size__ = block_size ? _fbb.CreateVector(*block_size) : 0; + return mindspore::schema::CreateSpaceToBatch( + _fbb, + block_size__, + paddings); +} + +struct SpaceToBatchND FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SpaceToBatchNDBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SHAPE = 4, + VT_PADDINGS = 6 + }; + const flatbuffers::Vector *block_shape() const { + return GetPointer *>(VT_BLOCK_SHAPE); + } + const mindspore::schema::Vec2D *paddings() const { + return GetPointer(VT_PADDINGS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BLOCK_SHAPE) && + verifier.VerifyVector(block_shape()) && + VerifyOffset(verifier, VT_PADDINGS) && + verifier.VerifyTable(paddings()) && + verifier.EndTable(); + } +}; + +struct SpaceToBatchNDBuilder { + typedef SpaceToBatchND Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_block_shape(flatbuffers::Offset> block_shape) { + fbb_.AddOffset(SpaceToBatchND::VT_BLOCK_SHAPE, block_shape); + } + void add_paddings(flatbuffers::Offset paddings) { + fbb_.AddOffset(SpaceToBatchND::VT_PADDINGS, paddings); + } + explicit SpaceToBatchNDBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSpaceToBatchND( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> block_shape = 0, + flatbuffers::Offset paddings = 0) { + SpaceToBatchNDBuilder builder_(_fbb); + builder_.add_paddings(paddings); + builder_.add_block_shape(block_shape); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSpaceToBatchNDDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *block_shape = nullptr, + flatbuffers::Offset paddings = 0) { + auto block_shape__ = block_shape ? 
_fbb.CreateVector(*block_shape) : 0; + return mindspore::schema::CreateSpaceToBatchND( + _fbb, + block_shape__, + paddings); +} + +struct SpaceToDepth FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SpaceToDepthBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4, + VT_FORMAT = 6 + }; + int64_t block_size() const { + return GetField(VT_BLOCK_SIZE, 0); + } + mindspore::schema::Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCK_SIZE) && + VerifyField(verifier, VT_FORMAT) && + verifier.EndTable(); + } +}; + +struct SpaceToDepthBuilder { + typedef SpaceToDepth Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int64_t block_size) { + fbb_.AddElement(SpaceToDepth::VT_BLOCK_SIZE, block_size, 0); + } + void add_format(mindspore::schema::Format format) { + fbb_.AddElement(SpaceToDepth::VT_FORMAT, static_cast(format), 0); + } + explicit SpaceToDepthBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSpaceToDepth( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t block_size = 0, + mindspore::schema::Format format = mindspore::schema::Format_NCHW) { + SpaceToDepthBuilder builder_(_fbb); + builder_.add_block_size(block_size); + builder_.add_format(format); + return builder_.Finish(); +} + +struct SparseSoftmaxCrossEntropyWithLogits FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SparseSoftmaxCrossEntropyWithLogitsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_IS_GRAD = 4 + }; + bool is_grad() const { + return GetField(VT_IS_GRAD, 0) != 0; + } + 
bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_IS_GRAD) && + verifier.EndTable(); + } +}; + +struct SparseSoftmaxCrossEntropyWithLogitsBuilder { + typedef SparseSoftmaxCrossEntropyWithLogits Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_is_grad(bool is_grad) { + fbb_.AddElement(SparseSoftmaxCrossEntropyWithLogits::VT_IS_GRAD, static_cast(is_grad), 0); + } + explicit SparseSoftmaxCrossEntropyWithLogitsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSparseSoftmaxCrossEntropyWithLogits( + flatbuffers::FlatBufferBuilder &_fbb, + bool is_grad = false) { + SparseSoftmaxCrossEntropyWithLogitsBuilder builder_(_fbb); + builder_.add_is_grad(is_grad); + return builder_.Finish(); +} + +struct SparseToDense FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SparseToDenseBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SparseToDenseBuilder { + typedef SparseToDense Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SparseToDenseBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSparseToDense( + flatbuffers::FlatBufferBuilder &_fbb) { + SparseToDenseBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Split FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SplitBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT_NUM = 
4, + VT_SIZE_SPLITS = 6, + VT_AXIS = 8 + }; + int64_t output_num() const { + return GetField(VT_OUTPUT_NUM, 0); + } + const flatbuffers::Vector *size_splits() const { + return GetPointer *>(VT_SIZE_SPLITS); + } + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUT_NUM) && + VerifyOffset(verifier, VT_SIZE_SPLITS) && + verifier.VerifyVector(size_splits()) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct SplitBuilder { + typedef Split Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output_num(int64_t output_num) { + fbb_.AddElement(Split::VT_OUTPUT_NUM, output_num, 0); + } + void add_size_splits(flatbuffers::Offset> size_splits) { + fbb_.AddOffset(Split::VT_SIZE_SPLITS, size_splits); + } + void add_axis(int64_t axis) { + fbb_.AddElement(Split::VT_AXIS, axis, 0); + } + explicit SplitBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSplit( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t output_num = 0, + flatbuffers::Offset> size_splits = 0, + int64_t axis = 0) { + SplitBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_output_num(output_num); + builder_.add_size_splits(size_splits); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSplitDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t output_num = 0, + const std::vector *size_splits = nullptr, + int64_t axis = 0) { + auto size_splits__ = size_splits ? 
_fbb.CreateVector(*size_splits) : 0; + return mindspore::schema::CreateSplit( + _fbb, + output_num, + size_splits__, + axis); +} + +struct Sqrt FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SqrtBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SqrtBuilder { + typedef Sqrt Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SqrtBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSqrt( + flatbuffers::FlatBufferBuilder &_fbb) { + SqrtBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Squeeze FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SqueezeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct SqueezeBuilder { + typedef Squeeze Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(Squeeze::VT_AXIS, axis); + } + explicit SqueezeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSqueeze( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + SqueezeBuilder builder_(_fbb); + builder_.add_axis(axis); + return 
builder_.Finish(); +} + +inline flatbuffers::Offset CreateSqueezeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? _fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateSqueeze( + _fbb, + axis__); +} + +struct Square FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SquareBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SquareBuilder { + typedef Square Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquareBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSquare( + flatbuffers::FlatBufferBuilder &_fbb) { + SquareBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SquaredDifference FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SquaredDifferenceBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SquaredDifferenceBuilder { + typedef SquaredDifference Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquaredDifferenceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSquaredDifference( + flatbuffers::FlatBufferBuilder &_fbb) { + SquaredDifferenceBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Stack FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef StackBuilder Builder; + enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct StackBuilder { + typedef Stack Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(Stack::VT_AXIS, axis, 0); + } + explicit StackBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateStack( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0) { + StackBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct StridedSlice FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef StridedSliceBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BEGIN_MASK = 4, + VT_END_MASK = 6, + VT_ELLIPSIS_MASK = 8, + VT_NEW_AXIS_MASK = 10, + VT_SHRINK_AXIS_MASK = 12 + }; + int64_t begin_mask() const { + return GetField(VT_BEGIN_MASK, 0); + } + int64_t end_mask() const { + return GetField(VT_END_MASK, 0); + } + int64_t ellipsis_mask() const { + return GetField(VT_ELLIPSIS_MASK, 0); + } + int64_t new_axis_mask() const { + return GetField(VT_NEW_AXIS_MASK, 0); + } + int64_t shrink_axis_mask() const { + return GetField(VT_SHRINK_AXIS_MASK, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BEGIN_MASK) && + VerifyField(verifier, VT_END_MASK) && + VerifyField(verifier, VT_ELLIPSIS_MASK) && + VerifyField(verifier, VT_NEW_AXIS_MASK) && + VerifyField(verifier, VT_SHRINK_AXIS_MASK) && + verifier.EndTable(); + } +}; + +struct StridedSliceBuilder { + typedef 
StridedSlice Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_begin_mask(int64_t begin_mask) { + fbb_.AddElement(StridedSlice::VT_BEGIN_MASK, begin_mask, 0); + } + void add_end_mask(int64_t end_mask) { + fbb_.AddElement(StridedSlice::VT_END_MASK, end_mask, 0); + } + void add_ellipsis_mask(int64_t ellipsis_mask) { + fbb_.AddElement(StridedSlice::VT_ELLIPSIS_MASK, ellipsis_mask, 0); + } + void add_new_axis_mask(int64_t new_axis_mask) { + fbb_.AddElement(StridedSlice::VT_NEW_AXIS_MASK, new_axis_mask, 0); + } + void add_shrink_axis_mask(int64_t shrink_axis_mask) { + fbb_.AddElement(StridedSlice::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); + } + explicit StridedSliceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateStridedSlice( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t begin_mask = 0, + int64_t end_mask = 0, + int64_t ellipsis_mask = 0, + int64_t new_axis_mask = 0, + int64_t shrink_axis_mask = 0) { + StridedSliceBuilder builder_(_fbb); + builder_.add_shrink_axis_mask(shrink_axis_mask); + builder_.add_new_axis_mask(new_axis_mask); + builder_.add_ellipsis_mask(ellipsis_mask); + builder_.add_end_mask(end_mask); + builder_.add_begin_mask(begin_mask); + return builder_.Finish(); +} + +struct SubFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SubFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACTIVATION_TYPE = 4 + }; + mindspore::schema::ActivationType activation_type() const { + return static_cast(GetField(VT_ACTIVATION_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACTIVATION_TYPE) && + verifier.EndTable(); + } +}; + +struct SubFusionBuilder { + typedef 
SubFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_activation_type(mindspore::schema::ActivationType activation_type) { + fbb_.AddElement(SubFusion::VT_ACTIVATION_TYPE, static_cast(activation_type), 0); + } + explicit SubFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSubFusion( + flatbuffers::FlatBufferBuilder &_fbb, + mindspore::schema::ActivationType activation_type = mindspore::schema::ActivationType_NO_ACTIVATION) { + SubFusionBuilder builder_(_fbb); + builder_.add_activation_type(activation_type); + return builder_.Finish(); +} + +struct SubGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SubGradBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SubGradBuilder { + typedef SubGrad Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SubGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSubGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + SubGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Switch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SwitchBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SwitchBuilder { + typedef Switch Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SwitchBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) 
{ + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSwitch( + flatbuffers::FlatBufferBuilder &_fbb) { + SwitchBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct TensorListFromTensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TensorListFromTensorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ELEMENT_DTYPE = 4, + VT_SHAPE_TYPE = 6 + }; + int64_t element_dtype() const { + return GetField(VT_ELEMENT_DTYPE, 0); + } + int64_t shape_type() const { + return GetField(VT_SHAPE_TYPE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ELEMENT_DTYPE) && + VerifyField(verifier, VT_SHAPE_TYPE) && + verifier.EndTable(); + } +}; + +struct TensorListFromTensorBuilder { + typedef TensorListFromTensor Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_element_dtype(int64_t element_dtype) { + fbb_.AddElement(TensorListFromTensor::VT_ELEMENT_DTYPE, element_dtype, 0); + } + void add_shape_type(int64_t shape_type) { + fbb_.AddElement(TensorListFromTensor::VT_SHAPE_TYPE, shape_type, 0); + } + explicit TensorListFromTensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensorListFromTensor( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t element_dtype = 0, + int64_t shape_type = 0) { + TensorListFromTensorBuilder builder_(_fbb); + builder_.add_shape_type(shape_type); + builder_.add_element_dtype(element_dtype); + return builder_.Finish(); +} + +struct TensorListGetItem FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef 
TensorListGetItemBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ELEMENT_DTYPE = 4 + }; + int64_t element_dtype() const { + return GetField(VT_ELEMENT_DTYPE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ELEMENT_DTYPE) && + verifier.EndTable(); + } +}; + +struct TensorListGetItemBuilder { + typedef TensorListGetItem Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_element_dtype(int64_t element_dtype) { + fbb_.AddElement(TensorListGetItem::VT_ELEMENT_DTYPE, element_dtype, 0); + } + explicit TensorListGetItemBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensorListGetItem( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t element_dtype = 0) { + TensorListGetItemBuilder builder_(_fbb); + builder_.add_element_dtype(element_dtype); + return builder_.Finish(); +} + +struct TensorListReserve FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TensorListReserveBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ELEMENT_DTYPE = 4, + VT_SHAPE_TYPE = 6 + }; + int64_t element_dtype() const { + return GetField(VT_ELEMENT_DTYPE, 0); + } + int64_t shape_type() const { + return GetField(VT_SHAPE_TYPE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ELEMENT_DTYPE) && + VerifyField(verifier, VT_SHAPE_TYPE) && + verifier.EndTable(); + } +}; + +struct TensorListReserveBuilder { + typedef TensorListReserve Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_element_dtype(int64_t element_dtype) { + 
fbb_.AddElement(TensorListReserve::VT_ELEMENT_DTYPE, element_dtype, 0); + } + void add_shape_type(int64_t shape_type) { + fbb_.AddElement(TensorListReserve::VT_SHAPE_TYPE, shape_type, 0); + } + explicit TensorListReserveBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensorListReserve( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t element_dtype = 0, + int64_t shape_type = 0) { + TensorListReserveBuilder builder_(_fbb); + builder_.add_shape_type(shape_type); + builder_.add_element_dtype(element_dtype); + return builder_.Finish(); +} + +struct TensorListSetItem FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TensorListSetItemBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ELEMENT_DTYPE = 4 + }; + int64_t element_dtype() const { + return GetField(VT_ELEMENT_DTYPE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ELEMENT_DTYPE) && + verifier.EndTable(); + } +}; + +struct TensorListSetItemBuilder { + typedef TensorListSetItem Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_element_dtype(int64_t element_dtype) { + fbb_.AddElement(TensorListSetItem::VT_ELEMENT_DTYPE, element_dtype, 0); + } + explicit TensorListSetItemBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensorListSetItem( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t element_dtype = 0) { + TensorListSetItemBuilder builder_(_fbb); + builder_.add_element_dtype(element_dtype); + return 
builder_.Finish(); +} + +struct TensorListStack FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TensorListStackBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM_ELEMENTS = 4, + VT_ELEMENT_DTYPE = 6 + }; + int64_t num_elements() const { + return GetField(VT_NUM_ELEMENTS, 0); + } + int64_t element_dtype() const { + return GetField(VT_ELEMENT_DTYPE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM_ELEMENTS) && + VerifyField(verifier, VT_ELEMENT_DTYPE) && + verifier.EndTable(); + } +}; + +struct TensorListStackBuilder { + typedef TensorListStack Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_elements(int64_t num_elements) { + fbb_.AddElement(TensorListStack::VT_NUM_ELEMENTS, num_elements, 0); + } + void add_element_dtype(int64_t element_dtype) { + fbb_.AddElement(TensorListStack::VT_ELEMENT_DTYPE, element_dtype, 0); + } + explicit TensorListStackBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensorListStack( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t num_elements = 0, + int64_t element_dtype = 0) { + TensorListStackBuilder builder_(_fbb); + builder_.add_element_dtype(element_dtype); + builder_.add_num_elements(num_elements); + return builder_.Finish(); +} + +struct TileFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TileFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMS = 4 + }; + const flatbuffers::Vector *dims() const { + return GetPointer *>(VT_DIMS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIMS) && 
+ verifier.VerifyVector(dims()) && + verifier.EndTable(); + } +}; + +struct TileFusionBuilder { + typedef TileFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dims(flatbuffers::Offset> dims) { + fbb_.AddOffset(TileFusion::VT_DIMS, dims); + } + explicit TileFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTileFusion( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dims = 0) { + TileFusionBuilder builder_(_fbb); + builder_.add_dims(dims); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateTileFusionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dims = nullptr) { + auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; + return mindspore::schema::CreateTileFusion( + _fbb, + dims__); +} + +struct TopKFusion FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TopKFusionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SORTED = 4, + VT_AXIS = 6, + VT_LARGEST = 8 + }; + bool sorted() const { + return GetField(VT_SORTED, 1) != 0; + } + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + int64_t largest() const { + return GetField(VT_LARGEST, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SORTED) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_LARGEST) && + verifier.EndTable(); + } +}; + +struct TopKFusionBuilder { + typedef TopKFusion Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_sorted(bool sorted) { + fbb_.AddElement(TopKFusion::VT_SORTED, static_cast(sorted), 1); + } + void add_axis(int64_t axis) { + fbb_.AddElement(TopKFusion::VT_AXIS, axis, 0); + } + 
void add_largest(int64_t largest) { + fbb_.AddElement(TopKFusion::VT_LARGEST, largest, 0); + } + explicit TopKFusionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTopKFusion( + flatbuffers::FlatBufferBuilder &_fbb, + bool sorted = true, + int64_t axis = 0, + int64_t largest = 0) { + TopKFusionBuilder builder_(_fbb); + builder_.add_largest(largest); + builder_.add_axis(axis); + builder_.add_sorted(sorted); + return builder_.Finish(); +} + +struct Transpose FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TransposeBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct TransposeBuilder { + typedef Transpose Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TransposeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTranspose( + flatbuffers::FlatBufferBuilder &_fbb) { + TransposeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Unique FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UniqueBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct UniqueBuilder { + typedef Unique Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit UniqueBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnique( + flatbuffers::FlatBufferBuilder &_fbb) { + UniqueBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct UnsortedSegmentSum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnsortedSegmentSumBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct UnsortedSegmentSumBuilder { + typedef UnsortedSegmentSum Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit UnsortedSegmentSumBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnsortedSegmentSum( + flatbuffers::FlatBufferBuilder &_fbb) { + UnsortedSegmentSumBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Unsqueeze FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnsqueezeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct UnsqueezeBuilder { + typedef Unsqueeze Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(Unsqueeze::VT_AXIS, axis); + } + explicit UnsqueezeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline 
flatbuffers::Offset CreateUnsqueeze( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + UnsqueezeBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateUnsqueezeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? _fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateUnsqueeze( + _fbb, + axis__); +} + +struct Unstack FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnstackBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct UnstackBuilder { + typedef Unstack Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(Unstack::VT_AXIS, axis, 0); + } + explicit UnstackBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnstack( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0) { + UnstackBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct Where FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef WhereBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct WhereBuilder { + typedef Where Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit WhereBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + 
flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateWhere( + flatbuffers::FlatBufferBuilder &_fbb) { + WhereBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ZerosLike FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ZerosLikeBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ZerosLikeBuilder { + typedef ZerosLike Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ZerosLikeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateZerosLike( + flatbuffers::FlatBufferBuilder &_fbb) { + ZerosLikeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Select FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SelectBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SelectBuilder { + typedef Select Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SelectBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset