From 132b40c8b834602df8503055b8bc63705749565c Mon Sep 17 00:00:00 2001 From: chengfeng27 Date: Sat, 11 Nov 2023 17:05:15 +0800 Subject: [PATCH] add train c api Signed-off-by: chengfeng27 --- .../mindspore/kits/mindspore_lib.ndk.json | 76 ++++++ third_party/mindspore/kits/model.h | 239 +++++++++++++++- third_party/mindspore/kits/types.h | 139 +++++++--- third_party/mindspore/mindspore_lib.ndk.json | 254 ------------------ 4 files changed, 413 insertions(+), 295 deletions(-) delete mode 100644 third_party/mindspore/mindspore_lib.ndk.json diff --git a/third_party/mindspore/kits/mindspore_lib.ndk.json b/third_party/mindspore/kits/mindspore_lib.ndk.json index b46cc4d4b..a2fe3fca8 100644 --- a/third_party/mindspore/kits/mindspore_lib.ndk.json +++ b/third_party/mindspore/kits/mindspore_lib.ndk.json @@ -250,5 +250,81 @@ { "first_introduced": "10", "name": "OH_AI_TensorSetUserData" + }, + { + "first_introduced": "11", + "name": "OH_AI_TrainCfgCreate" + }, + { + "first_introduced": "11", + "name": "OH_AI_TrainCfgDestroy" + }, + { + "first_introduced": "11", + "name": "OH_AI_TrainCfgGetLossName" + }, + { + "first_introduced": "11", + "name": "OH_AI_TrainCfgSetLossName" + }, + { + "first_introduced": "11", + "name": "OH_AI_TrainCfgGetOptimizationLevel" + }, + { + "first_introduced": "11", + "name": "OH_AI_TrainCfgSetOptimizationLevel" + }, + { + "first_introduced": "11", + "name": "OH_AI_TrainModelBuild" + }, + { + "first_introduced": "11", + "name": "OH_AI_TrainModelBuildFromFile" + }, + { + "first_introduced": "11", + "name": "OH_AI_RunStep" + }, + { + "first_introduced": "11", + "name": "OH_AI_ModelSetLearningRate" + }, + { + "first_introduced": "11", + "name": "OH_AI_ModelGetLearningRate" + }, + { + "first_introduced": "11", + "name": "OH_AI_ModelGetWeights" + }, + { + "first_introduced": "11", + "name": "OH_AI_ModelUpdateWeights" + }, + { + "first_introduced": "11", + "name": "OH_AI_ModelGetTrainMode" + }, + { + "first_introduced": "11", + "name": 
"OH_AI_ModelSetTrainMode" + }, + { + "first_introduced": "11", + "name": "OH_AI_ModelSetupVirtualBatch" + }, + { + "first_introduced": "11", + "name": "OH_AI_ExportModel" + }, + { + "first_introduced": "11", + "name": "OH_AI_ExportModelBuffer" + }, + { + "first_introduced": "11", + "name": "OH_AI_ExportWeightsCollaborateWithMicro" } ] diff --git a/third_party/mindspore/kits/model.h b/third_party/mindspore/kits/model.h index 8ae46faf4..5281a8608 100644 --- a/third_party/mindspore/kits/model.h +++ b/third_party/mindspore/kits/model.h @@ -18,7 +18,7 @@ * @addtogroup MindSpore * @{ * - * @brief 提供MindSpore Lite的模型推理相关接口。 + * @brief provide the model reasoning related interfaces of MindSpore Lite. * * @Syscap SystemCapability.Ai.MindSpore * @since 9 @@ -27,7 +27,7 @@ /** * @file model.h * - * @brief 提供了模型相关接口,可以用于模型创建、模型推理等。 + * @brief provide model-related interfaces that can be used for model creation, model reasoning, and more. * * @library libmindspore_lite_ndk.so * @since 9 @@ -45,6 +45,8 @@ extern "C" { typedef void *OH_AI_ModelHandle; +typedef void *OH_AI_TrainCfgHandle; + typedef struct OH_AI_TensorHandleArray { size_t handle_num; OH_AI_TensorHandle *handle_list; @@ -66,13 +68,15 @@ typedef bool (*OH_AI_KernelCallBack)(const OH_AI_TensorHandleArray inputs, const /** * @brief Create a model object. + * * @return Model object handle. * @since 9 */ -OH_AI_API OH_AI_ModelHandle OH_AI_ModelCreate(); +OH_AI_API OH_AI_ModelHandle OH_AI_ModelCreate(void); /** * @brief Destroy the model object. + * * @param model Model object handle address. * @since 9 */ @@ -80,6 +84,7 @@ OH_AI_API void OH_AI_ModelDestroy(OH_AI_ModelHandle *model); /** * @brief Build the model from model file buffer so that it can run on a device. + * * @param model Model object handle. * @param model_data Define the buffer read from a model file. * @param data_size Define bytes number of model file buffer. 
@@ -93,6 +98,7 @@ OH_AI_API OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *mod /** * @brief Load and build the model from model path so that it can run on a device. + * * @param model Model object handle. * @param model_path Define the model file path. * @param model_type Define The type of model file. @@ -105,6 +111,7 @@ OH_AI_API OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const c /** * @brief Resizes the shapes of inputs. + * * @param model Model object handle. * @param inputs The array that includes all input tensor handles. * @param shape_infos Defines the new shapes of inputs, should be consistent with inputs. @@ -117,6 +124,7 @@ OH_AI_API OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_Te /** * @brief Inference model. + * * @param model Model object handle. * @param inputs The array that includes all input tensor handles. * @param outputs The array that includes all output tensor handles. @@ -131,6 +139,7 @@ OH_AI_API OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_T /** * @brief Obtains all input tensor handles of the model. + * * @param model Model object handle. * @return The array that includes all input tensor handles. * @since 9 @@ -139,6 +148,7 @@ OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle m /** * @brief Obtains all output tensor handles of the model. + * * @param model Model object handle. * @return The array that includes all output tensor handles. * @since 9 @@ -147,6 +157,7 @@ OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle /** * @brief Obtains the input tensor handle of the model by name. + * * @param model Model object handle. * @param tensor_name The name of tensor. * @return The input tensor handle with the given name, if the name is not found, an NULL is returned. 
@@ -156,6 +167,7 @@ OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHa /** * @brief Obtains the output tensor handle of the model by name. + * * @param model Model object handle. * @param tensor_name The name of tensor. * @return The output tensor handle with the given name, if the name is not found, an NULL is returned. @@ -163,6 +175,227 @@ OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHa */ OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name); +/** + * @brief Create a TrainCfg object. Only valid for Lite Train. + * + * @return TrainCfg object handle. + * @since 11 + */ +OH_AI_API OH_AI_TrainCfgHandle OH_AI_TrainCfgCreate(void); + +/** + * @brief Destroy the train_cfg object. Only valid for Lite Train. + * + * @param train_cfg TrainCfg object handle. + * @since 11 + */ +OH_AI_API void OH_AI_TrainCfgDestroy(OH_AI_TrainCfgHandle *train_cfg); + +/** + * @brief Obtains part of the name that identify a loss kernel. Only valid for Lite Train. + * + * @param train_cfg TrainCfg object handle. + * @param num The num of loss_name. + * @return loss_name. + * @since 11 + */ +OH_AI_API char **OH_AI_TrainCfgGetLossName(OH_AI_TrainCfgHandle train_cfg, size_t *num); + +/** + * @brief Set part of the name that identify a loss kernel. Only valid for Lite Train. + * + * @param train_cfg TrainCfg object handle. + * @param loss_name Define part of the name that identify a loss kernel. + * @param num The num of loss_name. + * @since 11 + */ +OH_AI_API void OH_AI_TrainCfgSetLossName(OH_AI_TrainCfgHandle train_cfg, const char **loss_name, size_t num); + +/** + * @brief Obtains optimization level of the train_cfg. Only valid for Lite Train. + * + * @param train_cfg TrainCfg object handle. + * @return OH_AI_OptimizationLevel. 
+ * @since 11 + */ +OH_AI_API OH_AI_OptimizationLevel OH_AI_TrainCfgGetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg); + +/** + * @brief Set optimization level of the train_cfg. Only valid for Lite Train. + * + * @param train_cfg TrainCfg object handle. + * @param level The optimization level of train_cfg. + * @since 11 + */ +OH_AI_API void OH_AI_TrainCfgSetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg, OH_AI_OptimizationLevel level); + +/** + * @brief Build the train model from model buffer so that it can run on a device. Only valid for Lite Train. + * + * @param model Model object handle. + * @param model_data Define the buffer read from a model file. + * @param data_size Define bytes number of model file buffer. + * @param model_type Define The type of model file. + * @param model_context Define the context used to store options during execution. + * @param train_cfg Define the config used by training. + * @return OH_AI_Status. + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_TrainModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size, + OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context, + const OH_AI_TrainCfgHandle train_cfg); + +/** + * @brief Build the train model from model file buffer so that it can run on a device. Only valid for Lite Train. + * + * @param model Model object handle. + * @param model_path Define the model path. + * @param model_type Define The type of model file. + * @param model_context Define the context used to store options during execution. + * @param train_cfg Define the config used by training. + * @return OH_AI_Status. + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_TrainModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path, + OH_AI_ModelType model_type, + const OH_AI_ContextHandle model_context, + const OH_AI_TrainCfgHandle train_cfg); + +/** + * @brief Train model by step. Only valid for Lite Train. + * + * @param model Model object handle. 
+ * @param before CallBack before predict. + * @param after CallBack after predict. + * @return OH_AI_Status. + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_RunStep(OH_AI_ModelHandle model, const OH_AI_KernelCallBack before, + const OH_AI_KernelCallBack after); + +/** + * @brief Sets the Learning Rate of the training. Only valid for Lite Train. + * + * @param learning_rate to set. + * @return OH_AI_Status of operation. + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_ModelSetLearningRate(OH_AI_ModelHandle model, float learning_rate); + +/** + * @brief Obtains the Learning Rate of the optimizer. Only valid for Lite Train. + * + * @param model Model object handle. + * @return Learning rate. 0.0 if no optimizer was found. + * @since 11 + */ +OH_AI_API float OH_AI_ModelGetLearningRate(OH_AI_ModelHandle model); + +/** + * @brief Obtains all weights tensors of the model. Only valid for Lite Train. + * + * @param model Model object handle. + * @return The vector that includes all gradient tensors. + * @since 11 + */ +OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetWeights(OH_AI_ModelHandle model); + +/** + * @brief update weights tensors of the model. Only valid for Lite Train. + * + * @param new_weights A vector new weights. + * @return OH_AI_Status + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_ModelUpdateWeights(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray new_weights); + +/** + * @brief Get the model running mode. + * + * @param model Model object handle. + * @return Is Train Mode or not. + * @since 11 + */ +OH_AI_API bool OH_AI_ModelGetTrainMode(OH_AI_ModelHandle model); + +/** + * @brief Set the model running mode. Only valid for Lite Train. + * + * @param model Model object handle. + * @param train True means model runs in Train Mode, otherwise Eval Mode. + * @return OH_AI_Status. + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_ModelSetTrainMode(OH_AI_ModelHandle model, bool train); + +/** + * @brief Setup training with virtual batches. 
Only valid for Lite Train. + * + * @param model Model object handle. + * @param virtual_batch_multiplier Virtual batch multiplier, use any number < 1 to disable. + * @param lr Learning rate to use for virtual batch, -1 for internal configuration. + * @param momentum Batch norm momentum to use for virtual batch, -1 for internal configuration. + * @return OH_AI_Status. + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_ModelSetupVirtualBatch(OH_AI_ModelHandle model, int virtual_batch_multiplier, float lr, + float momentum); + +/** + * @brief Export training model from file. Only valid for Lite Train. + * + * @param model The model data. + * @param model_type The model file type. + * @param model_file The exported model file. + * @param quantization_type The quantization type. + * @param export_inference_only Whether to export a reasoning only model. + * @param output_tensor_name Set the name of the output tensor of the exported reasoning model; defaults to + * empty, which exports the complete reasoning model. + * @param num The number of output_tensor_name. + * @return OH_AI_Status. + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_ExportModel(OH_AI_ModelHandle model, OH_AI_ModelType model_type, const char *model_file, + OH_AI_QuantizationType quantization_type, bool export_inference_only, + char **output_tensor_name, size_t num); + +/** + * @brief Export training model from buffer. Only valid for Lite Train. + * + * @param model The model data. + * @param model_type The model file type. + * @param model_data The exported model buffer. + * @param data_size The exported model buffer size. + * @param quantization_type The quantization type. + * @param export_inference_only Whether to export a reasoning only model. + * @param output_tensor_name Set the name of the output tensor of the exported reasoning model; defaults to + * empty, which exports the complete reasoning model. + * @param num The number of output_tensor_name. + * @return OH_AI_Status. 
+ * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_ExportModelBuffer(OH_AI_ModelHandle model, OH_AI_ModelType model_type, void *model_data, + size_t *data_size, OH_AI_QuantizationType quantization_type, + bool export_inference_only, char **output_tensor_name, size_t num); + +/** + * @brief Export model's weights, which can be used in micro only. Only valid for Lite Train. + * + * @param model The model data. + * @param model_type The model file type. + * @param weight_file The path of exported weight file. + * @param is_inference Whether to export weights from a reasoning model. Currently, only `true` is supported. + * @param enable_fp16 Whether the float weights are to be saved in float16 format. + * @param changeable_weights_name Set the name of these weight tensors, whose shape is changeable. + * @param num The number of changeable_weights_name. + * @return OH_AI_Status. + * @since 11 + */ +OH_AI_API OH_AI_Status OH_AI_ExportWeightsCollaborateWithMicro(OH_AI_ModelHandle model, OH_AI_ModelType model_type, + const char *weight_file, bool is_inference, + bool enable_fp16, char **changeable_weights_name, + size_t num); + #ifdef __cplusplus } #endif diff --git a/third_party/mindspore/kits/types.h b/third_party/mindspore/kits/types.h index 715ae02ce..92ff8d150 100644 --- a/third_party/mindspore/kits/types.h +++ b/third_party/mindspore/kits/types.h @@ -18,7 +18,7 @@ * @addtogroup MindSpore * @{ * - * @brief 提供MindSpore Lite的模型推理相关接口。 + * @brief provide the model reasoning related interfaces of MindSpore Lite. * * @Syscap SystemCapability.Ai.MindSpore * @since 9 @@ -27,7 +27,7 @@ /** * @file types.h * - * @brief 提供了MindSpore Lite支持的模型文件类型和设备类型。 + * @brief provides the model file types and device types supported by MindSpore Lite. * * @library libmindspore_lite_ndk.so * @since 9 @@ -47,57 +47,120 @@ extern "C" { #endif #endif +/** + * @brief model file type. 
+ * + * @since 9 + */ typedef enum OH_AI_ModelType { - OH_AI_MODELTYPE_MINDIR = 0, - // insert new data type here - OH_AI_MODELTYPE_INVALID = 0xFFFFFFFF + /** the model type is MindIR, and the corresponding model file extension is .ms. */ + OH_AI_MODELTYPE_MINDIR = 0, + /** invalid model type */ + OH_AI_MODELTYPE_INVALID = 0xFFFFFFFF } OH_AI_ModelType; +/** + * @brief device type information. + * + * @since 9 + */ typedef enum OH_AI_DeviceType { - OH_AI_DEVICETYPE_CPU = 0, - OH_AI_DEVICETYPE_GPU, - OH_AI_DEVICETYPE_KIRIN_NPU, - // add new type here - // ohos-only device range: [60, 80) - OH_AI_DEVICETYPE_NNRT = 60, - OH_AI_DEVICETYPE_INVALID = 100, + /** cpu */ + OH_AI_DEVICETYPE_CPU = 0, + /** gpu */ + OH_AI_DEVICETYPE_GPU, + /** kirin npu */ + OH_AI_DEVICETYPE_KIRIN_NPU, + /** nnrt device, ohos-only device range: [60, 80) */ + OH_AI_DEVICETYPE_NNRT = 60, + /** invalid device type */ + OH_AI_DEVICETYPE_INVALID = 100, } OH_AI_DeviceType; +/** + * @brief the hard device type managed by NNRT. + * + * @since 10 + */ typedef enum OH_AI_NNRTDeviceType { - /** Devices that are not CPU, GPU, or dedicated accelerator */ - OH_AI_NNRTDEVICE_OTHERS = 0, - /** CPU device */ - OH_AI_NNRTDEVICE_CPU = 1, - /** GPU device */ - OH_AI_NNRTDEVICE_GPU = 2, - /** Dedicated hardware accelerator */ - OH_AI_NNRTDEVICE_ACCELERATOR = 3, + /** Devices that are not CPU, GPU, or dedicated accelerator */ + OH_AI_NNRTDEVICE_OTHERS = 0, + /** CPU device */ + OH_AI_NNRTDEVICE_CPU = 1, + /** GPU device */ + OH_AI_NNRTDEVICE_GPU = 2, + /** Dedicated hardware accelerator */ + OH_AI_NNRTDEVICE_ACCELERATOR = 3, } OH_AI_NNRTDeviceType; +/** + * @brief performance mode of the NNRT hard device. 
+ * + * @since 10 + */ typedef enum OH_AI_PerformanceMode { - /** No performance mode preference */ - OH_AI_PERFORMANCE_NONE = 0, - /** Low power consumption mode*/ - OH_AI_PERFORMANCE_LOW = 1, - /** Medium performance mode */ - OH_AI_PERFORMANCE_MEDIUM = 2, - /** High performance mode */ - OH_AI_PERFORMANCE_HIGH = 3, - /** Ultimate performance mode */ - OH_AI_PERFORMANCE_EXTREME = 4 + /** No performance mode preference */ + OH_AI_PERFORMANCE_NONE = 0, + /** Low power consumption mode*/ + OH_AI_PERFORMANCE_LOW = 1, + /** Medium performance mode */ + OH_AI_PERFORMANCE_MEDIUM = 2, + /** High performance mode */ + OH_AI_PERFORMANCE_HIGH = 3, + /** Ultimate performance mode */ + OH_AI_PERFORMANCE_EXTREME = 4 } OH_AI_PerformanceMode; +/** + * @brief NNRT reasoning task priority. + * + * @since 10 + */ typedef enum OH_AI_Priority { - /** No priority preference */ - OH_AI_PRIORITY_NONE = 0, - /** Low priority */ - OH_AI_PRIORITY_LOW = 1, - /** Medium priority */ - OH_AI_PRIORITY_MEDIUM = 2, - /** High priority */ - OH_AI_PRIORITY_HIGH = 3 + /** No priority preference */ + OH_AI_PRIORITY_NONE = 0, + /** Low priority */ + OH_AI_PRIORITY_LOW = 1, + /** Medium priority */ + OH_AI_PRIORITY_MEDIUM = 2, + /** High priority */ + OH_AI_PRIORITY_HIGH = 3 } OH_AI_Priority; +/** + * @brief optimization level for train model. 
+ * + * @since 11 + */ +typedef enum OH_AI_OptimizationLevel { + /** Do not change */ + OH_AI_KO0 = 0, + /** Cast network to float16, keep batchnorm and loss in float32 */ + OH_AI_KO2 = 2, + /** Cast network to float16, including batchnorm */ + OH_AI_KO3 = 3, + /** Choose optimization based on device */ + OH_AI_KAUTO = 4, + /** Invalid optimization level */ + OH_AI_KOPTIMIZATIONTYPE = 0xFFFFFFFF +} OH_AI_OptimizationLevel; + +/** + * @brief quantization type + * + * @since 11 + */ +typedef enum OH_AI_QuantizationType { + /** Do not change */ + OH_AI_NO_QUANT = 0, + /** weight quantization */ + OH_AI_WEIGHT_QUANT = 1, + /** full quantization */ + OH_AI_FULL_QUANT = 2, + /** invalid quantization type */ + OH_AI_UNKNOWN_QUANT_TYPE = 0xFFFFFFFF +} OH_AI_QuantizationType; + typedef struct NNRTDeviceDesc NNRTDeviceDesc; #ifdef __cplusplus } #endif diff --git a/third_party/mindspore/mindspore_lib.ndk.json b/third_party/mindspore/mindspore_lib.ndk.json deleted file mode 100644 index c4c8193e9..000000000 --- a/third_party/mindspore/mindspore_lib.ndk.json +++ /dev/null @@ -1,254 +0,0 @@ -[ - { - "first_introduced": "9", - "name": "OH_AI_ContextCreate" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextDestroy" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextSetThreadNum" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextGetThreadNum" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextSetThreadAffinityMode" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextGetThreadAffinityMode" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextSetThreadAffinityCoreList" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextGetThreadAffinityCoreList" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextSetEnableParallel" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextGetEnableParallel" - }, - { - "first_introduced": "9", - "name": "OH_AI_ContextAddDeviceInfo" - }, - { - "first_introduced": "9", - "name": 
"OH_AI_DeviceInfoCreate" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoDestroy" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoSetProvider" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoGetProvider" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoSetProviderDevice" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoGetProviderDevice" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoGetDeviceType" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoSetEnableFP16" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoGetEnableFP16" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoSetFrequency" - }, - { - "first_introduced": "9", - "name": "OH_AI_DeviceInfoGetFrequency" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelCreate" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelDestroy" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelBuild" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelBuildFromFile" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelResize" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelPredict" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelGetInputs" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelGetOutputs" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelGetInputByTensorName" - }, - { - "first_introduced": "9", - "name": "OH_AI_ModelGetOutputByTensorName" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorCreate" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorDestroy" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorClone" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorSetName" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorGetName" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorSetDataType" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorGetDataType" - }, - { - 
"first_introduced": "9", - "name": "OH_AI_TensorSetShape" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorGetShape" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorSetFormat" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorGetFormat" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorSetData" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorGetData" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorGetMutableData" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorGetElementNum" - }, - { - "first_introduced": "9", - "name": "OH_AI_TensorGetDataSize" - }, - { - "first_introduced": "10", - "name": "OH_AI_GetAllNNRTDeviceDescs" - }, - { - "first_introduced": "10", - "name": "OH_AI_DestroyAllNNRTDeviceDescs" - }, - { - "first_introduced": "10", - "name": "OH_AI_GetDeviceIdFromNNRTDeviceDesc" - }, - { - "first_introduced": "10", - "name": "OH_AI_GetNameFromNNRTDeviceDesc" - }, - { - "first_introduced": "10", - "name": "OH_AI_GetTypeFromNNRTDeviceDesc" - }, - { - "first_introduced": "10", - "name": "OH_AI_CreateNNRTDeviceInfoByName" - }, - { - "first_introduced": "10", - "name": "OH_AI_CreateNNRTDeviceInfoByType" - }, - { - "first_introduced": "10", - "name": "OH_AI_DeviceInfoSetDeviceId" - }, - { - "first_introduced": "10", - "name": "OH_AI_DeviceInfoGetDeviceId" - }, - { - "first_introduced": "10", - "name": "OH_AI_DeviceInfoSetPerformanceMode" - }, - { - "first_introduced": "10", - "name": "OH_AI_DeviceInfoGetPerformanceMode" - }, - { - "first_introduced": "10", - "name": "OH_AI_DeviceInfoSetPriority" - }, - { - "first_introduced": "10", - "name": "OH_AI_GetElementOfNNRTDeviceDescs" - }, - { - "first_introduced": "10", - "name": "OH_AI_DeviceInfoAddExtension" - }, - { - "first_introduced": "10", - "name": "OH_AI_TensorSetUserData" - } -] -- Gitee