diff --git a/interfaces/kits/c/neural_network_runtime.h b/interfaces/kits/c/neural_network_runtime.h
index 2ac9a5e14efba43e6a00c232f568ab84c3f12c18..b5cc5e2fbe56aecd45b3f22abec414b80cd5bdd9 100644
--- a/interfaces/kits/c/neural_network_runtime.h
+++ b/interfaces/kits/c/neural_network_runtime.h
@@ -17,7 +17,7 @@
* @addtogroup NeuralNeworkRuntime
* @{
*
- * @brief 提供Neural Network Runtime加速模型推理的相关接口。
+ * @brief Provides APIs of Neural Network Runtime for accelerating the model inference.
*
* @Syscap SystemCapability.Ai.NeuralNetworkRuntime
* @since 9
@@ -27,9 +27,9 @@
/**
* @file neural_network_runtime.h
*
- * @brief Neural Network Runtime部件接口定义,通过调用以下接口,在硬件加速器上执行深度学习模型推理计算。
- *
- * 注意:Neural Network Runtime的接口目前均不支持多线程调用。\n
+ * @brief Defines the Neural Network Runtime APIs. The AI inference framework uses the Native APIs provided by Neural Network Runtime
+ * to construct and compile models and perform inference and computing on acceleration hardware.
+ * Note: Currently, the APIs of Neural Network Runtime do not support multi-thread calling. \n
*
* @since 9
* @version 1.0
@@ -45,78 +45,84 @@ extern "C" {
#endif
/**
- * @brief 创建{@link OH_NNModel}类型的模型实例,搭配OH_NNModel模块提供的其他接口,完成模型实例的构造。
+ * @brief Creates a model instance of the {@link OH_NNModel} type and uses other APIs provided by OH_NNModel to construct the model instance.
*
- * 在开始构图前,先调用{@link OH_NNModel_Construct}创建模型实例,根据模型的拓扑结构,调用
- * {@link OH_NNModel_AddTensor}、{@link OH_NNModel_AddOperation}和
- * {@link OH_NNModel_SetTensorData}方法,填充模型的数据节点和算子节点;然后调用
- * {@link OH_NNModel_SpecifyInputsAndOutputs}指定模型的输入和输出;当构造完模型的拓扑结构,调用
- * {@link OH_NNModel_Finish}完成模型的构建。\n
+ * Before composition, call {@link OH_NNModel_Construct} to create a model instance. Based on the model topology,
+ * call the {@link OH_NNModel_AddTensor}, {@link OH_NNModel_AddOperation}, and {@link OH_NNModel_SetTensorData} methods
+ * to fill in the data and operator nodes of the model, and then call {@link OH_NNModel_SpecifyInputsAndOutputs} to specify the inputs and outputs of the model.
+ * After the model topology is constructed, call {@link OH_NNModel_Finish} to build the model. \n
*
- * 模型实例使用完毕后,需要调用{@link OH_NNModel_Destroy}销毁模型实例,避免内存泄漏。\n
+ * After a model instance is used, you need to destroy it by calling {@link OH_NNModel_Destroy} to avoid memory leak. \n
*
- * @return 返回一个指向{@link OH_NNModel}实例的指针。
+ * @return Returns the pointer to a {@link OH_NNModel} instance.
* @since 9
* @version 1.0
*/
OH_NNModel *OH_NNModel_Construct(void);
/**
- * @brief 向模型实例中添加张量
+ * @brief Adds a tensor to a model instance.
*
- * Neural Network Runtime模型中的数据节点和算子参数均由模型的张量构成。本方法根据tensor,向model实
- * 例中添加张量。张量添加的顺序是模型中记录张量的索引值,{@link OH_NNModel_SetTensorData}、
- * {@link OH_NNModel_AddOperation}和{@link OH_NNModel_SpecifyInputsAndOutputs}
- * 方法根据该索引值,指定不同的张量。\n
+ * The data node and operator parameters in the Neural Network Runtime model are composed of tensors of the model.
+ * This method is used to add tensors to a model instance based on the tensor parameter.
+ * The sequence of adding tensors is specified by the index value recorded in the model. The {@link OH_NNModel_SetTensorData}, {@link OH_NNModel_AddOperation},
+ * and {@link OH_NNModel_SpecifyInputsAndOutputs} methods specify tensors based on the index value. \n
*
- * Neural Network Runtime支持动态形状输入和输出。在添加动态形状的数据节点时,需要将tensor.dimensions中支持动态
- * 变化的维度设置为-1。例如:一个4维tensor,将tensor.dimensions设置为[1, -1, 2, 2],表示其第二个维度支持
- * 动态变化。\n
+ * Neural Network Runtime supports inputs and outputs of the dynamic shape. When adding a data node with a dynamic shape,
+ * you need to set the dimensions that support dynamic changes in tensor.dimensions to -1.
+ * For example, if tensor.dimensions of a four-dimensional tensor is set to [1, -1, 2, 2], the second dimension supports dynamic changes. \n
*
- * @param model 指向{@link OH_NNModel}实例的指针。
- * @param tensor {@link OH_NN_Tensor}张量的指针,tensor指定了添加到模型实例中张量的属性。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param model Pointer to the {@link OH_NNModel} instance.
+ * @param tensor Pointer to the {@link OH_NN_Tensor} tensor. The tensor specifies the attributes of the tensor added to the model instance.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNModel_AddTensor(OH_NNModel *model, const OH_NN_Tensor *tensor);
/**
- * @brief 设置张量的数值
+ * @brief Sets the tensor value.
*
- * 对于具有常量值的张量(如模型的权重),需要在构图阶段使用本方法设置数值。张量的索引值根据张量添加进模型的顺序决定,张量的添加参考
- * {@link OH_NNModel_AddTensor}。\n
+ * For tensors with constant values (such as model weights), you need to use this method to set their values in the composition phase.
+ * The index value of a tensor is determined by the sequence in which the tensor is added to the model.
+ * For details about how to add a tensor, see {@link OH_NNModel_AddTensor}. \n
*
- * @param model 指向{@link OH_NNModel}实例的指针。
- * @param index 张量的索引值。
- * @param dataBuffer 指向真实数据的指针。
- * @param length 数据缓冲区的长度。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param model Pointer to the {@link OH_NNModel} instance.
+ * @param index Index value of a tensor.
+ * @param dataBuffer Pointer to real data.
+ * @param length Length of the data buffer.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNModel_SetTensorData(OH_NNModel *model, uint32_t index, const void *dataBuffer, size_t length);
/**
- * @brief 向模型实例中添加算子
+ * @brief Adds an operator to a model instance.
*
- * 本方法向模型实例中添加算子,算子类型由op指定,算子的参数、输入和输出由paramIndices、inputIndices和
- * outputIndices指定。本方法将对算子参数的属性和输入输出的数量进行校验,这些属性需要在调用
- * {@link OH_NNModel_AddTensor}添加张量的时候正确设置。每个算子期望的参数、输入和输出属性请参考
- * {@link OH_NN_OperationType}。\n
+ * This method is used to add an operator to a model instance. The operator type is specified by op, and
+ * the operator parameters, inputs, and outputs are specified by paramIndices, inputIndices, and outputIndices respectively.
+ * This method verifies the attributes of operator parameters and the number of input and output parameters.
+ * These attributes must be correctly set when {@link OH_NNModel_AddTensor} is called to add tensors.
+ * For details about the expected parameters, input attributes, and output attributes of each operator, see {@link OH_NN_OperationType}. \n
*
- * paramIndices、inputIndices和outputIndices中存储的是张量的索引值,每个索引值根据张量添加进模型的顺序决定,正确
- * 设置并添加算子要求准确设置每个张量的索引值。张量的添加参考{@link OH_NNModel_AddTensor}。\n
+ * paramIndices, inputIndices, and outputIndices store index values of tensors.
+ * Index values are determined by the sequence in which tensors are added to the model.
+ * For details about how to add a tensor, see {@link OH_NNModel_AddTensor}. \n
*
- * 如果添加算子时,添加了额外的参数(非算子需要的参数),本方法返回{@link OH_NN_INVALID_PARAMETER};如果没有设置算子参数,
- * 则算子按默认值设置缺省的参数,默认值请参考{@link OH_NN_OperationType}。\n
+ * If unnecessary parameters are added for adding an operator, this method returns {@link OH_NN_INVALID_PARAMETER}.
+ * If no operator parameter is set, the operator uses the default parameter value.
+ * For details about the default values, see {@link OH_NN_OperationType}. \n
*
- * @param model 指向{@link OH_NNModel}实例的指针。
- * @param op 指定添加的算子类型,取值请参考{@link OH_NN_OperationType}的枚举值。
- * @param paramIndices OH_NN_UInt32Array实例的指针,设置算子的参数。
- * @param inputIndices OH_NN_UInt32Array实例的指针,指定算子的输入。
- * @param outputIndices OH_NN_UInt32Array实例的指针,设置算子的输出。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param model Pointer to the {@link OH_NNModel} instance.
+ * @param op Specifies the type of an operator to be added. For details, see the enumerated values of {@link OH_NN_OperationType}.
+ * @param paramIndices Pointer to the OH_NN_UInt32Array instance, which is used to set operator parameters.
+ * @param inputIndices Pointer to the OH_NN_UInt32Array instance, which is used to set the operator input.
+ * @param outputIndices Pointer to the OH_NN_UInt32Array instance, which is used to set the operator output.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -127,20 +133,21 @@ OH_NN_ReturnCode OH_NNModel_AddOperation(OH_NNModel *model,
const OH_NN_UInt32Array *outputIndices);
/**
- * @brief 指定模型的输入输出
+ * @brief Specifies the inputs and outputs of a model.
*
- * 模型实例需要指定张量作为端到端的输入和输出,设置为输入和输出的张量不能使用{@link OH_NNModel_SetTensorData}设置
- * 数值,需要在执行阶段调用OH_NNExecutor的方法设置输入、输出数据。\n
+ * A tensor must be specified as the end-to-end inputs and outputs of a model instance. This type of tensor cannot be set
+ * using {@link OH_NNModel_SetTensorData}. The OH_NNExecutor method needs to be called in the execution phase to set the input and output data. \n
*
- * 张量的索引值根据张量添加进模型的顺序决定,张量的添加参考
- * {@link OH_NNModel_AddTensor}。\n
+ * The index value of a tensor is determined by the sequence in which the tensor is added to the model.
+ * For details about how to add a tensor, see {@link OH_NNModel_AddTensor}. \n
*
- * 暂时不支持异步设置模型输入输出。\n
+ * Currently, the model inputs and outputs cannot be set asynchronously. \n
*
- * @param model 指向{@link OH_NNModel}实例的指针。
- * @param inputIndices OH_NN_UInt32Array实例的指针,指定算子的输入。
- * @param outputIndices OH_NN_UInt32Array实例的指针,指定算子的输出。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param model Pointer to the {@link OH_NNModel} instance.
+ * @param inputIndices Pointer to the OH_NN_UInt32Array instance, which is used to set the operator input.
+ * @param outputIndices Pointer to the OH_NN_UInt32Array instance, which is used to set the operator output.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -149,52 +156,56 @@ OH_NN_ReturnCode OH_NNModel_SpecifyInputsAndOutputs(OH_NNModel *model,
const OH_NN_UInt32Array *outputIndices);
/**
- * @brief 完成模型构图
+ * @brief Completes model composition.
*
- * 完成模型拓扑结构的搭建后,调用本方法指示构图已完成。在调用本方法后,无法进行额外的构图操作,调用
- * {@link OH_NNModel_AddTensor}、{@link OH_NNModel_AddOperation}、
- * {@link OH_NNModel_SetTensorData}和
- * {@link OH_NNModel_SpecifyInputsAndOutputs}将返回
- * {@link OH_NN_OPERATION_FORBIDDEN}。\n
+ * After the model topology is set up, call this method to indicate that the composition is complete. After this method is called,
+ * additional composition operations cannot be performed. If {@link OH_NNModel_AddTensor}, {@link OH_NNModel_AddOperation},
+ * {@link OH_NNModel_SetTensorData}, and {@link OH_NNModel_SpecifyInputsAndOutputs} are called,
+ * {@link OH_NN_OPERATION_FORBIDDEN} is returned. \n
*
- * 在调用{@link OH_NNModel_GetAvailableOperations}和{@link OH_NNCompilation_Construct}
- * 之前,必须先调用本方法完成构图。\n
+ * Before calling {@link OH_NNModel_GetAvailableOperations} and {@link OH_NNCompilation_Construct},
+ * you must call this method to complete composition. \n
*
- * @param model 指向{@link OH_NNModel}实例的指针。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param model Pointer to the {@link OH_NNModel} instance.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNModel_Finish(OH_NNModel *model);
/**
- * @brief 释放模型实例。
+ * @brief Releases a model instance.
*
- * 调用{@link OH_NNModel_Construct}创建的模型实例需要调用本方法主动释放,否则将造成内存泄漏。\n
+ * This method needs to be called to release the model instance created by calling {@link OH_NNModel_Construct}. Otherwise, memory leak will occur. \n
*
- * 如果model为空指针或者*model为空指针,本方法只打印warning日志,不执行释放逻辑。\n
+ * If model or *model is a null pointer, this method only prints warning logs and does not execute the release logic. \n
*
- * @param model 指向{@link OH_NNModel}实例的二级指针。模型实例销毁后,本方法将*model主动设置为空指针。
+ * @param model Level-2 pointer to the {@link OH_NNModel} instance. After a model instance is destroyed, this method sets *model to a null pointer.
* @since 9
* @version 1.0
*/
void OH_NNModel_Destroy(OH_NNModel **model);
/**
- * @brief 查询硬件对模型内所有算子的支持情况,通过布尔值序列指示支持情况。
+ * @brief Queries whether the device supports operators in the model. The support status is indicated by the Boolean value.
*
- * 查询底层硬件对模型实例内每个算子的支持情况,硬件由deviceID指定,结果将通过isSupported指向的数组表示。如果支持第i个算子,则
- * (*isSupported)[i] == true,否则为 false。\n
+ * Queries whether the underlying device supports operators in a model instance. The device is specified by deviceID,
+ * and the result is represented by the array pointed by isSupported. If the ith operator is supported,
+ * the value of (*isSupported)[i] is true. Otherwise, the value is false. \n
*
- * 本方法成功执行后,(*isSupported)将指向记录算子支持情况的bool数组,数组长度和模型实例的算子数量相等。该数组对应的内存由
- * Neural Network Runtime管理,在模型实例销毁或再次调用本方法后自动销毁。\n
+ * After this method is successfully executed, (*isSupported) points to the bool array that records the operator support status.
+ * The length of the array is equal to the number of operators in the model instance. The memory corresponding to this array is
+ * managed by Neural Network Runtime and is automatically destroyed after the model instance is destroyed or this method is called again. \n
*
- * @param model 指向{@link OH_NNModel}实例的指针。
- * @param deviceID 指定查询的硬件ID,通过{@link OH_NNDevice_GetAllDevicesID}获取。
- * @param isSupported 指向bool数组的指针。调用本方法时,要求(*isSupported)为空指针,否则返回
- * {@link OH_NN_INVALID_PARAMETER}。
- * @param opCount 模型实例中算子的数量,对应(*isSupported)数组的长度。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param model Pointer to the {@link OH_NNModel} instance.
+ * @param deviceID Device ID to be queried, which can be obtained by using {@link OH_NNDevice_GetAllDevicesID}.
+ * @param isSupported Pointer to the bool array. When this method is called, (*isSupported) must be a null pointer.
+ * Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
+ *
+ * @param opCount Number of operators in a model instance, corresponding to the length of the (*isSupported) array.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -205,88 +216,94 @@ OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model,
/**
- * @brief 创建{@link OH_NNCompilation}类型的编译实例
+ * @brief Creates a compilation instance of the {@link OH_NNCompilation} type.
*
- * 使用OH_NNModel模块完成模型的构造后,借助OH_NNCompilation模块提供的接口,将模型传递到底层硬件完成编译。本方法接受一个
- * {@link OH_NNModel}实例,创建出{@link OH_NNCompilation}实例;通过
- * {@link OH_NNCompilation_SetDevice}方法,设置编译的设备,最后调用
- * {@link OH_NNCompilation_Build}完成编译。\n
+ * After the OH_NNModel module completes model construction, APIs provided by the OH_NNCompilation module pass the model
+ * to underlying device for compilation. This method creates a {@link OH_NNCompilation} instance
+ * based on the passed {@link OH_NNModel} instance. The {@link OH_NNCompilation_SetDevice} method is called
+ * to set the device to compile on, and {@link OH_NNCompilation_Build} is then called to complete compilation.\n
*
- * 除了计算硬件的选择,OH_NNCompilation模块支持模型缓存、性能偏好、优先级设置、float16计算等特性,参考以下方法:
+ * In addition to computing device selection, the OH_NNCompilation module supports features such as model caching, performance preference,
+ * priority setting, and float16 computing, which can be implemented by the following methods:
* - {@link OH_NNCompilation_SetCache}
* - {@link OH_NNCompilation_SetPerformanceMode}
* - {@link OH_NNCompilation_SetPriority}
- * - {@link OH_NNCompilation_EnableFloat16}\n
+ * - {@link OH_NNCompilation_EnableFloat16} \n
*
- * 调用本方法创建{@link OH_NNCompilation}后,{@link OH_NNModel}实例可以释放。\n
+ * After {@link OH_NNCompilation} is created by calling this method, the {@link OH_NNModel} instance can be released. \n
*
- * @param model 指向{@link OH_NNModel}实例的指针。
- * @return 返回一个指向{@link OH_NNCompilation}实例的指针。
+ * @param model Pointer to the {@link OH_NNModel} instance.
+ * @return Returns the pointer to a {@link OH_NNCompilation} instance.
* @since 9
* @version 1.0
*/
OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model);
/**
- * @brief 指定模型编译和计算的硬件。
+ * @brief Specifies the device for model compilation and computing.
*
- * 编译阶段,需要指定模型编译和执行计算的硬件设备。先调用{@link OH_NNDevice_GetAllDevicesID}获取可用的设备ID,
- * 通过{@link OH_NNDevice_GetType}和{@link OH_NNDevice_GetType}获取设备信息后,将期望编译执行的
- * 设备ID传入本方法进行设置。\n
+ * In the compilation phase, you need to specify the device for model compilation and computing. Call {@link OH_NNDevice_GetAllDevicesID}
+ * to obtain available device IDs. Call {@link OH_NNDevice_GetType} and {@link OH_NNDevice_GetName} to obtain device information
+ * and pass target device IDs to this method for setting. \n
*
- * @param compilation 指向{@link OH_NNCompilation}实例的指针。
- * @param deviceID 指定的硬件ID。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param compilation Pointer to the {@link OH_NNCompilation} instance.
+ * @param deviceID Device ID.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_t deviceID);
/**
- * @brief 设置编译后的模型缓存路径和缓存版本。
+ * @brief Sets the cache directory and version of the compiled model.
*
- * 在支持缓存的硬件上,模型在硬件驱动层编译后可以保存为缓存文件,下次编译时直接从缓存文件读取模型,减少重新编译的耗时。本方法接受缓存路径和版本,根据缓存
- * 路径中和版本的不同情况,本方法采取不同的行为:\n
+ * On the device that supports caching, a model can be saved as a cache file after being compiled at the device driver layer.
+ * The model can be directly read from the cache file in the next compilation, saving recompilation time.
+ * This method performs different operations based on the passed cache directory and version:\n
*
- * - 缓存路径指定的目录下没有文件:
- * 将编译后的模型缓存到目录下,设置缓存版本等于version。\n
+ * - No file exists in the cache directory:
+ * Caches the compiled model to the directory and sets the cache version to version. \n
*
- * - 缓存路径指定的目录下存在完整的缓存文件,且版本号 == version:
- * 读取路径下的缓存文件,传递到底层硬件中转换为可以执行的模型实例。\n
+ * - A complete cache file exists in the cache directory, and its version is version:
+ * Reads the cache file in the path and passes the data to the underlying device for conversion into executable model instances. \n
*
- * - 缓存路径指定的目录下存在完整的缓存文件,但版本号 < version:
- * 路径下的缓存文件需要更新,模型在底层硬件完成编译后,覆写路径下的缓存文件,将版本号更新为version。\n
+ * - A complete cache file exists in the cache directory, and its version is earlier than version:
+ * When model compilation is complete on the underlying device, overwrites the cache file and changes the version number to version. \n
*
- * - 缓存路径指定的目录下存在完整的缓存文件,但版本号 > version:
- * 路径下的缓存文件版本高于version,不读取缓存文件,同时返回{@link OH_NN_INVALID_PARAMETER}错误码。\n
+ * - A complete cache file exists in the cache directory, and its version is later than version:
+ * Returns the {@link OH_NN_INVALID_PARAMETER} error code without reading the cache file. \n
*
- * - 缓存路径指定的目录下的缓存文件不完整或没有缓存文件的访问权限:
- * 返回{@link OH_NN_INVALID_FILE}错误码。\n
+ * - The cache file in the cache directory is incomplete or you do not have the permission to access the cache file:
+ * Returns the {@link OH_NN_INVALID_FILE} error code. \n
*
- * - 缓存目录不存在,或者没有访问权限:
- * 返回{@link OH_NN_INVALID_PATH}错误码。\n
+ * - The cache directory does not exist or you do not have the access permission:
+ * Returns the {@link OH_NN_INVALID_PATH} error code. \n
*
- * @param compilation 指向{@link OH_NNCompilation}实例的指针。
- * @param cachePath 模型缓存文件目录,本方法在cachePath目录下为不同的硬件创建缓存目录。建议每个模型使用单独的缓存目录。
- * @param version 缓存版本。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param compilation Pointer to the {@link OH_NNCompilation} instance.
+ * @param cachePath Directory for storing model cache files. This method creates directories for different devices in the cachePath directory.
+ * You are advised to use a separate cache directory for each model.
+ * @param version Cache version.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNCompilation_SetCache(OH_NNCompilation *compilation, const char *cachePath, uint32_t version);
/**
- * @brief 设置模型计算的性能模式。
+ * @brief Sets the performance mode for model computing.
*
- * Neural Network Runtime 支持为模型计算设置性能模式,满足低功耗到极致性能的需求。如果编译阶段没有调用本方法设置性能模式,
- * 编译实例为模型默认分配{@link OH_NN_PERFORMANCE_NONE}模式。在{@link OH_NN_PERFORMANCE_NONE}
- * 模式下,硬件按默认的性能模式执行计算。\n
+ * Neural Network Runtime allows you to set the performance mode for model computing to meet the requirements of low power consumption
+ * and ultimate performance. If this method is not called to set the performance mode in the compilation phase, the compilation instance assigns
+ * the {@link OH_NN_PERFORMANCE_NONE} mode for the model by default. In this case, the device performs computing in the default performance mode. \n
*
- * 在不支持性能模式设置的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n
+ * If this method is called on the device that does not support the setting of the performance mode, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
*
- * @param compilation 指向{@link OH_NNCompilation}实例的指针。
- * @param performanceMode 指定性能模式,可选的性能模式参考{@link OH_NN_PerformanceMode}。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param compilation Pointer to the {@link OH_NNCompilation} instance.
+ * @param performanceMode Performance mode. For details about the available performance modes, see {@link OH_NN_PerformanceMode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -294,61 +311,66 @@ OH_NN_ReturnCode OH_NNCompilation_SetPerformanceMode(OH_NNCompilation *compilati
OH_NN_PerformanceMode performanceMode);
/**
- * @brief 设置模型计算的优先级。
+ * @brief Sets the model computing priority.
*
- * Neural Network Runtime 支持为模型设置计算优先级,优先级仅作用于相同uid进程创建的模型,不同uid进程、不同设备的优先级不会
- * 相互影响。\n
+ * Neural Network Runtime allows you to set computing priorities for models.
+ * The priorities apply only to models created by the process with the same UID.
+ * The settings will not affect models created by processes with different UIDs on different devices. \n
*
- * 在不支持优先级设置的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n
+ * If this method is called on the device that does not support the priority setting, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
*
- * @param compilation 指向{@link OH_NNCompilation}实例的指针。
- * @param priority 指定优先级,可选的优先级参考{@link OH_NN_Priority}。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param compilation Pointer to the {@link OH_NNCompilation} instance.
+ * @param priority Priority. For details about the optional priorities, see {@link OH_NN_Priority}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNCompilation_SetPriority(OH_NNCompilation *compilation, OH_NN_Priority priority);
/**
- * @brief 是否以float16的浮点数精度计算。
+ * @brief Enables float16 for computing.
*
- * Neural Network Runtime目前仅支持构造float32浮点模型和int8量化模型。在支持float16精度的硬件上调用本方法,
- * float32浮点数精度的模型将以float16的精度执行计算,以减少内存占用和执行时间。\n
+ * Currently, Neural Network Runtime supports only construction of float32 models and int8 quantized models. If this method is called on a device that supports float16,
+ * float16 will be used for computing the float32 model to reduce memory usage and execution time. \n
*
- * 在不支持float16精度计算的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n
+ * If this method is called on the device that does not support float16, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
*
- * @param compilation 指向{@link OH_NNCompilation}实例的指针。
- * @param enableFloat16 Float16低精度计算标志位。设置为true时,执行Float16推理;设置为false时,执行float32推理。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param compilation Pointer to the {@link OH_NNCompilation} instance.
+ * @param enableFloat16 Indicates whether to enable float16. If this parameter is set to true, float16 inference is performed.
+ * If this parameter is set to false, float32 inference is performed.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNCompilation_EnableFloat16(OH_NNCompilation *compilation, bool enableFloat16);
/**
- * @brief 进行模型编译
+ * @brief Compiles a model.
*
- * 完成编译配置后,调用本方法指示模型编译已完成。编译实例将模型和编译选项推送至硬件设备进行编译。在调用本方法后,无法进行额外的编译操作,调用
- * {@link OH_NNCompilation_SetDevice}、{@link OH_NNCompilation_SetCache}、
- * {@link OH_NNCompilation_SetPerformanceMode}、
- * {@link OH_NNCompilation_SetPriority}和{@link OH_NNCompilation_EnableFloat16}
- * 方法将返回{@link OH_NN_OPERATION_FORBIDDEN}。\n
+ * After the compilation configuration is complete, call this method to return the compilation result. The compilation instance pushes the model and
+ * compilation options to the device for compilation. After this method is called, additional compilation operations cannot be performed.
+ * If the {@link OH_NNCompilation_SetDevice}, {@link OH_NNCompilation_SetCache}, {@link OH_NNCompilation_SetPerformanceMode},
+ * {@link OH_NNCompilation_SetPriority}, and {@link OH_NNCompilation_EnableFloat16} methods are called, {@link OH_NN_OPERATION_FORBIDDEN} is returned. \n
*
- * @param compilation 指向{@link OH_NNCompilation}实例的指针。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param compilation Pointer to the {@link OH_NNCompilation} instance.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation);
/**
- * @brief 释放Compilation对象。
+ * @brief Releases the Compilation object.
*
- * 调用{@link OH_NNCompilation_Construct}创建的编译实例需要调用本方法主动释放,否则将造成内存泄漏。\n
+ * This method needs to be called to release the compilation instance created by calling {@link OH_NNCompilation_Construct}. Otherwise, memory leak will occur. \n
*
- * 如果compilation为空指针或者*compilation为空指针,本方法只打印warning日志,不执行释放逻辑。\n
+ * If compilation or *compilation is a null pointer, this method only prints warning logs and does not execute the release logic. \n
*
- * @param compilation 指向{@link OH_NNCompilation}实例的二级指针。编译实例销毁后,本方法将*compilation主动设置为空指针。
+ * @param compilation Level-2 pointer to the {@link OH_NNCompilation} instance. After a compilation instance is destroyed,
+ * this method sets *compilation to a null pointer.
* @since 9
* @version 1.0
*/
@@ -356,45 +378,50 @@ void OH_NNCompilation_Destroy(OH_NNCompilation **compilation);
/**
- * @brief 创建{@link OH_NNExecutor}类型的执行器实例
+ * @brief Creates an executor instance of the {@link OH_NNExecutor} type.
*
- * 本方法接受一个编译器,构造一个与硬件关联的模型推理执行器。通过{@link OH_NNExecutor_SetInput}设置模型输入数据,
- * 设置输入数据后,调用{@link OH_NNExecutor_Run}方法执行推理,最后通过
- * {@link OH_NNExecutor_SetOutput}获取计算结果。\n
+ * This method constructs a model inference executor associated with the device based on the passed compiler. Use {@link OH_NNExecutor_SetInput}
+ * to set the model input data. After the input data is set, call {@link OH_NNExecutor_Run} to perform inference and then call
+ * {@link OH_NNExecutor_SetOutput} to obtain the computing result. \n
*
- * 调用本方法创建{@link OH_NNExecutor}实例后,如果不需要创建其他执行器,可以安全释放{@link OH_NNCompilation}实例。\n
+ * After calling this method to create the {@link OH_NNExecutor} instance, you can release the {@link OH_NNCompilation}
+ * instance if you do not need to create any other executors. \n
*
- * @param compilation 指向{@link OH_NNCompilation}实例的指针。
- * @return 返回指向{@link OH_NNExecutor}实例的指针。
+ * @param compilation Pointer to the {@link OH_NNCompilation} instance.
+ * @return Pointer to a {@link OH_NNExecutor} instance.
* @since 9
* @version 1.0
*/
OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation);
/**
- * @brief 设置模型单个输入的数据。
+ * @brief Sets the single input data for a model.
*
- * 本方法将dataBuffer中,长度为length个字节的数据,拷贝到底层硬件的共享内存。inputIndex指定设置的输入,tensor用于设置输入的
- * 形状、类型、量化参数等信息。\n
+ * This method copies the data whose length is specified by length (in bytes) in dataBuffer to the shared memory
+ * of the underlying device. inputIndex specifies the input to be set and tensor sets information such as the input shape,
+ * type, and quantization parameters. \n
*
- * 由于Neural Network Runtime支持动态输入形状的模型,在固定形状输入和动态形状输入的场景下,本方法采取不同的处理策略:
+ * Neural Network Runtime supports models with dynamic shape input. For fixed shape input and dynamic shape input scenarios,
+ * this method uses different processing policies:
*
- * - 固定形状输入的场景:tensor各属性必须和构图阶段调用{@link OH_NNModel_AddTensor}添加的张量保持一致;
- * - 动态形状输入的场景:在构图阶段,由于动态输入的形状不确定,调用本方法时,要求tensor.dimensions中的每个值必须大于0,
- * 以确定执行计算阶段输入的形状。设置形状时,只允许调整数值为-1的维度。假设在构图阶段,输入A的维度为
- * [-1, 224, 224, 3],调用本方法时,只能调整第一个维度的尺寸,如:[3, 224, 224, 3]。调整其他维度将返回
- * {@link OH_NN_INVALID_PARAMETER}。\n
+ * - Fixed shape input: The attributes of tensor must be the same as those of the tensor added by calling
+ * {@link OH_NNModel_AddTensor} in the composition phase.
+ * - Dynamic shape input: In the composition phase, because the shape is not fixed, each value in tensor.dimensions must be greater than
+ * 0 in the method calls to determine the shape input in the calculation phase. When setting the shape, you can modify
+ * only the dimension whose value is -1. Assume that [-1, 224, 224, 3] is input as the dimension of A in the composition phase.
+ * When this method is called, only the size of the first dimension can be modified, for example, to [3, 224, 224, 3].
+ * If other dimensions are adjusted, {@link OH_NN_INVALID_PARAMETER} is returned. \n
*
- * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3},
- * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @param inputIndex Input index value, which is in the same sequence of the data input when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that the value of inputIndices is {1, 5, 9} when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * In input settings, the index value for the three inputs is {0, 1, 2}. \n
*
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @param inputIndex 输入的索引值。
- * @param tensor 设置输入数据对应的张量。
- * @param dataBuffer 指向输入数据的指针。
- * @param length 数据缓冲区的字节长度。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param tensor Sets the tensor corresponding to the input data.
+ * @param dataBuffer Pointer to the input data.
+ * @param length Length of the data buffer, in bytes.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -405,26 +432,28 @@ OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor,
size_t length);
/**
- * @brief 设置模型单个输出的缓冲区。
- *
- * 本方法将dataBuffer指向的缓冲区与outputIndex指定的输出绑定,缓冲区的长度由length指定。\n
+ * @brief Sets the buffer for a single output of a model.
*
- * 调用{@link OH_NNExecutor_Run}完成单次模型推理后,Neural Network Runtime将比对dataBuffer指向的缓冲区与
- * 输出数据的长度,根据不同情况,返回不同结果:\n
+ * This method binds the buffer to which dataBuffer points to the output specified by outputIndex.
+ * The length of the buffer is specified by length. \n
*
- * - 如果缓冲区大于或等于数据长度:则推理后的结果将拷贝至缓冲区,并返回{@link OH_NN_SUCCESS},可以通过访问dataBuffer读取推理结果。
- * - 如果缓冲区小于数据长度:则{@link OH_NNExecutor_Run}将返回{@link OH_NN_INVALID_PARAMETER},
- * 并输出日志告知缓冲区太小的信息。\n
+ * After {@link OH_NNExecutor_Run} is called to complete a single model inference, Neural Network Runtime compares
+ * the length of the buffer to which dataBuffer points with the length of the output data and returns different results
+ * based on the actual situation. \n
*
- * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6},
- * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n
+ * - If the buffer length is greater than or equal to the data length, the inference result is copied to the buffer and
+ * {@link OH_NN_SUCCESS} is returned. You can read the inference result from dataBuffer.
+ * - If the buffer length is smaller than the data length, {@link OH_NNExecutor_Run} returns {@link OH_NN_INVALID_PARAMETER}
+ * and generates a log indicating that the buffer is too small. \n
*
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @param outputIndex 输出的索引值。
- * @param dataBuffer 指向输出数据的指针。
- * @param length 数据缓冲区的字节长度。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @param outputIndex Output index value, which is in the same sequence of the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that the value of outputIndices is {4, 6, 8} when {@link OH_NNModel_SpecifyInputsAndOutputs}
+ * is called. In output buffer settings, the index value for the three outputs is {0, 1, 2}.
+ * @param dataBuffer Pointer to the output data.
+ * @param length Length of the data buffer, in bytes.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -434,19 +463,21 @@ OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor,
size_t length);
/**
- * @brief 获取输出tensor的维度信息。
+ * @brief Obtains the dimension information about the output tensor.
*
- * 调用{@link OH_NNExecutor_Run}完成单次推理后,本方法获取指定输出的维度信息和维数。在动态形状输入、输出的场景中常用。\n
+ * After {@link OH_NNExecutor_Run} is called to complete a single inference, call this method to obtain the specified output dimension
+ * information and number of dimensions. It is commonly used in dynamic shape input and output scenarios. \n
*
- * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6},
- * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @param outputIndex Output index value, which is in the same sequence of the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that outputIndices is {4, 6, 8} when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * When {@link OH_NNExecutor_GetOutputShape} is called to obtain dimension information about the output tensor,
+ * the index value for the three outputs is {0, 1, 2}.
*
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @param outputIndex 输出的索引值。
- * @param shape 指向int32_t数组的指针,数组中的每个元素值,是输出tensor在每个维度上的长度。
- * @param shapeLength uint32_t类型的指针,返回输出的维数。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param shape Pointer to the int32_t array. The value of each element in the array is the length of the output tensor in each dimension.
+ * @param shapeLength Pointer to the uint32_t type. The number of output dimensions is returned.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -456,110 +487,106 @@ OH_NN_ReturnCode OH_NNExecutor_GetOutputShape(OH_NNExecutor *executor,
uint32_t *shapeLength);
/**
- * @brief 执行推理。
+ * @brief Performs inference.
*
- * 在执行器关联的硬件上,执行模型的端到端推理计算。\n
+ * Performs end-to-end inference and computing of the model on the device associated with the executor. \n
*
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor);
/**
- * @brief 在硬件上为单个输入申请共享内存。
+ * @brief Allocates shared memory to a single input on a device.
*
- * Neural Network Runtime 提供主动申请硬件共享内存的方法。通过指定执行器和输入索引值,本方法在单个输入关联的硬件
- * 上,申请大小为length的共享内存,通过{@link OH_NN_Memory}实例返回。\n
+ * Neural Network Runtime provides a method for proactively allocating shared memory on a device. By specifying the executor and input index value,
+ * this method allocates shared memory whose size is specified by length on the device associated with a single input and returns the
+ * operation result through the {@link OH_NN_Memory} instance. \n
*
- * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3},
- * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n
- *
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @param inputIndex 输入的索引值。
- * @param length 申请的内存字节。
- * @return 指向{@link OH_NN_Memory}实例的指针。
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @param inputIndex Input index value, which is in the same sequence of the data input when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that the value of inputIndices is {1, 5, 9} when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * In the memory input application, the index value for the three inputs is {0, 1, 2}.
+ * @param length Memory size to be applied for, in bytes.
+ * @return Pointer to a {@link OH_NN_Memory} instance.
* @since 9
* @version 1.0
*/
OH_NN_Memory *OH_NNExecutor_AllocateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length);
/**
- * @brief 在硬件上为单个输出申请共享内存。
- *
- * Neural Network Runtime 提供主动申请硬件共享内存的方法。通过指定执行器和输出索引值,本方法在单个输出关联的硬件
- * 上,申请大小为length的共享内存,通过{@link OH_NN_Memory}实例返回。\n
+ * @brief Allocates shared memory to a single output on a device.
*
- * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6},
- * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n
+ * Neural Network Runtime provides a method for proactively allocating shared memory on a device. By specifying the executor and
+ * output index value, this method allocates shared memory whose size is specified by length on the device associated with
+ * a single output and returns the operation result through the {@link OH_NN_Memory} instance. \n
*
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @param outputIndex 输出的索引值。
- * @param length 申请的内存字节。
- * @return 指向{@link OH_NN_Memory}实例的指针。
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @param outputIndex Output index value, which is in the same sequence of the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that the value of outputIndices is {4, 6, 8} when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * In output memory application, the index value for the three outputs is {0, 1, 2}.
+ * @param length Memory size to be applied for, in bytes.
+ * @return Pointer to a {@link OH_NN_Memory} instance.
* @since 9
* @version 1.0
*/
OH_NN_Memory *OH_NNExecutor_AllocateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length);
/**
- * @brief 释放{@link OH_NN_Memory}实例指向的输入内存。
+ * @brief Releases the input memory to which the {@link OH_NN_Memory} instance points.
*
- * 调用{@link OH_NNExecutor_AllocateInputMemory}创建的内存实例,需要主动调用本方法进行释放,否则将造成内存泄漏。
- * inputIndex和memory的对应关系需要和创建内存实例时保持一致。\n
+ * This method needs to be called to release the memory instance created by calling {@link OH_NNExecutor_AllocateInputMemory}.
+ * Otherwise, a memory leak will occur.
+ * The mapping between inputIndex and memory must be the same as that in memory instance creation. \n
*
- * 如果memory或*memory为空指针,本方法只打印warning日志,不执行释放逻辑。\n
- *
- * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3},
- * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n
+ * If memory or *memory is a null pointer, this method only prints warning logs and does not execute the release logic. \n
*
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @param inputIndex 输入的索引值。
- * @param memory 指向{@link OH_NN_Memory}实例的二级指针。共享内存销毁后,本方法将*memory主动设置为空指针。
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @param inputIndex Input index value, which is in the same sequence of the data input when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that the value of inputIndices is {1, 5, 9} when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * In memory input release, the index value for the three inputs is {0, 1, 2}.
+ * @param memory Level-2 pointer to the {@link OH_NN_Memory} instance. After shared memory is destroyed, this method sets *memory to a null pointer.
* @since 9
* @version 1.0
*/
void OH_NNExecutor_DestroyInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, OH_NN_Memory **memory);
/**
- * @brief 释放{@link OH_NN_Memory}实例指向的输出内存。
+ * @brief Releases the output memory to which the {@link OH_NN_Memory} instance points.
*
- * 调用{@link OH_NNExecutor_AllocateOutputMemory}创建的内存实例,需要主动调用本方法进行释放,否则将造成内存泄漏。
- * outputIndex和memory的对应关系需要和创建内存实例时保持一致。\n
+ * This method needs to be called to release the memory instance created by calling {@link OH_NNExecutor_AllocateOutputMemory}. Otherwise, a memory leak will occur.
+ * The mapping between outputIndex and memory must be the same as that in memory instance creation. \n
*
- * 如果memory或*memory为空指针,本方法只打印warning日志,不执行释放逻辑。\n
- *
- * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6},
- * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n
+ * If memory or *memory is a null pointer, this method only prints warning logs and does not execute the release logic. \n
*
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @param outputIndex 输出的索引值。
- * @param memory 指向{@link OH_NN_Memory}实例的二级指针。共享内存销毁后,本方法将*memory主动设置为空指针。
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @param outputIndex Output index value, which is in the same sequence of the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that the value of outputIndices is {4, 6, 8} when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * In output memory release, the index value for the three outputs is {0, 1, 2}.
+ * @param memory Level-2 pointer to the {@link OH_NN_Memory} instance. After shared memory is destroyed, this method sets *memory to a null pointer.
* @since 9
* @version 1.0
*/
void OH_NNExecutor_DestroyOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, OH_NN_Memory **memory);
/**
- * @brief 将{@link OH_NN_Memory}实例指向的硬件共享内存,指定为单个输入使用的共享内存。
+ * @brief Specifies the hardware shared memory pointed to by the {@link OH_NN_Memory} instance as the shared memory used by a single input.
*
- * 在需要自行管理内存的场景下,本方法将执行输入和{@link OH_NN_Memory}内存实例绑定。执行计算时,底层硬件从内存实例指向的共享内存中读取
- * 输入数据。通过本方法,可以实现设置输入、执行计算、读取输出的并发执行,提升数据流的推理效率。\n
- *
- * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3},
- * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n
+ * In scenarios where memory needs to be managed by yourself, this method binds the execution input to the {@link OH_NN_Memory} memory instance.
+ * During computing, the underlying device reads the input data from the shared memory pointed to by the memory instance.
+ * By using this method, concurrent execution of input setting, computing, and read can be implemented to improve inference efficiency of a data flow. \n
*
- * @param executor 指向{@link OH_NNExecutor}实例的指针。
- * @param inputIndex 输入的索引值。
- * @param tensor 指向{@link OH_NN_Tensor}的指针,设置单个输入所对应的张量。
- * @param memory 指向{@link OH_NN_Memory}的指针。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param executor Pointer to the {@link OH_NNExecutor} instance.
+ * @param inputIndex Input index value, which is in the same sequence of the data input when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that the value of inputIndices is {1, 5, 9} when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * When the input shared memory is specified, the index value for the three inputs is {0, 1, 2}.
+ * @param tensor Pointer to {@link OH_NN_Tensor}, used to set the tensor corresponding to a single input.
+ * @param memory Pointer to {@link OH_NN_Memory}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -569,19 +596,19 @@ OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor,
const OH_NN_Memory *memory);
/**
- * @brief 将{@link OH_NN_Memory}实例指向的硬件共享内存,指定为单个输出使用的共享内存。
+ * @brief Specifies the hardware shared memory pointed to by the {@link OH_NN_Memory} instance as the shared memory used by a single output.
*
- * 在需要自行管理内存的场景下,本方法将执行输出和{@link OH_NN_Memory}内存实例绑定。执行计算时,底层硬件将计算结果直接写入内存实例指向
- * 的共享内存。通过本方法,可以实现设置输入、执行计算、读取输出的并发执行,提升数据流的推理效率。\n
- *
- * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序,
- * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6},
- * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n
+ * In scenarios where memory needs to be managed by yourself, this method binds the execution output to the {@link OH_NN_Memory} memory instance.
+ * When computing is performed, the underlying hardware directly writes the computing result to the shared memory to which the memory instance points.
+ * By using this method, concurrent execution of input setting, computing, and read can be implemented to improve inference efficiency of a data flow. \n
*
- * @param executor 执行器。
- * @param outputIndex 输出的索引值。
- * @param memory 指向{@link OH_NN_Memory}的指针。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param executor Executor.
+ * @param outputIndex Output index value, which is in the same sequence of the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * Assume that the value of outputIndices is {4, 6, 8} when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
+ * When output shared memory is specified, the index value for the three outputs is {0, 1, 2}.
+ * @param memory Pointer to {@link OH_NN_Memory}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -590,13 +617,14 @@ OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor,
const OH_NN_Memory *memory);
/**
- * @brief 销毁执行器实例,释放执行器占用的内存。
+ * @brief Destroys an executor instance to release the memory occupied by the executor.
*
- * 调用{@link OH_NNExecutor_Construct}创建的执行器实例需要调用本方法主动释放,否则将造成内存泄漏。\n
+ * This method needs to be called to release the executor instance created by calling {@link OH_NNExecutor_Construct}. Otherwise,
+ * a memory leak will occur. \n
*
- * 如果executor为空指针或者*executor为空指针,本方法只打印warning日志,不执行释放逻辑。\n
+ * If executor or *executor is a null pointer, this method only prints warning logs and does not execute the release logic. \n
*
- * @param executor 指向{@link OH_NNExecutor}实例的二级指针。
+ * @param executor Level-2 pointer to the {@link OH_NNExecutor} instance.
* @since 9
* @version 1.0
*/
@@ -604,48 +632,51 @@ void OH_NNExecutor_Destroy(OH_NNExecutor **executor);
/**
- * @brief 获取对接到 Neural Network Runtime 的硬件ID。
+ * @brief Obtains the ID of the device connected to Neural Network Runtime.
*
- * 每个硬件在 Neural Network Runtime 中存在唯一且固定ID,本方法通过uin32_t数组返回当前设备上已经对接的硬件ID。\n
+ * Each device has a unique and fixed ID in Neural Network Runtime. This method returns device IDs on the current device through the size_t array. \n
*
- * 硬件ID通过size_t数组返回,数组的每个元素是单个硬件的ID值。数组内存由Neural Network Runtime管理。在下次调用本方法前,
- * 数据指针有效。\n
+ * Device IDs are returned through the size_t array. Each element of the array is the ID of a single device.
+ * The array memory is managed by Neural Network Runtime.
+ * The data pointer is valid before this method is called next time. \n
*
- * @param allDevicesID 指向size_t数组的指针。要求传入的(*allDevicesID)为空指针,否则返回
- * {@link OH_NN_INVALID_PARAMETER}。
- * @param deviceCount uint32_t类型的指针,用于返回(*allDevicesID)的长度。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param allDevicesID Pointer to the size_t array. The input *allDevicesID must be a null pointer. Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
+ * @param deviceCount Pointer of the uint32_t type, which is used to return the length of (*allDevicesID).
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount);
/**
- * @brief 获取指定硬件的类型信息。
+ * @brief Obtains the name of the specified device.
*
- * 通过deviceID指定计算硬件,获取硬件的名称。硬件ID需要调用{@link OH_NNDevice_GetAllDevicesID}获取。\n
+ * deviceID specifies the device whose name will be obtained. The device ID needs to be obtained by calling {@link OH_NNDevice_GetAllDevicesID}. \n
*
- * @param deviceID 指定硬件ID。
- * @param name 指向char数组的指针,要求传入的(*char)为空指针,否则返回
- * {@link OH_NN_INVALID_PARAMETER}。(*name)以C风格字符串保存硬件名称,数组以\0结尾。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param deviceID Device ID.
+ * @param name Pointer to the char array. The passed (*name) must be a null pointer. Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
+ * The value of (*name) is a C-style string ended with '\0'.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNDevice_GetName(size_t deviceID, const char **name);
/**
- * @brief 获取指定硬件的类别信息。
+ * @brief Obtains the type information of the specified device.
*
- * 通过deviceID指定计算硬件,获取硬件的类别。目前 Neural Network Runtime 支持的设备类型有:
- * - CPU设备:OH_NN_CPU
- * - GPU设备:OH_NN_GPU
- * - 机器学习专用加速器:OH_NN_ACCELERATOR
- * - 不属于以上类型的其他硬件类型:OH_NN_OTHERS\n
+ * deviceID specifies the device whose type will be obtained. Currently, Neural Network Runtime supports the following device types:
+ * - OH_NN_CPU: CPU device.
+ * - OH_NN_GPU: GPU device.
+ * - OH_NN_ACCELERATOR: machine learning dedicated accelerator.
+ * - OH_NN_OTHERS: other hardware types. \n
*
- * @param deviceID 指定硬件ID。
- * @param deviceType 指向{@link OH_NN_DeviceType}实例的指针,返回硬件的类别信息。
- * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。
+ * @param deviceID Device ID.
+ * @param deviceType Pointer to the {@link OH_NN_DeviceType} instance. The device type information is returned.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
+ * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
@@ -655,5 +686,5 @@ OH_NN_ReturnCode OH_NNDevice_GetType(size_t deviceID, OH_NN_DeviceType *deviceTy
}
#endif // __cplusplus
+/** @} */
#endif // NEURAL_NETWORK_RUNTIME_H
-/** @} */
\ No newline at end of file
diff --git a/interfaces/kits/c/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime_type.h
index f3ae84faa0cdcd2b9554cec929974942559cd028..3bbbb4c528ffe81b10f8c8e73a8fd0c6980d2088 100644
--- a/interfaces/kits/c/neural_network_runtime_type.h
+++ b/interfaces/kits/c/neural_network_runtime_type.h
@@ -17,7 +17,7 @@
* @addtogroup NeuralNeworkRuntime
* @{
*
- * @brief 提供Neural Network Runtime加速模型推理的相关接口。
+ * @brief Provides APIs of Neural Network Runtime for accelerating the model inference.
*
* @Syscap SystemCapability.Ai.NeuralNetworkRuntime
* @since 9
@@ -27,7 +27,7 @@
/**
* @file neural_network_runtime_type.h
*
- * @brief Neural Network Runtime定义的结构体和枚举值。
+ * @brief Defines the structure and enumeration for Neural Network Runtime.
*
* @since 9
* @version 1.0
@@ -44,7 +44,7 @@ extern "C" {
#endif
/**
- * @brief Neural Network Runtime的模型句柄
+ * @brief Defines the handles of models for Neural Network Runtime.
*
* @since 9
* @version 1.0
@@ -52,7 +52,7 @@ extern "C" {
typedef struct OH_NNModel OH_NNModel;
/**
- * @brief Neural Network Runtime的编译器句柄
+ * @brief Defines the compiler handle for Neural Network Runtime.
*
* @since 9
* @version 1.0
@@ -60,7 +60,7 @@ typedef struct OH_NNModel OH_NNModel;
typedef struct OH_NNCompilation OH_NNCompilation;
/**
- * @brief Neural Network Runtime的执行器句柄
+ * @brief Defines the executor handle for Neural Network Runtime.
*
* @since 9
* @version 1.0
@@ -68,1574 +68,1710 @@ typedef struct OH_NNCompilation OH_NNCompilation;
typedef struct OH_NNExecutor OH_NNExecutor;
/**
- * @brief 硬件的执行性能模式
+ * @brief Defines the hardware performance mode.
*
* @since 9
* @version 1.0
*/
typedef enum {
- /** 无性能模式偏好 */
+ /** No performance mode preference */
OH_NN_PERFORMANCE_NONE = 0,
- /** 低能耗模式 */
+ /** Low power consumption mode */
OH_NN_PERFORMANCE_LOW = 1,
- /** 中性能模式 */
+ /** Medium performance mode */
OH_NN_PERFORMANCE_MEDIUM = 2,
- /** 高性能模式 */
+ /** High performance mode */
OH_NN_PERFORMANCE_HIGH = 3,
- /** 极致性能模式 */
+ /** Ultimate performance mode */
OH_NN_PERFORMANCE_EXTREME = 4
} OH_NN_PerformanceMode;
/**
- * @brief 模型推理任务优先级
+ * @brief Defines the model inference task priority.
*
* @since 9
* @version 1.0
*/
typedef enum {
- /** 无优先级偏好 */
+ /** No priority preference */
OH_NN_PRIORITY_NONE = 0,
- /** 低优先级 */
+ /** Low priority */
OH_NN_PRIORITY_LOW = 1,
- /** 中优先级 */
+ /** Medium priority */
OH_NN_PRIORITY_MEDIUM = 2,
- /** 高优先级 */
+ /** High priority */
OH_NN_PRIORITY_HIGH = 3
} OH_NN_Priority;
/**
- * @brief Neural Network Runtime 定义的错误码类型
+ * @brief Defines error codes for Neural Network Runtime.
*
* @since 9
* @version 1.0
*/
typedef enum {
- /** 操作成功 */
+ /** The operation is successful. */
OH_NN_SUCCESS = 0,
- /** 操作失败 */
+ /** The operation failed. */
OH_NN_FAILED = 1,
- /** 非法参数 */
+ /** Invalid parameter. */
OH_NN_INVALID_PARAMETER = 2,
- /** 内存相关的错误,包括:内存不足、内存数据拷贝失败、内存申请失败等。 */
+ /** Memory-related error, for example, insufficient memory, memory data copy failure, or memory application failure. */
OH_NN_MEMORY_ERROR = 3,
- /** 非法操作 */
+ /** Invalid operation. */
OH_NN_OPERATION_FORBIDDEN = 4,
- /** 空指针异常 */
+ /** Null pointer exception. */
OH_NN_NULL_PTR = 5,
- /** 无效文件 */
+ /** Invalid file. */
OH_NN_INVALID_FILE = 6,
- /** 硬件发生错误,错误可能包含:HDL服务崩溃 */
+ /** A hardware error occurs, for example, HDL service crash. */
OH_NN_UNAVALIDABLE_DEVICE = 7,
- /** 非法路径 */
+ /** Invalid path. */
OH_NN_INVALID_PATH = 8
} OH_NN_ReturnCode;
/**
- * @brief Neural Network Runtime 融合算子中激活函数的类型
+ * @brief Defines activation function types in the fusion operator for Neural Network Runtime.
*
* @since 9
* @version 1.0
*/
typedef enum : int8_t {
- /** 未指定融合激活函数 */
+ /** The fusion activation function is not specified. */
OH_NN_FUSED_NONE = 0,
- /** 融合relu激活函数 */
+ /** Fusion relu activation function */
OH_NN_FUSED_RELU = 1,
- /** 融合relu6激活函数 */
+ /** Fusion relu6 activation function */
OH_NN_FUSED_RELU6 = 2
} OH_NN_FuseType;
/**
- * @brief tensor数据的排布类型
+ * @brief Defines the layout type of tensor data.
*
* @since 9
* @version 1.0
*/
typedef enum {
- /** 当tensor没有特定的排布类型时(如标量或矢量),使用{@link OH_NN_FORMAT_NONE} */
+ /** The tensor does not have a specific layout type (such as scalar or vector). */
OH_NN_FORMAT_NONE = 0,
- /** 读取(使用)维度信息时按照NCHW读取(使用)*/
+ /** The tensor arranges data in NCHW format. */
OH_NN_FORMAT_NCHW = 1,
- /** 读取(使用)维度信息时按照NHWC读取(使用) */
+ /** The tensor arranges data in NHWC format. */
OH_NN_FORMAT_NHWC = 2
} OH_NN_Format;
/**
- * @brief Neural Network Runtime 支持的设备类型
+ * @brief Defines device types supported by Neural Network Runtime.
*
* @since 9
* @version 1.0
*/
typedef enum {
- /** 不属于CPU、GPU、专用加速器的设备 */
+ /** Devices that are not CPU, GPU, or dedicated accelerator */
OH_NN_OTHERS = 0,
- /** CPU设备 */
+ /** CPU device */
OH_NN_CPU = 1,
- /** GPU设备 */
+ /** GPU device */
OH_NN_GPU = 2,
- /** 专用硬件加速器 */
+ /** Dedicated hardware accelerator */
OH_NN_ACCELERATOR = 3,
} OH_NN_DeviceType;
/**
- * @brief Neural Network Runtime 支持的数据类型
+ * @brief Defines tensor data types supported by Neural Network Runtime.
*
* @since 9
* @version 1.0
*/
typedef enum {
- /** 张量数据类型未知 */
+ /** Unknown type */
OH_NN_UNKNOWN = 0,
- /** 张量数据类型为bool */
+ /** bool */
OH_NN_BOOL = 1,
- /** 张量数据类型为int8 */
+ /** int8 */
OH_NN_INT8 = 2,
- /** 张量数据类型为int16 */
+ /** int16 */
OH_NN_INT16 = 3,
- /** 张量数据类型为int32 */
+ /** int32 */
OH_NN_INT32 = 4,
- /** 张量数据类型为int64 */
+ /** int64 */
OH_NN_INT64 = 5,
- /** 张量数据类型为uint8 */
+ /** uint8 */
OH_NN_UINT8 = 6,
- /** 张量数据类型为uint16 */
+ /** uint16 */
OH_NN_UINT16 = 7,
- /** 张量数据类型为uint32 */
+ /** uint32 */
OH_NN_UINT32 = 8,
- /** 张量数据类型为uint64 */
+ /** uint64 */
OH_NN_UINT64 = 9,
- /** 张量数据类型为float16 */
+ /** float16 */
OH_NN_FLOAT16 = 10,
- /** 张量数据类型为float32 */
+ /** float32 */
OH_NN_FLOAT32 = 11,
- /** 张量数据类型为float64 */
+ /** float64 */
OH_NN_FLOAT64 = 12
} OH_NN_DataType;
/**
- * @brief Neural Network Runtime 支持算子的类型
+ * @brief Defines operator types supported by Neural Network Runtime.
*
* @since 9
* @version 1.0
*/
typedef enum {
/**
- * 返回两个输入张量对应元素相加的和的张量。
+ * Returns the tensor of the sum of the elements corresponding to two input tensors.
*
- * 输入:
+ * Inputs:
*
- * * x,第一个输入的张量,数据类型要求为布尔值或者数字。
- * * y,第二个输入的张量,数据类型和形状需要和第一个输入保持一致。
+ * * input1: first input tensor, of the Boolean or number type.
+ * * input2: second input tensor, whose data type and shape must be the same as those of the first tensor.
*
- * 参数:
+ * Parameters:
*
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * 0 输出x和y的和,数据形状与输入broadcast之后一样,数据类型与较高精度的输入精度一致
+ * * output: sum of input1 and input2.
+ * The data shape is the same as that of the input after broadcasting,
+ * and the data type is the same as that of the input with a higher precision.
*/
OH_NN_OPS_ADD = 1,
/**
- * 在输入tensor上应用 2D 平均池化,仅支持NHWC格式的tensor。支持int8量化输入。
+ * Applies 2D average pooling to the input tensor. Currently, only tensors in NHWC format are supported. The int8 quantization input is supported.
*
- * 如果输入中含有padMode参数:
+ * If the input contains the padMode parameter:
*
- * 输入:
+ * Inputs:
*
- * * x,一个张量。
+ * * input: tensor.
*
- * 参数:
+ * Parameters:
*
- * * kernelSize,用来取平均值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight],
- * 第一个数表示kernel高度,第二个数表示kernel宽度。
- * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight],
- * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。
- * * padMode,填充模式,int类型的可选值,0表示same,1表示valid,并且以最近邻的值填充。
- * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部
- * 和底部、左侧和右侧。否则,最后一个额外的填充将从底部和右侧完成。
- * valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * kernelSize indicates the kernel size used to obtain the average value. It is an int array [kernel_height, kernel_width].
+ * The first number indicates the kernel height, and the second number indicates the kernel width.
+ * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
+ * The first number indicates the moving step in height, and the second number indicates the moving step in width.
+ * * padMode: padding mode, which is optional. The value is of the int type and can be 0 (same) or 1 (valid).
+ * The nearest neighbor value is used for padding.
+ * 0 (same): The height and width of the output are the same as those of the input.
+ * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
+ * Otherwise, the last additional padding will be completed from the bottom and right.
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. Excessive pixels will be discarded.
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 如果输入中含有padList参数:
+ * If the input contains the padList parameter:
*
- * 输入:
+ * Inputs:
*
- * * x,一个张量。
+ * * input: tensor.
*
- * 参数:
+ * Parameters:
*
- * * kernelSize,用来取平均值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight],
- * 第一个数表示kernel高度,第二个数表示kernel宽度。
- * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight],
- * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。
- * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right],并且以最近邻的值填充。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * kernelSize indicates the kernel size used to obtain the average value. It is an int array [kernel_height, kernel_width].
+ * The first number indicates the kernel height, and the second number indicates the kernel width.
+ * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
+ * The first number indicates the moving step in height, and the second number indicates the moving step in width.
+ * * padList: padding around input. It is an int array [top, bottom, left, right], and the nearest neighbor values are used for padding.
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * 输出x平均池化后的张量。
+ * * output: average pooling result of the input.
*/
OH_NN_OPS_AVG_POOL = 2,
/**
- * 对一个tensor进行batch normalization,对tensor元素进行缩放和位移,缓解一批数据中潜在的covariate shift。
+ * Batch normalization is performed on a tensor to scale and shift tensor elements, relieving potential covariate shift in a batch of data.
*
- * 输入:
+ * Inputs:
*
- * * x,一个n维的tensor,要求形状为[N,...,C],即第n维是通道数(channel)。
- * * scale,缩放因子的1D张量,用于缩放归一化的第一个张量。
- * * offset,用于偏移的1D张量,以移动到归一化的第一个张量。
- * * mean,总体均值的一维张量,仅用于推理;对于训练,必须为空。
- * * variance,用于总体方差的一维张量。仅用于推理;对于训练,必须为空。
+ * * input: n-dimensional tensor of shape [N, ..., C]. The nth dimension is the number of channels.
+ * * scale: 1D tensor of the scaling factor used to scale the first normalized tensor.
+ * * offset: 1D tensor used to move to the first normalized tensor.
+ * * mean: 1D tensor of the overall mean value. It is used only for inference. In case of training, this parameter must be left empty.
+ * * variance: 1D tensor used for the overall variance. It is used only for inference. In case of training, this parameter must be left empty.
*
- * 参数:
+ * Parameters:
*
- * * epsilon,数值稳定性的小附加值。
+ * * epsilon: fixed small additional value.
*
- * 输出:
+ * Outputs:
*
- * * 输出张量,形状和数据类型与输入x一致。
+ * * output: n-dimensional output tensor whose shape and data type are the same as those of the input.
*/
OH_NN_OPS_BATCH_NORM = 3,
/**
- * 将一个4维tensor的batch维度按block_shape切分成小块,并将这些小块拼接到空间维度。
+ * Divides the batch dimension of a 4D tensor into small blocks by block_shape, and interleaves these blocks back into the spatial dimension.
*
- * 参数:
+ * Inputs:
*
- * * x,输入张量,维将被切分,拼接回空间维度。
+ * * input: input tensor. The dimension will be divided into small blocks, and these blocks will be interleaved into the spatial dimension.
*
- * 输出:
+ * Parameters:
*
- * * blockSize,一个长度为2的数组[height_block,weight_block],指定切分到空间维度上的block大小。
- * * crops,一个shape为(2,2)的2维数组[[crop0_start,crop0_end],[crop1_start,crop1_end]],
- * 表示在output的空间维度上截掉部分元素。
+ * * blockSize: size of each block to be interleaved into the spatial dimension. The value is an array [height_block, width_block].
+ * * crops: elements truncated from the spatial dimension of the output. The value is a 2D array [[crop0_start, crop0_end],
+ * [crop1_start, crop1_end]] with the shape of (2, 2).
+ *
*
- * 输出:
+ * Outputs:
*
- * * 输出张量,假设x的形状为(n,h,w,c),output的形状为(n',h',w',c'):
- * n' = n / (block_shape[0] * block_shape[1])
- * h' = h * block_shape[0] - crops[0][0] - crops[0][1]
- * w' = w * block_shape[1] - crops[1][0] - crops[1][1]
- * c'= c
+ * * output. Assume that the shape of input is (n,h,w,c) and the shape of output is (n',h',w',c'):
+ * n' = n / (block_shape[0] * block_shape[1])
+ * h' = h * block_shape[0] - crops[0][0] - crops[0][1]
+ * w' = w * block_shape[1] - crops[1][0] - crops[1][1]
+ * c'= c
*/
OH_NN_OPS_BATCH_TO_SPACE_ND = 4,
/**
- * 对给出的输入张量上的各个维度方向上的数据进行偏置。
+ * Offsets the data in each dimension of the input tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,输入张量,可为2-5维度。
- * * bias,参数对应输入维度数量的偏移值。
+ * * input: input tensor, which can have two to five dimensions.
+ * * bias: offset of the number of input dimensions.
*
- * 输出:
+ * Outputs:
*
- * * 输出张量,根据输入中每个维度方向偏移后的结果。
+ * * output: sum of the input tensor and the bias in each dimension.
*/
OH_NN_OPS_BIAS_ADD = 5,
/**
- * 对输入张量中的数据类型进行转换。
+ * Converts the data type in the input tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,输入张量。
- * * type,输入转换目的的数据类型。
+ * * input: input tensor.
+ * * type: converted data type.
*
- * 输出:
+ * Outputs:
*
- * * 输出张量,输出转换为目的数据类型后的张量。
+ * * output: converted tensor.
*/
OH_NN_OPS_CAST = 6,
/**
- * 在指定轴上连接张量,将输入张量按给定的轴连接起来。
+ * Connects tensors in a specified dimension.
*
- * 输入:
+ * Inputs:
*
- * * x:N个输入张量。
+ * * input: N input tensors.
*
- * 参数:
+ * Parameters:
*
- * * axis,指定轴的位置。
+ * * axis: dimension for connecting tensors.
*
- * 输出:
+ * Outputs:
*
- * * 输出n个张量按axis轴连接的结果。
+ * * output: result of connecting N tensors along the axis.
*/
OH_NN_OPS_CONCAT = 7,
/**
- * 二维卷积层。
- *
- * 如果输入中含有padMode参数:
- *
- * 输入:
- *
- * * x,输入张量。
- * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group],
- * inChannel必须要能整除group。
- * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化
- * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。
- *
- * 参数:
- *
- * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。
- * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth],
- * 值必须大于或等于1,并且不能超过x的height和width。
- * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid。
- * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧
- * 和右侧。否则,最后一个额外的填充将从底部和右侧完成。
- * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。
- * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
- *
- *
- * 如果输入中含有padList参数:
- *
- * 输入:
- *
- * * x,输入张量。
- * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group],
- * inChannel必须要能整除group。
- * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化
- * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。
- *
- * 参数:
- *
- * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。
- * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。
- * 值必须大于或等于1,并且不能超过x的height和width。
- * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。
- * * group,将输入x按in_channel分组,int类型。
- * group等于1,这是常规卷积。
- * group等于in_channel,这是depthwiseConv2d,此时group==in_channel==out_channel。
- * group大于1且小于in_channel,这是分组卷积,out_channel==group。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
- *
- * 输出:
- *
- * * 输出张量,卷积的输出。
+ * 2D convolutional layer.
+ *
+ * If the input contains the padMode parameter:
+ *
+ * Inputs:
+ *
+ * * input: input tensor.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
+ * The value of inChannel must be exactly divided by the value of group.
+ *
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
+ * The actual quantization parameters are determined by input and weight.
+ *
+ * Parameters:
+ *
+ * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
+ * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ *
+ * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid).
+ * 0 (same): The height and width of the output are the same as those of the input.
+ * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
+ * Otherwise, the last additional padding will be completed from the bottom and right.
+ *
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
+ * * group: number of groups in which the input is divided by in_channel. The value is of the int type.
+ * If group is 1, it is a conventional convolution. If group is greater than 1 and
+ * less than or equal to in_channel, it is a group convolution.
+ * * activationType is an integer constant which is contained in FuseType. The specified activation function is called before output.
+ *
+ * If the input contains the padList parameter:
+ *
+ * Inputs:
+ *
+ * * input: input tensor.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
+ * The value of inChannel must be exactly divided by the value of group.
+ *
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
+ * The actual quantization parameters are determined by input and weight.
+ *
+ * Parameters:
+ *
+ * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
+ * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ * * padList: padding around input. It is an int array [top, bottom, left, right].
+ * * group: number of groups in which the input is divided by in_channel. The value is of the int type.
+ * If group is 1, it is a conventional convolution.
+ * If group is in_channel, it is depthwiseConv2d. In this case, group==in_channel==out_channel.
+ * If group is greater than 1 and less than in_channel, it is a group convolution. In this case, out_channel==group.
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
+ *
+ * Outputs:
+ *
+ * * output: convolution computing result.
*/
OH_NN_OPS_CONV2D = 8,
/**
- * 二维卷积转置。
- *
- * 如果输入中含有padMode参数:
- *
- * 输入:
- *
- * * x,输入张量。
- * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group],
- * inChannel必须要能整除group。
- * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化
- * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。
- * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。
- *
- * 参数:
- *
- * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。
- * 值必须大于或等于1,并且不能超过x的height和width。
- * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid。
- * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧
- * 和右侧。否则,最后一个额外的填充将从底部和右侧完成。
- * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。
- * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。
- * * outputPads,一个整数或元组/2 个整数的列表,指定沿输出张量的高度和宽度的填充量。可以是单个整数,用于为所
- * 有空间维度指定相同的值。沿给定维度的输出填充量必须小于沿同一维度的步幅。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
- *
- * 如果输入中含有padList参数:
- *
- * 输入:
- *
- * * x,输入张量。
- * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group],
- * inChannel必须要能整除group。
- * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化
- * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。
- *
- * 参数:
- *
- * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。
- * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。
- * 值必须大于或等于1,并且不能超过x的height和width。
- * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。
- * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。
- * * outputPads,一个整数或元组/2 个整数的列表,指定沿输出张量的高度和宽度的填充量。可以是单个整数,用于为所
- * 有空间维度指定相同的值。沿给定维度的输出填充量必须小于沿同一维度的步幅。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
- *
- * 输出:
- *
- * * 输出张量,卷积转置后的输出。
+ * 2D convolution transposition.
+ *
+ * If the input contains the padMode parameter:
+ *
+ * Inputs:
+ *
+ * * input: input tensor.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
+ * The value of inChannel must be exactly divided by the value of group.
+ *
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
+ * The actual quantization parameters are determined by input and weight.
+ *
+ * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
+ *
+ * Parameters:
+ *
+ * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
+ * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid).
+ * 0 (same): The height and width of the output are the same as those of the input.
+ * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
+ * Otherwise, the last additional padding will be completed from the bottom and right.
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
+ * * group: number of groups in which the input is divided by in_channel. The value is of the int type.
+ * If group is 1, it is a conventional convolution. If group is greater than 1 and
+ * less than or equal to in_channel, it is a group convolution.
+ * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple.
+ * It can be a single integer to specify the same value for all spatial dimensions. The amount of output
+ * padding along a dimension must be less than the stride along this dimension.
+ *
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
+ *
+ * If the input contains the padList parameter:
+ *
+ * Inputs:
+ *
+ * * input: input tensor.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
+ * The value of inChannel must be exactly divided by the value of group.
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
+ * The actual quantization parameters are determined by input and weight.
+ *
+ * Parameters:
+ *
+ * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
+ * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ * * padList: padding around input. It is an int array [top, bottom, left, right].
+ * * group: number of groups in which the input is divided by in_channel. The value is of the int type.
+ * If group is 1, it is a conventional convolution. If group is greater than 1
+ * and less than or equal to in_channel, it is a group convolution.
+ * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple.
+ * It can be a single integer to specify the same value for all spatial dimensions. The amount of output padding
+ * along a dimension must be less than the stride along this dimension.
+ *
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
+ *
+ * Outputs:
+ *
+ * * output: computing result after convolution and transposition.
*/
OH_NN_OPS_CONV2D_TRANSPOSE = 9,
/**
- * 2维深度可分离卷积
- *
- * 如果输入中含有padMode参数:
- *
- * 输入:
- *
- * * x,输入张量。
- * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,1],outChannel = channelMultiplier x inChannel。
- * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化
- * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。
- *
- * 参数:
- *
- * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。
- * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。
- * 值必须大于或等于1,并且不能超过x的height和width。
- * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid
- * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧
- * 和右侧。否则,最后一个额外的填充将从底部和右侧完成
- * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
- *
- * 如果输入中含有padList 参数:
- *
- * 输入:
- *
- * * x,输入张量。
- * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,1],outChannel = channelMultiplier x inChannel。
- * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化
- * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。
- *
- * 参数:
- *
- * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。
- * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。
- * 值必须大于或等于1,并且不能超过x的height和width。
- * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
- *
- * 输出:
- *
- * * 输出张量,卷积后的输出。
+ * 2D depthwise separable convolution.
+ *
+ * If the input contains the padMode parameter:
+ *
+ * Inputs:
+ *
+ * * input: input tensor.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
+ * outChannel is equal to channelMultiplier multiplied by inChannel.
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
+ * The actual quantization parameters are determined by input and weight.
+ *
+ * Parameters:
+ *
+ * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
+ * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid).
+ * 0 (same): The height and width of the output are the same as those of the input.
+ * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
+ * Otherwise, the last additional padding will be completed from the bottom and right.
+ *
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
+ *
+ * If the input contains the padList parameter:
+ *
+ * Inputs:
+ *
+ * * input: input tensor.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
+ * outChannel is equal to channelMultiplier multiplied by inChannel.
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
+ * The actual quantization parameters are determined by input and weight.
+ *
+ * Parameters:
+ *
+ * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
+ * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ * * padList: padding around input. It is an int array [top, bottom, left, right].
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
+ *
+ * Outputs:
+ *
+ * * output: convolution computing result.
*/
OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE = 10,
/**
- * 对输入的两个标量或张量做商。
+ * Divides two input scalars or tensors.
*
- * 输入:
+ * Inputs:
*
- * * x1,第一个输入是标量或布尔值或数据类型为数字或布尔值的张量。
- * * x2,数据类型根据x1的类型,要求有所不同:
- * 当第一个输入是张量时,第二个输入可以是实数或布尔值或数据类型为实数/布尔值的张量。
- * 当第一个输入是实数或布尔值时,第二个输入必须是数据类型为实数/布尔值的张量。
+ * * input1: first input, which is a number, a bool, or a tensor whose data type is number or Boolean.
+ * * input2: second input, which must meet the following requirements:
+ * If the first input is a tensor, the second input can be a real number, a Boolean value, or a tensor whose data type is real number or Boolean value.
+ * If the first input is a real number or Boolean value, the second input must be a tensor whose data type is real number or Boolean value.
*
- * 参数:
+ * Parameters:
*
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * 输出张量,输出两输入相除后的结果。
+ * * output: result of dividing input1 by input2.
*/
OH_NN_OPS_DIV = 11,
/**
- * 设置参数对输入进行product(点乘)、sum(相加减)或max(取大值)。
+ * Sets parameters to perform product (dot product), sum (addition and subtraction), or max (larger value) on the input.
*
- * 输入:
+ * Inputs:
*
- * * x1,第一个输入张量。
- * * x2,第二个输入张量。
+ * * input1: first input tensor.
+ * * input2: second input tensor.
*
- * 参数:
+ * Parameters:
*
- * * mode,枚举,选择操作方式。
+ * * mode: operation mode. The value is an enumerated value.
*
- * 输出:
- *
- * * 输出tensor,与x1有相同的数据类型和形状。
+ * Outputs:
*
+ * * output: computing result, which has the same data type and shape as input1.
*/
OH_NN_OPS_ELTWISE = 12,
/**
- * 在给定轴上为tensor添加一个额外的维度。
+ * Adds an additional dimension to a tensor in the given dimension.
*
- * 输入:
+ * Inputs:
*
- * * x,输入张量。
- * * axis,需要添加的维度的index,int32_t类型,值必须在[-dim-1,dim],且只允许常量值。
+ * * input: input tensor.
+ * * axis: index of the dimension to be added. The value is of the int32_t type and must be a constant in the range [-dim-1, dim].
*
- * 输出:
+ * Outputs:
*
- * * 输出tensor,与x有相同的数据类型和形状。
+ * * output: tensor after dimension expansion.
*/
OH_NN_OPS_EXPAND_DIMS = 13,
/**
- * 根据指定的维度,创建由一个标量填充的张量。
+ * Creates a tensor of the specified dimensions and fills it with a scalar.
*
- * 输入:
+ * Inputs:
*
- * * value,填充的标量。
- * * shape,指定创建张量的维度。
+ * * value: scalar used to fill the tensor.
+ * * shape: dimensions of the tensor to be created.
*
- * 输出:
+ * Outputs:
*
- * * 输出张量,与value有相同的数据类型,shape由输入指定。
+ * * output: generated tensor, which has the same data type as value. The tensor shape is specified by the shape parameter.
*/
OH_NN_OPS_FILL = 14,
/**
- * 全连接,整个输入作为feature map,进行特征提取。
+ * Full connection. The entire input is used as the feature map for feature extraction.
*
- * 输入:
+ * Inputs:
*
- * * x,全连接的输入张量。
- * * weight,全连接的权重张量。
- * * bias,全连接的偏置,在量化场景下,bias 参数不需要量化参数,其量化
- * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。
+ * * input: full-connection input tensor.
+ * * weight: weight tensor for a full connection.
+ * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required for this parameter.
+ * If quantization is required, the data must be of the OH_NN_INT32 type.
+ * The actual quantization parameters are determined by input and weight.
*
- * 参数:
+ * Parameters:
*
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * output,输出运算后的张量。
-
- * 如果输入中含有axis参数:
+ * * output: computed tensor.
+ *
+ * If the input contains the axis parameter:
*
- * 输入:
+ * Inputs:
*
- * * x,全连接的输入张量。
- * * weight,全连接的权重张量。
- * * bias,全连接的偏置,在量化场景下,bias 参数不需要量化参数,其量化
- * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。
+ * * input: full-connection input tensor.
+ * * weight: weight tensor for a full connection.
+ * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required for this parameter.
+ * If quantization is required, the data must be of the OH_NN_INT32 type. The actual quantization parameters
+ * are determined by input and weight.
*
- * 参数:
+ * Parameters:
*
- * * axis,x做全连接的轴,从指定轴axis开始,将axis和axis后面的轴展开成1维去做全连接。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * axis: axis in which the full connection is applied. The specified axis and its following axes are
+ * converted into a 1D tensor for applying the full connection.
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * output,输出运算后的张量。
+ * * output: computed tensor.
*/
OH_NN_OPS_FULL_CONNECTION = 15,
/**
- * 根据指定的索引和轴返回输入tensor的切片。
+ * Returns the slice of the input tensor based on the specified index and axis.
*
- * 输入:
+ * Inputs:
*
- * * x,输入待切片的tensor。
- * * inputIndices,指定输入x在axis上的索引,是一个int类型的数组,值必须在[0,x.shape[axis])范围内
- * * axis,输入x被切片的轴,int32_t类型的数组,数组长度为1。
+ * * input: tensor to be sliced.
+ * * inputIndices: indices of the specified input on the axis. The value is an array of the int type
+ * and must be in the range [0,input.shape[axis]).
+ * * axis: axis on which input is sliced. The value is an array with one element of the int32_t type.
*
- * 输出:
+ * Outputs:
*
- * * Output,输出切片后的tensor。
+ * * output: sliced tensor.
*/
OH_NN_OPS_GATHER = 16,
/**
- * 计算输入的Hswish激活值。
+ * Calculates the Hswish activation value of the input.
*
- * 输入:
+ * Inputs:
*
- * * 一个n维输入tensor。
+ * * input: n-dimensional input tensor.
*
- * 输出:
+ * Outputs:
*
- * * n维Hswish激活值,数据类型和shape和input一致。
+ * * output: n-dimensional Hswish activation value, with the same data type and shape as the input.
*/
OH_NN_OPS_HSWISH = 17,
/**
- * 对输入x1和x2,计算每对元素的x<=y的结果。
+ * For input1 and input2, calculate the result of input1[i]<=input2[i] for each pair of elements,
+ * where i is the index of each element in the input tensor.
*
- * 输入:
+ * Inputs:
*
- * * x1,可以是实数、布尔值或数据类型是实数/NN_BOOL的tensor。
- * * x2,如果input_x是tensor,input_y可以是实数、布尔值,否则只能是tensor,其数据类型是实数或NN_BOOL。
+ * * input1, which can be a real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
+ * * input2, which can be a real number or a Boolean value if input1 is a tensor and must be a tensor
+ * with the data type of real number or NN_BOOL if input1 is not a tensor.
*
- * 输出:
+ * Outputs:
*
- * * Tensor,数据类型为NN_BOOL的tensor,使用量化模型时,output的量化参数不可省略,但量化参数的数值不会对输入结果产生影响。
+ * * A tensor of the data type NN_BOOL. When a quantization model is used, the quantization parameters of the output
+ * cannot be omitted. However, values of the quantization parameters do not affect the result.
*/
OH_NN_OPS_LESS_EQUAL = 18,
/**
- * 计算x1和x2的内积
+ * Calculates the inner product of input1 and input2.
*
- * 输入:
+ * Inputs:
*
- * * x1,n维输入tensor。
- * * x2,n维输入tensor。
+ * * input1: n-dimensional input tensor.
+ * * input2: n-dimensional input tensor.
*
- * 参数:
+ * Parameters:
*
- * * TransposeX,布尔值,是否对x1进行转置。
- * * TransposeY,布尔值,是否对x2进行转置。
+ * * TransposeX: Boolean value indicating whether to transpose input1.
+ * * TransposeY: Boolean value indicating whether to transpose input2.
*
- * 输出:
+ * Outputs:
*
- * * output,计算得到内积,当type!=NN_UNKNOWN时,output数据类型由type决定;当type==NN_UNKNOWN时,
- * output的数据类型取决于inputX和inputY进行计算时转化的数据类型。
+ * * output: inner product obtained after calculation. In case of type!=NN_UNKNOWN, the output data type is
+ * determined by type. In case of type==NN_UNKNOWN, the output data type depends on the data type
+ * converted during computing of inputX and inputY.
+ *
*/
OH_NN_OPS_MATMUL = 19,
/**
- * 计算input1和input2对应元素最大值,input1和input2的输入遵守隐式类型转换规则,使数据类型一致。输入必须
- * 是两个张量或一个张量和一个标量。当输入是两个张量时,它们的数据类型不能同时为NN_BOOL。它们的形状支持
- * broadcast成相同的大小。当输入是一个张量和一个标量时,标量只能是一个常数。
+ * Calculates the maximum of input1 and input2 element-wise. The inputs of input1 and input2
+ * comply with the implicit type conversion rules to make the data types consistent.
+ * The inputs must be two tensors or one tensor and one scalar.
+ * When the inputs are two tensors, their data types cannot be both NN_BOOL. Their shapes can be broadcast to the same size.
+ * When the inputs are one tensor and one scalar, the scalar must be a constant.
*
- * 输入:
+ * Inputs:
*
- * * x1,n维输入tensor,实数或NN_BOOL类型。
- * * x2,n维输入tensor,实数或NN_BOOL类型。
+ * * input1: n-dimensional input tensor of the real number or NN_BOOL type.
+ * * input2: n-dimensional input tensor of the real number or NN_BOOL type.
*
- * 输出:
+ * Outputs:
*
- * * output,n维输出tensor,output的shape和数据类型和两个input中精度或者位数高的相同。
+ * * output: n-dimensional output tensor. The shape and data type of
+ * output are the same as those of the two inputs with a higher precision.
*/
OH_NN_OPS_MAXIMUM = 20,
/**
- * 在输入tensor上应用 2D 最大值池化。
+ * Applies 2D maximum pooling to the input tensor.
*
- * 如果输入中含有padMode参数:
+ * If the input contains the padMode parameter:
*
- * 输入:
+ * Inputs:
*
- * * x,一个张量。
+ * * input: tensor.
*
- * 参数:
+ * Parameters:
*
- * * kernelSize,用来取最大值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight],
- * 第一个数表示kernel高度,第二个数表示kernel宽度。
- * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight],
- * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。
- * * padMode,填充模式,int类型的可选值,0表示same,1表示valid,并且以最近邻的值填充。
- * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部
- * 和底部、左侧和右侧。否则,最后一个额外的填充将从底部和右侧完成。
- * valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * kernelSize: kernel size used to obtain the maximum. It is an int array [kernel_height, kernel_width].
+ * The first number indicates the kernel height, and the second number indicates the kernel width.
+ * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
+ * The first number indicates the moving step in height, and the second number indicates the moving step in width.
+ * * padMode: padding mode, which is optional. The value is of the int type and can be 0 (same)
+ * or 1 (valid). The nearest neighbor value is used for padding.
+ * 0 (same): The height and width of the output are the same as those of the input.
+ * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
+ * Otherwise, the last additional padding will be completed from the bottom and right.
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 如果输入中含有padList参数:
+ * If the input contains the padList parameter:
*
- * 输入:
+ * Inputs:
*
- * * x,一个张量。
+ * * input: tensor.
*
- * 参数:
+ * Parameters:
*
- * * kernelSize,用来取最大值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight],
- * 第一个数表示kernel高度,第二个数表示kernel宽度。
- * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight],
- * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。
- * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right],并且以最近邻的值填充。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * kernelSize: kernel size used to obtain the maximum. It is an int array [kernel_height, kernel_width].
+ * The first number indicates the kernel height, and the second number indicates the kernel width.
+ * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
+ * The first number indicates the moving step in height, and the second number indicates the moving step in width.
+ * * padList: padding around input. It is an int array [top, bottom, left, right],
+ * and the nearest neighbor values are used for padding.
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * output,输出x最大值池化后的张量。
+ * * output: tensor obtained after maximum pooling is applied to the input.
*/
OH_NN_OPS_MAX_POOL = 21,
/**
- * 将inputX和inputY相同的位置的元素相乘得到output。如果inputX和inputY类型shape不同,要求inputX和inputY可以
- * 通过broadcast扩充成相同的shape进行相乘。
+ * Multiplies elements in the same positions of inputX and inputY to obtain the output.
+ * If inputX and inputY have different shapes, expand them to the same shape
+ * through broadcast and then perform multiplication.
*
- * 输入:
+ * Inputs:
*
- * * x1,一个n维tensor。
- * * x2,一个n维tensor。
+ * * input1: n-dimensional tensor.
+ * * input2: n-dimensional tensor.
*
- * 参数:
+ * Parameters:
*
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * output,x1和x2每个元素的乘积。
+ * * output: product of each element of input1 and input2.
*/
OH_NN_OPS_MUL = 22,
/**
- * 根据indices指定的位置,生成一个由one-hot向量构成的tensor。每个onehot向量中的有效值由on_value决定,其他位置由off_value决定。
+ * Generates a one-hot tensor based on the positions specified by indices. The valid value at each position
+ * specified by indices is determined by on_value, and the values at all other positions are determined by off_value.
*
- * 输入:
+ * Inputs:
*
- * * indices,n维tensor。indices中每个元素决定每个one-hot向量,on_value的位置
- * * depth,一个整型标量,决定one-hot向量的深度。要求depth>0。
- * * on_value,一个标量,指定one-hot向量中的有效值。
- * * off_value,(一个标量,指定one-hot向量中除有效位以外,其他位置的值。
+ * * indices: n-dimensional tensor. Each element in indices determines the position of
+ * on_value in each one-hot vector.
+ * * depth: integer scalar that determines the depth of the one-hot vector. The value of depth
+ * must be greater than 0.
+ * * on_value: scalar that specifies a valid value in the one-hot vector.
+ * * off_value: scalar that specifies the values of other positions in the one-hot vector except the valid value.
*
- * 参数:
+ * Parameters:
*
- * * axis,一个整型标量,指定插入one-hot的维度。
- * indices的形状是[N,C],depth的值是D,当axis=0时,output形状为[D,N,C],
- * indices的形状是[N,C],depth的值是D,当axis=-1时,output形状为[N,C,D],
- * indices的形状是[N,C],depth的值是D,当axis=1时,output形状为[N,D,C]。
+ * * axis: integer scalar that specifies the dimension for inserting the one-hot. Assume that the shape
+ * of indices is [N, C], and the value of depth is D.
+ * When axis is 0, the shape of the output is [D, N, C].
+ * When axis is -1, the shape of the output is [N, C, D].
+ * When axis is 1, the shape of the output is [N, D, C].
*
- * 输出:
+ * Outputs:
*
- * * output,如果indices时n维tensor,则output是(n+1)维tensor。output的形状由indices和axis共同决定。
+ * * output: (n+1)-dimensional tensor if indices is an n-dimensional tensor.
+ * The output shape is determined by indices and axis.
*/
OH_NN_OPS_ONE_HOT = 23,
/**
- * 在inputX指定维度的数据前后,添加指定数值进行增广。
+ * Pads inputX in the specified dimensions.
*
- * 输入:
+ * Inputs:
*
- * * inputX,一个n维tensor,要求inputX的排布为[BatchSize,…]。
- * * paddings,一个2维tensor,指定每一维度增补的长度,shape为[n,2]。paddings[i][0]表示第i维上,需要在inputX前增补的数量;
- * paddings[i][1]表示第i维上,需要在inputX后增补的数量。
+ * * inputX: n-dimensional tensor in [BatchSize, ...] format.
+ * * paddings: 2D tensor that specifies the length to pad in each dimension. The shape is [n, 2].
+ * For example, paddings[i][0] indicates the number of paddings to be added preceding inputX in the ith dimension.
+ * paddings[i][1] indicates the number of paddings to be added following inputX in the ith dimension.
*
- * 参数:
+ * Parameters:
*
- * * padValues,一个常数,数据类型和inputX一致,指定Pad操作补全的数值。
+ * * padValues: value to be added to the pad operation. The value is a constant with the same data type as inputX.
*
- * 输出:
+ * Outputs:
*
- * * output,一个n维tensor,维数和数据类型和inputX保持一致。shape由inputX和paddings共同决定
- * output.shape[i] = input.shape[i] + paddings[i][0]+paddings[i][1]。
+ * * output: n-dimensional tensor after padding, with the same dimensions and data type as inputX.
+ * The shape is determined by inputX and paddings.
+ * output.shape[i] = input.shape[i] + paddings[i][0]+paddings[i][1]
*/
OH_NN_OPS_PAD = 24,
/**
- * 求x的y次幂,输入必须是两个tensor或一个tensor和一个标量。当输入是两个tensor时,它们的数据类型不能同时为NN_BOOL,
- * 且要求两个tensor的shape相同。当输入是一个tensor和一个标量时,标量只能是一个常数。
+ * Calculates the y power of each element in input. The inputs must be two tensors or one tensor and one scalar.
+ * When the inputs are two tensors, their data types cannot be both NN_BOOL, and their shapes must be the same.
+ * When the inputs are one tensor and one scalar, the scalar must be a constant.
*
- * 输入:
+ * Inputs:
*
- * * x,实数、bool值或tensor,tensor的数据类型为实数/NN_BOOL。
- * * y,实数、bool值或tensor,tensor的数据类型为实数/NN_BOOL。
+ * * input: real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
+ * * y: real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
*
- * 输出:
+ * Outputs:
*
- * * output,形状由x和y broadcast后的形状决定。
+ * * output: tensor, whose shape is determined by the shape of input and y after broadcasting.
*/
OH_NN_OPS_POW = 25,
/**
- * 给定一个tensor,计算其缩放后的值。
+ * Scales a tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,一个n维tensor。
- * * scale,缩放tensor。
- * * bias,偏置tensor。
+ * * input: n-dimensional tensor.
+ * * scale: scaling tensor.
+ * * bias: bias tensor.
*
- * 参数:
+ * Parameters:
*
- * * axis,指定缩放的维度。
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * axis: dimensions to be scaled.
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * output,scale的计算结果,一个n维tensor,类型和input一致,shape由axis决定。
+ * * output: scaled n-dimensional tensor, whose data type is the same as that of input and
+ * shape is determined by axis.
*/
OH_NN_OPS_SCALE = 26,
/**
- * 输入一个tensor,计算其shape。
+ * Calculates the shape of the input tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,一个n维tensor。
+ * * input: n-dimensional tensor.
*
- * 输出:
+ * Outputs:
*
- * * output,输出tensor的维度,一个整型数组。
+ * * output: integer array representing the dimensions of the input tensor.
*/
OH_NN_OPS_SHAPE = 27,
/**
- * 给定一个tensor,计算其sigmoid结果。
+ * Applies the sigmoid operation to the input tensor.
*
- * 输入:
+ * Inputs:
*
- * * input,一个n维tensor。
+ * * input: n-dimensional tensor.
*
- * 输出:
+ * Outputs:
*
- * * output,sigmoid的计算结果,一个n维tensor,类型和shape和input一致。
+ * * output: result of the sigmoid operation. It is an n-dimensional tensor
+ * with the same data type and shape as input.
*/
OH_NN_OPS_SIGMOID = 28,
/**
- * 在input tensor各维度,以begin为起点,截取size长度的切片。
+ * Slices a tensor of the specified size from the input in each dimension.
*
- * 输入:
+ * Inputs:
*
- * * x,n维输入tensor。
- * * begin,一组不小于0的整数,指定每个维度上的起始切分点。
- * * size,一组不小于1的整数,指定每个维度上切片的长度。假设某一维度i,1<=size[i]<=input.shape[i]-begin[i]。
+ * * input: n-dimensional input tensor.
+ * * begin: start of the slice, which is an array of integers greater than or equal to 0.
+ * * size: slice length, which is an array of integers greater than or equal to 1.
+ * Assume that a dimension is i and 1<=size[i]<=input.shape[i]-begin[i].
*
- * 输出:
+ * Outputs:
*
- * * output,切片得到的n维tensor,其TensorType和input一致,shape和size相同。
+ * * output: n-dimensional tensor obtained by slicing.
+ * The TensorType of the output is the same as that of the input, and the output shape is the same as size.
*/
OH_NN_OPS_SLICE = 29,
/**
- * 给定一个tensor,计算其softmax结果。
+ * Applies the softmax operation to the input tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,n维输入tensor。
+ * * input: n-dimensional input tensor.
*
- * 参数:
+ * Parameters:
*
- * * axis,int64类型,指定计算softmax的维度。整数取值范围为[-n,n)。
+ * * axis: dimension in which the softmax operation is performed.
+ * The value is of the int64 type. It is an integer in the range [-n, n).
*
- * 输出:
+ * Outputs:
*
- * * output,softmax的计算结果,一个n维tensor,类型和shape和x一致。
+ * * output: result of the softmax operation. It is an n-dimensional tensor with
+ * the same data type and shape as input.
*/
OH_NN_OPS_SOFTMAX = 30,
/**
- * 将4维tensor在空间维度上进行切分成blockShape[0] * blockShape[1]个小块,然后在batch维度上拼接这些小块。
+ * Divides a 4D tensor into small blocks in the spatial dimensions and concatenates these blocks
+ * in the batch dimension. The number of blocks is blockShape[0] multiplied by blockShape[1].
*
- * 输入:
+ * Inputs:
*
- * * x,一个4维tensor
+ * * input: 4D tensor.
*
- * 参数:
+ * Parameters:
*
- * * blockShape,一对整数,每个整数不小于1。
- * * paddings,一对数组,每个数组由两个整数组成。组成paddings的4个整数都不小于0。paddings[0][0]和paddings[0][1]指
- * 定了第三个维度上padding的数量,paddings[1][0]和paddings[1][1]指定了第四个维度上padding的数量。
+ * * blockShape: a pair of integers. Each of them is greater than or equal to 1.
+ * * paddings: a pair of arrays. Each of them consists of two integers. The four integers that form paddings
+ * must be greater than or equal to 0. paddings[0][0] and paddings[0][1]
+ * specify the number of paddings in the third dimension, and paddings[1][0] and paddings[1][1]
+ * specify the number of paddings in the fourth dimension.
*
- * 输出:
+ * Outputs:
*
- * * output,一个4维tensor,数据类型和input一致。shape由input,blockShape和paddings共同决定,假设input shape为[n,c,h,w],则有
- * output.shape[0] = n * blockShape[0] * blockShape[1]
- * output.shape[1] = c
- * output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0]
- * output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1]
- * 要求(h + paddings[0][0] + paddings[0][1])和(w + paddings[1][0] + paddings[1][1])能被
- * blockShape[0]和blockShape[1]整除。
+ * * output: 4D tensor with the same data type as input. The shape is determined by input,
+ * blockShape, and paddings. Assume that the input shape is [n,c,h,w], then:
+ * output.shape[0] = n * blockShape[0] * blockShape[1]
+ * output.shape[1] = c
+ * output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0]
+ * output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1]
+ * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]) must be exactly divisible by
+ * blockShape[0] and blockShape[1], respectively.
+ *
*/
OH_NN_OPS_SPACE_TO_BATCH_ND = 31,
/**
- * Split 算子沿 axis 维度将 input 拆分成多个 tensor,tensor 数量由 outputNum 指定。
+ * Splits the input into multiple tensors along the axis dimension. The number of tensors is specified by outputNum.
*
- * 输入:
+ * Inputs:
*
- * * x,n维tensor。
+ * * input: n-dimensional tensor.
*
- * 参数:
+ * Parameters:
*
- * * outputNum,long,输出tensor的数量,output_num类型为int。
- * * size_splits,1维tensor,指定 tensor 沿 axis 轴拆分后,每个 tensor 的大小,size_splits 类型为 int。
- * 如果 size_splits 的数据为空,则 tensor 被拆分成大小均等的 tensor,此时要求 input.shape[axis] 可以被 outputNum 整除;
- * 如果 size_splits 不为空,则要求 size_splits 所有元素之和等于 input.shape[axis]。
- * * axis,指定拆分的维度,axis类型为int。
+ * * outputNum: number of output tensors. The data type is long.
+ * * size_splits: size of each tensor split from the input. The value is a 1D tensor of the int type.
+ * If size_splits is empty, the input will be evenly split into tensors of the same size. In this case,
+ * input.shape[axis] can be exactly divisible by outputNum.
+ * If size_splits is not empty, the sum of all its elements must be equal to input.shape[axis].
+ * * axis: splitting dimension of the int type.
*
- * 输出:
+ * Outputs:
*
- * * outputs,一组n维tensor,每一个tensor类型和shape相同,每个tensor的类型和input一致。
+ * * outputs: array of n-dimensional tensors, with the same data type and dimensions.
+ * The data type of each tensor is the same as that of input.
*/
OH_NN_OPS_SPLIT = 32,
/**
- * 给定一个tensor,计算其平方根。
+ * Calculates the square root of a tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,一个n维tensor。
+ * * input: n-dimensional tensor.
*
- * 输出:
+ * Outputs:
*
- * * output,输入的平方根,一个n维tensor,类型和shape和input一致。
+ * * output: square root of the input. It is an n-dimensional tensor with the same data type and shape as input.
*/
OH_NN_OPS_SQRT = 33,
/**
- * 计算两个输入的差值并返回差值的平方。SquaredDifference算子支持tensor和tensor相减。
- * 如果两个tensor的TensorType不相同,Sub算子会将低精度的tensor转成更高精度的类型。
- * 如果两个tensor的shape不同,要求两个tensor可以通过broadcast拓展成相同shape的tensor。
+ * Calculates the square of the difference between two tensors. The SquaredDifference operator supports tensor and tensor subtraction.
+ * If two tensors have different TensorTypes, the Sub operator converts the low-precision tensor to a high-precision one.
+ * If two tensors have different shapes, the two tensors can be extended to tensors with the same shape through broadcast.
*
- * 输入:
+ * Inputs:
*
- * * x,被减数,inputX是一个tensor,tensor的类型可以是NN_FLOAT16、NN_FLOAT32、NN_INT32或NN_BOOL。
- * * y,减数,inputY是一个tensor,tensor的类型可以是NN_FLOAT16、NN_FLOAT32、NN_INT32或NN_BOOL。
+ * * input1: minuend, which is a tensor of the NN_FLOAT16, NN_FLOAT32, NN_INT32, or NN_BOOL type.
+ * * input2: subtrahend, which is a tensor of the NN_FLOAT16, NN_FLOAT32, NN_INT32, or NN_BOOL type.
*
- * 输出:
+ * Outputs:
*
- * * output,两个input差值的平方。output的shape由inputX和inputY共同决定,inputX和inputY的shape相同时,
- * output的shape和inputX、inputY相同;shape不同时,需要将inputX或inputY做broadcast操作后,相减得到output。
- * output的TensorType由两个输入中更高精度的TensorType决定。
+ * * output: square of the difference between two inputs. The output shape is determined
+ * by input1 and input2. If they have the same shape, the output tensor has the same shape as them.
+ * If they have different shapes, perform the broadcast operation on input1 and input2 and perform subtraction.
+ * TensorType of the output is the same as that of the input tensor with higher precision.
*/
OH_NN_OPS_SQUARED_DIFFERENCE = 34,
/**
- * 去除axis中,长度为1的维度。支持int8量化输入假设input的shape为[2,1,1,2,2],axis为[0,1],
- * 则output的shape为[2,1,2,2]。第0维到第1维之间,长度为0的维度被去除。
+ * Removes the dimension with a length of 1 from the specified axis. The int8 quantization input is supported.
+ * Assume that the input shape is [2, 1, 1, 2, 2] and axis is [0,1], the output shape is [2, 1, 2, 2],
+ * which means the dimension whose length is 1 among dimensions 0 and 1 is removed.
*
- * 输入:
+ * Inputs:
*
- * * x,n维tensor。
+ * * input: n-dimensional tensor.
*
- * 参数:
+ * Parameters:
*
- * * axis,指定删除的维度。axis可以是一个int64_t的整数或数组,整数的取值范围为[-n,n)。
+ * * axis: dimension to be removed. The value is of int64_t type and can be an integer in the range [-n, n) or an array.
*
- * 输出:
+ * Outputs:
*
- * * output,输出tensor。
+ * * output: output tensor.
*/
OH_NN_OPS_SQUEEZE = 35,
/**
- * 将一组tensor沿axis维度进行堆叠,堆叠前每个tensor的维数为n,则堆叠后output维数为n+1。
+ * Stacks multiple tensors along the specified axis. If each tensor has n dimensions before stacking,
+ * the output tensor will have n+1 dimensions.
*
- * 输入:
+ * Inputs:
*
- * * x,Stack支持传入多个输入n维tensor,每个tensor要求shape相同且类型相同。
+ * * input: input for stacking, which can contain multiple n-dimensional tensors.
+ * Each of them must have the same shape and type.
*
- * 参数:
+ * Parameters:
*
- * * axis,一个整数,指定tensor堆叠的维度。axis可以是负数,axis取值范围为[-(n+1),(n+1))。
+ * * axis: dimension for tensor stacking, which is an integer. The value range is [-(n+1),(n+1)),
+ * which means a negative number is allowed.
*
- * 输出:
+ * Outputs:
*
- * * output,将input沿axis维度堆叠的输出,n+1维tensor,TensorType和input相同。
+ * * output: stacking result of the input along the axis dimension. The value is an n+1-dimensional tensor
+ * and has the same TensorType as the input.
*/
OH_NN_OPS_STACK = 36,
/**
- * 跨步截取Tensor
- *
- * 输入:
- *
- * * x,n维输入tensor。
- * * begin,1维tensor,begin的长度等于n,begin[i]指定第i维上截取的起点。
- * * end,1维tensor,end的长度等于n,end[i]指定第i维上截取的终点。
- * * strides,1维tensor,strides的长度等于n,strides[i]指定第i维上截取的步长。
- *
- * 参数:
- *
- * * beginMask,一个整数,用于解除begin的限制。将beginMask转成二进制表示,如果binary(beginMask)[i]==1,
- * 则对于第i维,从第一个元素开始,以strides[i]为步长截取元素直到第end[i]-1个元素。
- * * endMask,个整数,用于解除end的限制。将endMask转成二进制表示,如果binary(endMask)[i]==1,则对于第i维,
- * 从第begin[i]个元素起,以strides[i]为步长截取元素直到tensor边界。
- * * ellipsisMask,一个整数,用于解除begin和end的限制。将ellipsisMask转成二进制表示,如果binary(ellipsisMask)[i]==1,
- * 则对于第i维,从第一个元素开始,以strides[i]为补偿,截取元素直到tensor边界。binary(ellipsisMask)仅允许有一位不为0。
- * * newAxisMask,一个整数,用于新增维度。将newAxisMask转成二进制表示,如果binary(newAxisMask)[i]==1,则在第i维插入长度为1的新维度。
- * * shrinkAxisMask,一个整数,用于压缩指定维度。将shrinkAxisMask转成二进制表示,如果binary(shrinkAxisMask)[i]==1,
- * 则舍去第i维所有元素,第i维长度压缩至1。
- *
- * 输出:
- *
- * * 堆叠运算后的Tensor,数据类型与x相同。输出维度rank(x[0])+1 维。
+ * Slices a tensor with the specified stride.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional input tensor.
+ * * begin: start of slicing, which is a 1D tensor. The length of begin is n.
+ * begin[i] specifies the start of slicing in the ith dimension.
+ * * end: end of slicing, which is a 1D tensor. The length of end is n.
+ * end[i] specifies the end of slicing in the ith dimension.
+ * * strides: slicing stride, which is a 1D tensor. The length of strides is n.
+ * strides[i] specifies the stride at which the tensor is sliced in the ith dimension.
+ *
+ * Parameters:
+ *
+ * * beginMask: an integer used to mask begin. beginMask is represented in binary code.
+ * In case of binary(beginMask)[i]==1, for the ith dimension, elements are sliced from the first element
+ * at strides[i] until the end[i]-1 element.
+ *
+ * * endMask: an integer used to mask end. endMask is represented in binary code.
+ * In case of binary(endMask)[i]==1, elements are sliced from the element at the begin[i] position
+ * in the ith dimension until the tensor boundary at strides[i].
+ *
+ * * ellipsisMask: integer used to mask begin and end. ellipsisMask is represented in binary code.
+ * In case of binary(ellipsisMask)[i]==1, elements are sliced from the first element at strides[i] in the ith dimension
+ * until the tensor boundary. Only one bit of binary(ellipsisMask) can be a non-zero value.
+ *
+ * * newAxisMask: new dimension, which is an integer. newAxisMask is represented in binary code.
+ * In case of binary(newAxisMask)[i]==1, a new dimension whose length is 1 is inserted into the ith dimension.
+ * * shrinkAxisMask: shrinking dimension, which is an integer. shrinkAxisMask is represented in binary code.
+ * In the case of binary(shrinkAxisMask)[i]==1, all elements in the ith dimension will be discarded,
+ * and the length of the ith dimension is shrunk to 1.
+ *
+ * Outputs:
+ *
+ * * A tensor, with the same data type as input. The number of dimensions of the output tensor is rank(input[0])+1.
*/
OH_NN_OPS_STRIDED_SLICE = 37,
/**
- * 计算两个输入的差值。
+ * Calculates the difference between two tensors.
*
- * 输入:
+ * Inputs:
*
- * * x,被减数,x是一个tensor。
- * * y,减数,y是一个tensor。
+ * * input1: minuend, which is a tensor.
+ * * input2: subtrahend, which is a tensor.
*
- * 参数:
+ * Parameters:
*
- * * activationType,是一个整型常量,且必须是FuseType中含有的值。
- * 在输出之前调用指定的激活。
+ * * activationType is an integer constant which is contained in FuseType.
+ * The specified activation function is called before output.
*
- * 输出:
+ * Outputs:
*
- * * output,两个input相减的差。output的shape由inputX和inputY共同决定,inputX和inputY的shape相同时,output的shape和inputX、inputY相同;
- * shape不同时,需要将inputX或inputY做broadcast操作后,相减得到output。output的TensorType由两个输入中更高精度的TensorType决定。
+ * * output: difference between the two tensors. The output shape is determined by input1 and input2.
+ * If they have the same shape, the output tensor has the same shape as them.
+ * If they have different shapes, perform the broadcast operation on input1 and input2 and perform subtraction.
+ * TensorType of the output is the same as that of the input tensor with higher precision.
*/
OH_NN_OPS_SUB = 38,
/**
- * 计算输入tensor的双曲正切值。
+ * Computes hyperbolic tangent of the input tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,n维tensor。
+ * * input: n-dimensional tensor.
*
- * 输出:
+ * Outputs:
*
- * * output,input的双曲正切,TensorType和tensor shape和input相同。
+ * * output: hyperbolic tangent of the input. The TensorType and tensor shape are the same as those of the input.
*/
OH_NN_OPS_TANH = 39,
/**
- * 以multiples指定的次数拷贝input。
- *
- * 输入:
- * * x,n维tensor。
- * * multiples,1维tensor,指定各个维度拷贝的次数。其长度m不小于input的维数n。
- *
- * 输出:
- * * Tensor,m维tensor,TensorType与input相同。如果input和multiples长度相同,
- * 则output和input维数一致,都是n维tensor;如果multiples长度大于n,则用1填充input的维度,
- * 再在各个维度上拷贝相应的次数,得到m维tensor。
+ * Copies a tensor the specified times.
+ *
+ * Inputs:
+ * * input: n-dimensional tensor.
+ * * multiples: number of times that the input tensor is copied in each dimension. The value is a 1D tensor.
+ * The length m is not less than the number of dimensions, that is, n.
+ *
+ * Outputs:
+ * * An m-dimensional tensor whose TensorType is the same as that of the input. If input and
+ * multiples have the same length, input and output have the same number of dimensions.
+ * If the length of multiples is greater than n, 1 is used to fill the input dimension,
+ * and then the input is copied in each dimension the specified times to obtain the m-dimensional tensor.
*/
OH_NN_OPS_TILE = 40,
/**
- * 根据permutation对input 0进行数据重排。
+ * Transposes data of input 0 based on permutation.
*
- * 输入:
+ * Inputs:
*
- * * x,n维tensor,待重排的tensor。
- * * perm,1维tensor,其长度和input 0的维数一致。
+ * * input: n-dimensional tensor to be transposed.
+ * * permutation: The value is a 1D tensor whose length is the same as the number of dimensions of input 0.
*
- * 输出:
+ * Outputs:
*
- * * output,n维tensor,output 0的TensorType与input 0相同,shape由input 0的shape和permutation共同决定。
+ * * output: n-dimensional tensor. TensorType of output 0 is the same as that of input 0,
+ * and the output shape is determined by the shape and permutation of input 0.
*/
OH_NN_OPS_TRANSPOSE = 41,
/**
- * keepDims为false时,计算指定维度上的平均值,减少input的维数;当keepDims为true时,计算指定维度上的平均值,保留相应的维度。
+ * Calculates the average value in the specified dimension. If keepDims is set to false, the number of dimensions
+ * is reduced for the input; if keepDims is set to true, the number of dimensions is retained.
*
- * 输入:
+ * Inputs:
*
- * * input,n维输入tensor,n<8。
- * * axis,1维tensor,指定计算均值的维度,axis中每个元素的取值范围为[-n,n)。
+ * * input: n-dimensional input tensor, where n is less than 8.
+ * * axis: dimension used to calculate the average value. The value is a 1D tensor. The value range of each element in axis is [–n, n).
*
- * 参数:
+ * Parameters:
*
- * * keepDims,布尔值,是否保留维度的标志位。
+ * * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
*
- * 输出:
+ * Outputs:
*
- * * output,m维输出tensor,数据类型和input相同。当keepDims为false时,m==n;当keepDims为true时,m&lt;n。
+ * * output: m-dimensional output tensor whose data type is the same as that of the input. If keepDims is
+ * false, m==n. If keepDims is true, m&lt;n.
*/
OH_NN_OPS_REDUCE_MEAN = 42,
/**
- * 采用Bilinear方法,按给定的参数对input进行变形。
+ * Resizes the input tensor, using the bilinear method, based on the given parameters.
*
- * 输入:
+ * Inputs:
*
- * * input,4维输入tensor,input中的每个元素不能小于0。input排布必须是[batchSize,height,width,channels]。
+ * * input: 4D input tensor. Each element in the input cannot be less than 0. The input layout must be [batchSize, height, width, channels].
*
- * 参数:
+ * Parameters:
*
- * * newHeight,resize之后4维tensor的height值。
- * * newWidth,resize之后4维tensor的width值。
- * * preserveAspectRatio,一个布尔值,指示resize操作是否保持input tensor的height/width比例。
- * * coordinateTransformMode,一个int32整数,指示Resize操作所使用的坐标变换方法,目前支持以下方法:
- * * excludeOutside,一个int64浮点数。当excludeOutside=1时,超出input边界的采样权重被置为0,其余权重重新归一化处理。
+ * * newHeight: resized height of the 4D tensor.
+ * * newWidth: resized width of the 4D tensor.
+ * * preserveAspectRatio: indicates whether to maintain the height/width ratio of input after resizing.
+ * * coordinateTransformMode: coordinate transformation method used by the resize operation. The value is an int32 integer.
+ * Currently, the following methods are supported:
+ * * excludeOutside: an int64 floating point number. When its value is 1, the sampling weight of the part that
+ * exceeds the boundary of input is set to 0, and other weights are normalized.
*
- * 输出:
+ * Outputs:
*
- * * output,n维输出tensor,output的shape和数据类型和input相同。
+ * * output: n-dimensional tensor, with the same shape and data type as input.
*/
OH_NN_OPS_RESIZE_BILINEAR = 43,
- /**
- * 求input平方根的倒数。
+ /**
+ * Calculates the reciprocal of the square root of a tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,n维输入tensor,input中的每个元素不能小于0,n<8。
+ * * input: n-dimensional tensor, where n is less than 8. Each element of the tensor cannot be less than 0.
*
- * 输出:
+ * Outputs:
*
- * * output,n维输出tensor,output的shape和数据类型和input相同。
-
+ * * output: n-dimensional tensor, with the same shape and data type as input.
*/
OH_NN_OPS_RSQRT = 44,
- /**
- * 根据inputShape调整input的形状。
+ /**
+ * Reshapes a tensor.
*
- * 输入:
+ * Inputs:
*
- * * x,一个n维输入tensor。
- * * InputShape,一个1维tensor,表示输出tensor的shape,需要是一个常量tensor。
+ * * input: n-dimensional input tensor.
+ * * InputShape: shape of the output tensor. The value is a 1D constant tensor.
*
- * 输出:
+ * Outputs:
*
- * * output,输出tensor,数据类型和input一致,shape由inputShape决定。
+ * * output: tensor whose data type is the same as that of input and shape is determined by InputShape.
*/
OH_NN_OPS_RESHAPE = 45,
/**
- * 计算input和weight的PReLU激活值。
+ * Calculates the PReLU activation value of input and weight.
+ *
+ * Inputs:
*
- * 输入:
+ * * input: n-dimensional tensor. If n is greater than or equal to 2, inputX must be [BatchSize, ..., Channels].
+ * The second dimension is the number of channels.
+ * * weight: 1D tensor. The length of weight must be 1 or equal to the number of channels. If the length of weight is 1,
+ * all channels share the same weight.
+ * If the length of weight is equal to the number of channels, each channel exclusively has a weight.
+ * If n is less than 2 for inputX, the weight length must be 1.
*
- * * x,一个n维tensor,如果n>=2,则要求inputX的排布为[BatchSize,…,Channels],第二个维度为通道数。
- * * weight,一个1维tensor。weight的长度只能是1或者等于通道数。当weight长度为1,则inputX所有通道共享一个权重值。
- * 若weight长度等于通道数,每个通道独享一个权重,若inputX维数n<2,weight长度只能为1。
- * 输出:
+ * Outputs:
*
- * output,x的PReLU激活值。形状和数据类型和inputX保持一致。
+ * * output: PReLU activation value of x, with the same shape and data type as inputX.
*/
OH_NN_OPS_PRELU = 46,
/**
- * 计算input的Relu激活值。
+ * Calculates the Relu activation value of input.
*
- * 输入:
+ * Inputs:
*
- * * input,一个n维输入tensor。
+ * * input: n-dimensional input tensor.
*
- * 输出:
+ * Outputs:
*
- * * output,n维Relu输出tensor,数据类型和shape和input一致。
+ * * output: n-dimensional tensor, with the same data type and shape as the input tensor.
*/
OH_NN_OPS_RELU = 47,
/**
- * 计算input的Relu6激活值,即对input中每个元素x,计算min(max(x,0),6)。
+ * Calculates the Relu6 activation value of the input, that is, calculate min(max(x, 0), 6) for each element x in the input.
*
- * 输入:
+ * Inputs:
*
- * * input,一个n维输入tensor。
+ * * input: n-dimensional input tensor.
*
- * 输出:
+ * Outputs:
*
- * * output,n维Relu6输出tensor,数据类型和shape和input一致。
+ * * output: n-dimensional Relu6 tensor, with the same data type and shape as the input tensor.
*/
OH_NN_OPS_RELU6 = 48,
/**
- * 对一个tensor从某一axis开始做层归一化。
+ * Applies layer normalization for a tensor from the specified axis.
*
- * 输入:
+ * Inputs:
*
- * * input,一个n维输入tensor。
- * * gamma,一个m维tensor,gamma维度应该与input做归一化部分的shape一致。
- * * beta,一个m维tensor,shape与gamma一样。
+ * * input: n-dimensional input tensor.
+ * * gamma: m-dimensional tensor. The dimensions of gamma must be the same as
+ * the shape of the part of the input tensor to normalize.
+ * * beta: m-dimensional tensor with the same shape as gamma.
*
- * 参数:
+ * Parameters:
*
- * * beginAxis,是一个NN_INT32的标量,指定开始做归一化的轴,取值范围是[1,rank(input))。
- * * epsilon,是一个NN_FLOAT32的标量,是归一化公式中的微小量,常用值是1e-7。
+ * * beginAxis is an NN_INT32 scalar that specifies the axis from which normalization starts. The value range is [1, rank(input)).
+ * * epsilon is a scalar of NN_FLOAT32. It is a tiny amount in the normalization formula. The common value is 1e-7.
*
- * 输出:
+ * Outputs:
*
- * * output,n维输出tensor,数据类型和shape和input一致。
+ * * output: n-dimensional tensor, with the same data type and shape as the input tensor.
*/
OH_NN_OPS_LAYER_NORM = 49,
/**
- * 当keepDims为false时,过乘以维度中的所有元素来减小张量的维度,减少input的维数;当keepDims为true时,过乘以维度中的所有元素来减小张量的维度,保留相应的维度。
+ * Calculates the product of the elements for a tensor along the specified dimension. If keepDims is set to false,
+ * the number of dimensions is reduced for the input; if keepDims is set to true, the number of dimensions is retained.
*
- * 输入:
+ * Inputs:
*
- * * input,n维输入tensor,n<8。
- * * axis,1维tensor,指定计算乘的维度,axis中每个元素的取值范围为[-n,n)。
+ * * input: n-dimensional input tensor, where n is less than 8.
+ * * axis: dimension used to calculate the product. The value is a 1D tensor. The value range of each element in axis is [–n, n).
*
- * 参数:
+ * Parameters:
*
- * * keepDims,布尔值,是否保留维度的标志位。
+ * * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
+ * When its value is true, the number of output dimensions is the same as that of the input.
+ * When its value is false, the number of output dimensions is reduced.
*
- * 输出:
+ * Outputs:
*
- * * output,m维输出tensor,数据类型和input相同。当keepDims为false时,m==n;当keepDims为true时,m&lt;n。
+ * * output: m-dimensional output tensor whose data type is the same as that of the input.
+ * If keepDims is false, m==n. If keepDims is true, m&lt;n.
*/
OH_NN_OPS_REDUCE_PROD = 50,
/**
- * 当keepDims为false时,计算指定维度上的逻辑与,减少input的维数;当keepDims为true时,计算指定维度上的逻辑与,保留相应的维度。
+ * Operates the logical AND in the specified dimension. If keepDims is set to false,
+ * the number of dimensions is reduced for the input; if keepDims is set to true, the number of dimensions is retained.
*
- * 输入:
+ * Inputs:
*
- * * n维输入tensor,n<8。
- * * 1维tensor,指定计算逻辑与的维度,axis中每个元素的取值范围为[-n,n)。
+ * * A n-dimensional input tensor, where n is less than 8.
+ * A 1D tensor specifying the dimension used to operate the logical AND. The value range of each element in axis is [–n, n).
*
- * 参数:
+ * Parameters:
*
- * * keepDims,布尔值,是否保留维度的标志位。
+ * * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
*
- * 输出:
- * * output,m维输出tensor,数据类型和input相同。当keepDims为false时,m==n;当keepDims为true时,m&lt;n。
+ * Outputs:
+ * * output: m-dimensional output tensor whose data type is the same as that of the input.
+ * If keepDims is false, m==n. If keepDims is true, m&lt;n.
*/
OH_NN_OPS_REDUCE_ALL = 51,
/**
- * 数据类型转换。
+ * Converts the data type.
*
- * 输入:
+ * Inputs:
*
- * * input,n维tensor。
+ * * input: n-dimensional tensor.
*
- * 参数:
+ * Parameters:
*
- * * src_t,定义输入的数据类型。
- * * dst_t,定义输出的数据类型。
+ * * src_t: data type of the input.
+ * * dst_t: data type of the output.
*
- * 输出:
+ * Outputs:
*
- * * output,n维tensor,数据类型由input2决定 输出shape和输入相同。
+ * * output: n-dimensional tensor. The data type is determined by input2.
+ * The output shape is the same as the input shape.
*/
OH_NN_OPS_QUANT_DTYPE_CAST = 52,
/**
- * 查找沿最后一个维度的k个最大条目的值和索引。
+ * Obtains the values and indices of the largest k entries in the last dimension.
*
- * 输入:
+ * Inputs:
*
- * * x,n维tensor。
- * * input k,指明是得到前k个数据以及其index。
+ * * input: n-dimensional tensor.
+ * * input k: first k records of data and their indices.
*
- * 参数:
+ * Parameters:
*
- * * sorted,如果为True,按照大到小排序,如果为False,按照小到大排序。
+ * * sorted: order of sorting. The value true means descending and false means ascending.
*
- * 输出:
+ * Outputs:
*
- * * output0,最后一维的每个切片中的k个最大元素。
- * * output1,输入的最后一个维度内的值的索引。
+ * * output0: largest k elements in each slice of the last dimension.
+ * * output1: index of the value in the last dimension of the input.
*/
OH_NN_OPS_TOP_K = 53,
/**
- * 返回跨轴的张量最大值的索引。
+ * Returns the index of the maximum tensor value across axes.
*
- * 输入:
+ * Inputs:
*
- * * input,n维tensor,输入张量(N,∗),其中∗意味着任意数量的附加维度。
+ * * input: n-dimensional tensor (N, ∗), where ∗ means any number of additional dimensions.
*
- * 参数:
+ * Parameters:
*
- * * axis,指定求最大值索引的维度。
- * * keep_dims,bool值,是否维持输入张量维度。
+ * * axis: dimension for calculating the index of the maximum.
+ * * keep_dims: indicates whether to maintain the input tensor dimension. The value is a Boolean value.
*
- * 输出:
- * * output,tensor,轴上输入张量最大值的索引。
+ * Outputs:
+ * * output: index of the maximum input tensor on the axis. The value is a tensor.
*/
OH_NN_OPS_ARG_MAX = 54,
/**
- * 根据输入axis的值。增加一个维度。
+ * Adds a dimension based on the value of axis.
*
- * 输入:
- * * x,n维tensor。
+ * Inputs:
+ * * input: n-dimensional tensor.
*
- * 参数:
+ * Parameters:
*
- * * axis,指定增加的维度。axis可以是一个整数或一组整数,整数的取值范围为[-n,n)。
+ * * axis: dimension to be added. The value of axis can be an integer or an array of integers.
+ * The value range of the integer is [-n, n).
*
- * 输出:
- * * output,输出tensor。
+ * Outputs:
+ * * output: output tensor.
*/
OH_NN_OPS_UNSQUEEZE = 55,
/**
- * 高斯误差线性单元激活函数。output=0.5∗x∗(1+tanh(x/2)),不支持int量化输入。
+ * Gaussian error linear unit activation function. The int quantization input is not supported. output=0.5∗input∗(1+tanh(input/2))
*
- * 输入:
- * * 一个n维输入tensor。
+ * Inputs:
+ * * An n-dimensional input tensor.
*
- * 输出:
- * * output,n维Relu输出tensor,数据类型和shape和input一致。
+ * Outputs:
+ * * output: n-dimensional tensor, with the same data type and shape as the input tensor.
*/
OH_NN_OPS_GELU = 56,
} OH_NN_OperationType;
/**
- * @brief 张量的类型
+ * @brief Enumerates the tensor data types.
*
- * 张量通常用于设置模型的输入、输出和算子参数。作为模型(或算子)的输入和输出时,需要将张量类型设置为{@link OH_NN_TENSOR};张量
- * 用于设置算子参数时,需要指定参数类型。假设正在设置{@link OH_NN_OPS_CONV2D}算子的pad参数,则需要将
- * {@link OH_NN_Tensor}实例的type属性设置为{@link OH_NN_CONV2D_PAD}。其他算子参数的设置以此类推,枚举值
- * 的命名遵守 OH_NN_{算子名称}_{属性名} 的格式。
+ * Tensors are usually used to set the input, output, and operator parameters of a model. When a tensor is used
+ * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}.
+ * When the tensor is used as an operator parameter, select an enumerated value other than {@link OH_NN_TENSOR} as the tensor type.
+ * Assume that the pad parameter of the {@link OH_NN_OPS_CONV2D} operator is being set.
+ * You need to set the type attribute of the {@link OH_NN_Tensor} instance to {@link OH_NN_CONV2D_PAD}.
+ * The settings of other operator parameters are similar. The enumerated values are named
+ * in the format OH_NN_{Operator name}_{Attribute name}.
*
* @since 9
* @version 1.0
*/
typedef enum {
- /** Tensor类型 */
+ /** This enumerated value is used when the tensor is used as the input or output of a model (or operator). */
OH_NN_TENSOR = 0,
- /** Add算子的activationType参数 */
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the Add operator. */
OH_NN_ADD_ACTIVATIONTYPE = 1,
- /** AvgPool算子的kernel_size参数 */
+ /** This enumerated value is used when the tensor is used as the kernel_size parameter of the AvgPool operator. */
OH_NN_AVG_POOL_KERNEL_SIZE = 2,
- /** AvgPool算子的stride参数 */
+ /** This enumerated value is used when the tensor is used as the stride parameter of the AvgPool operator. */
OH_NN_AVG_POOL_STRIDE = 3,
- /** AvgPool算子的pad_mode参数 */
+ /** This enumerated value is used when the tensor is used as the pad_mode parameter of the AvgPool operator. */
OH_NN_AVG_POOL_PAD_MODE = 4,
- /** AvgPool算子的pad参数 */
+ /** This enumerated value is used when the tensor is used as the pad parameter of the AvgPool operator. */
OH_NN_AVG_POOL_PAD = 5,
- /** AvgPool算子的activation_type参数 */
+ /** This enumerated value is used when the tensor is used as the activation_type parameter of the AvgPool operator. */
OH_NN_AVG_POOL_ACTIVATION_TYPE = 6,
- /** BatchNorm算子的eosilon参数 */
+    /** This enumerated value is used when the tensor is used as the epsilon parameter of the BatchNorm operator. */
OH_NN_BATCH_NORM_EPSILON = 7,
- /** BatchToSpaceND算子的blockSize参数 */
+ /** This enumerated value is used when the tensor is used as the blockSize parameter of the BatchToSpaceND operator. */
OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE = 8,
- /** BatchToSpaceND算子的crops参数 */
+ /** This enumerated value is used when the tensor is used as the crops parameter of the BatchToSpaceND operator. */
OH_NN_BATCH_TO_SPACE_ND_CROPS = 9,
- /** Concat算子的axis参数 */
+ /** This enumerated value is used when the tensor is used as the axis parameter of the Concat operator. */
OH_NN_CONCAT_AXIS = 10,
- /** Conv2D算子的strides参数 */
+ /** This enumerated value is used when the tensor is used as the strides parameter of the Conv2D operator. */
OH_NN_CONV2D_STRIDES = 11,
- /** Conv2D算子的pad参数 */
+ /** This enumerated value is used when the tensor is used as the pad parameter of the Conv2D operator. */
OH_NN_CONV2D_PAD = 12,
- /** Conv2D算子的dilation参数 */
+ /** This enumerated value is used when the tensor is used as the dilation parameter of the Conv2D operator. */
OH_NN_CONV2D_DILATION = 13,
- /** Conv2D算子的padMode参数 */
+ /** This enumerated value is used when the tensor is used as the padMode parameter of the Conv2D operator. */
OH_NN_CONV2D_PAD_MODE = 14,
- /** Conv2D算子的activationType参数 */
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the Conv2D operator. */
OH_NN_CONV2D_ACTIVATION_TYPE = 15,
- /** Conv2D算子的group参数 */
+ /** This enumerated value is used when the tensor is used as the group parameter of the Conv2D operator. */
OH_NN_CONV2D_GROUP = 16,
- /** Conv2DTranspose算子的strides参数 */
+ /** This enumerated value is used when the tensor is used as the strides parameter of the Conv2DTranspose operator. */
OH_NN_CONV2D_TRANSPOSE_STRIDES = 17,
- /** Conv2DTranspose算子的pad参数 */
+ /** This enumerated value is used when the tensor is used as the pad parameter of the Conv2DTranspose operator. */
OH_NN_CONV2D_TRANSPOSE_PAD = 18,
- /** Conv2DTranspose算子的dilation参数 */
+ /** This enumerated value is used when the tensor is used as the dilation parameter of the Conv2DTranspose operator. */
OH_NN_CONV2D_TRANSPOSE_DILATION = 19,
- /** Conv2DTranspose算子的outputPaddings参数 */
+ /** This enumerated value is used when the tensor is used as the outputPaddings parameter of the Conv2DTranspose operator. */
OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS = 20,
- /** Conv2DTranspose算子的padMode参数 */
+ /** This enumerated value is used when the tensor is used as the padMode parameter of the Conv2DTranspose operator. */
OH_NN_CONV2D_TRANSPOSE_PAD_MODE = 21,
- /** Conv2DTranspose算子的activationType参数 */
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the Conv2DTranspose operator. */
OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE = 22,
- /** Conv2DTranspose算子的group参数 */
+ /** This enumerated value is used when the tensor is used as the group parameter of the Conv2DTranspose operator. */
OH_NN_CONV2D_TRANSPOSE_GROUP = 23,
- /** DepthwiseConv2dNative算子的strides参数 */
+ /** This enumerated value is used when the tensor is used as the strides parameter of the DepthwiseConv2dNative operator. */
OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES = 24,
- /** DepthwiseConv2dNative算子的pad参数 */
+ /** This enumerated value is used when the tensor is used as the pad parameter of the DepthwiseConv2dNative operator. */
OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD = 25,
- /** DepthwiseConv2dNative算子的dilation参数 */
+ /** This enumerated value is used when the tensor is used as the dilation parameter of the DepthwiseConv2dNative operator. */
OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION = 26,
- /** DepthwiseConv2dNative算子的padMode参数 */
+ /** This enumerated value is used when the tensor is used as the padMode parameter of the DepthwiseConv2dNative operator. */
OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE = 27,
- /** DepthwiseConv2dNative算子的activationType参数 */
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the DepthwiseConv2dNative operator. */
OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE = 28,
- /** Div算子的activationType参数 */
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the Div operator. */
OH_NN_DIV_ACTIVATIONTYPE = 29,
- /** Eltwise算子的mode参数 */
+ /** This enumerated value is used when the tensor is used as the mode parameter of the Eltwise operator. */
OH_NN_ELTWISE_MODE = 30,
- /** FullConnection算子的axis参数 */
+ /** This enumerated value is used when the tensor is used as the axis parameter of the FullConnection operator. */
OH_NN_FULL_CONNECTION_AXIS = 31,
- /** FullConnection算子的activationType参数 */
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the FullConnection operator. */
OH_NN_FULL_CONNECTION_ACTIVATIONTYPE = 32,
- /** Matmul算子的transposeA参数 */
+ /** This enumerated value is used when the tensor is used as the transposeA parameter of the Matmul operator. */
OH_NN_MATMUL_TRANSPOSE_A = 33,
- /** Matmul算子的transposeB参数 */
+ /** This enumerated value is used when the tensor is used as the transposeB parameter of the Matmul operator. */
OH_NN_MATMUL_TRANSPOSE_B = 34,
- /** Matmul算子的activationType参数 */
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the Matmul operator. */
OH_NN_MATMUL_ACTIVATION_TYPE = 35,
- /** MaxPool算子的kernel_size参数 */
+ /** This enumerated value is used when the tensor is used as the kernel_size parameter of the MaxPool operator. */
OH_NN_MAX_POOL_KERNEL_SIZE = 36,
- /** MaxPool算子的stride参数 */
+ /** This enumerated value is used when the tensor is used as the stride parameter of the MaxPool operator. */
OH_NN_MAX_POOL_STRIDE = 37,
- /** MaxPool算子的pad_mode参数 */
+ /** This enumerated value is used when the tensor is used as the pad_mode parameter of the MaxPool operator. */
OH_NN_MAX_POOL_PAD_MODE = 38,
- /** MaxPool算子的pad参数 */
+ /** This enumerated value is used when the tensor is used as the pad parameter of the MaxPool operator. */
OH_NN_MAX_POOL_PAD = 39,
- /** MaxPool算子的activation_type参数 */
+ /** This enumerated value is used when the tensor is used as the activation_type parameter of the MaxPool operator. */
OH_NN_MAX_POOL_ACTIVATION_TYPE = 40,
- /** Mul算子的activationType参数 */
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the Mul operator. */
OH_NN_MUL_ACTIVATION_TYPE = 41,
- /** OneHot算子的axis参数 */
+ /** This enumerated value is used when the tensor is used as the axis parameter of the OneHot operator. */
OH_NN_ONE_HOT_AXIS = 42,
- /** Pad算子的constant_value参数 */
+ /** This enumerated value is used when the tensor is used as the constant_value parameter of the Pad operator. */
OH_NN_PAD_CONSTANT_VALUE = 43,
- /** Scale算子的activationType参数*/
+ /** This enumerated value is used when the tensor is used as the activationType parameter of the Scale operator. */
OH_NN_SCALE_ACTIVATIONTYPE = 44,
- /** Scale算子的axis参数*/
+ /** This enumerated value is used when the tensor is used as the axis parameter of the Scale operator. */
OH_NN_SCALE_AXIS = 45,
- /** Softmax算子的axis参数 */
+ /** This enumerated value is used when the tensor is used as the axis parameter of the Softmax operator. */
OH_NN_SOFTMAX_AXIS = 46,
- /** SpaceToBatchND算子的BlockShape参数 */
+ /** This enumerated value is used when the tensor is used as the BlockShape parameter of the SpaceToBatchND operator. */
OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE = 47,
- /** SpaceToBatchND算子的Paddings参数 */
+ /** This enumerated value is used when the tensor is used as the Paddings parameter of the SpaceToBatchND operator. */
OH_NN_SPACE_TO_BATCH_ND_PADDINGS = 48,
- /** Split算子的Axis参数 */
+ /** This enumerated value is used when the tensor is used as the Axis parameter of the Split operator. */
OH_NN_SPLIT_AXIS = 49,
- /** Split算子的OutputNum参数 */
+ /** This enumerated value is used when the tensor is used as the OutputNum parameter of the Split operator. */
OH_NN_SPLIT_OUTPUT_NUM = 50,
- /** Split算子的SizeSplits参数 */
+ /** This enumerated value is used when the tensor is used as the SizeSplits parameter of the Split operator. */
OH_NN_SPLIT_SIZE_SPLITS = 51,
- /** Squeeze算子的Axis参数 */
+ /** This enumerated value is used when the tensor is used as the Axis parameter of the Squeeze operator. */
OH_NN_SQUEEZE_AXIS = 52,
- /** Stack算子的Axis参数 */
+ /** This enumerated value is used when the tensor is used as the Axis parameter of the Stack operator. */
OH_NN_STACK_AXIS = 53,
- /** StridedSlice算子的BeginMask参数 */
+ /** This enumerated value is used when the tensor is used as the BeginMask parameter of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_BEGIN_MASK = 54,
- /** StridedSlice算子的EndMask参数 */
+ /** This enumerated value is used when the tensor is used as the EndMask parameter of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_END_MASK = 55,
- /** StridedSlice算子的EllipsisMask参数 */
+ /** This enumerated value is used when the tensor is used as the EllipsisMask parameter of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_ELLIPSIS_MASK = 56,
- /** StridedSlice算子的NewAxisMask参数 */
+ /** This enumerated value is used when the tensor is used as the NewAxisMask parameter of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_NEW_AXIS_MASK = 57,
- /** StridedSlice算子的ShrinkAxisMask参数 */
+ /** This enumerated value is used when the tensor is used as the ShrinkAxisMask parameter of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK = 58,
- /** Sub算子的ActivationType参数 */
+ /** This enumerated value is used when the tensor is used as the ActivationType parameter of the Sub operator. */
OH_NN_SUB_ACTIVATIONTYPE = 59,
- /** ReduceMean算子的keep_dims参数*/
+ /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceMean operator. */
OH_NN_REDUCE_MEAN_KEEP_DIMS = 60,
- /** ResizeBilinear算子的new_height参数*/
+ /** This enumerated value is used when the tensor is used as the new_height parameter of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_NEW_HEIGHT = 61,
- /** ResizeBilinear算子的new_width参数*/
+ /** This enumerated value is used when the tensor is used as the new_width parameter of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_NEW_WIDTH = 62,
- /** ResizeBilinear算子的preserve_aspect_ratio参数*/
+ /** This enumerated value is used when the tensor is used as the preserve_aspect_ratio parameter of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO = 63,
- /** ResizeBilinear算子的coordinate_transform_mode参数*/
+ /** This enumerated value is used when the tensor is used as the coordinate_transform_mode parameter of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE = 64,
- /** ResizeBilinear算子的exclude_outside参数*/
+ /** This enumerated value is used when the tensor is used as the exclude_outside parameter of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE = 65,
- /** LayerNorm算子的beginNormAxis参数 */
+ /** This enumerated value is used when the tensor is used as the beginNormAxis parameter of the LayerNorm operator. */
OH_NN_LAYER_NORM_BEGIN_NORM_AXIS = 66,
- /** LayerNorm算子的epsilon参数 */
+ /** This enumerated value is used when the tensor is used as the epsilon parameter of the LayerNorm operator. */
OH_NN_LAYER_NORM_EPSILON = 67,
- /** LayerNorm算子的beginParamsAxis参数 */
+ /** This enumerated value is used when the tensor is used as the beginParamsAxis parameter of the LayerNorm operator. */
OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS = 68,
- /** LayerNorm算子的elementwiseAffine参数 */
+ /** This enumerated value is used when the tensor is used as the elementwiseAffine parameter of the LayerNorm operator. */
OH_NN_LAYER_NORM_ELEMENTWISE_AFFINE = 69,
- /** ReduceProd算子的keep_dims参数*/
+ /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceProd operator. */
OH_NN_REDUCE_PROD_KEEP_DIMS = 70,
- /** ReduceAll算子的keep_dims参数*/
+ /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceAll operator. */
OH_NN_REDUCE_ALL_KEEP_DIMS = 71,
- /** QuantDTypeCast算子的src_t参数*/
+ /** This enumerated value is used when the tensor is used as the src_t parameter of the QuantDTypeCast operator. */
OH_NN_QUANT_DTYPE_CAST_SRC_T = 72,
- /** QuantDTypeCast算子的dst_t参数*/
+ /** This enumerated value is used when the tensor is used as the dst_t parameter of the QuantDTypeCast operator. */
OH_NN_QUANT_DTYPE_CAST_DST_T = 73,
- /** Topk算子的Sorted参数 */
+ /** This enumerated value is used when the tensor is used as the Sorted parameter of the Topk operator. */
OH_NN_TOP_K_SORTED = 74,
- /** ArgMax算子的axis参数 */
+ /** This enumerated value is used when the tensor is used as the axis parameter of the ArgMax operator. */
OH_NN_ARG_MAX_AXIS = 75,
- /** ArgMax算子的keepDims参数 */
+ /** This enumerated value is used when the tensor is used as the keepDims parameter of the ArgMax operator. */
OH_NN_ARG_MAX_KEEPDIMS = 76,
- /** Unsqueeze算子的Axis参数 */
+ /** This enumerated value is used when the tensor is used as the Axis parameter of the Unsqueeze operator. */
OH_NN_UNSQUEEZE_AXIS = 77,
} OH_NN_TensorType;
/**
- * @brief 自定义的32位无符号整型数组类型
- *
- * 该结构体用于存储32位无符号整型数组,size要求记录数组的长度。
+ * @brief This structure is used to store a 32-bit unsigned integer array.
*
* @since 9
* @version 1.0
*/
typedef struct OH_NN_UInt32Array {
- /** 无符号整型数组的指针 */
+ /** Pointer to the unsigned integer array */
uint32_t *data;
- /** 数组长度 */
+ /** Array length */
uint32_t size;
} OH_NN_UInt32Array;
/**
- * @brief 量化信息
- *
- * 在量化的场景中,32位浮点型数据需要根据量化参数,按公式 `浮点数=scale*(量化值-zeroPoint)` 量化成比特位更少的数据类型,
- * 其中r是浮点数,q是量化后的结果。
+ * @brief Quantization information.
*
+ * In quantization scenarios, the 32-bit floating-point data type is quantized into the fixed-point data type according to the following formula:
+ \f[
+ q = clamp(round(\frac{r}{s}+z), q_{min}, q_{max})
+ \f]
+ * s and z are quantization parameters, which are stored in the scale and zeroPoint fields of {@link OH_NN_QuantParam}.
+ * r is the floating-point number, q is the quantization result, q_min is the lower bound of the quantization result, and
+ * q_max is the upper bound of the quantization result. The calculation method of q_min and q_max is as follows:
+ *
+ \f[
+ \begin{cases}
+ q_{min} = -(1 << (numBits - 1)) \\
+ q_{max} = (1 << (numBits - 1)) \\
+ \end{cases}
+ \f]
+ * The clamp function is defined as follows:
+ \f[
+ \text{clamp}(x,min,max) =
+ \begin{cases}
+ \text{max} & \text{ if } x > \text{ max } \\
+ \text{min} & \text{ if } x < \text{ min } \\
+ x & \text{ otherwise } \\
+ \end{cases}
+ \f]
+ *
* @since 9
* @version 1.0
*/
typedef struct OH_NN_QuantParam {
- /** 指定numBits、scale和zeroPoint数组的长度。在per-layer量化的场景下,quantCount通常指定为1,即一个tensor所有通道
- * 共享一套量化参数;在per-channel量化场景下,quantCount通常和tensor通道数一致,每个通道使用自己的量化参数。
+ /** Specifies the length of the numBits, scale, and zeroPoint arrays. In the per-layer quantization scenario,
+ * quantCount is usually set to 1. That is, all channels of a tensor share a set of quantization parameters.
+ * In the per-channel quantization scenario, quantCount is usually the same as the number of tensor channels,
+ * and each channel uses its own quantization parameters.
*/
uint32_t quantCount;
- /** 量化位数 */
+ /** Number of quantization bits */
const uint32_t *numBits;
- /** 指向scale量化信息的指针 */
+ /** Pointer to the scale data in the quantization formula */
const double *scale;
- /** 指向zero point量化信息的指针 */
+ /** Pointer to the zero point data in the quantization formula */
const int32_t *zeroPoint;
} OH_NN_QuantParam;
/**
- * @brief 张量结构体
+ * @brief Defines the tensor structure.
*
- * {@link OH_NN_Tensor}类型通常用于构造模型图中的数据节点和算子参数,在构造张量时需要明确数据类型、维数、维度信息和量化信息。
- * type成员指定张量的用途,当张量用作模型图中的输入、输出,则要求type置为{@link OH_NN_TENSOR};当张量用作算子参数,
- * 则需要指定为具体的枚举值,具体参考{@link OH_NN_TensorType}。
+ * It is usually used to construct data nodes and operator parameters in a model graph. When constructing a tensor,
+ * you need to specify the data type, number of dimensions, dimension information, and quantization information.
*
* @since 9
* @version 1.0
*/
typedef struct OH_NN_Tensor {
- /** 指定张量的数据类型,要求从{@link OH_NN_DataType}枚举类型中取值。 */
+ /** Data type of the specified tensor. The value must be an enumerated value of {@link OH_NN_DataType}. */
OH_NN_DataType dataType;
- /** 指定张量的维数 */
+ /** Number of dimensions of the specified tensor */
uint32_t dimensionCount;
- /** 指定张量的维度信息 */
+    /** Dimension information (shape) of the specified tensor */
const int32_t *dimensions;
- /** 指定张量的量化信息,数据类型要求为{@link OH_NN_QuantParam}。 */
+ /** Quantization information of the specified tensor. The data type must be {@link OH_NN_QuantParam}. */
const OH_NN_QuantParam *quantParam;
- /** 指定张量的类型, 要求从{@link OH_NN_TensorType}枚举类型中取值。 */
+ /** Specifies the tensor type. The value of type is related to the tensor usage.
+ * When the tensor is used as the input or output of the model, set type to {@link OH_NN_TENSOR}.
+ * When a tensor is used as an operator parameter, select any enumerated value except {@link OH_NN_TENSOR} from {@link OH_NN_TensorType}.
+ */
OH_NN_TensorType type;
} OH_NN_Tensor;
/**
- * @brief 内存结构体
+ * @brief Defines the memory structure.
*
* @since 9
* @version 1.0
*/
typedef struct OH_NN_Memory {
- /** 指向共享内存的指针,该共享内存通常由底层硬件驱动申请 */
+ /** Pointer to the shared memory. The shared memory is usually allocated by the underlying hardware driver. */
void * const data;
- /** 记录共享内存的字节长度 */
+ /** Records the length of the shared memory, in bytes. */
const size_t length;
} OH_NN_Memory;
@@ -1643,5 +1779,5 @@ typedef struct OH_NN_Memory {
}
#endif // __cplusplus
+/** @} */
#endif // NEURAL_NETWORK_RUNTIME_TYPE_H
-/** @} */
\ No newline at end of file