diff --git a/ai/neural_network_runtime/neural_network_runtime_type.h b/ai/neural_network_runtime/neural_network_runtime_type.h
index edd879626b781067d3fd566bfe6709d9694d2487..883799d4a0dbe04b0e69de4e90a9de284d488e65 100644
--- a/ai/neural_network_runtime/neural_network_runtime_type.h
+++ b/ai/neural_network_runtime/neural_network_runtime_type.h
@@ -27,7 +27,7 @@
* @file neural_network_runtime_type.h
*
* @brief Defines the structure and enumeration.
- *
+ *
* include "neural_network_runtime/neural_network_runtime_type.h"
* @library libneural_network_runtime.so
* @kit Neural Network Runtime Kit
@@ -143,7 +143,8 @@ typedef enum {
OH_NN_FAILED = 1,
/** Invalid parameter. */
OH_NN_INVALID_PARAMETER = 2,
- /** Memory-related error, for example, insufficient memory, memory data copy failure, or memory application failure. */
+ /** Memory-related error, for example, insufficient memory, memory data copy failure,
+ * or memory application failure. */
OH_NN_MEMORY_ERROR = 3,
/** Invalid operation. */
OH_NN_OPERATION_FORBIDDEN = 4,
@@ -151,22 +152,22 @@ typedef enum {
OH_NN_NULL_PTR = 5,
/** Invalid file. */
OH_NN_INVALID_FILE = 6,
- /** A hardware error occurs, for example, HDL service crash.
+ /** A hardware error occurs, for example, HDL service crash.
* @deprecated since 11
* @useinstead {@link OH_NN_UNAVAILABLE_DEVICE}
*/
OH_NN_UNAVALIDABLE_DEVICE = 7,
/** Invalid path. */
OH_NN_INVALID_PATH = 8,
- /** Timeout.
+ /** Timeout.
* @since 11
*/
OH_NN_TIMEOUT = 9,
- /** Unsupported.
+ /** Unsupported.
* @since 11
*/
OH_NN_UNSUPPORTED = 10,
- /** Connection Exception.
+ /** Connection Exception.
* @since 11
*/
OH_NN_CONNECTION_EXCEPTION = 11,
@@ -178,7 +179,7 @@ typedef enum {
* @since 11
*/
OH_NN_DYNAMIC_SHAPE = 13,
- /** A hardware error occurs, for example, HDL service crash.
+ /** A hardware error occurs, for example, HDL service crash.
* @since 11
*/
OH_NN_UNAVAILABLE_DEVICE = 14
@@ -187,15 +188,15 @@ typedef enum {
/**
* @brief Defines the callback function handle for the post-process when the asynchronous execution has been done.
- *
+ *
* Use userData to identify the asynchronous execution you want to get.
* It is the argument userData passed to {@link OH_NNExecutor_RunAsync}.\n
- *
+ *
* Use errCode of type {@link OH_NN_ReturnCode} to get the error code returned by the asynchronous execution.\n
- *
+ *
* The outputTensor and outputCount are the inference results, which is the same as ones passed to
* {@link OH_NNExecutor_RunAsync}.\n
- *
+ *
* @param userData Asynchronous execution identifier, which is the argument userData passed to
* {@link OH_NNExecutor_RunAsync}.
* @param errCode Error code {@link OH_NN_ReturnCode} returned by the asynchronous execution.
@@ -211,12 +212,12 @@ typedef void (*NN_OnRunDone)(void *userData, OH_NN_ReturnCode errCode, void *out
/**
* @brief Defines the callback function handle for the post-process when the device driver service is dead during
* asynchronous execution.
- *
+ *
* You should recompile the model if this callback function is called.\n
- *
+ *
* Use userData to identify the asynchronous execution you want to get.
* It is the argument userData passed to {@link OH_NNExecutor_RunAsync}.\n
- *
+ *
* @param userData Asynchronous execution identifier, which is the argument userData passed to
* {@link OH_NNExecutor_RunAsync}.
* @since 11
@@ -315,7 +316,7 @@ typedef enum {
* @brief Defines operator types.
*
* @since 9
- * @version 1.0
+ * @version 2.0
*/
typedef enum {
/**
@@ -328,19 +329,20 @@ typedef enum {
*
* Parameters:
*
- * * activationType is an integer constant which is contained in FuseType.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* Outputs:
*
- * * output: sum of input1 and input2.
- * The data shape is the same as that of the input after broadcasting,
+ * * output: sum of input1 and input2.
+ * The data shape is the same as that of the input after broadcasting,
* and the data type is the same as that of the input with a higher precision.
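+     *      For example (illustrative shapes): adding a tensor of shape [2, 3] to a tensor of shape [3]
+     *      broadcasts the second input to [2, 3], so the output shape is [2, 3].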
*/
OH_NN_OPS_ADD = 1,
/**
- * Apply 2D average pooling to the input tensor, which now must be in NHWC format. The int8 quantization input is supported.
+ * Apply 2D average pooling to the input tensor, which now must be in NHWC format.
+ * The int8 quantization input is supported.
*
* If the input contains the padMode parameter:
*
@@ -350,18 +352,25 @@ typedef enum {
*
* Parameters:
*
- * * kernelSize indicates the kernel size used to obtain the average value. It is an int array [kernel_height, kernel_width].
+ * * kernelSize indicates the kernel size used to obtain the average value.
+ * It is an int array [kernelHeight, kernelWidth].
* The first number indicates the kernel height, and the second number indicates the kernel width.
- * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
- * The first number indicates the moving step in height, and the second number indicates the moving step in width.
- * * padMode: padding mode, which is optional. The value is of the int type and can be 0 (same) or 1 (valid).
- * The nearest neighbor value is used for padding.
- * 0 (same): The height and width of the output are the same as those of the input.
- * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
+ * * strides indicates the distance of kernel moving. The value is an int array
+ * [strideHeight, strideWidth]. The first number indicates the moving step in height,
+ * and the second number indicates the moving step in width.
+ * * padMode: padding mode, which is optional. The value is of the int type and can be 0 (same)
+ * or 1 (valid). The nearest neighbor value is used for padding.
+ * 0 (same): The height and width of the output are the same as those of the input.
+     *      The total padding quantity is calculated horizontally and vertically and
+     *      evenly distributed to the top, bottom, left, and right if possible.
* Otherwise, the last additional padding will be completed from the bottom and right.
- * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. Excessive pixels will be discarded.
- * * activationType is an integer constant which is contained in FuseType.
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of no
+ * padding. Excessive pixels will be discarded.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
+     * * global: whether to perform global pooling.
+     * * roundMode: boundary handling method. When the pooling kernel cannot completely cover the input
+     *      feature map, 0 means the output size is rounded down and 1 means it is rounded up.
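+     *      For example (illustrative values): for a 5x5 input with kernelSize [3, 3] and strides [1, 1],
+     *      padMode 0 (same) keeps the output at 5x5, while padMode 1 (valid) yields a 3x3 output.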
*
* If the input contains the padList parameter:
*
@@ -371,13 +380,19 @@ typedef enum {
*
* Parameters:
*
- * * kernelSize indicates the kernel size used to obtain the average value. It is an int array [kernel_height, kernel_width].
+ * * kernelSize indicates the kernel size used to obtain the average value.
+ * It is an int array [kernelHeight, kernelWidth].
* The first number indicates the kernel height, and the second number indicates the kernel width.
- * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
- * The first number indicates the moving step in height, and the second number indicates the moving step in width.
- * * padList: padding around input. It is an int array [top, bottom, left, right], and the nearest neighbor values are used for padding.
- * * activationType is an integer constant which is contained in FuseType.
+ * * strides indicates the distance of kernel moving. The value is an int array
+ * [strideHeight, strideWidth]. The first number indicates the moving step in height,
+ * and the second number indicates the moving step in width.
+ * * padList: padding around input. It is an int array [top, bottom, left, right],
+ * and the nearest neighbor values are used for padding.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
+     * * global: whether to perform global pooling.
+     * * roundMode: boundary handling method. When the pooling kernel cannot completely cover the input
+     *      feature map, 0 means the output size is rounded down and 1 means it is rounded up.
*
* Outputs:
*
@@ -386,15 +401,19 @@ typedef enum {
OH_NN_OPS_AVG_POOL = 2,
/**
- * Batch normalization is performed on a tensor to scale and shift tensor elements, relieving potential covariate shift in a batch of data.
+     * Performs batch normalization on the input tensors, applying a transformation to keep the average output
+ * close to 0 and the output standard deviation close to 1.
*
* Inputs:
*
- * * input: n-dimensional tensor of shape [N, ..., C]. The nth dimension is the number of channels.
+ * * input: n-dimensional tensor of shape [N, ..., C].
+ * The nth dimension is the number of channels.
* * scale: 1D tensor of the scaling factor used to scale the first normalized tensor.
* * offset: 1D tensor used to move to the first normalized tensor.
- * * mean: 1D tensor of the overall mean value. It is used only for inference. In case of training, this parameter must be left empty.
- * * variance: 1D tensor used for the overall variance. It is used only for inference. In case of training, this parameter must be left empty.
+ * * mean: 1D tensor of the overall mean value. It is used only for inference.
+ * In case of training, this parameter must be left empty.
+ * * variance: 1D tensor used for the overall variance. It is used only for inference.
+ * In case of training, this parameter must be left empty.
*
* Parameters:
*
@@ -402,30 +421,35 @@ typedef enum {
*
* Outputs:
*
- * * output: n-dimensional output tensor whose shape and data type are the same as those of the input.
+ * * output: n-dimensional output tensor whose shape
+ * and data type are the same as those of the input.
*/
OH_NN_OPS_BATCH_NORM = 3,
/**
- * Divides the batch dimension of a 4D tensor into small blocks by block_shape, and interleaves these blocks back into the spatial dimension.
+ * Divides the batch dimension of a 4D tensor into small blocks by blockShape,
+ * and interleaves these blocks back into the spatial dimension.
*
-     * Parameters:
+     * Inputs:
*
- * * input: input tensor. The dimension will be divided into small blocks, and these blocks will be interleaved into the spatial dimension.
+ * * input: input tensor. The dimension will be divided into small blocks,
+ * and these blocks will be interleaved into the spatial dimension.
*
-     * Outputs:
+     * Parameters:
*
- * * blockSize: size of each block to be interleaved into the spatial dimension. The value is an array [height_block, width_block].
- * * crops: elements truncated from the spatial dimension of the output. The value is a 2D array [[crop0_start, crop0_end],
- * [crop1_start, crop1_end]] with the shape of (2, 2).
- *
+ * * blockSize: size of each block to be interleaved into the spatial dimension.
+ * The value is an array [heightBlock, widthBlock].
+ * * crops: elements truncated from the spatial dimension of the output. The value is a 2D array
+ * [[crop0Start, crop0End], [crop1Start, crop1End]] with the shape of (2, 2).
+ *
*
* Outputs:
*
- * * output. Assume that the shape of input is (n,h,w,c) and the shape of output is (n',h',w',c'):
- * n' = n / (block_shape[0] * block_shape[1])
- * h' = h * block_shape[0] - crops[0][0] - crops[0][1]
- * w' = w * block_shape[1] - crops[1][0] - crops[1][1]
+ * * output. Assume that the shape of input is (n,h,w,c) and
+ * the shape of output is (n',h',w',c'):
+ * n' = n / (blockShape[0] * blockShape[1])
+ * h' = h * blockShape[0] - crops[0][0] - crops[0][1]
+ * w' = w * blockShape[1] - crops[1][0] - crops[1][1]
* c'= c
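+     *      For example (illustrative values): for an input of shape (4, 2, 2, 1) with a block size of [2, 2]
+     *      and crops = [[0, 0], [0, 0]], the formulas above give an output of shape (1, 4, 4, 1).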
*/
OH_NN_OPS_BATCH_TO_SPACE_ND = 4,
@@ -483,55 +507,64 @@ typedef enum {
* Inputs:
*
* * input: input tensor.
- * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
* The value of inChannel must be exactly divided by the value of group.
- *
- * * bias: bias of the convolution. It is an array with a length of [outChannel].
- * In quantization scenarios, the bias parameter does not require quantization parameters.
- * The quantization version requires data input of the OH_NN_INT32 type.
+ *
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
* The actual quantization parameters are determined by input and weight.
*
* Parameters:
*
- * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
- * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
- * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
- *
- * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid).
- * 0 (same): The height and width of the output are the same as those of the input.
- * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
+ * * stride: movement stride of the convolution kernel in height and width.
+ * It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width.
+ * It is an int array [dilationHeight, dilationWidth]. The value must be greater than
+ * or equal to 1 and cannot exceed the height and width of input.
+ *
+ * * padMode: padding mode of input.
+ * The value is of the int type and can be 0 (same) or 1 (valid).
+ * 0 (same): The height and width of the output are the same as those of the input.
+ * The total padding quantity is calculated horizontally and vertically
+ * and evenly distributed to the top, bottom, left, and right if possible.
* Otherwise, the last additional padding will be completed from the bottom and right.
- *
- * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
- * * group: number of groups in which the input is divided by in_channel. The value is of the int type.
- * If group is 1, it is a conventional convolution. If group is greater than 1 and
- * less than or equal to in_channel, it is a group convolution.
- * * activationType is an integer constant which is contained in FuseType. The specified activation function is called before output.
+ *
+ * 1 (valid): The possible maximum height and width of the output will be returned
+ * in case of no padding. The excessive pixels will be discarded.
+ * * group: number of groups in which the input is divided by inChannel. The value is of the
+ * int type. If group is 1, it is a conventional convolution. If group is greater
+ * than 1 and less than or equal to inChannel, it is a group convolution.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
+ * The specified activation function is called before output.
*
* If the input contains the padList parameter:
*
* Inputs:
*
* * input: input tensor.
- * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
* The value of inChannel must be exactly divided by the value of group.
- *
- * * bias: bias of the convolution. It is an array with a length of [outChannel].
- * In quantization scenarios, the bias parameter does not require quantization parameters.
- * The quantization version requires data input of the OH_NN_INT32 type.
+ *
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
* The actual quantization parameters are determined by input and weight.
*
* Parameters:
*
- * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
- * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
- * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ * * stride: movement stride of the convolution kernel in height and width.
+ * It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width.
+ * It is an int array [dilationHeight, dilationWidth]. The value must be greater than
+ * or equal to 1 and cannot exceed the height and width of input.
* * padList: padding around input. It is an int array [top, bottom, left, right].
- * * group: number of groups in which the input is divided by in_channel. The value is of the int type.
- * If group is 1, it is a conventional convolution.
- * If group is in_channel, it is depthwiseConv2d. In this case, group==in_channel==out_channel.
- * If group is greater than 1 and less than in_channel, it is a group convolution. In this case, out_channel==group.
- * * activationType is an integer constant which is contained in FuseType.
+ * * group: number of groups in which the input is divided by inChannel.
+ * The value is of the int type. If group is 1, it is a conventional convolution.
+ * If group is inChannel, it is depthwiseConv2d. In this case, group==inChannel==outChannel.
+ * If group is greater than 1 and less than inChannel, it is a group convolution.
+ * In this case, outChannel==group.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* Outputs:
@@ -548,33 +581,37 @@ typedef enum {
* Inputs:
*
* * input: input tensor.
- * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
* The value of inChannel must be exactly divided by the value of group.
- *
- * * bias: bias of the convolution. It is an array with a length of [outChannel].
- * In quantization scenarios, the bias parameter does not require quantization parameters.
- * The quantization version requires data input of the OH_NN_INT32 type.
+ *
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
* The actual quantization parameters are determined by input and weight.
- *
- * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
+ *
+ * * stride: movement stride of the convolution kernel in height and width.
+ * It is an int array [strideHeight, strideWidth].
*
* Parameters:
*
- * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
- * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
- * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid).
- * 0 (same): The height and width of the output are the same as those of the input.
- * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
+ * * dilation: dilation size of the convolution kernel in height and width.
+ * It is an int array [dilationHeight, dilationWidth]. The value must be greater than
+ * or equal to 1 and cannot exceed the height and width of input.
+ * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or
+ * 1 (valid). 0 (same): The height and width of the output are the same as those of the input.
+ * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top,
+ * bottom, left, and right if possible.
* Otherwise, the last additional padding will be completed from the bottom and right.
- * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
- * * group: number of groups in which the input is divided by in_channel. The value is of the int type.
- * If group is 1, it is a conventional convolution. If group is greater than 1 and
- * less than or equal to in_channel, it is a group convolution.
- * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple.
- * It can be a single integer to specify the same value for all spatial dimensions. The amount of output
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of
+ * no padding. The excessive pixels will be discarded.
+ * * group: number of groups in which the input is divided by inChannel. The value is of the int
+ * type. If group is 1, it is a conventional convolution. If group is greater than
+ * 1 and less than or equal to inChannel, it is a group convolution.
+ * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple.
+ * It can be a single integer to specify the same value for all spatial dimensions. The amount of output
* padding along a dimension must be less than the stride along this dimension.
- *
- * * activationType is an integer constant which is contained in FuseType.
+ *
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* If the input contains the padList parameter:
@@ -582,27 +619,29 @@ typedef enum {
* Inputs:
*
* * input: input tensor.
- * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
- * The value of inChannel must be exactly divided by the value of group.
- * * bias: bias of the convolution. It is an array with a length of [outChannel].
- * In quantization scenarios, the bias parameter does not require quantization parameters.
- * The quantization version requires data input of the OH_NN_INT32 type.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
+ * The value of inChannel must be exactly divided by the value of group.
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
* The actual quantization parameters are determined by input and weight.
- *
+ *
* Parameters:
*
- * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
- * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
- * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ * * stride: movement stride of the convolution kernel in height and width.
+ * It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width.
+ * It is an int array [dilationHeight, dilationWidth]. The value must be greater than
+ * or equal to 1 and cannot exceed the height and width of input.
* * padList: padding around input. It is an int array [top, bottom, left, right].
- * * group: number of groups in which the input is divided by in_channel. The value is of the int type.
- * If group is 1, it is a conventional convolution. If group is greater than 1
- * and less than or equal to in_channel, it is a group convolution.
- * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple.
- * It can be a single integer to specify the same value for all spatial dimensions. The amount of output padding
- * along a dimension must be less than the stride along this dimension.
- *
- * * activationType is an integer constant which is contained in FuseType.
+ * * group: number of groups in which the input is divided by inChannel. The value is of the int
+ * type. If group is 1, it is a conventional convolution. If group is greater than
+ * 1 and less than or equal to inChannel, it is a group convolution.
+ * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple.
+ * It can be a single integer to specify the same value for all spatial dimensions. The amount of output
+ * padding along a dimension must be less than the stride along this dimension.
+ *
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* Outputs:
@@ -619,25 +658,29 @@ typedef enum {
* Inputs:
*
* * input: input tensor.
- * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
* outChannel is equal to channelMultiplier multiplied by inChannel.
- * * bias: bias of the convolution. It is an array with a length of [outChannel].
- * In quantization scenarios, the bias parameter does not require quantization parameters.
- * The quantization version requires data input of the OH_NN_INT32 type.
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
* The actual quantization parameters are determined by input and weight.
*
* Parameters:
*
- * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
- * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
- * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
- * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid).
- * 0 (same): The height and width of the output are the same as those of the input.
- * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
- * Otherwise, the last additional padding will be completed from the bottom and right.
- *
- * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
- * * activationType is an integer constant which is contained in FuseType.
+ * * stride: movement stride of the convolution kernel in height and width.
+ * It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width.
+ * It is an int array [dilationHeight, dilationWidth]. The value must be greater than
+ * or equal to 1 and cannot exceed the height and width of input.
+ * * padMode: padding mode of input.
+ * The value is of the int type and can be 0 (same) or 1 (valid).
+ * 0 (same): The height and width of the output are the same as those of the input. The total padding
+ * quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and
+ * right if possible. Otherwise, the last additional padding will be completed from the bottom and right.
+ *
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of no
+ * padding. The excessive pixels will be discarded.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* If the input contains the padList parameter:
@@ -645,20 +688,22 @@ typedef enum {
* Inputs:
*
* * input: input tensor.
- * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
+ * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
* outChannel is equal to channelMultiplier multiplied by inChannel.
- * * bias: bias of the convolution. It is an array with a length of [outChannel].
- * In quantization scenarios, the bias parameter does not require quantization parameters.
- * The quantization version requires data input of the OH_NN_INT32 type.
+ * * bias: bias of the convolution. It is an array with a length of [outChannel].
+ * In quantization scenarios, the bias parameter does not require quantization parameters.
+ * The quantization version requires data input of the OH_NN_INT32 type.
* The actual quantization parameters are determined by input and weight.
*
* Parameters:
*
- * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
- * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
- * The value must be greater than or equal to 1 and cannot exceed the height and width of input.
+ * * stride: movement stride of the convolution kernel in height and width.
+ * It is an int array [strideHeight, strideWidth].
+ * * dilation: dilation size of the convolution kernel in height and width.
+ * It is an int array [dilationHeight, dilationWidth]. The value must be greater than
+ * or equal to 1 and cannot exceed the height and width of input.
* * padList: padding around input. It is an int array [top, bottom, left, right].
- * * activationType is an integer constant which is contained in FuseType.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* Outputs:
@@ -673,13 +718,14 @@ typedef enum {
* Inputs:
*
* * input1: first input, which is a number, a bool, or a tensor whose data type is number or Boolean.
- * * input2: second input, which must meet the following requirements:
- * If the first input is a tensor, the second input can be a real number, a Boolean value, or a tensor whose data type is real number or Boolean value.
- * If the first input is a real number or Boolean value, the second input must be a tensor whose data type is real number or Boolean value.
+ * * input2: second input, which must meet the following requirements:
+ * If the first input is a tensor, the second input can be a real number, a Boolean value, or a tensor whose
+ * data type is real number or Boolean value. If the first input is a real number or Boolean value,
+ * the second input must be a tensor whose data type is real number or Boolean value.
*
* Parameters:
*
- * * activationType is an integer constant which is contained in FuseType.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* Outputs:
@@ -689,7 +735,8 @@ typedef enum {
OH_NN_OPS_DIV = 11,
/**
- * Sets parameters to perform product (dot product), sum (addition and subtraction), or max (larger value) on the input.
+ * Sets parameters to perform product (dot product), sum (addition and subtraction),
+ * or max (larger value) on the input.
*
* Inputs:
*
@@ -702,7 +749,7 @@ typedef enum {
*
* Outputs:
*
- * * output: computing result, which has the same data type and shape of output and input1.
+     * * output: computing result, which has the same data type and shape as input1.
*/
OH_NN_OPS_ELTWISE = 12,
@@ -712,7 +759,8 @@ typedef enum {
* Inputs:
*
* * input: input tensor.
- * * axis: index of the dimension to be added. The value is of the int32_t type and must be a constant in the range [-dim-1, dim].
+ * * axis: index of the dimension to be added.
+ * The value is of the int32_t type and must be a constant in the range [-dim-1, dim].
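+     *      For example (illustrative values): for an input of shape [2, 3], axis = 1 produces an output
+     *      of shape [2, 1, 3], and axis = -1 produces an output of shape [2, 3, 1].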
*
* Outputs:
*
@@ -730,7 +778,8 @@ typedef enum {
*
* Outputs:
*
- * * output: generated tensor, which has the same data type as value. The tensor shape is specified by the shape parameter.
+ * * output: generated tensor, which has the same data type as value.
+ * The tensor shape is specified by the shape parameter.
*/
OH_NN_OPS_FILL = 14,
@@ -741,35 +790,38 @@ typedef enum {
*
* * input: full-connection input tensor.
* * weight: weight tensor for a full connection.
- * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required for this parameter.
- * If quantization is required, the data must be of the OH_NN_INT32 type.
+ * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required
+ * for this parameter. If quantization is required, the data must be of the OH_NN_INT32 type.
* The actual quantization parameters are determined by input and weight.
*
* Parameters:
*
- * * activationType is an integer constant which is contained in FuseType.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
+     * * hasBias: whether to use the bias.
*
* Outputs:
*
* * output: computed tensor.
*
- * If the input contains the axis parameter:
+ * If the input contains the axis parameter or useAxis parameter:
*
* Inputs:
*
* * input: full-connection input tensor.
* * weight: weight tensor for a full connection.
- * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required for this parameter.
- * If quantization is required, the data must be of the OH_NN_INT32 type. The actual quantization parameters
- * are determined by input and weight.
+ * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required
+ * for this parameter. If quantization is required, the data must be of the OH_NN_INT32 type.
+ * The actual quantization parameters are determined by input and weight.
*
* Parameters:
*
* * axis: axis in which the full connection is applied. The specified axis and its following axes are
* converted into a 1D tensor for applying the full connection.
- * * activationType is an integer constant which is contained in FuseType.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
+     * * useAxis: whether to use the axis.
+     * * hasBias: whether to use the bias.
*
* Outputs:
*
@@ -802,7 +854,8 @@ typedef enum {
*
* Outputs:
*
- * * output: n-dimensional Hswish activation value. The data type is the same as that of shape and input.
+ * * output: n-dimensional Hswish activation value.
+     *      The shape and data type are the same as those of the input.
*/
OH_NN_OPS_HSWISH = 17,
@@ -812,14 +865,15 @@ typedef enum {
*
* Inputs:
*
- * * input1, which can be a real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
- * * input2, which can be a real number or a Boolean value if input1 is a tensor and must be a tensor
- * with the data type of real number or NN_BOOL if input1 is not a tensor.
+ * * input1, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
+ * * input2, can be a real number or a Boolean value if input1 is a tensor and must be a tensor
+ * with the data type of real number or OH_NN_BOOL if input1 is not a tensor.
*
* Outputs:
*
- * * A tensor of the data type NN_BOOL. When a quantization model is used, the quantization parameters of the output
- * cannot be omitted. However, values of the quantization parameters do not affect the result.
+ * * output: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
+ * the quantization parameters of the output cannot be omitted.
+ * However, values of the quantization parameters do not affect the result.
*/
OH_NN_OPS_LESS_EQUAL = 18,
@@ -835,26 +889,30 @@ typedef enum {
*
* * TransposeX: Boolean value indicating whether to transpose input1.
* * TransposeY: Boolean value indicating whether to transpose input2.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
+ * The specified activation function is called before output.
*
* Outputs:
*
* * output: inner product obtained after calculation. In case of type!=NN_UNKNOWN, the output data type is
* determined by type. In case of type==NN_UNKNOWN, the output data type depends on the data type
* converted during computing of inputX and inputY.
- *
+ *
*/
OH_NN_OPS_MATMUL = 19,
/**
- * Calculates the maximum of input1 and input2 element-wise. The inputs of input1 and input2
- * comply with the implicit type conversion rules to make the data types consistent. * The inputs must be two tensors or one tensor and one scalar.
- * When the inputs are two tensors, their data types cannot be both NN_BOOL. Their shapes can be broadcast to the same size.
+     * Calculates the maximum of input1 and input2 element-wise. The inputs input1
+     * and input2 comply with the implicit type conversion rules to make the data types consistent.
+ * The inputs must be two tensors or one tensor and one scalar.
+ * When the inputs are two tensors, their data types cannot be both OH_NN_BOOL.
+ * Their shapes can be broadcast to the same size.
* When the inputs are one tensor and one scalar, the scalar must be a constant.
*
* Inputs:
*
- * * input1: n-dimensional input tensor of the real number or NN_BOOL type.
- * * input2: n-dimensional input tensor of the real number or NN_BOOL type.
+ * * input1: n-dimensional input tensor of the real number or OH_NN_BOOL type.
+ * * input2: n-dimensional input tensor of the real number or OH_NN_BOOL type.
*
* Outputs:
*
@@ -874,18 +932,23 @@ typedef enum {
*
* Parameters:
*
- * * kernelSize: kernel size used to obtain the maximum. It is an int array [kernel_height, kernel_width].
+ * * kernelSize: kernel size used to obtain the maximum. It is an int array [kernelHeight, kernelWidth].
* The first number indicates the kernel height, and the second number indicates the kernel width.
- * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
- * The first number indicates the moving step in height, and the second number indicates the moving step in width.
+ * * strides indicates the distance of kernel moving. The value is an int array
+ * [strideHeight, strideWidth]. The first number indicates the moving step in height,
+ * and the second number indicates the moving step in width.
* * padMode: padding mode, which is optional. The value is of the int type and can be 0 (same)
* or 1 (valid). The nearest neighbor value is used for padding.
- * 0 (same): The height and width of the output are the same as those of the input.
- * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
- * Otherwise, the last additional padding will be completed from the bottom and right.
- * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
- * * activationType is an integer constant which is contained in FuseType.
+ * 0 (same): The height and width of the output are the same as those of the input. The total padding
+ * quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and
+ * right if possible. Otherwise, the last additional padding will be completed from the bottom and right.
+ * 1 (valid): The possible maximum height and width of the output will be returned in case of
+ * no padding. The excessive pixels will be discarded.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
+     * * global: whether to perform global pooling.
+     * * roundMode: boundary handling method. When the pooling kernel cannot completely cover the input
+     *      feature map, 0 means the output size is rounded down and 1 means it is rounded up.
*
* If the input contains the padList parameter:
*
@@ -895,14 +958,18 @@ typedef enum {
*
* Parameters:
*
- * * kernelSize: kernel size used to obtain the maximum. It is an int array [kernel_height, kernel_width].
+ * * kernelSize: kernel size used to obtain the maximum. It is an int array [kernelHeight, kernelWidth].
* The first number indicates the kernel height, and the second number indicates the kernel width.
- * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
- * The first number indicates the moving step in height, and the second number indicates the moving step in width.
- * * padList: padding around input. It is an int array [top, bottom, left, right],
+ * * strides indicates the distance of kernel moving. The value is an int array
+ * [strideHeight, strideWidth]. The first number indicates the moving step in height,
+ * and the second number indicates the moving step in width.
+ * * padList: padding around input. It is an int array [top, bottom, left, right],
* and the nearest neighbor values are used for padding.
-     * * activationType is an integer constant which is contained in FuseType.
+     * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
+     * * global: whether to perform global pooling.
+     * * roundMode: boundary handling method. When the pooling kernel cannot completely cover the input
+     *      feature map, 0 means the output size is rounded down and 1 means it is rounded up.
*
* Outputs:
*
@@ -911,8 +978,8 @@ typedef enum {
OH_NN_OPS_MAX_POOL = 21,
/**
- * Multiplies elements in the same positions of inputX and inputY to obtain the output.
- * If inputX and inputY have different shapes, expand them to the same shape
+ * Multiplies elements in the same positions of input1 and input2 to obtain the output.
+ * If input1 and input2 have different shapes, expand them to the same shape
* through broadcast and then perform multiplication.
*
* Inputs:
@@ -922,32 +989,33 @@ typedef enum {
*
* Parameters:
*
- * * activationType is an integer constant which is contained in FuseType.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* Outputs:
*
- * * Product of each element of input1 and input2.
+ * * output: Product of each element of input1 and input2.
*/
OH_NN_OPS_MUL = 22,
/**
- * Generates a one-hot tensor based on the positions specified by indices. The positions specified by indices
- * are determined by on_value, and other positions are determined by off_value.
+ * Generates a one-hot tensor based on the positions specified by indices. The positions specified by
+ * indices are determined by onValue, and other positions are determined by offValue.
*
* Inputs:
*
* * indices: n-dimensional tensor. Each element in indices determines the position of
- * on_value in each one-hot vector.
+ * onValue in each one-hot vector.
* * depth: integer scalar that determines the depth of the one-hot vector. The value of depth
* must be greater than 0.
- * * on_value: scalar that specifies a valid value in the one-hot vector.
- * * off_value: scalar that specifies the values of other posistions in the one-hot vector except the valid value.
+ * * onValue: scalar that specifies a valid value in the one-hot vector.
+     * * offValue: scalar that specifies the values of other positions in the one-hot vector except
+ * the valid value.
*
* Parameters:
*
* * axis: integer scalar that specifies the dimension for inserting the one-hot. Assume that the shape
- * of indices is [N, C], and the value of depth is D.
+ * of indices is [N, C], and the value of depth is D.
* When axis is 0, the shape of the output is [D, N, C].
* When axis is -1, the shape of the output is [N, C, D].
* When axis is 1, the shape of the output is [N, D, C].
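+     *      For example (illustrative values): with indices = [0, 2], depth = 3, onValue = 1, offValue = 0
+     *      and axis = -1, the output is [[1, 0, 0], [0, 0, 1]].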
@@ -965,31 +1033,40 @@ typedef enum {
* Inputs:
*
* * inputX: n-dimensional tensor in [BatchSize, ...] format.
- * * paddings: 2D tensor that specifies the length to pad in each dimension. The shape is [n, 2].
- * For example, paddings[i][0] indicates the number of paddings to be added preceding inputX in the ith dimension.
- * paddings[i][1] indicates the number of paddings to be added following inputX in the ith dimension.
+ * * paddings: 2D tensor that specifies the length to pad in each dimension. The shape is [n, 2].
+ * For example, paddings[i][0] indicates the number of paddings to be added preceding
+ * inputX in the ith dimension.
+ * paddings[i][1] indicates the number of paddings to be added following inputX
+ * in the ith dimension.
*
* Parameters:
*
- * * padValues: value to be added to the pad operation. The value is a constant with the same data type as inputX.
+ * * constantValue: value to be added to the pad operation.
+ * The value is a constant with the same data type as inputX.
+ * * paddingMode: Padding mode.
*
* Outputs:
*
- * * output: n-dimensional tensor after padding, with the same dimensions and data type as inputX.
- * The shape is determined by inputX and paddings.
+ * * output: n-dimensional tensor after padding, with the same dimensions and data type as
+ * inputX. The shape is determined by inputX and paddings.
* output.shape[i] = input.shape[i] + paddings[i][0]+paddings[i][1]
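+     *      For example (illustrative values): for inputX of shape [2, 3] with paddings = [[1, 1], [2, 2]],
+     *      the output shape is [2 + 1 + 1, 3 + 2 + 2] = [4, 7].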
*/
OH_NN_OPS_PAD = 24,
/**
- * Calculates the y power of each element in input. The inputs must be two tensors or one tensor and one scalar.
- * When the inputs are two tensors, their data types cannot be both NN_BOOL, and their shapes must be the same.
+ * Calculates the y power of each element in input.
+ * The inputs must be two tensors or one tensor and one scalar.
+ * When the inputs are two tensors, their data types cannot be both OH_NN_BOOL, and their shapes must be the same.
* When the inputs are one tensor and one scalar, the scalar must be a constant.
*
* Inputs:
*
- * * input: real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
- * * y: real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
+ * * input: real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
+ * * y: real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
+ *
+ * Parameters:
+     * * scale: an OH_NN_FLOAT32 scalar that represents the factor of the scale blend.
+     * * shift: an OH_NN_FLOAT32 scalar that represents the bias of the scale blend.
*
* Outputs:
*
@@ -1009,7 +1086,7 @@ typedef enum {
* Parameters:
*
* * axis: dimensions to be scaled.
- * * activationType is an integer constant which is contained in FuseType.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* Outputs:
@@ -1053,12 +1130,16 @@ typedef enum {
*
* * input: n-dimensional input tensor.
* * begin: start of the slice, which is an array of integers greater than or equal to 0.
- * * size: slice length, which is an array of integers greater than or equal to 0.
+ * * size: slice length, which is an array of integers greater than or equal to 0.
* Assume that a dimension is i and 1<=size[i]<=input.shape[i]-begin[i].
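+     *      For example (illustrative values): for an input of shape [3, 4] with begin = [1, 0] and
+     *      size = [2, 4], the output has shape [2, 4] and contains the last two rows of the input.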
*
+ * Parameters:
+ *
+ * * axes: Dimensions on which the tensor is sliced.
+ *
* Outputs:
*
- * * output: n-dimensional tensor obtained by slicing.
+ * * output: n-dimensional tensor obtained by slicing.
* The TensorType, shape, and size of the output are the same as those of the input.
*/
OH_NN_OPS_SLICE = 29,
@@ -1093,8 +1174,8 @@ typedef enum {
* Parameters:
*
* * blockShape: a pair of integers. Each of them is greater than or equal to 1.
- * * paddings: a pair of arrays. Each of them consists of two integers. The four integers that form paddings
- * must be greater than or equal to 0. paddings[0][0] and paddings[0][1]
+     * * paddings: a pair of arrays. Each of them consists of two integers. The four integers that form
+ * paddings must be greater than or equal to 0. paddings[0][0] and paddings[0][1]
* specify the number of paddings in the third dimension, and paddings[1][0] and paddings[1][1]
* specify the number of paddings in the fourth dimension.
*
@@ -1106,14 +1187,15 @@ typedef enum {
* output.shape[1] = c
* output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0]
* output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1]
-     * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]) is exactly divisible by
-     * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]).
+     * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]) must be exactly
+     * divisible by blockShape[0] and blockShape[1], respectively.
- *
+ *
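+     * For example (illustrative values): for an input of shape (1, 3, 4, 4) with blockShape = [2, 2]
+     * and paddings = [[0, 0], [0, 0]], the formulas above give an output of shape (4, 3, 2, 2).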
*/
OH_NN_OPS_SPACE_TO_BATCH_ND = 31,
/**
- * Splits the input into multiple tensors along the axis dimension. The number of tensors is specified by outputNum.
+ * Splits the input into multiple tensors along the axis dimension.
+ * The number of tensors is specified by outputNum.
*
* Inputs:
*
@@ -1122,15 +1204,15 @@ typedef enum {
* Parameters:
*
* * outputNum: number of output tensors. The data type is long.
- * * size_splits: size of each tensor split from the input. The value is a 1D tensor of the int type.
- * If size_splits is empty, the input will be evenly split into tensors of the same size. In this case,
+ * * sizeSplits: size of each tensor split from the input. The value is a 1D tensor of the int type. If
+ * sizeSplits is empty, the input will be evenly split into tensors of the same size. In this case,
-     *      input.shape[axis] can be exactly divisible by outputNum.
+     *      input.shape[axis] must be exactly divisible by outputNum.
- * If size_splits is not empty, the sum of all its elements must be equal to input.shape[axis].
+ * If sizeSplits is not empty, the sum of all its elements must be equal to input.shape[axis].
* * axis: splitting dimension of the int type.
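+     *      For example (illustrative values): splitting an input of shape [6, 4] with axis = 0,
+     *      outputNum = 3 and an empty sizeSplits produces three tensors, each of shape [2, 4].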
*
* Outputs:
*
- * * outputs: array of n-dimensional tensors, with the same data type and dimensions.
+ * * outputs: array of n-dimensional tensors, with the same data type and dimensions.
* The data type of each tensor is the same as that of input.
*/
OH_NN_OPS_SPLIT = 32,
@@ -1144,25 +1226,30 @@ typedef enum {
*
* Outputs:
*
- * * output: square root of the input. It is an n-dimensional tensor with the same data type and shape as input.
+ * * output: square root of the input.
+ * It is an n-dimensional tensor with the same data type and shape as input.
*/
OH_NN_OPS_SQRT = 33,
/**
- * Calculates the square of the difference between two tensors. The SquaredDifference operator supports tensor and tensor subtraction.
- * If two tensors have different TensorTypes, the Sub operator converts the low-precision tensor to a high-precision one.
- * If two tensors have different shapes, the two tensors can be extended to tensors with the same shape through broadcast.
+ * Calculates the square of the difference between two tensors. The SquaredDifference operator supports
+ * tensor and tensor subtraction. If two tensors have different TensorTypes, the Sub operator
+ * converts the low-precision tensor to a high-precision one. If two tensors have different shapes,
+ * the two tensors can be extended to tensors with the same shape through broadcast.
*
* Inputs:
*
- * * input1: minuend, which is a tensor of the NN_FLOAT16, NN_FLOAT32, NN_INT32, or NN_BOOL type.
- * * input2: subtrahend, which is a tensor of the NN_FLOAT16, NN_FLOAT32, NN_INT32, or NN_BOOL type.
+ * * input1: minuend, which is a tensor of the OH_NN_FLOAT16, OH_NN_FLOAT32, OH_NN_INT32,
+ * or OH_NN_BOOL type.
+ * * input2: subtrahend, which is a tensor of the OH_NN_FLOAT16, OH_NN_FLOAT32, OH_NN_INT32,
+ * or OH_NN_BOOL type.
*
* Outputs:
*
* * output: square of the difference between two inputs. The output shape is determined
- * byinput1 and input2. If they have the same shape, the output tensor has the same shape as them.
- * If they have different shapes, perform the broadcast operation on input1 and input2 and perform subtraction.
+     *      by input1 and input2. If they have the same shape, the output tensor has the same
+ * shape as them. If they have different shapes, perform the broadcast operation on
+ * input1 and input2 and perform subtraction.
* TensorType of the output is the same as that of the input tensor with higher precision.
*/
OH_NN_OPS_SQUARED_DIFFERENCE = 34,
@@ -1178,7 +1265,8 @@ typedef enum {
*
* Parameters:
*
- * * axis: dimension to be removed. The value is of int64_t type and can be an integer in the range [-n, n) or an array.
+ * * axis: dimension to be removed.
+ * The value is of int64_t type and can be an integer in the range [-n, n) or an array.
*
* Outputs:
*
@@ -1202,8 +1290,8 @@ typedef enum {
*
* Outputs:
*
- * * output: stacking result of the input along the axis dimension. The value is an n+1-dimensional tensor
- * and has the same TensorType as the input.
+ * * output: stacking result of the input along the axis dimension.
+ * The value is an n+1-dimensional tensor and has the same TensorType as the input.
*/
OH_NN_OPS_STACK = 36,
@@ -1213,36 +1301,39 @@ typedef enum {
* Inputs:
*
* * input: n-dimensional input tensor.
- * * begin: start of slicing, which is a 1D tensor. The length of begin is n.
+ * * begin: start of slicing, which is a 1D tensor. The length of begin is n.
* begin[i] specifies the start of slicing in the ith dimension.
- * * end: end of slicing, which is a 1D tensor. The length of end is n.
+ * * end: end of slicing, which is a 1D tensor. The length of end is n.
* end[i] specifies the end of slicing in the ith dimension.
- * * strides: slicing stride, which is a 1D tensor. The length of strides is n.
+ * * strides: slicing stride, which is a 1D tensor. The length of strides is n.
* strides[i] specifies the stride at which the tensor is sliced in the ith dimension.
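+     *      For example (illustrative values, with all masks set to 0): for an input of shape [5] with
+     *      begin = [1], end = [4] and strides = [2], the slice keeps the elements at indices 1 and 3.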
*
* Parameters:
*
- * * beginMask: an integer used to mask begin. beginMask is represented in binary code.
- * In case of binary(beginMask)[i]==1, for the ith dimension, elements are sliced from the first element
- * at strides[i] until the end[i]-1 element.
- *
- * * endMask: an integer used to mask end. endMask is represented in binary code.
- * In case of binary(endMask)[i]==1, elements are sliced from the element at the begin[i] position
+ * * beginMask: an integer used to mask begin. beginMask is represented in binary code.
+ * In case of binary(beginMask)[i]==1, for the ith dimension,
+ * elements are sliced from the first element at strides[i] until the end[i]-1 element.
+ *
+ * * endMask: an integer used to mask end. endMask is represented in binary code.
+ * In case of binary(endMask)[i]==1, elements are sliced from the element at the begin[i] position
* in the ith dimension until the tensor boundary at strides[i].
- *
- * * ellipsisMask: integer used to mask begin and end. ellipsisMask is represented in binary code.
- * In case of binary(ellipsisMask)[i]==1, elements are sliced from the first element at strides[i] in the ith dimension
+ *
+ * * ellipsisMask: integer used to mask begin and end.
+ * ellipsisMask is represented in binary code. In case of binary(ellipsisMask)[i]==1,
+ * elements are sliced from the first element at strides[i] in the ith dimension
* until the tensor boundary. Only one bit of binary(ellipsisMask) can be a non-zero value.
- *
- * * newAxisMask: new dimension, which is an integer. newAxisMask is represented in binary code.
- * In case of binary(newAxisMask)[i]==1, a new dimension whose length is 1 is inserted into the ith dimension.
- * * shrinkAxisMask: shrinking dimension, which is an integer. * shrinkAxisMask is represented in binary code.
- * In the case of binary(shrinkAxisMask)[i]==1, all elements in the ith dimension will be discarded,
- * and the length of the ith dimension is shrunk to 1.
+ *
+ * * newAxisMask: new dimension, which is an integer. newAxisMask is represented in binary code.
+ * In case of binary(newAxisMask)[i]==1,
+ * a new dimension whose length is 1 is inserted into the ith dimension.
+     * * shrinkAxisMask: shrinking dimension, which is an integer. shrinkAxisMask is
+ * represented in binary code. In the case of binary(shrinkAxisMask)[i]==1, all elements in the
+ * ith dimension will be discarded, and the length of the ith dimension is shrunk to 1.
*
* Outputs:
*
- * * A tensor, with the same data type as input. The number of dimensions of the output tensor is rank(input[0])+1.
+ * * output: A tensor, with the same data type as input.
+ * The number of dimensions of the output tensor is rank(input[0])+1.
*/
OH_NN_OPS_STRIDED_SLICE = 37,
@@ -1256,14 +1347,15 @@ typedef enum {
*
* Parameters:
*
- * * activationType is an integer constant which is contained in FuseType.
+ * * activationType is an integer constant which is contained in OH_NN_FuseType.
* The specified activation function is called before output.
*
* Outputs:
*
- * * output: difference between the two tensors. The output shape is determined byinput1 and input2.
- * If they have the same shape, the output tensor has the same shape as them.
- * If they have different shapes, perform the broadcast operation on input1 and input2 and perform subtraction.
+     * * output: difference between the two tensors. The output shape is determined by input1 and
+ * input2. If they have the same shape, the output tensor has the same shape as them.
+ * If they have different shapes,
+ * perform the broadcast operation on input1 and input2 and perform subtraction.
* TensorType of the output is the same as that of the input tensor with higher precision.
*/
OH_NN_OPS_SUB = 38,
@@ -1277,7 +1369,8 @@ typedef enum {
*
* Outputs:
*
- * * output: hyperbolic tangent of the input. The TensorType and tensor shape are the same as those of the input.
+ * * output: hyperbolic tangent of the input.
+ * The TensorType and tensor shape are the same as those of the input.
*/
OH_NN_OPS_TANH = 39,
@@ -1289,46 +1382,57 @@ typedef enum {
* * multiples: number of times that the input tensor is copied in each dimension. The value is a 1D tensor.
* The length m is not less than the number of dimensions, that is, n.
*
+ * Parameters:
+ *
+     * * dims: A 1D tensor that specifies the number of times that data is copied in each dimension.
+     *     The length m is not less than the number of dimensions of input.
+ *
* Outputs:
* * An m-dimensional tensor whose TensorType is the same as that of the input. If input and
* multiples have the same length, input and output have the same number of dimensions.
- * If the length of multiples is greater than n, 1 is used to fill the input dimension,
- * and then the input is copied in each dimension the specified times to obtain the m-dimensional tensor.
+ * If the length of multiples is greater than n, 1 is used to fill the input dimension, and
+ * then the input is copied in each dimension the specified times to obtain the m-dimensional tensor.
*/
OH_NN_OPS_TILE = 40,
/**
- * Transposes data of input 0 based on permutation.
+ * Transposes data of input based on permutation.
*
* Inputs:
*
* * input: n-dimensional tensor to be transposed.
- * * permutation: The value is a 1D tensor whose length is the same as the number of dimensions of input 0.
+ * * permutation: The value is a 1D tensor whose length is the same as the number of
+ * dimensions of input.
*
* Outputs:
*
- * * output: n-dimensional tensor. TensorType of output 0 is the same as that of input 0,
- * and the output shape is determined by the shape and permutation of input 0.
+ * * output: n-dimensional tensor. TensorType of output is the same as that of
+ * input, and the output shape is determined by the shape and permutation of input.
*/
OH_NN_OPS_TRANSPOSE = 41,
/**
- * Calculates the average value in the specified dimension. If keepDims is set to false, the number of dimensions
- * is reduced for the input; if keepDims is set to true, the number of dimensions is retained.
+ * Calculates the average value in the specified dimension.
+ * If keepDims is set to false, the number of dimensions is reduced for the input;
+ * if keepDims is set to true, the number of dimensions is retained.
*
* Inputs:
*
* * input: n-dimensional input tensor, where n is less than 8.
- * * axis: dimension used to calculate the average value. The value is a 1D tensor. The value range of each element in axis is [–n, n).
+ * * axis: dimension used to calculate the average value. The value is a 1D tensor.
+ * The value range of each element in axis is [–n, n).
*
* Parameters:
*
* * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
+ * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed
+ * until the last axis.
+     * * coeff: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
*
* Outputs:
*
-     * * output: m-dimensional output tensor whose data type is the same as that of the input. If keepDims is
-     * false, m==n. If keepDims is true, m<n.
+     * * output: m-dimensional output tensor whose data type is the same as that of the input.
+     *     If keepDims is false, m<n. If keepDims is true, m==n.
*/
OH_NN_OPS_REDUCE_MEAN = 42,
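To make the keepDims rule above concrete, a small worked example (illustrative, not part of the header):

    /* For an input of shape {2, 3, 4} reduced over axis = {1}:
     *   keepDims == true  -> output shape {2, 1, 4}  (m == n)
     *   keepDims == false -> output shape {2, 4}     (m < n)
     * The same rule applies to the other reduce operators in this enumeration. */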
@@ -1337,21 +1441,25 @@ typedef enum {
*
* Inputs:
*
- * * input: 4D input tensor. Each element in the input cannot be less than 0. The input layout must be [batchSize, height, width, channels].
+ * * input: 4D input tensor. Each element in the input cannot be less than 0.
+ * The input layout must be [batchSize, height, width, channels].
*
* Parameters:
*
* * newHeight: resized height of the 4D tensor.
* * newWidth: resized width of the 4D tensor.
- * * preserveAspectRatio: indicates whether to maintain the height/width ratio of input after resizing.
- * * coordinateTransformMode: coordinate transformation method used by the resize operation. The value is an int32 integer.
- * Currently, the following methods are supported:
- * * excludeOutside: an int64 floating point number. When its value is 1, the sampling weight of the part that
+ * * preserveAspectRatio: indicates whether to maintain the height/width
+ * ratio of input after resizing.
+ * * coordinateTransformMode: coordinate transformation method used by the resize operation.
+ * The value is an int32 integer. Currently, the following methods are supported:
+ * 0 means ASYMMETRIC, 1 means ALIGN_CORNERS, 2 means HALF_PIXEL.
+ * * excludeOutside: an int64 floating point number. When its value is 1,
+ * the sampling weight of the part that
* exceeds the boundary of input is set to 0, and other weights are normalized.
*
* Outputs:
*
- * * output: n-dimensional tensor, with the same shape and data type as input.
+ * * output: n-dimensional tensor, with the same shape and data type as input.
*/
OH_NN_OPS_RESIZE_BILINEAR = 43,
@@ -1360,7 +1468,8 @@ typedef enum {
*
* Inputs:
*
- * * input: n-dimensional tensor, where n is less than 8. Each element of the tensor cannot be less than 0.
+ * * input: n-dimensional tensor, where n is less than 8.
+ * Each element of the tensor cannot be less than 0.
*
* Outputs:
*
@@ -1378,7 +1487,8 @@ typedef enum {
*
* Outputs:
*
- * * output: tensor whose data type is the same as that of input and shape is determined by InputShape.
+ * * output: tensor whose data type is the same as that of input
+ * and shape is determined by InputShape.
*/
OH_NN_OPS_RESHAPE = 45,
@@ -1387,16 +1497,16 @@ typedef enum {
*
* Inputs:
*
- * * input: n-dimensional tensor. If n is greater than or equal to 2, inputX must be [BatchSize, ..., Channels].
- * The second dimension is the number of channels.
- * * weight: 1D tensor. The length of weight must be 1 or equal to the number of channels. If the length of weight is 1,
- * all channels share the same weight.
- * If the length of weight is equal to the number of channels, each channel exclusively has a weight.
- * If n is less than 2 for inputX, the weight length must be 1.
+ * * input: n-dimensional tensor. If n is greater than or equal to 2,
+ * input must be [BatchSize, ..., Channels]. The second dimension is the number of channels.
+ * * weight: 1D tensor. The length of weight must be 1 or equal to the number of channels.
+ * If the length of weight is 1, all channels share the same weight.
+ * If the length of weight is equal to the number of channels, each channel exclusively has a weight.
+ * If n is less than 2 for input, the weight length must be 1.
*
* Outputs:
*
- * * output: PReLU activation value of x, with the same shape and data type as inputX.
+ * * output: PReLU activation value of input, with the same shape and data type as input.
*/
OH_NN_OPS_PRELU = 46,
@@ -1414,7 +1524,8 @@ typedef enum {
OH_NN_OPS_RELU = 47,
/**
- * Calculates the Relu6 activation value of the input, that is, calculate min(max(x, 0), 6) for each element x in the input.
+ * Calculates the Relu6 activation value of the input, that is,
+ * calculate min(max(x, 0), 6) for each element x in the input.
*
* Inputs:
*
@@ -1438,8 +1549,12 @@ typedef enum {
*
* Parameters:
*
- * * beginAxis is an NN_INT32 scalar that specifies the axis from which normalization starts. The value range is [1, rank(input)).
- * * epsilon is a scalar of NN_FLOAT32. It is a tiny amount in the normalization formula. The common value is 1e-7.
+ * * beginAxis: an OH_NN_INT32 scalar that specifies the axis from which normalization starts.
+ * The value range is [1, rank(input)).
+ * * epsilon: a scalar of OH_NN_FLOAT32. It is a tiny amount in the normalization formula.
+ * The common value is 0.00001f.
+ * * beginParamsAxis: an OH_NN_INT32 scalar that specifies the start axis of layer normalization
+ * of input parameter (gamma, beta).
*
* Outputs:
*
@@ -1448,42 +1563,51 @@ typedef enum {
OH_NN_OPS_LAYER_NORM = 49,
/**
- * Calculates the accumulated value for a tensor along the specified dimension.
+ * Calculates the accumulated value for a tensor along the specified dimension. If keepDims is set to
+ * false, the number of dimensions is reduced for the input; if keepDims is set to true,
+ * the number of dimensions is retained.
*
* Inputs:
*
* * input: n-dimensional input tensor, where n is less than 8.
- * * axis: dimension used to calculate the product. The value is a 1D tensor. The value range of each element in axis is [–n, n).
+ * * axis: dimension used to calculate the product. The value is a 1D tensor.
+ * The value range of each element in axis is [–n, n).
*
* Parameters:
*
* * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
- * When its value is true, the number of output dimensions is the same as that of the input.
- * When its value is false, the number of output dimensions is reduced.
+ * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed
+ * until the last axis.
+     * * coeff: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
*
* Outputs:
*
* * output: m-dimensional output tensor whose data type is the same as that of the input.
-     *     If keepDims is false, m==n. If keepDims is true, m<n.
+     *     If keepDims is false, m<n. If keepDims is true, m==n.
*/
OH_NN_OPS_REDUCE_PROD = 50,
/**
- * Operates the logical OR in the specified dimension. If keepDims is set to false,
- * the number of dimensions is reduced for the input; if keepDims is set to true, the number of dimensions is retained.
+ * Calculates the logical and value for input tensor along the specified dimension. If keepDims is set to
+ * false, the number of dimensions is reduced for the input; if keepDims is set to true,
+ * the number of dimensions is retained.
*
* Inputs:
*
- * * A n-dimensional input tensor, where n is less than 8.
- * * A 1D tensor specifying the dimension used to operate the logical OR. The value range of each element in axis is [–n, n).
+ * * input: n-dimensional input tensor, where n is less than 8.
+ * * axis: dimension used to calculate the logical and value. The value is a 1D tensor.
+ * The value range of each element in axis is [–n, n).
*
* Parameters:
*
* * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
+ * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed
+ * until the last axis.
+     * * coeff: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
*
* Outputs:
- * * output: m-dimensional output tensor whose data type is the same as that of the input.
-     * If keepDims is false, m==n. If keepDims is true, m<n.
+     * * output: m-dimensional output tensor whose data type is the same as that of the input.
+     *     If keepDims is false, m<n. If keepDims is true, m==n.
*/
OH_NN_OPS_REDUCE_ALL = 51,
@@ -1492,16 +1616,22 @@ typedef enum {
*
* Inputs:
*
- * * input: n-dimensional tensor.
+ * * input: n-dimensional tensor. If it is a conversion between a quantized type and
+ * a floating-point type, the input tensor should contain quantized parameters.
*
* Parameters:
*
- * * src_t: data type of the input.
- * * dst_t: data type of the output.
+ * * srcT: data type of the input.
+ * * dstT: data type of the output.
+ * * axis: appoint the dimensions from which the quantization parameters are extracted.
+ * If the size of the input tensor quantization parameter is 1, the operator function is
+ * layer quantization conversion, and this parameter does not take effect. If the size of
+ * the input tensor quantization parameter is greater than 1, the operator function is the
+ * quantization conversion along the specific channels, and this parameter takes effect.
*
* Outputs:
*
- * * output: n-dimensional tensor. The data type is determined by input2.
+ * * output: n-dimensional tensor. The data type is determined by dstT.
* The output shape is the same as the input shape.
*/
OH_NN_OPS_QUANT_DTYPE_CAST = 52,
@@ -1512,11 +1642,13 @@ typedef enum {
* Inputs:
*
* * input: n-dimensional tensor.
- * * input k: first k records of data and their indices.
+ * * k: first k records of data and their indices.
*
* Parameters:
*
* * sorted: order of sorting. The value true means descending and false means ascending.
+     * * axis: An OH_NN_INT32 scalar that specifies the dimension to be sorted, default -1,
+     *     pointing to the last dimension.
*
* Outputs:
*
@@ -1535,7 +1667,10 @@ typedef enum {
* Parameters:
*
* * axis: dimension for calculating the index of the maximum.
- * * keep_dims: indicates whether to maintain the input tensor dimension. The value is a Boolean value.
+ * * keepDims: indicates whether to maintain the input tensor dimension. The value is a Boolean value.
+ * * topK: Whether to keep the output dimensions the same as the input dimensions.
+     * * outMaxValue: If false, return the index; if true, return the value.
+     *     The default value is false.
*
* Outputs:
* * output: index of the maximum input tensor on the axis. The value is a tensor.
@@ -1546,151 +1681,1262 @@ typedef enum {
* Adds a dimension based on the value of axis.
*
* Inputs:
+ *
* * input: n-dimensional tensor.
*
* Parameters:
*
- * * axis: dimension to be added. The value of axis can be an integer or an array of integers.
+ * * axis: dimension to be added. The value of axis can be an integer or an array of integers.
* The value range of the integer is [-n, n).
*
* Outputs:
+ *
* * output: output tensor.
*/
OH_NN_OPS_UNSQUEEZE = 55,
/**
- * Gaussian error linear unit activation function. The int quantization input is not supported. output=0.5∗input∗(1+tanh(input/2))
+ * Gaussian error linear unit activation function. The int quantization input is not supported.
+ * output=0.5∗input∗(1+tanh(input/2))
*
* Inputs:
- * * An n-dimensional input tensor.
+ *
+ * * input: An n-dimensional input tensor.
+ *
+ * Parameters:
+ * * approximate: Whether to use the approximation algorithm.
*
* Outputs:
+ *
* * output: n-dimensional tensor, with the same data type and shape as the input tensor.
*/
OH_NN_OPS_GELU = 56,
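The formula quoted in the description can be written as a one-line reference in C; this is a sketch of the stated expression, not the runtime's kernel:

    #include <math.h>

    /* output = 0.5 * input * (1 + tanh(input / 2)), as given in the description above. */
    static float GeluRef(float x)
    {
        return 0.5f * x * (1.0f + tanhf(x / 2.0f));
    }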
-} OH_NN_OperationType;
-
-/**
- * @brief Enumerates the tensor data types.
- *
- * Tensors are usually used to set the input, output, and operator parameters of a model. When a tensor is used
- * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}.
- * When the tensor is used as an operator parameter, select an enumerated value other than {@link OH_NN_TENSOR} as the tensor type.
- * Assume that the pad parameter of the {@link OH_NN_OPS_CONV2D} operator is being set.
- * You need to set the type attribute of the {@link OH_NN_Tensor} instance to {@link OH_NN_CONV2D_PAD}.
- * The settings of other operator parameters are similar. The enumerated values are named
- * in the format OH_NN_{Operator name}_{Attribute name}.
- *
- * @since 9
- * @version 1.0
- */
-typedef enum {
- /** This enumerated value is used when the tensor is used as the input or output of a model (or operator). */
- OH_NN_TENSOR = 0,
-
- /** This enumerated value is used when the tensor is used as the activationType parameter of the Add operator. */
- OH_NN_ADD_ACTIVATIONTYPE = 1,
- /** This enumerated value is used when the tensor is used as the kernel_size parameter of the AvgPool operator. */
- OH_NN_AVG_POOL_KERNEL_SIZE = 2,
- /** This enumerated value is used when the tensor is used as the stride parameter of the AvgPool operator. */
- OH_NN_AVG_POOL_STRIDE = 3,
- /** This enumerated value is used when the tensor is used as the pad_mode parameter of the AvgPool operator. */
- OH_NN_AVG_POOL_PAD_MODE = 4,
- /** This enumerated value is used when the tensor is used as the pad parameter of the AvgPool operator. */
- OH_NN_AVG_POOL_PAD = 5,
- /** This enumerated value is used when the tensor is used as the activation_type parameter of the AvgPool operator. */
- OH_NN_AVG_POOL_ACTIVATION_TYPE = 6,
+ /**
+     * Unpacks the input tensor based on the given dimension of axis.
+ * Unpacks tensors from input by chipping it along the axis dimension.
+ * For example, given a tensor of shape (A, B, C, D);
+ * If axis == 0, then the i'th tensor in output is the slice value[i, :, :, :],\n
+ * and each tensor in output will have shape (B, C, D).
+ * If axis == 1, then the i'th tensor in output is the slice value[:, i, :, :],\n
+ * and each tensor in output will have shape (A, C, D). Etc.
+ * This is the opposite of OH_NN_OPS_STACK.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Parameters:
+ *
+     * * axis: dimension along which to unpack. Default 0. The range is [-n, n).
+ *
+ * Outputs:
+ *
+     * * output: A tuple of tensors; each output tensor has the same shape.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_UNSTACK = 57,
- /** This enumerated value is used when the tensor is used as the eosilon parameter of the BatchNorm operator. */
- OH_NN_BATCH_NORM_EPSILON = 7,
+ /**
+ * Obtains the absolute value of the input tensor.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor. The absolute value of the input tensor.
+     *     The shape and data type are the same as those of the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_ABS = 58,
- /** This enumerated value is used when the tensor is used as the blockSize parameter of the BatchToSpaceND operator. */
- OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE = 8,
- /** This enumerated value is used when the tensor is used as the crops parameter of the BatchToSpaceND operator. */
- OH_NN_BATCH_TO_SPACE_ND_CROPS = 9,
+ /**
+ * Computes the Gauss error function of input element-wise.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor. The dimension should be less than 8,
+     *     and the data type only supports OH_NN_FLOAT32 and OH_NN_FLOAT16.
+ *
+ * Outputs:
+ *
+     * * output: n-dimensional tensor. The shape and data type are the same as those of the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_ERF = 59,
- /** This enumerated value is used when the tensor is used as the axis parameter of the Concat operator. */
- OH_NN_CONCAT_AXIS = 10,
+ /**
+ * Calculates the exponential of the given input tensor element-wise.
+     * ExpFusion computes the output by the formula output = base ^ (shift + scale * input), for base > 0.
+     * The base defaults to -1, which denotes the natural constant 'e',
+     * and the formula then becomes output = exp(shift + scale * input).
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Parameters:
+ *
+     * * base: The base of the exponential function. Default -1, representing the natural constant 'e'.
+     *     Any other value must be > 0.
+     * * scale: The amplification factor of the exponential value, default 1.
+ * * shift: The offset of exponential value, default 0.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor. The element-wise exponential result of the input tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_EXP = 60,
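A minimal sketch of the ExpFusion formula described above, assuming the default base of -1 selects the natural exponential; the function is illustrative, not part of the API:

    #include <math.h>

    static float ExpFusionRef(float x, float base, float scale, float shift)
    {
        if (base < 0.0f) {                      /* default -1: natural exponential */
            return expf(shift + scale * x);
        }
        return powf(base, shift + scale * x);   /* general case, base > 0 */
    }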
- /** This enumerated value is used when the tensor is used as the strides parameter of the Conv2D operator. */
- OH_NN_CONV2D_STRIDES = 11,
- /** This enumerated value is used when the tensor is used as the pad parameter of the Conv2D operator. */
- OH_NN_CONV2D_PAD = 12,
- /** This enumerated value is used when the tensor is used as the dilation parameter of the Conv2D operator. */
- OH_NN_CONV2D_DILATION = 13,
- /** This enumerated value is used when the tensor is used as the padMode parameter of the Conv2D operator. */
- OH_NN_CONV2D_PAD_MODE = 14,
- /** This enumerated value is used when the tensor is used as the activationType parameter of the Conv2D operator. */
- OH_NN_CONV2D_ACTIVATION_TYPE = 15,
- /** This enumerated value is used when the tensor is used as the group parameter of the Conv2D operator. */
- OH_NN_CONV2D_GROUP = 16,
+ /**
+ * For input1 and input2, calculate the result of input1[i] < input2[i] for each pair of elements,
+ * where i is the index of each element in the input tensor.
+ *
+ * Inputs:
+ *
+ * * input1: can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
+ * * input2: can be a real number or a Boolean value if input1 is a tensor and must be a tensor
+ * with the data type of real number or OH_NN_BOOL if input1 is not a tensor.
+ *
+ * Outputs:
+ *
+ * * output: A tensor of the data type OH_NN_BOOL. When a quantization model is used, the quantization
+ * parameters of the output cannot be omitted. However, values of the quantization parameters do not
+ * affect the result.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LESS = 61,
- /** This enumerated value is used when the tensor is used as the strides parameter of the Conv2DTranspose operator. */
- OH_NN_CONV2D_TRANSPOSE_STRIDES = 17,
- /** This enumerated value is used when the tensor is used as the pad parameter of the Conv2DTranspose operator. */
- OH_NN_CONV2D_TRANSPOSE_PAD = 18,
- /** This enumerated value is used when the tensor is used as the dilation parameter of the Conv2DTranspose operator. */
- OH_NN_CONV2D_TRANSPOSE_DILATION = 19,
- /** This enumerated value is used when the tensor is used as the outputPaddings parameter of the Conv2DTranspose operator. */
- OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS = 20,
- /** This enumerated value is used when the tensor is used as the padMode parameter of the Conv2DTranspose operator. */
- OH_NN_CONV2D_TRANSPOSE_PAD_MODE = 21,
- /** This enumerated value is used when the tensor is used as the activationType parameter of the Conv2DTranspose operator. */
- OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE = 22,
- /** This enumerated value is used when the tensor is used as the group parameter of the Conv2DTranspose operator. */
- OH_NN_CONV2D_TRANSPOSE_GROUP = 23,
+ /**
+ * Selects output elements from input1 or input2, depending on condition.
+     * If condition is true, choose elements from input1; otherwise, choose elements from input2.
+     * The three inputs, condition, input1 and input2, must share the same shape.
+ *
+ * Inputs:
+ *
+ * * condition: n-dimensional tensor or scalar.
+ * The condition tensor, decides which element is chosen.
+ * * input1: n-dimensional tensor. First input tensor to be chosen.
+ * If condition is rank 1, input1 may have higher rank, but its first dimension must match the
+ * size of condition.
+ * * input2: n-dimensional tensor. Second input tensor to be chosen.
+ *
+ * Outputs:
+ *
+ * * output: A tensor, has the same shape and data type as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_SELECT = 62,
- /** This enumerated value is used when the tensor is used as the strides parameter of the DepthwiseConv2dNative operator. */
- OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES = 24,
- /** This enumerated value is used when the tensor is used as the pad parameter of the DepthwiseConv2dNative operator. */
- OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD = 25,
- /** This enumerated value is used when the tensor is used as the dilation parameter of the DepthwiseConv2dNative operator. */
- OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION = 26,
- /** This enumerated value is used when the tensor is used as the padMode parameter of the DepthwiseConv2dNative operator. */
- OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE = 27,
- /** This enumerated value is used when the tensor is used as the activationType parameter of the DepthwiseConv2dNative operator. */
- OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE = 28,
+ /**
+ * Calculates the square of input tensor element-wise.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor, has the same shape and dtype as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_SQUARE = 63,
- /** This enumerated value is used when the tensor is used as the activationType parameter of the Div operator. */
- OH_NN_DIV_ACTIVATIONTYPE = 29,
+ /**
+ * Flattens the input tensor into a 2D matrix. If input tensor has shape (d_0, d_1, … d_n),
+     * then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X d_n).
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor. The rank of input should be greater or equal to axis.
+ *
+ * Parameters:
+ *
+ * * axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension
+ * of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor.
+ * Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is
+ * (1, (d_0 X d_1 … d_n)), where the shape of the input tensor is (d_0, d_1, … d_n).
+ *
+ * Outputs:
+ *
+ * * output: 2-dimensional tensor after flattened.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_FLATTEN = 64,
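For the shape rule above, a small illustrative helper (hypothetical, assuming a non-negative axis within the input rank):

    #include <stdint.h>

    static void FlattenShape(const int32_t *dims, int32_t rank, int32_t axis, int32_t out[2])
    {
        int32_t outer = 1;
        int32_t inner = 1;
        for (int32_t i = 0; i < axis; ++i) {
            outer *= dims[i];               /* d_0 x ... x d_(axis-1) */
        }
        for (int32_t i = axis; i < rank; ++i) {
            inner *= dims[i];               /* d_axis x ... x d_n */
        }
        out[0] = outer;                     /* e.g. {2, 3, 4} with axis = 1 -> {2, 12} */
        out[1] = inner;
    }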
- /** This enumerated value is used when the tensor is used as the mode parameter of the Eltwise operator. */
- OH_NN_ELTWISE_MODE = 30,
+ /**
+ * DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.
+     * This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor
+ * where values from the depth dimension are moved in spatial blocks to the height and width dimensions.
+ * By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged
+ * in the following order: depth, column, and then row.
+ *
+ * Inputs:
+ *
+ * * input: 4-dimensional tensor with specific format of NHWC or NCHW.
+ * where N is the batch axis, H is the height, W is the width and C is the channel or depth.
+ *
+ * Parameters:
+ *
+ * * blockSize: Blocks of [blocksize, blocksize] are moved.
+ * * mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order.
+ *
+ * Outputs:
+ *
+ * * output: Output tensor of [N, H * blocksize, W * blocksize, C/(blocksize * blocksize)] for NHWC format
+ * or [N, C/(blocksize * blocksize), H * blocksize, W * blocksize] for NCHW format.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_DEPTH_TO_SPACE = 65,
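To ground the NHWC shape rule above, a worked example (illustrative, not from the header):

    /* input  {1, 2, 3, 12}, blockSize = 2
     * output {1, 2 * 2, 3 * 2, 12 / (2 * 2)} = {1, 4, 6, 3} */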
- /** This enumerated value is used when the tensor is used as the axis parameter of the FullConnection operator. */
- OH_NN_FULL_CONNECTION_AXIS = 31,
- /** This enumerated value is used when the tensor is used as the activationType parameter of the FullConnection operator. */
- OH_NN_FULL_CONNECTION_ACTIVATIONTYPE = 32,
+ /**
+     * Generate a tensor containing a sequence of numbers that begins at start\n
+ * and extends by increments of delta up to limit.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Parameters:
+ *
+ * * start: Scalar. First entry for the range of output values.
+ * * limit: Scalar. Exclusive upper limit for the range of output values.
+ * * delta: Scalar. Value to step by.
+ *
+ * Outputs:
+ *
+ * * output: 1-dimensional tensor with specific data type containing generated range of values.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_RANGE = 66,
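A worked example of the start/limit/delta parameters (illustrative only):

    /* start = 0, limit = 10, delta = 3  ->  output = {0, 3, 6, 9}
     * (limit is exclusive, so the next value 12 is not produced). */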
- /** This enumerated value is used when the tensor is used as the transposeA parameter of the Matmul operator. */
- OH_NN_MATMUL_TRANSPOSE_A = 33,
- /** This enumerated value is used when the tensor is used as the transposeB parameter of the Matmul operator. */
- OH_NN_MATMUL_TRANSPOSE_B = 34,
- /** This enumerated value is used when the tensor is used as the activationType parameter of the Matmul operator. */
- OH_NN_MATMUL_ACTIVATION_TYPE = 35,
+ /**
+     * Normalizes each channel of the input so that the mean of each channel is 0 and the variance is 1.
+ *
+ * Inputs:
+ *
+ * * input: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W),
+ * where N is the batch size, C is the number of channels, and H and W are the height and the width of
+ * the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the
+ * batch size.
+ * * scale: The input 1-dimensional scale tensor of channel size.
+ * * bias: The input 1-dimensional bias tensor of channel size.
+ *
+ * Parameters:
+ *
+ * * epsilon: The epsilon value to use to avoid division by zero.
+ *
+ * Outputs:
+ *
+ * * output: The output tensor of the same shape as input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_INSTANCE_NORM = 67,
- /** This enumerated value is used when the tensor is used as the kernel_size parameter of the MaxPool operator. */
- OH_NN_MAX_POOL_KERNEL_SIZE = 36,
- /** This enumerated value is used when the tensor is used as the stride parameter of the MaxPool operator. */
- OH_NN_MAX_POOL_STRIDE = 37,
- /** This enumerated value is used when the tensor is used as the pad_mode parameter of the MaxPool operator. */
- OH_NN_MAX_POOL_PAD_MODE = 38,
- /** This enumerated value is used when the tensor is used as the pad parameter of the MaxPool operator. */
- OH_NN_MAX_POOL_PAD = 39,
- /** This enumerated value is used when the tensor is used as the activation_type parameter of the MaxPool operator. */
+ /**
+ * Generate a tensor with given value and shape.
+ *
+ * Inputs:
+ *
+ * * input: 1-dimensional tensor. Indicates the shape of the expected output tensor.
+ * All values must be >= 0.
+ *
+ * Parameters:
+ *
+ * * dataType: The data type of the output tensor.
+ * * value: The value of the output elements.
+ *
+ * Outputs:
+ *
+ * * output: A tensor, has the same shape as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_CONSTANT_OF_SHAPE = 68,
+
+ /**
+     * Broadcasts a tensor to a compatible shape.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Parameters:
+ *
+ * * shape: A 1-dimensional Tensor, the shape of the desired output.
+ *
+ * Outputs:
+ *
+     * * output: The tensor after broadcasting.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_BROADCAST_TO = 69,
+
+ /**
+     * For input1 and input2, calculate the result of input1[i] == input2[i] for each pair of elements,
+ * where i is the index of each element in the input tensor.
+ *
+ * Inputs:
+ *
+     * * input1: can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
+     * * input2: can be a real number or a Boolean value if input1 is a tensor and must be a tensor
+     *     with the data type of real number or OH_NN_BOOL if input1 is not a tensor.
+ *
+ * Outputs:
+ *
+ * * output: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
+ * the quantization output cannot be omitted. However, values of the quantization
+ * parameters do not affect the result.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_EQUAL = 70,
+
+ /**
+ * For input1 and input2, calculate the result of input1[i] > input2[i] for each pair of elements,
+ * where i is the index of each element in the input tensor.
+ *
+ * Inputs:
+ *
+     * * input1: can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
+     * * input2: can be a real number or a Boolean value if input1 is a tensor and must be a tensor
+     *     with the data type of real number or OH_NN_BOOL if input1 is not a tensor.
+ *
+ * Outputs:
+ *
+ * * output: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
+ * the quantization parameters of the output cannot be omitted. However,
+ * values of the quantization parameters do not affect the result.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_GREATER = 71,
+
+ /**
+ * For input1 and input2, calculate the result of input1[i] != input2[i] for each pair of elements,
+ * where i is the index of each element in the input tensor.
+ *
+ * Inputs:
+ *
+     * * input1: can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
+     * * input2: can be a real number or a Boolean value if input1 is a tensor and must be a tensor
+     *     with the data type of real number or OH_NN_BOOL if input1 is not a tensor.
+ *
+ * Outputs:
+ *
+ * * output: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
+ * the quantization parameters of the output cannot be omitted. However,
+ * values of the quantization parameters do not affect the result.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_NOT_EQUAL = 72,
+
+ /**
+ * For input1 and input2, calculate the result of input1[i] >= input2[i] for each pair of elements,
+ * where i is the index of each element in the input tensor.
+ *
+ * Inputs:
+ *
+     * * input1: can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
+     * * input2: can be a real number or a Boolean value if input1 is a tensor and must be a tensor
+     *     with the data type of real number or OH_NN_BOOL if input1 is not a tensor.
+ *
+ * Outputs:
+ *
+ * * output: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
+ * the quantization parameters of the output cannot be omitted. However,
+ * values of the quantization parameters do not affect the result.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_GREATER_EQUAL = 73,
+
+ /**
+ * LeakyRelu takes input data (Tensor) and an argument alpha, and produces one output data (Tensor)
+ * where the function f(x) = alpha * x for x < 0, f(x) = x for x >= 0,
+ * is applied to the data tensor elementwise.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional input tensor.
+ *
+ * Parameters:
+ *
+ * * negativeSlope: Coefficient of leakage.
+ *
+ * Outputs:
+ *
+ * * output: A tensor, with the same data type and shape as the input tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LEAKY_RELU = 74,
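The piecewise definition above, expressed as a reference sketch (illustrative, not the runtime's kernel):

    /* f(x) = negativeSlope * x for x < 0, f(x) = x for x >= 0 */
    static float LeakyReluRef(float x, float negativeSlope)
    {
        return (x < 0.0f) ? negativeSlope * x : x;
    }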
+
+ /**
+ * Computes an one-layer LSTM. This operator is usually supported via some custom implementation.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor, shape is [seqLen, batchSize, inputSize].
+     * * wIh: Weight tensor of input-layer to hidden-layer,
+     *     shape is [numDirections * numLayers, 4 * hiddenSize, inputSize].
+     * * wHh: Weight tensor of hidden-layer to hidden-layer,
+     *     shape is [numDirections * numLayers, 4 * hiddenSize, hiddenSize].
+     * * bias: Bias tensor of input-layer and hidden-layer to hidden-layer,
+     *     shape is [numDirections * numLayers, 8 * hiddenSize].
+ * * hx: Init state of hidden-layer, shape is [numDirections * numLayers, batchSize, hiddenSize].
+ * * cx: Init state of cell, shape is [numDirections * numLayers, batchSize, hiddenSize].
+ *
+ * Parameters:
+ *
+ * * bidirectional: Whether the LSTM operation is bidirectional.
+ * * hasBias: Whether the operation contains bias.
+ * * inputSize: Size of input tensor.
+ * * hiddenSize: Size of hidden state tensor.
+ * * numLayers: Layers of LSTM network.
+ * * numDirections: Number of directions, value is 2 if direction == bidirectional else 1.
+     * * dropout: Dropout probability of each layer except the first layer.
+     * * zoneoutCell: Probability that the cell state retains the previous state. Default: 0.
+     * * zoneoutHidden: Probability that the hidden state retains the previous state. Default: 0.
+ * * projSize: If projSize > 0, will use LSTM with projections of corresponding size. Default: 0.
+ *
+ * Outputs:
+ *
+ * * output: A tensor that concats all the intermediate output tensor of the hidden,
+ * shape is [seqLen, batchSize, numDirections * realHiddenSize].
+ * * hy: The last output tensor of the hidden-layer,
+ * shape is [numDirections * numLayers, batchSize, realHiddenSize].
+ * * cy: The last output tensor of the cell,
+ * shape is [numDirections * numLayers, batchSize, hiddenSize].
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LSTM = 75,
+
+ /**
+ * Returns a tensor of the same type and shape as input tensor with its value clipped to min and max.
+ * Any values less than min are set to min. Any values greater than max are set to max.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Parameters:
+ *
+     * * max: Maximum value, above which an element is replaced by max. It must be a scalar (tensor of empty shape).
+     * * min: Minimum value, under which an element is replaced by min. It must be a scalar (tensor of empty shape).
+ *
+ * Outputs:
+ *
+     * * output: n-dimensional tensor, with the same data type and shape as the input tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_CLIP = 76,
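A reference sketch of the clipping rule above (illustrative only):

    /* Values below minVal become minVal; values above maxVal become maxVal. */
    static float ClipRef(float x, float minVal, float maxVal)
    {
        if (x < minVal) {
            return minVal;
        }
        return (x > maxVal) ? maxVal : x;
    }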
+
+ /**
+     * Determine whether all elements in a given tensor are non-zero. It returns a boolean tensor
+ * where each element is 'True' if corresponding element in the input tensor is non-zero, and 'False' otherwise.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor of shape (N,*),
+ * where * indicates any number of additional dimensions.
+     * * axis: scalar or tensor, indicating the dimension to be computed.
+ *
+ * Parameters:
+ *
+ * * keepDims: Whether to keep dimension info.
+ *
+ * Outputs:
+ *
+ * * output: 1-dimension or n-dimension tensor with boolean data type.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_ALL = 77,
+
+ /**
+     * Asserts that the given condition is true.
+     * If condition evaluates to false, print the list of tensors in data.
+     * Summarize determines how many entries of the tensors to print.
+ *
+ * Inputs:
+ *
+     * * condition: The condition to evaluate.
+ * * data: The tensors to print out when condition is false.
+ *
+ * Parameters:
+ *
+     * * summarize: The number of entries to print for each tensor.
+ *
+ * Outputs:
+ *
+ * * output: Result value judged by condition. If the condition is not true, an Error is returned.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_ASSERT = 78,
+
+ /**
+ * Calculates the cosine of the given input tensor, element-wise.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor. The cosine of the input tensor computed element-wise.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_COS = 79,
+
+ /**
+     * Calculates the natural logarithm of the input.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor. The value must be greater than 0.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor with the same shape as the input tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LOG = 80,
+
+ /**
+ * Calculates the logical value of input1 and input2 element-wise.
+ *
+ * Inputs:
+ *
+ * * input1: Tensor of type boolean or convert to boolean implicitly.
+ * * input2: Tensor of type boolean or convert to boolean implicitly.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor. The calculation result of logical-and
+ * and the numeric type is OH_NN_BOOL.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LOGICAL_AND = 81,
+
+ /**
+ * Calculates the logical value of NOT input element-wise.
+ *
+ * Inputs:
+ *
+ * * input: Tensor of type boolean or convert to boolean implicitly.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor. The calculation result of logical-not
+ * and the numeric type is OH_NN_BOOL.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LOGICAL_NOT = 82,
+
+ /**
+ * Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
+ * Inputs of input1 and input2 comply with the implicit type conversion rules to make the data types consistent.
+ * The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
+ * both dtypes cannot be bool, and the shapes of them could be broadcast.
+ * When the inputs are one tensor and one scalar, the scalar could only be a constant.
+ *
+ * Inputs:
+ *
+     * * input1: The dividend, which is a scalar or tensor of a numeric or OH_NN_BOOL type,
+     *     or an n-dimensional tensor of a numeric type.
+     * * input2: The divisor. When the first input is an n-dimensional tensor, the second input can be
+     *     a numeric or OH_NN_BOOL scalar, or an n-dimensional tensor of a numeric type; when the first
+     *     input is a numeric or OH_NN_BOOL scalar, the second input must be an n-dimensional tensor
+     *     of a numeric type.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor. The shape is the same as the input after broadcasting,
+ * and the data type is the data type with the highest accuracy of the two inputs.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_MOD = 83,
+
+ /**
+ * Calculate the opposite value of the input tensor element-wise.
+ *
+ * Inputs:
+ *
+     * * input: n-dimensional tensor with numeric data type.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor with the same shape and data type as the input tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_NEG = 84,
+
+ /**
+ * Calculate reciprocal of a tensor element-wise.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor with the same shape and data type as the input tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_RECIPROCAL = 85,
+
+ /**
+ * Calculate sine of the input element-wise.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+     * * output: n-dimensional tensor with the same data type and shape as the input tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_SIN = 86,
+
+ /**
+ * Selects elements from input1 or input2 based on condition and returns a tensor.
+ *
+ * Inputs:
+ *
+ * * condition: n-dimensional tensor or scalar. Judging conditions. If the OH_NN_BOOL element
+ * is True, then the element corresponding to the position of input1 is selected, and if the OH_NN_BOOL
+ * element is False, the element corresponding to the position of input2 is selected.
+ * * input1: n-dimensional tensor. First tensor to be chosen.
+ * * input2: n-dimensional tensor. Second tensor to be chosen.
+ *
+ * Outputs:
+ *
+ * * output: n-dimensional tensor with the same shape and data type as the input1 and input2.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_WHERE = 87,
+
+ /**
+ * Converts a sparse tensor into a dense tensor.
+ *
+ * Inputs:
+ *
+     * * indices: 2-dimensional tensor. Position of an element in a sparse tensor.
+ * Each element value must be non-negative. The shape is (N, 2).
+ * * values: 1-dimensional tensor. The value corresponding to the location of indices. The shape is (N).
+ * * sparseShape: 2-dimensional tensor. The shape of a sparse tensor. The value consists of
+ * two positive integers, indicating that the shape of the sparse tensor is (N, C).
+ *
+ * Outputs:
+ *
+ * * output: A tensor. The data type is the same as values, and the shape is specified by sparseShape.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_SPARSE_TO_DENSE = 88,
+
+ /**
+ * Calculates the logical value of input1 or input2 element-wise.
+ *
+ * Inputs:
+ *
+ * * input1: Tensor of type boolean or convert to boolean implicitly.
+ * * input2: Tensor of type boolean or convert to boolean implicitly.
+ *
+ * Outputs:
+ *
+     * * output: n-dimensional tensor. The calculation result of logical-or
+ * and the numeric type is OH_NN_BOOL.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LOGICAL_OR = 89,
+
+ /**
+     * Returns the element-wise smallest integer not less than the input.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+     * * output: The ceiled tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_CEIL = 90,
+
+ /**
+     * Crops the given tensor according to the axis and offset.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+     * * shape: 1-dimensional tensor, indicating the dimensions of the cropped window.
+ *
+ * Parameters:
+ *
+ * * axis: Cropped dimension.
+ * * offset: Cropped offset per dimension.
+ *
+ * Outputs:
+ *
+ * * output: Cropped output tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_CROP = 91,
+
+ /**
+ * The output of the object detection model is post-processed, including decoding the bounding box,
+ * class probability and score of the model output, and then performing non-maximum suppression (NMS)
+ * to remove the overlapping bounding box, and finally outputting the detection result.
+ *
+ * Inputs:
+ *
+ * * bbox: Boxes to be predicted.
+     * * scores: Scores of all boxes.
+ * * anchors: Information of boxes, includes box, variance and coordinates.
+ *
+ * Parameters:
+ * * inputSize: The size of the input tensor.
+ * * scale: The scaling factor used to convert the output from
+ * the normalized form to the original image coordinates.
+ * * nmsIoUThreshold: The threshold of overlapping region during NMS.
+     * * nmsScoreThreshold: The score threshold used to select the target bbox during NMS.
+ * * maxDetections: Maximum of bboxes per image.
+ * * detectionsPerClass: Maximum of bboxes per class.
+ * * maxClassesPerDetection: Maximum of reserved classes per bboxes.
+ * * numClasses: Number of target classes to be detected.
+     * * useRegularNms: Whether to use NMS based on the IoU threshold.
+     * * outQuantized: Whether the output needs to be quantized.
+ *
+ * Outputs:
+ *
+     * * bboxes: The coordinates of target detected bboxes.
+ * * classes: The target class index of target detected bboxes.
+ * * confidences: The score of target detected bboxes.
+ * * numDetections: The number of target detected bboxes.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_DETECTION_POST_PROCESS = 92,
+
+ /**
+ * Returns element-wise largest integer not greater than x.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+     * * output: The floored tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_FLOOR = 93,
+
+ /**
+     * Calculates the L2 normalization of the input along the given axis.
+ *
+ * Inputs:
+ *
+ * * input: Input to compute the L2-normalization.
+ *
+ * Parameters:
+ *
+ * * axis: The axis on which to apply normalization, -1 means last axis, default: 0.
+     * * epsilon: Value added for numerical stability. Default: 1e-6.
+ * * activationType: Activation function type.
+ *
+ * Outputs:
+ *
+     * * output: Result tensor with the same type and shape as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_L2_NORMALIZE = 94,
+
+ /**
+ * Computes the log-softmax function to n-dimensional input tensor.
+     * The input is transformed by the Softmax function and then by the log function to lie in the range [-inf, 0).
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Parameters:
+ *
+ * * axis: The axis to apply LogSoftmax operation, -1 means the last dimension.
+ *
+ * Outputs:
+ *
+ * * output: Tensor output. Has the same data type and shape as input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LOG_SOFTMAX = 95,
+
+ /**
+ * Normalize over local input regions.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Parameters:
+ *
+ * * depthRadius: Half-width of the 1-dimension normalization window.
+ * * bias: Offset.
+ * * alpha: Scale factor.
+ * * beta: Exponent.
+ * * normRegion: Specifies normalization region. Options: "ACROSS_CHNNEL".
+ *
+ * Outputs:
+ *
+ * * output: Result output tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_LRN = 96,
+
+ /**
+ * Calculates the minimum of input1 and input2 element-wise. The inputs of input1 and
+     * input2 comply with the implicit type conversion rules to make the data types consistent.
+ *
+ * The input must be two tensors or one tensor and one scalar. When the input is two tensors, the data types
+ * cannot be Boolean at the same time, and their shapes can be broadcast to the same size. When the inputs are
+ * one tensor and one scalar, the scalar must be a constant.
+ *
+ * Inputs:
+ *
+ * * input1: n-dimensional tensor, whose data type can be number or Boolean.
+ * * input2: n-dimensional tensor, whose data type can be number or Boolean.
+ *
+ * Outputs:
+ *
+ * * output: Minimum value of the elements of the two tensors.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_MINIMUM = 97,
+
+ /**
+ * Calculate the rank of a tensor.
+ * The rank of a tensor is the number of indices required to uniquely select each element of the tensor.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+ * * output: Result tensor. 0-D int32 Tensor representing the rank of input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_RANK = 98,
+
+ /**
+ * Calculates the maximum value for input tensor along the specified dimension. If keepDims is set to
+ * false, the number of dimensions is reduced for the input; if keepDims is set to true,
+ * the number of dimensions is retained.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional input tensor, where n is less than 8.
+ * * axis: dimension used to calculate the maximum value. The value is a 1D tensor.
+ * The value range of each element in axis is [–n, n).
+ *
+ * Parameters:
+ *
+ * * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
+ * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed
+ * until the last axis.
+     * * coeff: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
+ *
+ * Outputs:
+ *
+ * * output: m-dimensional output tensor whose data type is the same as that of the input.
+     *     If keepDims is false, m<n. If keepDims is true, m==n.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_REDUCE_MAX = 99,
+
+ /**
+ * Calculates the minimum value for input tensor along the specified dimension. If keepDims is set to
+ * false, the number of dimensions is reduced for the input; if keepDims is set to true,
+ * the number of dimensions is retained.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional input tensor, where n is less than 8.
+ * * axis: dimension used to calculate the minimum value. The value is a 1D tensor.
+ * The value range of each element in axis is [–n, n).
+ *
+ * Parameters:
+ *
+ * * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
+ * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed
+ * until the last axis.
+     * * coeff: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
+ *
+ * Outputs:
+ *
+ * * output: m-dimensional output tensor whose data type is the same as that of the input.
+     *     If keepDims is false, m<n. If keepDims is true, m==n.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_REDUCE_MIN = 100,
+
+ /**
+ * Calculates the numerical sum value for input tensor along the specified dimension. If keepDims is set to
+ * false, the number of dimensions is reduced for the input; if keepDims is set to true,
+ * the number of dimensions is retained.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional input tensor, where n is less than 8.
+ * * axis: dimension used to calculate the sum value. The value is a 1D tensor.
+ * The value range of each element in axis is [–n, n).
+ *
+ * Parameters:
+ *
+ * * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
+ * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed
+ * until the last axis.
+     * * coeff: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
+ *
+ * Outputs:
+ *
+ * * output: m-dimensional output tensor whose data type is the same as that of the input.
+     *     If keepDims is false, m<n. If keepDims is true, m==n.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_REDUCE_SUM = 101,
+
+ /**
+     * Rounds each element of a tensor to the nearest integer, rounding half to even.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+ * * output: Result tensor with the same shape as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_ROUND = 102,
+
+ /**
+ * Scatters a tensor into a new tensor depending on the specified indices.
+ *
+ * Inputs:
+ *
+ * * indices: The index of scattering in the new tensor with int32 or int64 data type.
+ * The rank of indices must be at least 2 and indicesShape[-1] <= len(shape).
+ * * updates: The source tensor to be scattered. It has shape indicesShape[:-1]+shape[indicesShape[-1]:].
+ * * shape: The shape of the output tensor, has the same data type as indices.
+ *
+ * Outputs:
+ *
+     * * output: Result tensor with the same type as updates and the same shape as shape.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_SCATTER_ND = 103,
+
+ /**
+ * Rearrange blocks of spatial data into depth.
+     * The output tensor's height dimension is height / blocksize;
+     * The output tensor's width dimension is width / blocksize;
+     * The depth of output tensor is blocksize * blocksize * inputDepth;
+     * The input tensor's height and width must be divisible by blocksize.
+ *
+ * Inputs:
+ *
+ * * input: 4-dimensional tensor.
+ *
+ * Parameters:
+ *
+ * * blocksize: The block size used to divide spatial data. It must be >= 2.
+ *
+ * Outputs:
+ *
+ * * output: Result tensor with the same dataType as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_SPACE_TO_DEPTH = 104,
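A worked NHWC example of the shape rule above (illustrative, not from the header):

    /* input  {1, 4, 6, 3}, blocksize = 2
     * output {1, 4 / 2, 6 / 2, 3 * 2 * 2} = {1, 2, 3, 12};
     * height and width must both be divisible by blocksize. */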
+
+ /**
+     * Swish activation function.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+ * * output: Output tensor.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_SWISH = 105,
+
+ /**
+ * Calculates the L2 norm of the input tensor along the specified axis,
+ * replacing other elements of the dimension with the L2 norm value of the specified dimension to
+ * remove the dimension, or to reduce the dimension size to 1. Control whether the dimensions of the
+ * output and input are the same by specifying the keepDims parameter.
+ *
+ * Inputs:
+ *
+ * * input: input tensor.
+ * * axis: Dimensions to perform L2-Norm calculations.
+ *
+ * Parameters:
+ *
+ * * keepDims: indicates whether to retain the dimension. The value is a Boolean value.
+ * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed
+ * until the last axis.
+     * * coeff: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
+ *
+ * Outputs:
+ *
+ * * output: Result tensor with the same dataType as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_REDUCE_L2 = 106,
+
+ /**
+     * HardSigmoid activation function. Calculates the output element-wise.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ *
+ * Outputs:
+ *
+ * * output: Result tensor with the same shape and dataType as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_HARD_SIGMOID = 107,
+
+ /**
+     * Gathers elements from the input tensor at the locations specified by the indices tensor.
+ *
+ * Inputs:
+ *
+ * * input: n-dimensional tensor.
+ * * indices: index tensor.
+ *
+ * Outputs:
+ *
+ * * output: Result tensor with the same shape as the input.
+ *
+ * @since 12
+ */
+ OH_NN_OPS_GATHER_ND = 108,
+} OH_NN_OperationType;
+
+/**
+ * @brief Enumerates the tensor data types.
+ *
+ * Tensors are usually used to set the input, output, and operator parameters of a model. When a tensor is used
+ * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}.
+ * When the tensor is used as an operator parameter, select an enumerated value other than {@link OH_NN_TENSOR} as the
+ * tensor type. Assume that the pad parameter of the {@link OH_NN_OPS_CONV2D} operator is being set.
+ * You need to set the type attribute of the {@link OH_NN_Tensor} instance to {@link OH_NN_CONV2D_PAD}.
+ * The settings of other operator parameters are similar. The enumerated values are named
+ * in the format OH_NN_{Operator name}_{Attribute name}.
+ *
+ * @since 9
+ * @version 2.0
+ */
+typedef enum {
+ /** This enumerated value is used when the tensor is used as the input or output of a model (or operator). */
+ OH_NN_TENSOR = 0,
+
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the Add operator. */
+ OH_NN_ADD_ACTIVATIONTYPE = 1,
+
+ /** This enumerated value is used when the tensor is used as the kernelSize parameter
+ * of the AvgPool operator. */
+ OH_NN_AVG_POOL_KERNEL_SIZE = 2,
+ /** This enumerated value is used when the tensor is used as the stride parameter
+ * of the AvgPool operator. */
+ OH_NN_AVG_POOL_STRIDE = 3,
+ /** This enumerated value is used when the tensor is used as the padMode parameter
+ * of the AvgPool operator. */
+ OH_NN_AVG_POOL_PAD_MODE = 4,
+ /** This enumerated value is used when the tensor is used as the pad parameter of the AvgPool operator. */
+ OH_NN_AVG_POOL_PAD = 5,
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the AvgPool operator. */
+ OH_NN_AVG_POOL_ACTIVATION_TYPE = 6,
+
+    /** This enumerated value is used when the tensor is used as the epsilon parameter
+ * of the BatchNorm operator. */
+ OH_NN_BATCH_NORM_EPSILON = 7,
+
+ /** This enumerated value is used when the tensor is used as the blockSize parameter
+ * of the BatchToSpaceND operator. */
+ OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE = 8,
+ /** This enumerated value is used when the tensor is used as the crops parameter
+ * of the BatchToSpaceND operator. */
+ OH_NN_BATCH_TO_SPACE_ND_CROPS = 9,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter of the Concat operator. */
+ OH_NN_CONCAT_AXIS = 10,
+
+ /** This enumerated value is used when the tensor is used as the strides parameter
+ * of the Conv2D operator. */
+ OH_NN_CONV2D_STRIDES = 11,
+ /** This enumerated value is used when the tensor is used as the pad parameter of the Conv2D operator. */
+ OH_NN_CONV2D_PAD = 12,
+ /** This enumerated value is used when the tensor is used as the dilation parameter
+ * of the Conv2D operator. */
+ OH_NN_CONV2D_DILATION = 13,
+ /** This enumerated value is used when the tensor is used as the padMode parameter
+ * of the Conv2D operator. */
+ OH_NN_CONV2D_PAD_MODE = 14,
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the Conv2D operator. */
+ OH_NN_CONV2D_ACTIVATION_TYPE = 15,
+ /** This enumerated value is used when the tensor is used as the group parameter of the Conv2D operator. */
+ OH_NN_CONV2D_GROUP = 16,
+
+ /** This enumerated value is used when the tensor is used as the strides parameter
+ * of the Conv2DTranspose operator. */
+ OH_NN_CONV2D_TRANSPOSE_STRIDES = 17,
+ /** This enumerated value is used when the tensor is used as the pad parameter
+ * of the Conv2DTranspose operator. */
+ OH_NN_CONV2D_TRANSPOSE_PAD = 18,
+ /** This enumerated value is used when the tensor is used as the dilation parameter
+ * of the Conv2DTranspose operator. */
+ OH_NN_CONV2D_TRANSPOSE_DILATION = 19,
+ /** This enumerated value is used when the tensor is used as the outputPaddings parameter
+ * of the Conv2DTranspose operator. */
+ OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS = 20,
+ /** This enumerated value is used when the tensor is used as the padMode parameter
+ * of the Conv2DTranspose operator. */
+ OH_NN_CONV2D_TRANSPOSE_PAD_MODE = 21,
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the Conv2DTranspose operator. */
+ OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE = 22,
+ /** This enumerated value is used when the tensor is used as the group parameter
+ * of the Conv2DTranspose operator. */
+ OH_NN_CONV2D_TRANSPOSE_GROUP = 23,
+
+ /** This enumerated value is used when the tensor is used as the strides parameter
+ * of the DepthwiseConv2dNative operator. */
+ OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES = 24,
+ /** This enumerated value is used when the tensor is used as the pad parameter
+ * of the DepthwiseConv2dNative operator. */
+ OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD = 25,
+ /** This enumerated value is used when the tensor is used as the dilation parameter
+ * of the DepthwiseConv2dNative operator. */
+ OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION = 26,
+ /** This enumerated value is used when the tensor is used as the padMode parameter
+ * of the DepthwiseConv2dNative operator. */
+ OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE = 27,
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the DepthwiseConv2dNative operator. */
+ OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE = 28,
+
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the Div operator. */
+ OH_NN_DIV_ACTIVATIONTYPE = 29,
+
+ /** This enumerated value is used when the tensor is used as the mode parameter of the Eltwise operator. */
+ OH_NN_ELTWISE_MODE = 30,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter
+ * of the FullConnection operator. */
+ OH_NN_FULL_CONNECTION_AXIS = 31,
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the FullConnection operator. */
+ OH_NN_FULL_CONNECTION_ACTIVATIONTYPE = 32,
+
+ /** This enumerated value is used when the tensor is used as the transposeA parameter
+ * of the Matmul operator. */
+ OH_NN_MATMUL_TRANSPOSE_A = 33,
+ /** This enumerated value is used when the tensor is used as the transposeB parameter
+ * of the Matmul operator. */
+ OH_NN_MATMUL_TRANSPOSE_B = 34,
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the Matmul operator. */
+ OH_NN_MATMUL_ACTIVATION_TYPE = 35,
+
+ /** This enumerated value is used when the tensor is used as the kernelSize parameter
+ * of the MaxPool operator. */
+ OH_NN_MAX_POOL_KERNEL_SIZE = 36,
+ /** This enumerated value is used when the tensor is used as the stride parameter
+ * of the MaxPool operator. */
+ OH_NN_MAX_POOL_STRIDE = 37,
+ /** This enumerated value is used when the tensor is used as the padMode parameter
+ * of the MaxPool operator. */
+ OH_NN_MAX_POOL_PAD_MODE = 38,
+ /** This enumerated value is used when the tensor is used as the pad parameter of the MaxPool operator. */
+ OH_NN_MAX_POOL_PAD = 39,
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the MaxPool operator. */
OH_NN_MAX_POOL_ACTIVATION_TYPE = 40,
- /** This enumerated value is used when the tensor is used as the activationType parameter of the Mul operator. */
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the Mul operator. */
OH_NN_MUL_ACTIVATION_TYPE = 41,
/** This enumerated value is used when the tensor is used as the axis parameter of the OneHot operator. */
OH_NN_ONE_HOT_AXIS = 42,
- /** This enumerated value is used when the tensor is used as the constant_value parameter of the Pad operator. */
+ /** This enumerated value is used when the tensor is used as the constantValue parameter
+ * of the Pad operator. */
OH_NN_PAD_CONSTANT_VALUE = 43,
- /** This enumerated value is used when the tensor is used as the activationType parameter of the Scale operator. */
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the Scale operator. */
OH_NN_SCALE_ACTIVATIONTYPE = 44,
/** This enumerated value is used when the tensor is used as the axis parameter of the Scale operator. */
OH_NN_SCALE_AXIS = 45,
@@ -1698,16 +2944,20 @@ typedef enum {
/** This enumerated value is used when the tensor is used as the axis parameter of the Softmax operator. */
OH_NN_SOFTMAX_AXIS = 46,
- /** This enumerated value is used when the tensor is used as the BlockShape parameter of the SpaceToBatchND operator. */
+ /** This enumerated value is used when the tensor is used as the BlockShape parameter
+ * of the SpaceToBatchND operator. */
OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE = 47,
- /** This enumerated value is used when the tensor is used as the Paddings parameter of the SpaceToBatchND operator. */
+ /** This enumerated value is used when the tensor is used as the Paddings parameter
+ * of the SpaceToBatchND operator. */
OH_NN_SPACE_TO_BATCH_ND_PADDINGS = 48,
/** This enumerated value is used when the tensor is used as the Axis parameter of the Split operator. */
OH_NN_SPLIT_AXIS = 49,
- /** This enumerated value is used when the tensor is used as the OutputNum parameter of the Split operator. */
+ /** This enumerated value is used when the tensor is used as the OutputNum parameter
+ * of the Split operator. */
OH_NN_SPLIT_OUTPUT_NUM = 50,
- /** This enumerated value is used when the tensor is used as the SizeSplits parameter of the Split operator. */
+ /** This enumerated value is used when the tensor is used as the SizeSplits parameter
+ * of the Split operator. */
OH_NN_SPLIT_SIZE_SPLITS = 51,
/** This enumerated value is used when the tensor is used as the Axis parameter of the Squeeze operator. */
@@ -1716,64 +2966,527 @@ typedef enum {
/** This enumerated value is used when the tensor is used as the Axis parameter of the Stack operator. */
OH_NN_STACK_AXIS = 53,
- /** This enumerated value is used when the tensor is used as the BeginMask parameter of the StridedSlice operator. */
+ /** This enumerated value is used when the tensor is used as the BeginMask parameter
+ * of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_BEGIN_MASK = 54,
- /** This enumerated value is used when the tensor is used as the EndMask parameter of the StridedSlice operator. */
+ /** This enumerated value is used when the tensor is used as the EndMask parameter
+ * of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_END_MASK = 55,
- /** This enumerated value is used when the tensor is used as the EllipsisMask parameter of the StridedSlice operator. */
+ /** This enumerated value is used when the tensor is used as the EllipsisMask parameter
+ * of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_ELLIPSIS_MASK = 56,
- /** This enumerated value is used when the tensor is used as the NewAxisMask parameter of the StridedSlice operator. */
+ /** This enumerated value is used when the tensor is used as the NewAxisMask parameter
+ * of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_NEW_AXIS_MASK = 57,
- /** This enumerated value is used when the tensor is used as the ShrinkAxisMask parameter of the StridedSlice operator. */
+ /** This enumerated value is used when the tensor is used as the ShrinkAxisMask parameter
+ * of the StridedSlice operator. */
OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK = 58,
- /** This enumerated value is used when the tensor is used as the ActivationType parameter of the Sub operator. */
+ /** This enumerated value is used when the tensor is used as the ActivationType parameter
+ * of the Sub operator. */
OH_NN_SUB_ACTIVATIONTYPE = 59,
- /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceMean operator. */
+ /** This enumerated value is used when the tensor is used as the keepDims parameter
+ * of the ReduceMean operator. */
OH_NN_REDUCE_MEAN_KEEP_DIMS = 60,
- /** This enumerated value is used when the tensor is used as the new_height parameter of the ResizeBilinear operator. */
+ /** This enumerated value is used when the tensor is used as the newHeight parameter
+ * of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_NEW_HEIGHT = 61,
- /** This enumerated value is used when the tensor is used as the new_width parameter of the ResizeBilinear operator. */
+ /** This enumerated value is used when the tensor is used as the newWidth parameter
+ * of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_NEW_WIDTH = 62,
- /** This enumerated value is used when the tensor is used as the preserve_aspect_ratio parameter of the ResizeBilinear operator. */
+ /** This enumerated value is used when the tensor is used as the preserveAspectRatio parameter
+ * of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO = 63,
- /** This enumerated value is used when the tensor is used as the coordinate_transform_mode parameter of the ResizeBilinear operator. */
+ /** This enumerated value is used when the tensor is used as the coordinateTransformMode parameter
+ * of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE = 64,
- /** This enumerated value is used when the tensor is used as the exclude_outside parameter of the ResizeBilinear operator. */
+ /** This enumerated value is used when the tensor is used as the excludeOutside parameter
+ * of the ResizeBilinear operator. */
OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE = 65,
- /** This enumerated value is used when the tensor is used as the beginNormAxis parameter of the LayerNorm operator. */
+ /** This enumerated value is used when the tensor is used as the beginNormAxis parameter
+ * of the LayerNorm operator. */
OH_NN_LAYER_NORM_BEGIN_NORM_AXIS = 66,
- /** This enumerated value is used when the tensor is used as the epsilon parameter of the LayerNorm operator. */
+ /** This enumerated value is used when the tensor is used as the epsilon parameter
+ * of the LayerNorm operator. */
OH_NN_LAYER_NORM_EPSILON = 67,
- /** This enumerated value is used when the tensor is used as the beginParamsAxis parameter of the LayerNorm operator. */
+ /** This enumerated value is used when the tensor is used as the beginParamsAxis parameter
+ * of the LayerNorm operator. */
OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS = 68,
- /** This enumerated value is used when the tensor is used as the elementwiseAffine parameter of the LayerNorm operator. */
+ /** This enumerated value is used when the tensor is used as the elementwiseAffine parameter
+ * of the LayerNorm operator. */
OH_NN_LAYER_NORM_ELEMENTWISE_AFFINE = 69,
- /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceProd operator. */
+ /** This enumerated value is used when the tensor is used as the keepDims parameter
+ * of the ReduceProd operator. */
OH_NN_REDUCE_PROD_KEEP_DIMS = 70,
- /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceAll operator. */
+ /** This enumerated value is used when the tensor is used as the keepDims parameter
+ * of the ReduceAll operator. */
OH_NN_REDUCE_ALL_KEEP_DIMS = 71,
- /** This enumerated value is used when the tensor is used as the src_t parameter of the QuantDTypeCast operator. */
+ /** This enumerated value is used when the tensor is used as the src_t parameter
+ * of the QuantDTypeCast operator. */
OH_NN_QUANT_DTYPE_CAST_SRC_T = 72,
- /** This enumerated value is used when the tensor is used as the dst_t parameter of the QuantDTypeCast operator. */
+ /** This enumerated value is used when the tensor is used as the dst_t parameter
+ * of the QuantDTypeCast operator. */
OH_NN_QUANT_DTYPE_CAST_DST_T = 73,
- /** This enumerated value is used when the tensor is used as the Sorted parameter of the Topk operator. */
+ /** This enumerated value is used when the tensor is used as the Sorted parameter
+ * of the Topk operator. */
OH_NN_TOP_K_SORTED = 74,
- /** This enumerated value is used when the tensor is used as the axis parameter of the ArgMax operator. */
+ /** This enumerated value is used when the tensor is used as the axis parameter
+ * of the ArgMax operator. */
OH_NN_ARG_MAX_AXIS = 75,
- /** This enumerated value is used when the tensor is used as the keepDims parameter of the ArgMax operator. */
+ /** This enumerated value is used when the tensor is used as the keepDims parameter
+ * of the ArgMax operator. */
OH_NN_ARG_MAX_KEEPDIMS = 76,
- /** This enumerated value is used when the tensor is used as the Axis parameter of the Unsqueeze operator. */
+ /** This enumerated value is used when the tensor is used as the axis parameter
+ * of the Unsqueeze operator. */
OH_NN_UNSQUEEZE_AXIS = 77,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter of the Unstack operator.
+ * @since 12
+ */
+ OH_NN_UNSTACK_AXIS = 78,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter of the Flatten operator.
+ * @since 12
+ */
+ OH_NN_FLATTEN_AXIS = 79,
+
+ /** This enumerated value is used when the tensor is used as the blockSize parameter
+ * of the DepthToSpace operator.
+ * @since 12
+ */
+ OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE = 80,
+ /** This enumerated value is used when the tensor is used as the mode parameter
+ * of the DepthToSpace operator.
+ * @since 12
+ */
+ OH_NN_DEPTH_TO_SPACE_MODE = 81,
+
+ /** This enumerated value is used when the tensor is used as the start parameter of the Range operator.
+ * @since 12
+ */
+ OH_NN_RANGE_START = 82,
+ /** This enumerated value is used when the tensor is used as the limit parameter of the Range operator.
+ * @since 12
+ */
+ OH_NN_RANGE_LIMIT = 83,
+ /** This enumerated value is used when the tensor is used as the delta parameter of the Range operator.
+ * @since 12
+ */
+ OH_NN_RANGE_DELTA = 84,
+
+ /** This enumerated value is used when the tensor is used as the dataType parameter
+ * of the ConstantOfShape operator.
+ * @since 12
+ */
+ OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE = 85,
+ /** This enumerated value is used when the tensor is used as the value parameter
+ * of the ConstantOfShape operator.
+ * @since 12
+ */
+ OH_NN_CONSTANT_OF_SHAPE_VALUE = 86,
+
+ /** This enumerated value is used when the tensor is used as the shape parameter
+ * of the BroadcastTo operator.
+ * @since 12
+ */
+ OH_NN_BROADCAST_TO_SHAPE = 87,
+
+ /** This enumerated value is used when the tensor is used as the epsilon parameter
+ * of the InstanceNorm operator.
+ * @since 12
+ */
+ OH_NN_INSTANCE_NORM_EPSILON = 88,
+
+ /** This enumerated value is used when the tensor is used as the base parameter of the Exp operator.
+ * @since 12
+ */
+ OH_NN_EXP_BASE = 89,
+ /** This enumerated value is used when the tensor is used as the scale parameter of the Exp operator.
+ * @since 12
+ */
+ OH_NN_EXP_SCALE = 90,
+ /** This enumerated value is used when the tensor is used as the shift parameter of the Exp operator.
+ * @since 12
+ */
+ OH_NN_EXP_SHIFT = 91,
+
+ /** This enumerated value is used when the tensor is used as the negativeSlope parameter
+ * of the LeakyRelu operator.
+ * @since 12
+ */
+ OH_NN_LEAKY_RELU_NEGATIVE_SLOPE = 92,
+
+ /** This enumerated value is used when the tensor is used as the bidirectional parameter
+ * of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_BIDIRECTIONAL = 93,
+ /** This enumerated value is used when the tensor is used as the hasBias parameter of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_HAS_BIAS = 94,
+ /** This enumerated value is used when the tensor is used as the inputSize parameter
+ * of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_INPUT_SIZE = 95,
+ /** This enumerated value is used when the tensor is used as the hiddenSize parameter
+ * of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_HIDDEN_SIZE = 96,
+ /** This enumerated value is used when the tensor is used as the numLayers parameter
+ * of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_NUM_LAYERS = 97,
+ /** This enumerated value is used when the tensor is used as the numDirections parameter
+ * of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_NUM_DIRECTIONS = 98,
+ /** This enumerated value is used when the tensor is used as the dropout parameter of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_DROPOUT = 99,
+ /** This enumerated value is used when the tensor is used as the zoneoutCell parameter
+ * of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_ZONEOUT_CELL = 100,
+ /** This enumerated value is used when the tensor is used as the zoneoutHidden parameter
+ * of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_ZONEOUT_HIDDEN = 101,
+ /** This enumerated value is used when the tensor is used as the projSize parameter
+ * of the LSTM operator.
+ * @since 12
+ */
+ OH_NN_LSTM_PROJ_SIZE = 102,
+
+ /** This enumerated value is used when the tensor is used as the max parameter of the Clip operator.
+ * @since 12
+ */
+ OH_NN_CLIP_MAX = 103,
+ /** This enumerated value is used when the tensor is used as the min parameter of the Clip operator.
+ * @since 12
+ */
+ OH_NN_CLIP_MIN = 104,
+
+ /** This enumerated value is used when the tensor is used as the keepDims parameter of the All operator.
+ * @since 12
+ */
+ OH_NN_ALL_KEEP_DIMS = 105,
+
+ /** This enumerated value is used when the tensor is used as the summarize parameter
+ * of the Assert operator.
+ * @since 12
+ */
+ OH_NN_ASSERT_SUMMARIZE = 106,
+
+ /** This enumerated value is used when the tensor is used as the scale parameter of the Pow operator.
+ * @since 12
+ */
+ OH_NN_POW_SCALE = 107,
+ /** This enumerated value is used when the tensor is used as the shift parameter of the Pow operator.
+ * @since 12
+ */
+ OH_NN_POW_SHIFT = 108,
+
+ /** This enumerated value is used when the tensor is used as the roundMode parameter
+ * of the AvgPool operator.
+ * @since 12
+ */
+ OH_NN_AVG_POOL_ROUND_MODE = 109,
+ /** This enumerated value is used when the tensor is used as the global parameter
+ * of the AvgPool operator.
+ * @since 12
+ */
+ OH_NN_AVG_POOL_GLOBAL = 110,
+
+ /** This enumerated value is used when the tensor is used as the hasBias parameter
+ * of the FullConnection operator.
+ * @since 12
+ */
+ OH_NN_FULL_CONNECTION_HAS_BIAS = 111,
+ /** This enumerated value is used when the tensor is used as the useAxis parameter
+ * of the FullConnection operator.
+ * @since 12
+ */
+ OH_NN_FULL_CONNECTION_USE_AXIS = 112,
+
+ /** This enumerated value is used when the tensor is used as the approximate parameter
+ * of the GeLU operator.
+ * @since 12
+ */
+ OH_NN_GELU_APPROXIMATE = 113,
+
+ /** This enumerated value is used when the tensor is used as the roundMode parameter
+ * of the MaxPool operator.
+ * @since 12
+ */
+ OH_NN_MAX_POOL_ROUND_MODE = 114,
+ /** This enumerated value is used when the tensor is used as the global parameter
+ * of the MaxPool operator.
+ * @since 12
+ */
+ OH_NN_MAX_POOL_GLOBAL = 115,
+
+ /** This enumerated value is used when the tensor is used as the paddingMode parameter
+ * of the Pad operator.
+ * @since 12
+ */
+ OH_NN_PAD_PADDING_MODE = 116,
+
+ /** This enumerated value is used when the tensor is used as the reduceToEnd parameter
+ * of the ReduceMean operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_MEAN_REDUCE_TO_END = 117,
+ /** This enumerated value is used when the tensor is used as the coeff parameter
+ * of the ReduceMean operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_MEAN_COEFF = 118,
+
+ /** This enumerated value is used when the tensor is used as the reduceToEnd parameter
+ * of the ReduceProd operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_PROD_REDUCE_TO_END = 119,
+ /** This enumerated value is used when the tensor is used as the coeff parameter
+ * of the ReduceProd operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_PROD_COEFF = 120,
+
+ /** This enumerated value is used when the tensor is used as the reduceToEnd parameter
+ * of the ReduceAll operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_ALL_REDUCE_TO_END = 121,
+ /** This enumerated value is used when the tensor is used as the coeff parameter
+ * of the ReduceAll operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_ALL_COEFF = 122,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter
+ * of the Topk operator.
+ * @since 12
+ */
+ OH_NN_TOP_K_AXIS = 123,
+
+ /** This enumerated value is used when the tensor is used as the topK parameter
+ * of the ArgMax operator.
+ * @since 12
+ */
+ OH_NN_ARG_MAX_TOP_K = 124,
+ /** This enumerated value is used when the tensor is used as the outMaxValue parameter
+ * of the ArgMax operator.
+ * @since 12
+ */
+ OH_NN_ARG_MAX_OUT_MAX_VALUE = 125,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter
+ * of the QuantDTypeCast operator.
+ * @since 12
+ */
+ OH_NN_QUANT_DTYPE_CAST_AXIS = 126,
+
+ /** This enumerated value is used when the tensor is used as the axes parameter of the Slice operator.
+ * @since 12
+ */
+ OH_NN_SLICE_AXES = 127,
+
+ /** This enumerated value is used when the tensor is used as the dims parameter of the Tile operator.
+ * @since 12
+ */
+ OH_NN_TILE_DIMS = 128,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter of the Crop operator.
+ * @since 12
+ */
+ OH_NN_CROP_AXIS = 129,
+ /** This enumerated value is used when the tensor is used as the offset parameter of the Crop operator.
+ * @since 12
+ */
+ OH_NN_CROP_OFFSET = 130,
+
+ /** This enumerated value is used when the tensor is used as the inputSize parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE = 131,
+ /** This enumerated value is used when the tensor is used as the scale parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_SCALE = 132,
+ /** This enumerated value is used when the tensor is used as the nmsIoUThreshold parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD = 133,
+ /** This enumerated value is used when the tensor is used as the nmsScoreThreshold parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD = 134,
+ /** This enumerated value is used when the tensor is used as the maxDetections parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS = 135,
+ /** This enumerated value is used when the tensor is used as the detectionsPerClass parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS = 136,
+ /** This enumerated value is used when the tensor is used as the maxClassesPerDetection parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION = 137,
+ /** This enumerated value is used when the tensor is used as the numClasses parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES = 138,
+ /** This enumerated value is used when the tensor is used as the useRegularNms parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS = 139,
+ /** This enumerated value is used when the tensor is used as the outQuantized parameter
+ * of the DetectionPostProcess operator.
+ * @since 12
+ */
+ OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED = 140,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter
+ * of the L2Normalize operator.
+ * @since 12
+ */
+ OH_NN_L2_NORMALIZE_AXIS = 141,
+ /** This enumerated value is used when the tensor is used as the epsilon parameter
+ * of the L2Normalize operator.
+ * @since 12
+ */
+ OH_NN_L2_NORMALIZE_EPSILON = 142,
+ /** This enumerated value is used when the tensor is used as the activationType parameter
+ * of the L2Normalize operator.
+ * @since 12
+ */
+ OH_NN_L2_NORMALIZE_ACTIVATION_TYPE = 143,
+
+ /** This enumerated value is used when the tensor is used as the axis parameter of the LogSoftmax operator.
+ * @since 12
+ */
+ OH_NN_LOG_SOFTMAX_AXIS = 144,
+
+ /** This enumerated value is used when the tensor is used as the depthRadius parameter
+ * of the LRN operator.
+ * @since 12
+ */
+ OH_NN_LRN_DEPTH_RADIUS = 145,
+ /** This enumerated value is used when the tensor is used as the bias parameter of the LRN operator.
+ * @since 12
+ */
+ OH_NN_LRN_BIAS = 146,
+ /** This enumerated value is used when the tensor is used as the alpha parameter of the LRN operator.
+ * @since 12
+ */
+ OH_NN_LRN_ALPHA = 147,
+ /** This enumerated value is used when the tensor is used as the beta parameter of the LRN operator.
+ * @since 12
+ */
+ OH_NN_LRN_BETA = 148,
+ /** This enumerated value is used when the tensor is used as the normRegion parameter
+ * of the LRN operator.
+ * @since 12
+ */
+ OH_NN_LRN_NORM_REGION = 149,
+
+ /** This enumerated value is used when the tensor is used as the blockSize parameter
+ * of the SpaceToDepth operator.
+ * @since 12
+ */
+ OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE = 150,
+
+ /** This enumerated value is used when the tensor is used as the keepDims parameter
+ * of the ReduceMax operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_MAX_KEEP_DIMS = 151,
+ /** This enumerated value is used when the tensor is used as the reduceToEnd parameter
+ * of the ReduceMax operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_MAX_REDUCE_TO_END = 152,
+ /** This enumerated value is used when the tensor is used as the coeff parameter
+ * of the ReduceMax operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_MAX_COEFF = 153,
+
+ /** This enumerated value is used when the tensor is used as the keepDims parameter
+ * of the ReduceMin operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_MIN_KEEP_DIMS = 154,
+ /** This enumerated value is used when the tensor is used as the reduceToEnd parameter
+ * of the ReduceMin operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_MIN_REDUCE_TO_END = 155,
+ /** This enumerated value is used when the tensor is used as the coeff parameter
+ * of the ReduceMin operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_MIN_COEFF = 156,
+
+ /** This enumerated value is used when the tensor is used as the keepDims parameter
+ * of the ReduceSum operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_SUM_KEEP_DIMS = 157,
+ /** This enumerated value is used when the tensor is used as the reduceToEnd parameter
+ * of the ReduceSum operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_SUM_REDUCE_TO_END = 158,
+ /** This enumerated value is used when the tensor is used as the coeff parameter
+ * of the ReduceSum operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_SUM_COEFF = 159,
+
+ /** This enumerated value is used when the tensor is used as the keepDims parameter
+ * of the ReduceL2 operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_L2_KEEP_DIMS = 160,
+ /** This enumerated value is used when the tensor is used as the reduceToEnd parameter
+ * of the ReduceL2 operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_L2_REDUCE_TO_END = 161,
+ /** This enumerated value is used when the tensor is used as the coeff parameter
+ * of the ReduceL2 operator.
+ * @since 12
+ */
+ OH_NN_REDUCE_L2_COEFF = 162,
} OH_NN_TensorType;
/**
@@ -1792,16 +3505,18 @@ typedef struct OH_NN_UInt32Array {
/**
* @brief Quantization information.
*
- * In quantization scenarios, the 32-bit floating-point data type is quantized into the fixed-point data type according to the following formula:
+ * In quantization scenarios, the 32-bit floating-point data type is quantized into
+ * the fixed-point data type according to the following formula:
\f[
q = clamp(round(\frac{r}{s}+z), q_{min}, q_{max})
\f]
- * s and z are quantization parameters, which are stored by scale and zeroPoint in {@link OH_NN_QuantParam}.
- * r is a floating point number, q is the quantization result, q_min is the lower bound of the quantization result, and
+ * s and z are quantization parameters, which are stored by scale and zeroPoint
+ * in {@link OH_NN_QuantParam}.
+ * r is a floating point number, q is the quantization result, q_min is the lower bound of the quantization result, and
* q_max is an upper bound of a quantization result. The calculation method is as follows:
- *
+ *
\f[
- \text{clamp}(x,min,max) =
+ \text{clamp}(x,min,max) =
\begin{cases}
q_{min} = -(1 << (numBits - 1)) \\
q_{max} = (1 << (numBits - 1)) \\
@@ -1809,24 +3524,25 @@ typedef struct OH_NN_UInt32Array {
\f]
* The clamp function is defined as follows:
\f[
- \text{clamp}(x,min,max) =
+ \text{clamp}(x,min,max) =
\begin{cases}
\text{max} & \text{ if } x > \text{ max } \\
\text{min} & \text{ if } x < \text{ min } \\
x & \text{ otherwise } \\
\end{cases}
\f]
- *
+ *
* @deprecated since 11
* @useinstead {@link NN_QuantParam}
* @since 9
* @version 1.0
*/
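
The quantization formula above can be exercised with a short stand-alone C sketch. The helper below is purely illustrative (it is not part of the runtime API) and follows the documented q = clamp(round(r / s) + z, q_min, q_max) rule, with q_min and q_max derived from numBits as stated above.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: quantize one FP32 value r with scale s, zero point z and
 * numBits, following the q = clamp(round(r / s) + z, q_min, q_max) formula
 * documented for OH_NN_QuantParam above. */
static int32_t QuantizeValue(float r, double s, int32_t z, uint32_t numBits)
{
    const int32_t qMin = -(1 << (numBits - 1));
    const int32_t qMax = (1 << (numBits - 1));
    int32_t q = (int32_t)lround(r / s) + z;
    if (q < qMin) {
        q = qMin;
    }
    if (q > qMax) {
        q = qMax;
    }
    return q;
}

int main(void)
{
    /* Example: scale 0.5, zero point 10, 8-bit quantization of r = 3.2. */
    printf("%d\n", QuantizeValue(3.2f, 0.5, 10, 8)); /* round(6.4) + 10 = 16 */
    return 0;
}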
typedef struct OH_NN_QuantParam {
- /** Specifies the length of the numBits, scale, and zeroPoint arrays. In the per-layer quantization scenario,
- * quantCount is usually set to 1. That is, all channels of a tensor share a set of quantization parameters.
- * In the per-channel quantization scenario, quantCount is usually the same as the number of tensor channels,
- * and each channel uses its own quantization parameters.
+ /** Specifies the length of the numBits, scale, and zeroPoint arrays. In the per-layer quantization scenario,
+ * quantCount is usually set to 1.
+ * That is, all channels of a tensor share a set of quantization parameters.
+ * In the per-channel quantization scenario, quantCount is usually the same as the number of tensor
+ * channels, and each channel uses its own quantization parameters.
*/
uint32_t quantCount;
/** Number of quantization bits */
@@ -1857,9 +3573,10 @@ typedef struct OH_NN_Tensor {
const int32_t *dimensions;
/** Quantization information of the specified tensor. The data type must be {@link OH_NN_QuantParam}. */
const OH_NN_QuantParam *quantParam;
- /** Specifies the tensor type. The value of type is related to the tensor usage.
+ /** Specifies the tensor type. The value of type is related to the tensor usage.
* When the tensor is used as the input or output of the model, set type to {@link OH_NN_TENSOR}.
- * When a tensor is used as an operator parameter, select any enumerated value except {@link OH_NN_TENSOR} from {@link OH_NN_TensorType}.
+ * When a tensor is used as an operator parameter, select any enumerated value except {@link OH_NN_TENSOR}
+ * from {@link OH_NN_TensorType}.
*/
OH_NN_TensorType type;
} OH_NN_Tensor;
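
Tying the type field to the OH_NN_TensorType convention described earlier, the following minimal sketch describes a hypothetical Conv2D pad parameter tensor. The dataType and dimensionCount field names and the choice of OH_NN_INT64 for the pad values are assumptions made for illustration; only dimensions, quantParam, and type appear in the excerpt above.

#include <stddef.h>
#include "neural_network_runtime/neural_network_runtime_type.h"

/* Hypothetical operator-parameter tensor: tagging it OH_NN_CONV2D_PAD (rather
 * than OH_NN_TENSOR) tells the runtime it carries the Conv2D pad parameter
 * instead of model input/output data. */
static const int32_t g_padShape[1] = {4}; /* top, bottom, left, right */

static const OH_NN_Tensor g_conv2dPadParam = {
    .dataType = OH_NN_INT64,   /* assumed element type of the pad values */
    .dimensionCount = 1,       /* assumed field name for the tensor rank */
    .dimensions = g_padShape,
    .quantParam = NULL,        /* parameter tensors are typically not quantized */
    .type = OH_NN_CONV2D_PAD,  /* marks this tensor as the Conv2D pad parameter */
};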