From b9cd14aea416f5b8280a647f515971eee96bebd6 Mon Sep 17 00:00:00 2001 From: w30052974 Date: Tue, 2 Jan 2024 19:31:44 +0800 Subject: [PATCH 1/5] change 1.0 Signed-off-by: w30052974 --- .../neural_network_runtime.h | 180 +++-- .../neural_network_runtime_type.h | 706 +++++++++++------- 2 files changed, 533 insertions(+), 353 deletions(-) diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h index ef9552d..2d7cdf2 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h @@ -26,8 +26,8 @@ /** * @file neural_network_runtime.h * - * @brief Defines the Neural Network Runtime APIs. The AI inference framework uses the Native APIs provided by Neural Network Runtime - * to construct models. + * @brief Defines the Neural Network Runtime APIs. The AI inference framework uses the Native APIs provided + * by Neural Network Runtime to construct models. * * Note: Currently, the APIs of Neural Network Runtime do not support multi-thread calling. \n * @@ -52,9 +52,10 @@ extern "C" { /** * @brief Creates a {@link NN_QuantParam} instance. * - * After the {@link NN_QuantParam} instance is created, call {@link OH_NNQuantParam_SetScales}, {@link OH_NNQuantParam_SetZeroPoints}, - * {@link OH_NNQuantParam_SetNumBits} to set its attributes, and then call {@link OH_NNModel_SetTensorQuantParams} to set it - * to a tensor. After that you should destroy it by calling {@link OH_NNQuantParam_Destroy} to avoid memory leak. \n + * After the {@link NN_QuantParam} instance is created, call {@link OH_NNQuantParam_SetScales}, + * {@link OH_NNQuantParam_SetZeroPoints}, {@link OH_NNQuantParam_SetNumBits} to set its attributes, + * and then call {@link OH_NNModel_SetTensorQuantParams} to set it to a tensor. + * After that you should destroy it by calling {@link OH_NNQuantParam_Destroy} to avoid memory leak. \n * * @return Pointer to a {@link NN_QuantParam} instance, or NULL if it fails to create. * @since 11 @@ -94,7 +95,8 @@ OH_NN_ReturnCode OH_NNQuantParam_SetScales(NN_QuantParam *quantParams, const dou * @since 11 * @version 1.0 */ -OH_NN_ReturnCode OH_NNQuantParam_SetZeroPoints(NN_QuantParam *quantParams, const int32_t *zeroPoints, size_t quantCount); +OH_NN_ReturnCode OH_NNQuantParam_SetZeroPoints(NN_QuantParam *quantParams, + const int32_t *zeroPoints, size_t quantCount); /** * @brief Sets the number bits of the {@link NN_QuantParam} instance. @@ -116,28 +118,33 @@ OH_NN_ReturnCode OH_NNQuantParam_SetNumBits(NN_QuantParam *quantParams, const ui /** * @brief Releases a {@link NN_QuantParam} instance. * - * The {@link NN_QuantParam} instance needs to be released to avoid memory leak after it is set to a {@link NN_TensorDesc}. \n + * The {@link NN_QuantParam} instance needs to be released to avoid memory leak + * after it is set to a {@link NN_TensorDesc}. \n * - * If quantParams or *quantParams is a null pointer, this method only prints warning logs and does not + * If quantParams or *quantParams is a null pointer, this method only prints warning logs and does not * execute the release. \n * * @param quantParams Double pointer to the {@link NN_QuantParam} instance. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}. + * @return Execution result of the function. 
If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}. * @since 11 * @version 1.0 */ OH_NN_ReturnCode OH_NNQuantParam_Destroy(NN_QuantParam **quantParams); /** - * @brief Creates a model instance of the {@link OH_NNModel} type and uses other APIs provided by OH_NNModel to construct the model instance. + * @brief Creates a model instance of the {@link OH_NNModel} type + * and uses other APIs provided by OH_NNModel to construct the model instance. * - * Before composition, call {@link OH_NNModel_Construct} to create a model instance. Based on the model topology, - * call the {@link OH_NNModel_AddTensorToModel}, {@link OH_NNModel_AddOperation}, and {@link OH_NNModel_SetTensorData} methods - * to fill in the data and operator nodes of the model, and then call {@link OH_NNModel_SpecifyInputsAndOutputs} to specify the inputs and outputs of the model. + * Before composition, call {@link OH_NNModel_Construct} to create a model instance. Based on the model topology, + * call the {@link OH_NNModel_AddTensorToModel}, {@link OH_NNModel_AddOperation}, + * and {@link OH_NNModel_SetTensorData} methods to fill in the data and operator nodes of the model, + * and then call {@link OH_NNModel_SpecifyInputsAndOutputs} to specify the inputs and outputs of the model. * After the model topology is constructed, call {@link OH_NNModel_Finish} to build the model. \n * - * After a model instance is no longer used, you need to destroy it by calling {@link OH_NNModel_Destroy} to avoid memory leak. \n + * After a model instance is no longer used, + * you need to destroy it by calling {@link OH_NNModel_Destroy} to avoid memory leak. \n * * @return Pointer to a {@link OH_NNModel} instance, or NULL if it fails to create. * @since 9 @@ -148,20 +155,26 @@ OH_NNModel *OH_NNModel_Construct(void); /** * @brief Adds a tensor to the model instance. * - * The data node and operator parameters in the Neural Network Runtime model are composed of tensors of the model. - * This method is used to add tensors to a model instance based on the tensorDesc parameter with type of {@link NN_TensorDesc}. - * {@link NN_TensorDesc} contains some attributes such as shape, format, data type and provides corresponding APIs to access them. - * The order of adding tensors is specified by the indices recorded in the model. The {@link OH_NNModel_SetTensorData}, {@link OH_NNModel_AddOperation}, + * The data node and operator parameters in the Neural Network Runtime model are composed of tensors of the model. + * This method is used to add tensors to a model instance based on the tensorDesc parameter + * with type of {@link NN_TensorDesc}. {@link NN_TensorDesc} contains some attributes such as shape, + * format, data type and provides corresponding APIs to access them. + * The order of adding tensors is specified by the indices recorded in the model. + * The {@link OH_NNModel_SetTensorData}, {@link OH_NNModel_AddOperation}, * and {@link OH_NNModel_SpecifyInputsAndOutputs} methods specify tensors based on the indices. \n * - * Neural Network Runtime supports inputs and outputs of the dynamic shape. When adding a data node with a dynamic shape, - * you need to set the dimensions that support dynamic changes to -1. - * For example, if the shape of a four-dimensional tensor is set to [1, -1, 2, 2], the second dimension supports dynamic changes. \n + * Neural Network Runtime supports inputs and outputs of the dynamic shape. 
+ * When adding a data node with a dynamic shape, + * you need to set the dimensions that support dynamic changes to -1. + * For example, if the shape of a four-dimensional tensor is set to [1, -1, 2, 2], + * the second dimension supports dynamic changes. \n * * @param model Pointer to the {@link OH_NNModel} instance. - * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance. The tensor descriptor specifies the attributes of the tensor added to the model instance. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}. + * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance. + * The tensor descriptor specifies the attributes of the tensor added to the model instance. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}. * @since 11 * @version 1.0 */ @@ -170,8 +183,8 @@ OH_NN_ReturnCode OH_NNModel_AddTensorToModel(OH_NNModel *model, const NN_TensorD /** * @brief Sets the tensor value. * - * For tensors with constant values (such as model weights), you need to use this method to set their data. - * The index of a tensor is determined by the order in which the tensor is added to the model. + * For tensors with constant values (such as model weights), you need to use this method to set their data. + * The index of a tensor is determined by the order in which the tensor is added to the model. * For details about how to add a tensor, see {@link OH_NNModel_AddTensorToModel}. \n * * @param model Pointer to the {@link OH_NNModel} instance. @@ -179,7 +192,8 @@ OH_NN_ReturnCode OH_NNModel_AddTensorToModel(OH_NNModel *model, const NN_TensorD * @param dataBuffer Pointer to real data. * @param length Length of the data buffer. * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}. + * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}. * @since 9 * @version 1.0 */ @@ -191,8 +205,9 @@ OH_NN_ReturnCode OH_NNModel_SetTensorData(OH_NNModel *model, uint32_t index, con * @param model Pointer to the {@link OH_NNModel} instance. * @param index Index of a tensor. * @param quantParam Pointer to the quantization parameter instance. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}. * @since 11 * @version 1.0 */ @@ -204,8 +219,9 @@ OH_NN_ReturnCode OH_NNModel_SetTensorQuantParams(OH_NNModel *model, uint32_t ind * @param model Pointer to the {@link OH_NNModel} instance. * @param index Index of a tensor. * @param tensorType Tensor type of {@link OH_NN_TensorType}. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. 
For details about the error codes, see {@link OH_NN_ReturnCode}. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}. * @since 11 * @version 1.0 */ @@ -214,27 +230,31 @@ OH_NN_ReturnCode OH_NNModel_SetTensorType(OH_NNModel *model, uint32_t index, OH_ /** * @brief Adds an operator to a model instance. * - * This method is used to add an operator to a model instance. The operator type is specified by op, and - * the operator parameters, inputs, and outputs are specified by paramIndices, inputIndices, and outputIndices respectively. - * This method verifies the attributes of operator parameters and the number of input and output parameters. - * These attributes must be correctly set when {@link OH_NNModel_AddTensorToModel} is called to add tensors. - * For details about the expected parameters, input attributes, and output attributes of each operator, see {@link OH_NN_OperationType}. \n + * This method is used to add an operator to a model instance. The operator type is specified by op, and + * the operator parameters, inputs, and outputs are specified by paramIndices, inputIndices, + * and outputIndices respectively. + * This method verifies the attributes of operator parameters and the number of input and output parameters. + * These attributes must be correctly set when {@link OH_NNModel_AddTensorToModel} is called to add tensors. + * For details about the expected parameters, input attributes, and output attributes of each operator, + * see {@link OH_NN_OperationType}. \n * - * paramIndices, inputIndices, and outputIndices store the indices of tensors. - * The indices are determined by the order in which tensors are added to the model. + * paramIndices, inputIndices, and outputIndices store the indices of tensors. + * The indices are determined by the order in which tensors are added to the model. * For details about how to add a tensor, see {@link OH_NNModel_AddTensorToModel}. \n * - * If unnecessary parameters are added when adding an operator, this method returns {@link OH_NN_INVALID_PARAMETER}. - * If no operator parameter is set, the operator uses the default parameter value. + * If unnecessary parameters are added when adding an operator, this method returns {@link OH_NN_INVALID_PARAMETER}. + * If no operator parameter is set, the operator uses the default parameter value. * For details about the default values, see {@link OH_NN_OperationType}. \n * * @param model Pointer to the {@link OH_NNModel} instance. - * @param op Specifies the type of an operator to be added. For details, see the enumerated values of {@link OH_NN_OperationType}. + * @param op Specifies the type of an operator to be added. For details, + * see the enumerated values of {@link OH_NN_OperationType}. * @param paramIndices Pointer to the OH_NN_UInt32Array instance, which is used to set operator parameters. * @param inputIndices Pointer to the OH_NN_UInt32Array instance, which is used to set the operator input. * @param outputIndices Pointer to the OH_NN_UInt32Array instance, which is used to set the operator output. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. 
+ * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}. * @since 9 * @version 1.0 */ @@ -247,10 +267,10 @@ OH_NN_ReturnCode OH_NNModel_AddOperation(OH_NNModel *model, /** * @brief Specifies the inputs and outputs of a model. * - * A tensor must be specified as the end-to-end inputs and outputs of a model instance. This type of tensor cannot be set - * using {@link OH_NNModel_SetTensorData}. \n + * Tensors must be specified as the end-to-end inputs and outputs of a model instance. + * Tensors of this type cannot be set using {@link OH_NNModel_SetTensorData}. \n * - * The index of a tensor is determined by the order in which the tensor is added to the model. + * The index of a tensor is determined by the order in which the tensor is added to the model. * For details about how to add a tensor, see {@link OH_NNModel_AddTensorToModel}. \n * * Currently, the model inputs and outputs cannot be set asynchronously. \n * * @param model Pointer to the {@link OH_NNModel} instance. * @param inputIndices Pointer to the OH_NN_UInt32Array instance, which is used to set the operator input. * @param outputIndices Pointer to the OH_NN_UInt32Array instance, which is used to set the operator output. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}. * @since 9 * @version 1.0 */ @@ -270,17 +291,19 @@ OH_NN_ReturnCode OH_NNModel_SpecifyInputsAndOutputs(OH_NNModel *model, /** * @brief Completes model composition. * - * After the model topology is set up, call this method to indicate that the composition is complete. After this method is called, - * additional composition operations cannot be performed. If {@link OH_NNModel_AddTensorToModel}, {@link OH_NNModel_AddOperation}, - * {@link OH_NNModel_SetTensorData}, and {@link OH_NNModel_SpecifyInputsAndOutputs} are called, + * After the model topology is set up, call this method to indicate that the composition is complete. + * After this method is called, additional composition operations cannot be performed. + * If {@link OH_NNModel_AddTensorToModel}, {@link OH_NNModel_AddOperation}, + * {@link OH_NNModel_SetTensorData}, and {@link OH_NNModel_SpecifyInputsAndOutputs} are called, * {@link OH_NN_OPERATION_FORBIDDEN} is returned. \n * - * Before calling {@link OH_NNModel_GetAvailableOperations} and {@link OH_NNCompilation_Construct}, + * Before calling {@link OH_NNModel_GetAvailableOperations} and {@link OH_NNCompilation_Construct}, * you must call this method to complete composition. \n * * @param model Pointer to the {@link OH_NNModel} instance. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}.
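+ *
+ * A minimal composition sketch for a single Add operator. It is illustrative only: error handling is
+ * omitted, and the tensor descriptor helpers (OH_NNTensorDesc_Create, OH_NNTensorDesc_SetDataType,
+ * OH_NNTensorDesc_SetShape) are assumed to be available from the tensor descriptor API. \n
+ *
+ * @code
+ * OH_NNModel *model = OH_NNModel_Construct();
+ * int32_t shape[4] = {1, 2, 2, 3};
+ * for (uint32_t i = 0; i < 3; ++i) {                        // tensors 0 and 1: inputs; tensor 2: output
+ *     NN_TensorDesc *desc = OH_NNTensorDesc_Create();       // assumed helper
+ *     OH_NNTensorDesc_SetDataType(desc, OH_NN_FLOAT32);     // assumed helper
+ *     OH_NNTensorDesc_SetShape(desc, shape, 4);             // assumed helper
+ *     OH_NNModel_AddTensorToModel(model, desc);             // tensor receives index i
+ * }
+ * uint32_t inIdx[2] = {0, 1};
+ * uint32_t outIdx[1] = {2};
+ * OH_NN_UInt32Array inputs = {inIdx, 2};
+ * OH_NN_UInt32Array outputs = {outIdx, 1};
+ * OH_NN_UInt32Array params = {NULL, 0};                     // no parameters set: defaults are used
+ * OH_NNModel_AddOperation(model, OH_NN_OPS_ADD, &params, &inputs, &outputs);
+ * OH_NNModel_SpecifyInputsAndOutputs(model, &inputs, &outputs);
+ * OH_NNModel_Finish(model);                                 // composition is now frozen
+ * // ... hand the model to compilation, then release it ...
+ * OH_NNModel_Destroy(&model);
+ * @endcode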
* @since 9 * @version 1.0 */ @@ -289,34 +312,42 @@ OH_NN_ReturnCode OH_NNModel_Finish(OH_NNModel *model); /** * @brief Releases a model instance. * - * This method needs to be called to release the model instance created by calling {@link OH_NNModel_Construct}. Otherwise, memory leak will occur. \n + * This method needs to be called to release the model instance created by calling {@link OH_NNModel_Construct}. + * Otherwise, memory leak will occur. \n * - * If model or *model is a null pointer, this method only prints warning logs and does not execute the release. \n + * If model or *model is a null pointer, + * this method only prints warning logs and does not execute the release. \n * - * @param model Double pointer to the {@link OH_NNModel} instance. After a model instance is destroyed, this method sets *model to a null pointer. + * @param model Double pointer to the {@link OH_NNModel} instance. After a model instance is destroyed, + * this method sets *model to a null pointer. * @since 9 * @version 1.0 */ void OH_NNModel_Destroy(OH_NNModel **model); /** - * @brief Queries whether the device supports operators in the model. The support status is indicated by the Boolean value. + * @brief Queries whether the device supports operators in the model. + * The support status is indicated by the Boolean value. * - * Queries whether underlying device supports operators in a model instance. The device is specified by deviceID, - * and the result is represented by the array pointed by isSupported. If the ith operator is supported, - * the value of (*isSupported)[i] is true. Otherwise, the value is false. \n + * Queries whether underlying device supports operators in a model instance. The device is specified by + * deviceID, and the result is represented by the array pointed by isSupported. + * If the ith operator is supported, the value of (*isSupported)[i] is true. + * Otherwise, the value is false. \n * - * After this method is successfully executed, (*isSupported) points to the bool array that records the operator support status. - * The operator quantity for the array length is the same as that for the model instance. The memory corresponding to this array is - * managed by Neural Network Runtime and is automatically destroyed after the model instance is destroyed or this method is called again. \n + * After this method is successfully executed, (*isSupported) points to the bool array that records the operator + * support status. The operator quantity for the array length is the same as that for the model instance. + * The memory corresponding to this array is managed by Neural Network Runtime and is automatically destroyed + * after the model instance is destroyed or this method is called again. \n * * @param model Pointer to the {@link OH_NNModel} instance. * @param deviceID Device ID to be queried, which can be obtained by using {@link OH_NNDevice_GetAllDevicesID}. - * @param isSupported Pointer to the bool array. When this method is called, (*isSupported) must be a null pointer. - * Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned. - * @param opCount Number of operators in a model instance, corresponding to the length of the (*isSupported) array. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}. + * @param isSupported Pointer to the bool array. 
When this method is called, (*isSupported) must be + * a null pointer. Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned. + * @param opCount Number of operators in a model instance, corresponding to the length of the + * (*isSupported) array. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned. For details about the error codes, + * see {@link OH_NN_ReturnCode}. * @since 9 * @version 1.0 */ @@ -335,7 +366,7 @@ OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model, * and {@link OH_NNModel_SpecifyInputsAndOutputs} methods specifies tensors based on the index value.\n * * Neural Network Runtime supports inputs and outputs of the dynamic shape. When adding a data node with a dynamic - * shape, you need to set the dimensions that support dynamic changes in tensor.dimensions to -1. + * shape, you need to set the dimensions that support dynamic changes in tensor.dimensions to -1. * For example, if tensor.dimensions of a four-dimensional tensor is set to [1, -1, 2, 2], * the second dimension supports dynamic changes.\n * @@ -405,7 +436,7 @@ OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor, * results based on the actual situation.\n * * - If the buffer length is greater than or equal to the data length, the inference result is copied to the buffer and - * {@link OH_NN_SUCCESS} is returned. You can read the inference result from dataBuffer. + * {@link OH_NN_SUCCESS} is returned. You can read the inference result from dataBuffer. * - If the buffer length is smaller than the data length, {@link OH_NNExecutor_Run} returns * {@link OH_NN_INVALID_PARAMETER} and generates a log indicating that the buffer is too small.\n * @@ -579,8 +610,9 @@ OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor, * * In scenarios where memory needs to be managed by yourself, this method binds the execution output to the * {@link OH_NN_Memory} memory instance. When computing is performed, the underlying hardware directly writes the - * computing result to the shared memory to which the memory instance points. By using this method, concurrent execution - * of input setting, computing, and read can be implemented to improve inference efficiency of a data flow.\n + * computing result to the shared memory to which the memory instance points. + * By using this method, concurrent execution of input setting, + * computing, and read can be implemented to improve inference efficiency of a data flow.\n * * @param executor Executor. * @param outputIndex Output Index value, which is in the same sequence of the data output when diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h index 93f34ac..fed062b 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h @@ -143,7 +143,8 @@ typedef enum { OH_NN_FAILED = 1, /** Invalid parameter. */ OH_NN_INVALID_PARAMETER = 2, - /** Memory-related error, for example, insufficient memory, memory data copy failure, or memory application failure. */ + /** Memory-related error, for example, insufficient memory, memory data copy failure, + * or memory application failure. */ OH_NN_MEMORY_ERROR = 3, /** Invalid operation. */ OH_NN_OPERATION_FORBIDDEN = 4, @@ -151,22 +152,22 @@ typedef enum { OH_NN_NULL_PTR = 5, /** Invalid file. 
*/ OH_NN_INVALID_FILE = 6, - /** A hardware error occurs, for example, HDL service crash. + /** A hardware error occurs, for example, HDL service crash. * @deprecated since 11 * @useinstead {@link OH_NN_UNAVAILABLE_DEVICE} */ OH_NN_UNAVALIDABLE_DEVICE = 7, /** Invalid path. */ OH_NN_INVALID_PATH = 8, - /** Timeout. + /** Timeout. * @since 11 */ OH_NN_TIMEOUT = 9, - /** Unsupported. + /** Unsupported. * @since 11 */ OH_NN_UNSUPPORTED = 10, - /** Connection Exception. + /** Connection Exception. * @since 11 */ OH_NN_CONNECTION_EXCEPTION = 11, @@ -178,7 +179,7 @@ * @since 11 */ OH_NN_DYNAMIC_SHAPE = 13, - /** A hardware error occurs, for example, HDL service crash. + /** A hardware error occurs, for example, HDL service crash. * @since 11 */ OH_NN_UNAVAILABLE_DEVICE = 14 @@ -340,7 +341,8 @@ OH_NN_OPS_ADD = 1, /** - * Apply 2D average pooling to the input tensor, which now must be in NHWC format. The int8 quantization input is supported. + * Apply 2D average pooling to the input tensor, which now must be in NHWC format. + * The int8 quantization input is supported. * * If the input contains the padMode parameter: * * Inputs: * * * input: input tensor. * * Parameters: * * * kernelSize indicates the kernel size used to obtain the average value. + * It is an int array [kernel_height, kernel_width]. * The first number indicates the kernel height, and the second number indicates the kernel width. * * strides indicates the distance of kernel moving. The value is an int array + * [stride_height, stride_width]. The first number indicates the moving step in height, + * and the second number indicates the moving step in width. * * padMode: padding mode, which is optional. The value is of the int type and can be 0 (same) + * or 1 (valid). The nearest neighbor value is used for padding. + * 0 (same): The height and width of the output are the same as those of the input. + * The total padding quantity is calculated horizontally and vertically and + * evenly distributed to the top, bottom, left, and right if possible. * Otherwise, the last additional padding will be completed from the bottom and right. + * 1 (valid): The possible maximum height and width of the output will be returned in case of no + * padding. Excessive pixels will be discarded. + * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * * If the input contains the padList parameter: * * Inputs: * * * input: input tensor. * * Parameters: * - * * kernelSize indicates the kernel size used to obtain the average value. It is an int array [kernel_height, kernel_width].
+ * * kernelSize indicates the kernel size used to obtain the average value. + * It is an int array [kernel_height, kernel_width]. * The first number indicates the kernel height, and the second number indicates the kernel width. - * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width]. - * The first number indicates the moving step in height, and the second number indicates the moving step in width. - * * padList: padding around input. It is an int array [top, bottom, left, right], and the nearest neighbor values are used for padding. + * * strides indicates the distance of kernel moving. The value is an int array + * [stride_height, stride_width]. The first number indicates the moving step in height, + * and the second number indicates the moving step in width. + * * padList: padding around input. It is an int array [top, bottom, left, right], + * and the nearest neighbor values are used for padding. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * * Outputs: * * * output: average pooling result of the input. */ @@ -386,15 +395,19 @@ OH_NN_OPS_AVG_POOL = 2, /** - * Batch normalization is performed on a tensor to scale and shift tensor elements, relieving potential covariate shift in a batch of data. + * Batch normalization is performed on a tensor to scale and shift tensor elements, + * relieving potential covariate shift in a batch of data. * * Inputs: * - * * input: n-dimensional tensor of shape [N, ..., C]. The nth dimension is the number of channels. + * * input: n-dimensional tensor of shape [N, ..., C]. + * The nth dimension is the number of channels. * * scale: 1D tensor of the scaling factor used to scale the first normalized tensor. * * offset: 1D tensor used to move to the first normalized tensor. - * * mean: 1D tensor of the overall mean value. It is used only for inference. In case of training, this parameter must be left empty. - * * variance: 1D tensor used for the overall variance. It is used only for inference. In case of training, this parameter must be left empty. + * * mean: 1D tensor of the overall mean value. It is used only for inference. + * In case of training, this parameter must be left empty. + * * variance: 1D tensor used for the overall variance. It is used only for inference. + * In case of training, this parameter must be left empty. * * Parameters: * * * epsilon: fixed small additional value. * * Outputs: * - * * output: n-dimensional output tensor whose shape and data type are the same as those of the input. + * * output: n-dimensional output tensor whose shape + * and data type are the same as those of the input. */ OH_NN_OPS_BATCH_NORM = 3, /** - * Divides the batch dimension of a 4D tensor into small blocks by block_shape, and interleaves these blocks back into the spatial dimension. + * Divides the batch dimension of a 4D tensor into small blocks by block_shape, + * and interleaves these blocks back into the spatial dimension. * * Inputs: * - * * input: input tensor. The dimension will be divided into small blocks, and these blocks will be interleaved into the spatial dimension. + * * input: input tensor. The dimension will be divided into small blocks, + * and these blocks will be interleaved into the spatial dimension. * * Parameters: * - * * blockSize: size of each block to be interleaved into the spatial dimension. The value is an array [height_block, width_block]. - * * crops: elements truncated from the spatial dimension of the output.
The value is a 2D array [[crop0_start, crop0_end], - * [crop1_start, crop1_end]] with the shape of (2, 2). + * * blockSize: size of each block to be interleaved into the spatial dimension. + * The value is an array [height_block, width_block]. + * * crops: elements truncated from the spatial dimension of the output. The value is a 2D array + * [[crop0_start, crop0_end], [crop1_start, crop1_end]] with the shape of (2, 2). * * * Outputs: * - * * output. Assume that the shape of input is (n,h,w,c) and the shape of output is (n',h',w',c'): + * * output. Assume that the shape of input is (n,h,w,c) and + * the shape of output is (n',h',w',c'): * n' = n / (block_shape[0] * block_shape[1]) * h' = h * block_shape[0] - crops[0][0] - crops[0][1] * w' = w * block_shape[1] - crops[1][0] - crops[1][1] @@ -483,37 +501,43 @@ typedef enum { * Inputs: * * * input: input tensor. - * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. + * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. * The value of inChannel must be exactly divided by the value of group. * - * * bias: bias of the convolution. It is an array with a length of [outChannel]. - * In quantization scenarios, the bias parameter does not require quantization parameters. - * The quantization version requires data input of the OH_NN_INT32 type. + * * bias: bias of the convolution. It is an array with a length of [outChannel]. + * In quantization scenarios, the bias parameter does not require quantization parameters. + * The quantization version requires data input of the OH_NN_INT32 type. * The actual quantization parameters are determined by input and weight. * * Parameters: * - * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth]. - * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth]. - * The value must be greater than or equal to 1 and cannot exceed the height and width of input. - * - * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid). - * 0 (same): The height and width of the output are the same as those of the input. - * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible. + * * stride: movement stride of the convolution kernel in height and width. + * It is an int array [strideHeight, strideWidth]. + * * dilation: dilation size of the convolution kernel in height and width. + * It is an int array [dilationHeight, dilationWidth]. The value must be greater than + * or equal to 1 and cannot exceed the height and width of input. + * + * * padMode: padding mode of input. + * The value is of the int type and can be 0 (same) or 1 (valid). + * 0 (same): The height and width of the output are the same as those of the input. + * The total padding quantity is calculated horizontally and vertically + * and evenly distributed to the top, bottom, left, and right if possible. * Otherwise, the last additional padding will be completed from the bottom and right. * - * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded. - * * group: number of groups in which the input is divided by in_channel. The value is of the int type. - * If group is 1, it is a conventional convolution. 
If group is greater than 1 and - * less than or equal to in_channel, it is a group convolution. - * * activationType is an integer constant which is contained in FuseType. The specified activation function is called before output. + * 1 (valid): The possible maximum height and width of the output will be returned + * in case of no padding. The excessive pixels will be discarded. + * * group: number of groups in which the input is divided by in_channel. The value is of the + * int type. If group is 1, it is a conventional convolution. If group is greater + * than 1 and less than or equal to in_channel, it is a group convolution. + * * activationType is an integer constant which is contained in FuseType. + * The specified activation function is called before output. * * If the input contains the padList parameter: * * Inputs: * * * input: input tensor. - * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. + * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. * The value of inChannel must be exactly divided by the value of group. * * * bias: bias of the convolution. It is an array with a length of [outChannel]. @@ -523,14 +547,17 @@ typedef enum { * * Parameters: * - * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth]. - * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth]. - * The value must be greater than or equal to 1 and cannot exceed the height and width of input. + * * stride: movement stride of the convolution kernel in height and width. + * It is an int array [strideHeight, strideWidth]. + * * dilation: dilation size of the convolution kernel in height and width. + * It is an int array [dilationHeight, dilationWidth]. The value must be greater than + * or equal to 1 and cannot exceed the height and width of input. * * padList: padding around input. It is an int array [top, bottom, left, right]. - * * group: number of groups in which the input is divided by in_channel. The value is of the int type. - * If group is 1, it is a conventional convolution. + * * group: number of groups in which the input is divided by in_channel. + * The value is of the int type. If group is 1, it is a conventional convolution. * If group is in_channel, it is depthwiseConv2d. In this case, group==in_channel==out_channel. - * If group is greater than 1 and less than in_channel, it is a group convolution. In this case, out_channel==group. + * If group is greater than 1 and less than in_channel, it is a group convolution. + * In this case, out_channel==group. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * @@ -548,32 +575,36 @@ typedef enum { * Inputs: * * * input: input tensor. - * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. + * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. * The value of inChannel must be exactly divided by the value of group. - * - * * bias: bias of the convolution. It is an array with a length of [outChannel]. - * In quantization scenarios, the bias parameter does not require quantization parameters. - * The quantization version requires data input of the OH_NN_INT32 type. + * + * * bias: bias of the convolution. It is an array with a length of [outChannel]. 
+ * In quantization scenarios, the bias parameter does not require quantization parameters. + * The quantization version requires data input of the OH_NN_INT32 type. * The actual quantization parameters are determined by input and weight. - * - * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth]. + * + * * stride: movement stride of the convolution kernel in height and width. + * It is an int array [strideHeight, strideWidth]. * * Parameters: * - * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth]. - * The value must be greater than or equal to 1 and cannot exceed the height and width of input. - * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid). - * 0 (same): The height and width of the output are the same as those of the input. - * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible. + * * dilation: dilation size of the convolution kernel in height and width. + * It is an int array [dilationHeight, dilationWidth]. The value must be greater than + * or equal to 1 and cannot exceed the height and width of input. + * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or + * 1 (valid). 0 (same): The height and width of the output are the same as those of the input. + * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, + * bottom, left, and right if possible. * Otherwise, the last additional padding will be completed from the bottom and right. - * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded. - * * group: number of groups in which the input is divided by in_channel. The value is of the int type. - * If group is 1, it is a conventional convolution. If group is greater than 1 and - * less than or equal to in_channel, it is a group convolution. - * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple. - * It can be a single integer to specify the same value for all spatial dimensions. The amount of output + * 1 (valid): The possible maximum height and width of the output will be returned in case of + * no padding. The excessive pixels will be discarded. + * * group: number of groups in which the input is divided by in_channel. The value is of the int + * type. If group is 1, it is a conventional convolution. If group is greater than + * 1 and less than or equal to in_channel, it is a group convolution. + * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple. + * It can be a single integer to specify the same value for all spatial dimensions. The amount of output * padding along a dimension must be less than the stride along this dimension. - * + * * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * @@ -582,26 +613,28 @@ typedef enum { * Inputs: * * * input: input tensor. - * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. - * The value of inChannel must be exactly divided by the value of group. - * * bias: bias of the convolution. It is an array with a length of [outChannel]. 
- * In quantization scenarios, the bias parameter does not require quantization parameters. - * The quantization version requires data input of the OH_NN_INT32 type. + * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. + * The value of inChannel must be exactly divided by the value of group. + * * bias: bias of the convolution. It is an array with a length of [outChannel]. + * In quantization scenarios, the bias parameter does not require quantization parameters. + * The quantization version requires data input of the OH_NN_INT32 type. * The actual quantization parameters are determined by input and weight. - * + * * Parameters: * - * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth]. - * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth]. - * The value must be greater than or equal to 1 and cannot exceed the height and width of input. + * * stride: movement stride of the convolution kernel in height and width. + * It is an int array [strideHeight, strideWidth]. + * * dilation: dilation size of the convolution kernel in height and width. + * It is an int array [dilationHeight, dilationWidth]. The value must be greater than + * or equal to 1 and cannot exceed the height and width of input. * * padList: padding around input. It is an int array [top, bottom, left, right]. - * * group: number of groups in which the input is divided by in_channel. The value is of the int type. - * If group is 1, it is a conventional convolution. If group is greater than 1 - * and less than or equal to in_channel, it is a group convolution. - * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple. - * It can be a single integer to specify the same value for all spatial dimensions. The amount of output padding - * along a dimension must be less than the stride along this dimension. - * + * * group: number of groups in which the input is divided by in_channel. The value is of the int + * type. If group is 1, it is a conventional convolution. If group is greater than + * 1 and less than or equal to in_channel, it is a group convolution. + * * outputPads: padding along the height and width of the output tensor. The value is an int or a tuple. + * It can be a single integer to specify the same value for all spatial dimensions. The amount of output + * padding along a dimension must be less than the stride along this dimension. + * * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * @@ -628,15 +661,19 @@ typedef enum { * * Parameters: * - * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth]. - * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth]. - * The value must be greater than or equal to 1 and cannot exceed the height and width of input. - * * padMode: padding mode of input. The value is of the int type and can be 0 (same) or 1 (valid). - * 0 (same): The height and width of the output are the same as those of the input. - * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible. - * Otherwise, the last additional padding will be completed from the bottom and right. 
+ * * stride: movement stride of the convolution kernel in height and width. + * It is an int array [strideHeight, strideWidth]. + * * dilation: dilation size of the convolution kernel in height and width. + * It is an int array [dilationHeight, dilationWidth]. The value must be greater than + * or equal to 1 and cannot exceed the height and width of input. + * * padMode: padding mode of input. + * The value is of the int type and can be 0 (same) or 1 (valid). + * 0 (same): The height and width of the output are the same as those of the input. The total padding + * quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and + * right if possible. Otherwise, the last additional padding will be completed from the bottom and right. * - * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded. + * 1 (valid): The possible maximum height and width of the output will be returned in case of no + * padding. The excessive pixels will be discarded. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * @@ -654,9 +691,11 @@ typedef enum { * * Parameters: * - * * stride: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth]. - * * dilation: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth]. - * The value must be greater than or equal to 1 and cannot exceed the height and width of input. + * * stride: movement stride of the convolution kernel in height and width. + * It is an int array [strideHeight, strideWidth]. + * * dilation: dilation size of the convolution kernel in height and width. + * It is an int array [dilationHeight, dilationWidth]. The value must be greater than + * or equal to 1 and cannot exceed the height and width of input. * * padList: padding around input. It is an int array [top, bottom, left, right]. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. @@ -673,9 +712,10 @@ typedef enum { * Inputs: * * * input1: first input, which is a number, a bool, or a tensor whose data type is number or Boolean. - * * input2: second input, which must meet the following requirements: - * If the first input is a tensor, the second input can be a real number, a Boolean value, or a tensor whose data type is real number or Boolean value. - * If the first input is a real number or Boolean value, the second input must be a tensor whose data type is real number or Boolean value. + * * input2: second input, which must meet the following requirements: + * If the first input is a tensor, the second input can be a real number, a Boolean value, or a tensor whose + * data type is real number or Boolean value. If the first input is a real number or Boolean value, + * the second input must be a tensor whose data type is real number or Boolean value. * * Parameters: * @@ -689,7 +729,8 @@ typedef enum { OH_NN_OPS_DIV = 11, /** - * Sets parameters to perform product (dot product), sum (addition and subtraction), or max (larger value) on the input. + * Sets parameters to perform product (dot product), sum (addition and subtraction), + * or max (larger value) on the input. * * Inputs: * @@ -712,7 +753,8 @@ typedef enum { * Inputs: * * * input: input tensor. - * * axis: index of the dimension to be added. 
The value is of the int32_t type and must be a constant in the range [-dim-1, dim]. + * * axis: index of the dimension to be added. + * The value is of the int32_t type and must be a constant in the range [-dim-1, dim]. * * Outputs: * @@ -730,7 +772,8 @@ typedef enum { * * Outputs: * - * * output: generated tensor, which has the same data type as value. The tensor shape is specified by the shape parameter. + * * output: generated tensor, which has the same data type as value. + * The tensor shape is specified by the shape parameter. */ OH_NN_OPS_FILL = 14, @@ -741,8 +784,8 @@ typedef enum { * * * input: full-connection input tensor. * * weight: weight tensor for a full connection. - * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required for this parameter. - * If quantization is required, the data must be of the OH_NN_INT32 type. + * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required + * for this parameter. If quantization is required, the data must be of the OH_NN_INT32 type. * The actual quantization parameters are determined by input and weight. * * Parameters: @@ -760,9 +803,9 @@ typedef enum { * * * input: full-connection input tensor. * * weight: weight tensor for a full connection. - * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required for this parameter. - * If quantization is required, the data must be of the OH_NN_INT32 type. The actual quantization parameters - * are determined by input and weight. + * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required + * for this parameter. If quantization is required, the data must be of the OH_NN_INT32 type. + * The actual quantization parameters are determined by input and weight. * * Parameters: * @@ -802,7 +845,8 @@ typedef enum { * * Outputs: * - * * output: n-dimensional Hswish activation value. The data type is the same as that of shape and input. + * * output: n-dimensional Hswish activation value. + * The data type is the same as that of shape and input. */ OH_NN_OPS_HSWISH = 17, @@ -818,8 +862,8 @@ typedef enum { * * Outputs: * - * * A tensor of the data type NN_BOOL. When a quantization model is used, the quantization parameters of the output - * cannot be omitted. However, values of the quantization parameters do not affect the result. + * * A tensor of the data type NN_BOOL. When a quantization model is used, the quantization parameters of the + * output cannot be omitted. However, values of the quantization parameters do not affect the result. */ OH_NN_OPS_LESS_EQUAL = 18, @@ -847,8 +891,10 @@ typedef enum { /** * Calculates the maximum of input1 and input2 element-wise. The inputs of input1 and input2 - * comply with the implicit type conversion rules to make the data types consistent. * The inputs must be two tensors or one tensor and one scalar. - * When the inputs are two tensors, their data types cannot be both NN_BOOL. Their shapes can be broadcast to the same size. + * comply with the implicit type conversion rules to make the data types consistent. + * * The inputs must be two tensors or one tensor and one scalar. + * When the inputs are two tensors, their data types cannot be both NN_BOOL. + * Their shapes can be broadcast to the same size. * When the inputs are one tensor and one scalar, the scalar must be a constant. * * Inputs: @@ -876,14 +922,16 @@ typedef enum { * * * kernelSize: kernel size used to obtain the maximum. 
It is an int array [kernel_height, kernel_width]. * The first number indicates the kernel height, and the second number indicates the kernel width. - * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width]. - * The first number indicates the moving step in height, and the second number indicates the moving step in width. + * * strides indicates the distance of kernel moving. The value is an int array + * [stride_height, stride_width]. The first number indicates the moving step in height, + * and the second number indicates the moving step in width. * * padMode: padding mode, which is optional. The value is of the int type and can be 0 (same) * or 1 (valid). The nearest neighbor value is used for padding. - * 0 (same): The height and width of the output are the same as those of the input. - * The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible. - * Otherwise, the last additional padding will be completed from the bottom and right. - * 1 (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded. + * 0 (same): The height and width of the output are the same as those of the input. The total padding + * quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and + * right if possible. Otherwise, the last additional padding will be completed from the bottom and right. + * 1 (valid): The possible maximum height and width of the output will be returned in case of + * no padding. The excessive pixels will be discarded. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * * If the input contains the padList parameter: * * Inputs: * * * input: tensor. * * Parameters: * * * kernelSize: kernel size used to obtain the maximum. It is an int array [kernel_height, kernel_width]. * The first number indicates the kernel height, and the second number indicates the kernel width. - * * strides indicates the distance of kernel moving. The value is an int array [stride_height, stride_width]. - * The first number indicates the moving step in height, and the second number indicates the moving step in width. + * * strides indicates the distance of kernel moving. The value is an int array + * [stride_height, stride_width]. The first number indicates the moving step in height, + * and the second number indicates the moving step in width. * * padList: padding around input. It is an int array [top, bottom, left, right], * and the nearest neighbor values are used for padding. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * * Outputs: * * * output: maximum pooling result of the input. */ OH_NN_OPS_MAX_POOL = 21, /** * Multiplies elements in the same positions of input1 and input2 to obtain the output. * If input1 and input2 have different shapes, expand them to the same shape * through broadcast and then perform multiplication. * * Inputs: * * * input1: n-dimensional tensor. * * input2: n-dimensional tensor. * * Parameters: * * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. * * Outputs: * * * output: product of each element of input1 and input2. */ OH_NN_OPS_MUL = 22, /** - * Generates a one-hot tensor based on the positions specified by indices. The positions specified by indices - * are determined by on_value, and other positions are determined by off_value. + * Generates a one-hot tensor based on the positions specified by + * indices are determined by on_value, and other positions are determined by off_value. * * Inputs: * * * indices: n-dimensional tensor. Each element in indices determines the position of * on_value in each one-hot vector. * * depth: integer scalar that determines the depth of the one-hot vector. The value of depth * must be greater than 0. * * on_value: scalar that specifies a valid value in the one-hot vector. * * off_value: scalar that specifies the values of other positions in the one-hot vector except
+ * * off_value: scalar that specifies the values of other posistions in the one-hot vector except + * the valid value. * * Parameters: * @@ -965,24 +1015,28 @@ typedef enum { * Inputs: * * * inputX: n-dimensional tensor in [BatchSize, ...] format. - * * paddings: 2D tensor that specifies the length to pad in each dimension. The shape is [n, 2]. - * For example, paddings[i][0] indicates the number of paddings to be added preceding inputX in the ith dimension. - * paddings[i][1] indicates the number of paddings to be added following inputX in the ith dimension. + * * paddings: 2D tensor that specifies the length to pad in each dimension. The shape is [n, 2]. + * For example, paddings[i][0] indicates the number of paddings to be added preceding + * inputX in the ith dimension. + * paddings[i][1] indicates the number of paddings to be added following inputX + * in the ith dimension. * * Parameters: * - * * padValues: value to be added to the pad operation. The value is a constant with the same data type as inputX. + * * padValues: value to be added to the pad operation. + * The value is a constant with the same data type as inputX. * * Outputs: * - * * output: n-dimensional tensor after padding, with the same dimensions and data type as inputX. - * The shape is determined by inputX and paddings. + * * output: n-dimensional tensor after padding, with the same dimensions and data type as + * inputX. The shape is determined by inputX and paddings. * output.shape[i] = input.shape[i] + paddings[i][0]+paddings[i][1] */ OH_NN_OPS_PAD = 24, /** - * Calculates the y power of each element in input. The inputs must be two tensors or one tensor and one scalar. + * Calculates the y power of each element in input. + * The inputs must be two tensors or one tensor and one scalar. * When the inputs are two tensors, their data types cannot be both NN_BOOL, and their shapes must be the same. * When the inputs are one tensor and one scalar, the scalar must be a constant. * @@ -1093,8 +1147,8 @@ typedef enum { * Parameters: * * * blockShape: a pair of integers. Each of them is greater than or equal to 1. - * * paddings: a pair of arrays. Each of them consists of two integers. The four integers that form paddings - * must be greater than or equal to 0. paddings[0][0] and paddings[0][1] + * * paddings: a pair of arrays. Each of them consists of two integers. The four integers that form + * paddings must be greater than or equal to 0. paddings[0][0] and paddings[0][1] * specify the number of paddings in the third dimension, and paddings[1][0] and paddings[1][1] * specify the number of paddings in the fourth dimension. * @@ -1106,14 +1160,15 @@ typedef enum { * output.shape[1] = c * output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0] * output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1] - * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]) is exactly divisible by + * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]) is exactly divisible by * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]). * */ OH_NN_OPS_SPACE_TO_BATCH_ND = 31, /** - * Splits the input into multiple tensors along the axis dimension. The number of tensors is specified by outputNum. + * Splits the input into multiple tensors along the axis dimension. + * The number of tensors is specified by outputNum. * * Inputs: * @@ -1122,8 +1177,8 @@ typedef enum { * Parameters: * * * outputNum: number of output tensors. 
The data type is long. - * * size_splits: size of each tensor split from the input. The value is a 1D tensor of the int type. - * If size_splits is empty, the input will be evenly split into tensors of the same size. In this case, + * * size_splits: size of each tensor split from the input. The value is a 1D tensor of the int type. If + * size_splits is empty, the input will be evenly split into tensors of the same size. In this case, * input.shape[axis] must be exactly divisible by outputNum. * If size_splits is not empty, the sum of all its elements must be equal to input.shape[axis]. * * axis: splitting dimension of the int type. @@ -1144,14 +1199,16 @@ typedef enum { * * Outputs: * - * * output: square root of the input. It is an n-dimensional tensor with the same data type and shape as input. + * * output: square root of the input. + * It is an n-dimensional tensor with the same data type and shape as input. */ OH_NN_OPS_SQRT = 33, /** - * Calculates the square of the difference between two tensors. The SquaredDifference operator supports * tensor and tensor subtraction. If two tensors have different TensorTypes, the Sub operator - * converts the low-precision tensor to a high-precision one. If two tensors have different shapes, - * the two tensors can be extended to tensors with the same shape through broadcast. + * Calculates the square of the difference between two tensors. The SquaredDifference operator supports + * tensor and tensor subtraction. If two tensors have different TensorTypes, the Sub operator + * converts the low-precision tensor to a high-precision one. If two tensors have different shapes, + * the two tensors can be extended to tensors with the same shape through broadcast. * * Inputs: * * * Outputs: * * * output: square of the difference between two inputs. The output shape is determined - * byinput1 and input2. If they have the same shape, the output tensor has the same shape as them. - * If they have different shapes, perform the broadcast operation on input1 and input2 and perform subtraction. + * by input1 and input2. If they have the same shape, the output tensor has the same + * shape as them. If they have different shapes, perform the broadcast operation on + * input1 and input2 and perform subtraction. * TensorType of the output is the same as that of the input tensor with higher precision. */ OH_NN_OPS_SQUARED_DIFFERENCE = 34, @@ -1178,7 +1236,8 @@ typedef enum { * * Parameters: * - * * axis: dimension to be removed. The value is of int64_t type and can be an integer in the range [-n, n) or an array. + * * axis: dimension to be removed. + * The value is of int64_t type and can be an integer in the range [-n, n) or an array. * * Outputs: * @@ -1202,8 +1261,8 @@ typedef enum { * * Outputs: * - * * output: stacking result of the input along the axis dimension. The value is an n+1-dimensional tensor - * and has the same TensorType as the input. + * * output: stacking result of the input along the axis dimension. + * The value is an n+1-dimensional tensor and has the same TensorType as the input. */ OH_NN_OPS_STACK = 36, @@ -1213,36 +1272,39 @@ typedef enum { * Inputs: * * * input: n-dimensional input tensor. - * * begin: start of slicing, which is a 1D tensor. The length of begin is n. + * * begin: start of slicing, which is a 1D tensor. The length of begin is n. * begin[i] specifies the start of slicing in the ith dimension. * * end: end of slicing, which is a 1D tensor. The length of end is n.
* end[i] specifies the end of slicing in the ith dimension. - * * strides: slicing stride, which is a 1D tensor. The length of strides is n. + * * strides: slicing stride, which is a 1D tensor. The length of strides is n. * strides[i] specifies the stride at which the tensor is sliced in the ith dimension. * * Parameters: * - * * beginMask: an integer used to mask begin. beginMask is represented in binary code. - * In case of binary(beginMask)[i]==1, for the ith dimension, elements are sliced from the first element - * at strides[i] until the end[i]-1 element. + * * beginMask: an integer used to mask begin. beginMask is represented in binary code. + * In case of binary(beginMask)[i]==1, for the ith dimension, + * elements are sliced from the first element at strides[i] until the end[i]-1 element. * * * endMask: an integer used to mask end. endMask is represented in binary code. - * In case of binary(endMask)[i]==1, elements are sliced from the element at the begin[i] position + * In case of binary(endMask)[i]==1, elements are sliced from the element at the begin[i] position * in the ith dimension until the tensor boundary at strides[i]. * - * * ellipsisMask: integer used to mask begin and end. ellipsisMask is represented in binary code. - * In case of binary(ellipsisMask)[i]==1, elements are sliced from the first element at strides[i] in the ith dimension + * * ellipsisMask: integer used to mask begin and end. + * ellipsisMask is represented in binary code. In case of binary(ellipsisMask)[i]==1, + * elements are sliced from the first element at strides[i] in the ith dimension * until the tensor boundary. Only one bit of binary(ellipsisMask) can be a non-zero value. * - * * newAxisMask: new dimension, which is an integer. newAxisMask is represented in binary code. - * In case of binary(newAxisMask)[i]==1, a new dimension whose length is 1 is inserted into the ith dimension. - * * shrinkAxisMask: shrinking dimension, which is an integer. * shrinkAxisMask is represented in binary code. - * In the case of binary(shrinkAxisMask)[i]==1, all elements in the ith dimension will be discarded, - * and the length of the ith dimension is shrunk to 1. + * * newAxisMask: new dimension, which is an integer. newAxisMask is represented in binary code. + * In case of binary(newAxisMask)[i]==1, + * a new dimension whose length is 1 is inserted into the ith dimension. + * * shrinkAxisMask: shrinking dimension, which is an integer. shrinkAxisMask is + * represented in binary code. In the case of binary(shrinkAxisMask)[i]==1, all elements in the + * ith dimension will be discarded, and the length of the ith dimension is shrunk to 1. * * Outputs: * - * * A tensor, with the same data type as input. The number of dimensions of the output tensor is rank(input[0])+1. + * * A tensor, with the same data type as input. + * The number of dimensions of the output tensor is rank(input[0])+1. */ OH_NN_OPS_STRIDED_SLICE = 37, @@ -1261,9 +1323,10 @@ typedef enum { * * Outputs: * - * * output: difference between the two tensors. The output shape is determined byinput1 and input2. - * If they have the same shape, the output tensor has the same shape as them. - * If they have different shapes, perform the broadcast operation on input1 and input2 and perform subtraction. + * * output: difference between the two tensors. The output shape is determined by input1 and + * input2. If they have the same shape, the output tensor has the same shape as them.
+ * If they have different shapes, + * perform the broadcast operation on input1 and input2 and perform subtraction. * TensorType of the output is the same as that of the input tensor with higher precision. */ OH_NN_OPS_SUB = 38, @@ -1277,7 +1340,8 @@ typedef enum { * * Outputs: * - * * output: hyperbolic tangent of the input. The TensorType and tensor shape are the same as those of the input. + * * output: hyperbolic tangent of the input. + * The TensorType and tensor shape are the same as those of the input. */ OH_NN_OPS_TANH = 39, @@ -1292,8 +1356,8 @@ typedef enum { * Outputs: * * An m-dimensional tensor whose TensorType is the same as that of the input. If input and * multiples have the same length, input and output have the same number of dimensions. - * If the length of multiples is greater than n, 1 is used to fill the input dimension, - * and then the input is copied in each dimension the specified times to obtain the m-dimensional tensor. + * If the length of multiples is greater than n, 1 is used to fill the input dimension, and + * then the input is copied in each dimension the specified times to obtain the m-dimensional tensor. */ OH_NN_OPS_TILE = 40, @@ -1303,23 +1367,26 @@ typedef enum { * Inputs: * * * input: n-dimensional tensor to be transposed. - * * permutation: The value is a 1D tensor whose length is the same as the number of dimensions of input 0. + * * permutation: The value is a 1D tensor whose length is the same as the number of + * dimensions of input 0. * * Outputs: * - * * output: n-dimensional tensor. TensorType of output 0 is the same as that of input 0, - * and the output shape is determined by the shape and permutation of input 0. + * * output: n-dimensional tensor. TensorType of output 0 is the same as that of + * input 0, and the output shape is determined by the shape and permutation of input 0. */ OH_NN_OPS_TRANSPOSE = 41, /** - * Calculates the average value in the specified dimension. If keepDims is set to false, the number of dimensions - * is reduced for the input; if keepDims is set to true, the number of dimensions is retained. + * Calculates the average value in the specified dimension. + * If keepDims is set to false, the number of dimensions is reduced for the input; + * if keepDims is set to true, the number of dimensions is retained. * * Inputs: * * * input: n-dimensional input tensor, where n is less than 8. - * * axis: dimension used to calculate the average value. The value is a 1D tensor. The value range of each element in axis is [–n, n). + * * axis: dimension used to calculate the average value. The value is a 1D tensor. + * The value range of each element in axis is [–n, n). * * Parameters: * * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. * * Outputs: * - * * output: m-dimensional output tensor whose data type is the same as that of the input. If keepDims is - * false, m==n. If keepDims is true, m<n. + * * output: m-dimensional output tensor whose data type is the same as that of the input. + * If keepDims is false, m==n. If keepDims is true, m<n. */ OH_NN_OPS_REDUCE_MEAN = 42, /** * Resizes the input tensor with the bilinear method based on the given parameters. * * Inputs: * - * * input: 4D input tensor. Each element in the input cannot be less than 0. The input layout must be [batchSize, height, width, channels]. + * * input: 4D input tensor. Each element in the input cannot be less than 0. + * The input layout must be [batchSize, height, width, channels]. * * Parameters: * * * newHeight: resized height of the 4D tensor. * * newWidth: resized width of the 4D tensor. - * * preserveAspectRatio: indicates whether to maintain the height/width ratio of input after resizing.
- * * coordinateTransformMode: coordinate transformation method used by the resize operation. The value is an int32 integer. - * Currently, the following methods are supported: - * * excludeOutside: an int64 floating point number. When its value is 1, the sampling weight of the part that + * * preserveAspectRatio: indicates whether to maintain the height/width + * ratio of input after resizing. + * * coordinateTransformMode: coordinate transformation method used by the resize operation. + * The value is an int32 integer. Currently, the following methods are supported: + * * excludeOutside: an int64 floating point number. When its value is 1, + * the sampling weight of the part that * exceeds the boundary of input is set to 0, and other weights are normalized. * * Outputs: * - * * output: n-dimensional tensor, with the same shape and data type as input. + * * output: n-dimensional tensor, with the same shape and data type as input. */ OH_NN_OPS_RESIZE_BILINEAR = 43, @@ -1360,7 +1430,8 @@ typedef enum { * * Inputs: * - * * input: n-dimensional tensor, where n is less than 8. Each element of the tensor cannot be less than 0. + * * input: n-dimensional tensor, where n is less than 8. + * Each element of the tensor cannot be less than 0. * * Outputs: * @@ -1378,7 +1449,8 @@ typedef enum { * * Outputs: * - * * output: tensor whose data type is the same as that of input and shape is determined by InputShape. + * * output: tensor whose data type is the same as that of input + * and shape is determined by InputShape. */ OH_NN_OPS_RESHAPE = 45, @@ -1387,11 +1459,11 @@ typedef enum { * * Inputs: * - * * input: n-dimensional tensor. If n is greater than or equal to 2, inputX must be [BatchSize, ..., Channels]. - * The second dimension is the number of channels. - * * weight: 1D tensor. The length of weight must be 1 or equal to the number of channels. If the length of weight is 1, - * all channels share the same weight. - * If the length of weight is equal to the number of channels, each channel exclusively has a weight. + * * input: n-dimensional tensor. If n is greater than or equal to 2, + * inputX must be [BatchSize, ..., Channels]. The second dimension is the number of channels. + * * weight: 1D tensor. The length of weight must be 1 or equal to the number of channels. + * If the length of weight is 1, all channels share the same weight. + * If the length of weight is equal to the number of channels, each channel exclusively has a weight. * If n is less than 2 for inputX, the weight length must be 1. * * Outputs: @@ -1414,7 +1486,8 @@ typedef enum { OH_NN_OPS_RELU = 47, /** - * Calculates the Relu6 activation value of the input, that is, calculate min(max(x, 0), 6) for each element x in the input. + * Calculates the Relu6 activation value of the input, that is, + * calculate min(max(x, 0), 6) for each element x in the input. * * Inputs: * @@ -1438,8 +1511,10 @@ typedef enum { * * Parameters: * - * * beginAxis is an NN_INT32 scalar that specifies the axis from which normalization starts. The value range is [1, rank(input)). - * * epsilon is a scalar of NN_FLOAT32. It is a tiny amount in the normalization formula. The common value is 1e-7. + * * beginAxis is an NN_INT32 scalar that specifies the axis from which normalization starts. + * The value range is [1, rank(input)). + * * epsilon is a scalar of NN_FLOAT32. It is a tiny amount in the normalization formula. + * The common value is 1e-7. 
* * Outputs: * * * output: n-dimensional tensor, with the same data type and shape as the input tensor. */ OH_NN_OPS_LAYER_NORM = 48, /** * Calculates the accumulated value for a tensor along the specified dimension. * * Inputs: * * * input: n-dimensional input tensor, where n is less than 8. - * * axis: dimension used to calculate the product. The value is a 1D tensor. The value range of each element in axis is [–n, n). + * * axis: dimension used to calculate the product. The value is a 1D tensor. + * The value range of each element in axis is [–n, n). * * Parameters: * * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. * * Outputs: * * * output: m-dimensional output tensor whose data type is the same as that of the input. If keepDims is * false, m==n. If keepDims is true, m<n. */ OH_NN_OPS_REDUCE_PROD = 50, /** - * Operates the logical OR in the specified dimension. If keepDims is set to false, - * the number of dimensions is reduced for the input; if keepDims is set to true, the number of dimensions is retained. + * Operates the logical OR in the specified dimension. If keepDims is set to false, + * the number of dimensions is reduced for the input; if keepDims is set to true, + * the number of dimensions is retained. * * Inputs: * * * An n-dimensional input tensor, where n is less than 8. - * * A 1D tensor specifying the dimension used to operate the logical OR. The value range of each element in axis is [–n, n). + * * A 1D tensor specifying the dimension used to operate the logical OR. + * The value range of each element in axis is [–n, n). * * Parameters: * * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. * * Outputs: - * * output: m-dimensional output tensor whose data type is the same as that of the input. + * * output: m-dimensional output tensor whose data type is the same as that of the input. * If keepDims is false, m==n. If keepDims is true, m<n. */ OH_NN_OPS_REDUCE_ALL = 51, /** * Converts the data type. * * Inputs: * * * input: n-dimensional tensor. * * Parameters: * * * src_t: data type of the input. * * dst_t: data type of the output. * * Outputs: * - * * output: n-dimensional tensor. The data type is determined by input2. + * * output: n-dimensional tensor. The data type is determined by input2. * The output shape is the same as the input shape. */ OH_NN_OPS_QUANT_DTYPE_CAST = 52, @@ -1550,7 +1628,7 @@ typedef enum { * * Parameters: * - * * axis: dimension to be added. The value of axis can be an integer or an array of integers. + * * axis: dimension to be added. The value of axis can be an integer or an array of integers. * The value range of the integer is [-n, n). * * Outputs: * @@ -1559,7 +1637,8 @@ typedef enum { OH_NN_OPS_UNSQUEEZE = 55, /** - * Gaussian error linear unit activation function. The int quantization input is not supported. output=0.5∗input∗(1+tanh(input/2)) + * Gaussian error linear unit activation function. The int quantization input is not supported. + * output=0.5∗input∗(1+tanh(input/2)) * * Inputs: * * An n-dimensional input tensor. * @@ -1573,10 +1652,10 @@ typedef enum { /** * @brief Enumerates the tensor data types. * - * Tensors are usually used to set the input, output, and operator parameters of a model. When a tensor is used * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}. - * When the tensor is used as an operator parameter, select an enumerated value other than {@link OH_NN_TENSOR} as the tensor type. - * Assume that the pad parameter of the {@link OH_NN_OPS_CONV2D} operator is being set. + * Tensors are usually used to set the input, output, and operator parameters of a model. When a tensor is used + * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}. + * When the tensor is used as an operator parameter, select an enumerated value other than {@link OH_NN_TENSOR} as the + * tensor type. Assume that the pad parameter of the {@link OH_NN_OPS_CONV2D} operator is being set. * You need to set the type attribute of the {@link OH_NN_Tensor} instance to {@link OH_NN_CONV2D_PAD}. * The settings of other operator parameters are similar. The enumerated values are named * in the format OH_NN_{Operator name}_{Attribute name}.
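As a concrete illustration of this naming convention, the following sketch (not part of the patch; the element type and pad layout are assumptions made for demonstration) declares the pad parameter tensor of a Conv2D operator using the compat OH_NN_Tensor structure defined later in this header:

    /* Minimal sketch: an operator-parameter tensor names its role via OH_NN_TensorType. */
    static const int32_t padDims[1] = {4};       /* pad assumed as [top, bottom, left, right] */
    OH_NN_Tensor padParam = {
        .dataType = OH_NN_INT64,                 /* assumed element type of the pad values */
        .dimensionCount = 1,
        .dimensions = padDims,
        .quantParam = NULL,                      /* operator parameters carry no quantization */
        .type = OH_NN_CONV2D_PAD                 /* OH_NN_{Operator name}_{Attribute name}, not OH_NN_TENSOR */
    };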
@@ -1588,109 +1667,146 @@ typedef enum { /** This enumerated value is used when the tensor is used as the input or output of a model (or operator). */ OH_NN_TENSOR = 0, - /** This enumerated value is used when the tensor is used as the activationType parameter of the Add operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the Add operator. */ OH_NN_ADD_ACTIVATIONTYPE = 1, - /** This enumerated value is used when the tensor is used as the kernel_size parameter of the AvgPool operator. */ + /** This enumerated value is used when the tensor is used as the kernel_size parameter + * of the AvgPool operator. */ OH_NN_AVG_POOL_KERNEL_SIZE = 2, - /** This enumerated value is used when the tensor is used as the stride parameter of the AvgPool operator. */ + /** This enumerated value is used when the tensor is used as the stride parameter + * of the AvgPool operator. */ OH_NN_AVG_POOL_STRIDE = 3, - /** This enumerated value is used when the tensor is used as the pad_mode parameter of the AvgPool operator. */ + /** This enumerated value is used when the tensor is used as the pad_mode parameter + * of the AvgPool operator. */ OH_NN_AVG_POOL_PAD_MODE = 4, /** This enumerated value is used when the tensor is used as the pad parameter of the AvgPool operator. */ OH_NN_AVG_POOL_PAD = 5, - /** This enumerated value is used when the tensor is used as the activation_type parameter of the AvgPool operator. */ + /** This enumerated value is used when the tensor is used as the activation_type parameter + * of the AvgPool operator. */ OH_NN_AVG_POOL_ACTIVATION_TYPE = 6, - /** This enumerated value is used when the tensor is used as the eosilon parameter of the BatchNorm operator. */ + /** This enumerated value is used when the tensor is used as the epsilon parameter + * of the BatchNorm operator. */ OH_NN_BATCH_NORM_EPSILON = 7, - /** This enumerated value is used when the tensor is used as the blockSize parameter of the BatchToSpaceND operator. */ + /** This enumerated value is used when the tensor is used as the blockSize parameter + * of the BatchToSpaceND operator. */ OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE = 8, - /** This enumerated value is used when the tensor is used as the crops parameter of the BatchToSpaceND operator. */ + /** This enumerated value is used when the tensor is used as the crops parameter + * of the BatchToSpaceND operator. */ OH_NN_BATCH_TO_SPACE_ND_CROPS = 9, /** This enumerated value is used when the tensor is used as the axis parameter of the Concat operator. */ OH_NN_CONCAT_AXIS = 10, - /** This enumerated value is used when the tensor is used as the strides parameter of the Conv2D operator. */ + /** This enumerated value is used when the tensor is used as the strides parameter + * of the Conv2D operator. */ OH_NN_CONV2D_STRIDES = 11, /** This enumerated value is used when the tensor is used as the pad parameter of the Conv2D operator. */ OH_NN_CONV2D_PAD = 12, - /** This enumerated value is used when the tensor is used as the dilation parameter of the Conv2D operator. */ + /** This enumerated value is used when the tensor is used as the dilation parameter + * of the Conv2D operator. */ OH_NN_CONV2D_DILATION = 13, - /** This enumerated value is used when the tensor is used as the padMode parameter of the Conv2D operator. */ + /** This enumerated value is used when the tensor is used as the padMode parameter + * of the Conv2D operator.
*/ OH_NN_CONV2D_PAD_MODE = 14, - /** This enumerated value is used when the tensor is used as the activationType parameter of the Conv2D operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the Conv2D operator. */ OH_NN_CONV2D_ACTIVATION_TYPE = 15, /** This enumerated value is used when the tensor is used as the group parameter of the Conv2D operator. */ OH_NN_CONV2D_GROUP = 16, - /** This enumerated value is used when the tensor is used as the strides parameter of the Conv2DTranspose operator. */ + /** This enumerated value is used when the tensor is used as the strides parameter + * of the Conv2DTranspose operator. */ OH_NN_CONV2D_TRANSPOSE_STRIDES = 17, - /** This enumerated value is used when the tensor is used as the pad parameter of the Conv2DTranspose operator. */ + /** This enumerated value is used when the tensor is used as the pad parameter + * of the Conv2DTranspose operator. */ OH_NN_CONV2D_TRANSPOSE_PAD = 18, - /** This enumerated value is used when the tensor is used as the dilation parameter of the Conv2DTranspose operator. */ + /** This enumerated value is used when the tensor is used as the dilation parameter + * of the Conv2DTranspose operator. */ OH_NN_CONV2D_TRANSPOSE_DILATION = 19, - /** This enumerated value is used when the tensor is used as the outputPaddings parameter of the Conv2DTranspose operator. */ + /** This enumerated value is used when the tensor is used as the outputPaddings parameter + * of the Conv2DTranspose operator. */ OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS = 20, - /** This enumerated value is used when the tensor is used as the padMode parameter of the Conv2DTranspose operator. */ + /** This enumerated value is used when the tensor is used as the padMode parameter + * of the Conv2DTranspose operator. */ OH_NN_CONV2D_TRANSPOSE_PAD_MODE = 21, - /** This enumerated value is used when the tensor is used as the activationType parameter of the Conv2DTranspose operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the Conv2DTranspose operator. */ OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE = 22, - /** This enumerated value is used when the tensor is used as the group parameter of the Conv2DTranspose operator. */ + /** This enumerated value is used when the tensor is used as the group parameter + * of the Conv2DTranspose operator. */ OH_NN_CONV2D_TRANSPOSE_GROUP = 23, - /** This enumerated value is used when the tensor is used as the strides parameter of the DepthwiseConv2dNative operator. */ + /** This enumerated value is used when the tensor is used as the strides parameter + * of the DepthwiseConv2dNative operator. */ OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES = 24, - /** This enumerated value is used when the tensor is used as the pad parameter of the DepthwiseConv2dNative operator. */ + /** This enumerated value is used when the tensor is used as the pad parameter + * of the DepthwiseConv2dNative operator. */ OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD = 25, - /** This enumerated value is used when the tensor is used as the dilation parameter of the DepthwiseConv2dNative operator. */ + /** This enumerated value is used when the tensor is used as the dilation parameter + * of the DepthwiseConv2dNative operator. */ OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION = 26, - /** This enumerated value is used when the tensor is used as the padMode parameter of the DepthwiseConv2dNative operator. 
*/ + /** This enumerated value is used when the tensor is used as the padMode parameter + * of the DepthwiseConv2dNative operator. */ OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE = 27, - /** This enumerated value is used when the tensor is used as the activationType parameter of the DepthwiseConv2dNative operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the DepthwiseConv2dNative operator. */ OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE = 28, - /** This enumerated value is used when the tensor is used as the activationType parameter of the Div operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the Div operator. */ OH_NN_DIV_ACTIVATIONTYPE = 29, /** This enumerated value is used when the tensor is used as the mode parameter of the Eltwise operator. */ OH_NN_ELTWISE_MODE = 30, - /** This enumerated value is used when the tensor is used as the axis parameter of the FullConnection operator. */ + /** This enumerated value is used when the tensor is used as the axis parameter + * of the FullConnection operator. */ OH_NN_FULL_CONNECTION_AXIS = 31, - /** This enumerated value is used when the tensor is used as the activationType parameter of the FullConnection operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the FullConnection operator. */ OH_NN_FULL_CONNECTION_ACTIVATIONTYPE = 32, - /** This enumerated value is used when the tensor is used as the transposeA parameter of the Matmul operator. */ + /** This enumerated value is used when the tensor is used as the transposeA parameter + * of the Matmul operator. */ OH_NN_MATMUL_TRANSPOSE_A = 33, - /** This enumerated value is used when the tensor is used as the transposeB parameter of the Matmul operator. */ + /** This enumerated value is used when the tensor is used as the transposeB parameter + * of the Matmul operator. */ OH_NN_MATMUL_TRANSPOSE_B = 34, - /** This enumerated value is used when the tensor is used as the activationType parameter of the Matmul operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the Matmul operator. */ OH_NN_MATMUL_ACTIVATION_TYPE = 35, - /** This enumerated value is used when the tensor is used as the kernel_size parameter of the MaxPool operator. */ + /** This enumerated value is used when the tensor is used as the kernel_size parameter + * of the MaxPool operator. */ OH_NN_MAX_POOL_KERNEL_SIZE = 36, - /** This enumerated value is used when the tensor is used as the stride parameter of the MaxPool operator. */ + /** This enumerated value is used when the tensor is used as the stride parameter + * of the MaxPool operator. */ OH_NN_MAX_POOL_STRIDE = 37, - /** This enumerated value is used when the tensor is used as the pad_mode parameter of the MaxPool operator. */ + /** This enumerated value is used when the tensor is used as the pad_mode parameter + * of the MaxPool operator. */ OH_NN_MAX_POOL_PAD_MODE = 38, /** This enumerated value is used when the tensor is used as the pad parameter of the MaxPool operator. */ OH_NN_MAX_POOL_PAD = 39, - /** This enumerated value is used when the tensor is used as the activation_type parameter of the MaxPool operator. */ + /** This enumerated value is used when the tensor is used as the activation_type parameter + * of the MaxPool operator. 
*/ OH_NN_MAX_POOL_ACTIVATION_TYPE = 40, - /** This enumerated value is used when the tensor is used as the activationType parameter of the Mul operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the Mul operator. */ OH_NN_MUL_ACTIVATION_TYPE = 41, /** This enumerated value is used when the tensor is used as the axis parameter of the OneHot operator. */ OH_NN_ONE_HOT_AXIS = 42, - /** This enumerated value is used when the tensor is used as the constant_value parameter of the Pad operator. */ + /** This enumerated value is used when the tensor is used as the constant_value parameter + * of the Pad operator. */ OH_NN_PAD_CONSTANT_VALUE = 43, - /** This enumerated value is used when the tensor is used as the activationType parameter of the Scale operator. */ + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the Scale operator. */ OH_NN_SCALE_ACTIVATIONTYPE = 44, /** This enumerated value is used when the tensor is used as the axis parameter of the Scale operator. */ OH_NN_SCALE_AXIS = 45, @@ -1698,16 +1814,20 @@ typedef enum { /** This enumerated value is used when the tensor is used as the axis parameter of the Softmax operator. */ OH_NN_SOFTMAX_AXIS = 46, - /** This enumerated value is used when the tensor is used as the BlockShape parameter of the SpaceToBatchND operator. */ + /** This enumerated value is used when the tensor is used as the BlockShape parameter + * of the SpaceToBatchND operator. */ OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE = 47, - /** This enumerated value is used when the tensor is used as the Paddings parameter of the SpaceToBatchND operator. */ + /** This enumerated value is used when the tensor is used as the Paddings parameter + * of the SpaceToBatchND operator. */ OH_NN_SPACE_TO_BATCH_ND_PADDINGS = 48, /** This enumerated value is used when the tensor is used as the Axis parameter of the Split operator. */ OH_NN_SPLIT_AXIS = 49, - /** This enumerated value is used when the tensor is used as the OutputNum parameter of the Split operator. */ + /** This enumerated value is used when the tensor is used as the OutputNum parameter + * of the Split operator. */ OH_NN_SPLIT_OUTPUT_NUM = 50, - /** This enumerated value is used when the tensor is used as the SizeSplits parameter of the Split operator. */ + /** This enumerated value is used when the tensor is used as the SizeSplits parameter + * of the Split operator. */ OH_NN_SPLIT_SIZE_SPLITS = 51, /** This enumerated value is used when the tensor is used as the Axis parameter of the Squeeze operator. */ @@ -1716,63 +1836,87 @@ typedef enum { /** This enumerated value is used when the tensor is used as the Axis parameter of the Stack operator. */ OH_NN_STACK_AXIS = 53, - /** This enumerated value is used when the tensor is used as the BeginMask parameter of the StridedSlice operator. */ + /** This enumerated value is used when the tensor is used as the BeginMask parameter + * of the StridedSlice operator. */ OH_NN_STRIDED_SLICE_BEGIN_MASK = 54, - /** This enumerated value is used when the tensor is used as the EndMask parameter of the StridedSlice operator. */ + /** This enumerated value is used when the tensor is used as the EndMask parameter + * of the StridedSlice operator. */ OH_NN_STRIDED_SLICE_END_MASK = 55, - /** This enumerated value is used when the tensor is used as the EllipsisMask parameter of the StridedSlice operator. 
*/ + /** This enumerated value is used when the tensor is used as the EllipsisMask parameter + * of the StridedSlice operator. */ OH_NN_STRIDED_SLICE_ELLIPSIS_MASK = 56, - /** This enumerated value is used when the tensor is used as the NewAxisMask parameter of the StridedSlice operator. */ + /** This enumerated value is used when the tensor is used as the NewAxisMask parameter + * of the StridedSlice operator. */ OH_NN_STRIDED_SLICE_NEW_AXIS_MASK = 57, - /** This enumerated value is used when the tensor is used as the ShrinkAxisMask parameter of the StridedSlice operator. */ + /** This enumerated value is used when the tensor is used as the ShrinkAxisMask parameter + * of the StridedSlice operator. */ OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK = 58, - /** This enumerated value is used when the tensor is used as the ActivationType parameter of the Sub operator. */ + /** This enumerated value is used when the tensor is used as the ActivationType parameter + * of the Sub operator. */ OH_NN_SUB_ACTIVATIONTYPE = 59, - /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceMean operator. */ + /** This enumerated value is used when the tensor is used as the keep_dims parameter + * of the ReduceMean operator. */ OH_NN_REDUCE_MEAN_KEEP_DIMS = 60, - /** This enumerated value is used when the tensor is used as the new_height parameter of the ResizeBilinear operator. */ + /** This enumerated value is used when the tensor is used as the new_height parameter + * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_NEW_HEIGHT = 61, - /** This enumerated value is used when the tensor is used as the new_width parameter of the ResizeBilinear operator. */ + /** This enumerated value is used when the tensor is used as the new_width parameter + * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_NEW_WIDTH = 62, - /** This enumerated value is used when the tensor is used as the preserve_aspect_ratio parameter of the ResizeBilinear operator. */ + /** This enumerated value is used when the tensor is used as the preserve_aspect_ratio parameter + * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO = 63, - /** This enumerated value is used when the tensor is used as the coordinate_transform_mode parameter of the ResizeBilinear operator. */ + /** This enumerated value is used when the tensor is used as the coordinate_transform_mode parameter + * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE = 64, - /** This enumerated value is used when the tensor is used as the exclude_outside parameter of the ResizeBilinear operator. */ + /** This enumerated value is used when the tensor is used as the exclude_outside parameter + * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE = 65, - /** This enumerated value is used when the tensor is used as the beginNormAxis parameter of the LayerNorm operator. */ + /** This enumerated value is used when the tensor is used as the beginNormAxis parameter + * of the LayerNorm operator. */ OH_NN_LAYER_NORM_BEGIN_NORM_AXIS = 66, - /** This enumerated value is used when the tensor is used as the epsilon parameter of the LayerNorm operator. */ + /** This enumerated value is used when the tensor is used as the epsilon parameter + * of the LayerNorm operator. */ OH_NN_LAYER_NORM_EPSILON = 67, - /** This enumerated value is used when the tensor is used as the beginParamsAxis parameter of the LayerNorm operator. 
*/ + /** This enumerated value is used when the tensor is used as the beginParamsAxis parameter + * of the LayerNorm operator. */ OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS = 68, - /** This enumerated value is used when the tensor is used as the elementwiseAffine parameter of the LayerNorm operator. */ + /** This enumerated value is used when the tensor is used as the elementwiseAffine parameter + * of the LayerNorm operator. */ OH_NN_LAYER_NORM_ELEMENTWISE_AFFINE = 69, - /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceProd operator. */ + /** This enumerated value is used when the tensor is used as the keep_dims parameter + * of the ReduceProd operator. */ OH_NN_REDUCE_PROD_KEEP_DIMS = 70, - /** This enumerated value is used when the tensor is used as the keep_dims parameter of the ReduceAll operator. */ + /** This enumerated value is used when the tensor is used as the keep_dims parameter + * of the ReduceAll operator. */ OH_NN_REDUCE_ALL_KEEP_DIMS = 71, - /** This enumerated value is used when the tensor is used as the src_t parameter of the QuantDTypeCast operator. */ + /** This enumerated value is used when the tensor is used as the src_t parameter + * of the QuantDTypeCast operator. */ OH_NN_QUANT_DTYPE_CAST_SRC_T = 72, - /** This enumerated value is used when the tensor is used as the dst_t parameter of the QuantDTypeCast operator. */ + /** This enumerated value is used when the tensor is used as the dst_t parameter + * of the QuantDTypeCast operator. */ OH_NN_QUANT_DTYPE_CAST_DST_T = 73, - /** This enumerated value is used when the tensor is used as the Sorted parameter of the Topk operator. */ + /** This enumerated value is used when the tensor is used as the Sorted parameter + * of the Topk operator. */ OH_NN_TOP_K_SORTED = 74, - /** This enumerated value is used when the tensor is used as the axis parameter of the ArgMax operator. */ + /** This enumerated value is used when the tensor is used as the axis parameter + * of the ArgMax operator. */ OH_NN_ARG_MAX_AXIS = 75, - /** This enumerated value is used when the tensor is used as the keepDims parameter of the ArgMax operator. */ + /** This enumerated value is used when the tensor is used as the keepDims parameter + * of the ArgMax operator. */ OH_NN_ARG_MAX_KEEPDIMS = 76, - /** This enumerated value is used when the tensor is used as the Axis parameter of the Unsqueeze operator. */ + /** This enumerated value is used when the tensor is used as the Axis parameter + * of the Unsqueeze operator. */ OH_NN_UNSQUEEZE_AXIS = 77, } OH_NN_TensorType; @@ -1792,16 +1936,18 @@ typedef struct OH_NN_UInt32Array { /** * @brief Quantization information. * - * In quantization scenarios, the 32-bit floating-point data type is quantized into the fixed-point data type according to the following formula: + * In quantization scenarios, the 32-bit floating-point data type is quantized into + * the fixed-point data type according to the following formula: \f[ q = clamp(round(\frac{r}{s}+z), q_{min}, q_{max}) \f] - * s and z are quantization parameters, which are stored by scale and zeroPoint in {@link OH_NN_QuantParam}. - * r is a floating point number, q is the quantization result, q_min is the lower bound of the quantization result, and + * s and z are quantization parameters, which are stored by scale and zeroPoint + * in {@link OH_NN_QuantParam}. 
+ * r is a floating point number, q is the quantization result, q_min is the lower bound of the quantization result, and * q_max is the upper bound of the quantization result. The calculation method is as follows: - * + * \f[ - \text{clamp}(x,min,max) = \begin{cases} q_{min} = -(1 << (numBits - 1)) \\ q_{max} = (1 << (numBits - 1)) \\ \end{cases} \f] * The clamp function is defined as follows: \f[ - \text{clamp}(x,min,max) = + \text{clamp}(x,min,max) = \begin{cases} \text{max} & \text{ if } x > \text{ max } \\ \text{min} & \text{ if } x < \text{ min } \\ x & \text{ otherwise } \\ \end{cases} \f] - * + * * @deprecated since 11 * @useinstead {@link NN_QuantParam} * @since 9 * @version 1.0 */ typedef struct OH_NN_QuantParam { - /** Specifies the length of the numBits, scale, and zeroPoint arrays. In the per-layer quantization scenario, - * quantCount is usually set to 1. That is, all channels of a tensor share a set of quantization parameters. - * In the per-channel quantization scenario, quantCount is usually the same as the number of tensor channels, - * and each channel uses its own quantization parameters. + /** Specifies the length of the numBits, scale, and zeroPoint arrays. In the per-layer quantization scenario, + * quantCount is usually set to 1. + * That is, all channels of a tensor share a set of quantization parameters. + * In the per-channel quantization scenario, quantCount is usually the same as the number of tensor + * channels, and each channel uses its own quantization parameters. */ uint32_t quantCount; /** Number of quantization bits */ @@ -1859,7 +2006,8 @@ typedef struct OH_NN_Tensor { const OH_NN_QuantParam *quantParam; /** Specifies the tensor type. The value of type is related to the tensor usage. * When the tensor is used as the input or output of the model, set type to {@link OH_NN_TENSOR}. - * When a tensor is used as an operator parameter, select any enumerated value except {@link OH_NN_TENSOR} from {@link OH_NN_TensorType}. + * When a tensor is used as an operator parameter, select any enumerated value except {@link OH_NN_TENSOR} + * from {@link OH_NN_TensorType}. */ OH_NN_TensorType type; } OH_NN_Tensor; -- Gitee From 7c61c178fb39fc25bdff05ad3b0fe2bea6cc093e Mon Sep 17 00:00:00 2001 From: w30052974 Date: Tue, 2 Jan 2024 19:57:18 +0800 Subject: [PATCH 2/5] change 1.0 Signed-off-by: w30052974 --- .../neural_network_runtime.h | 2 +- .../neural_network_runtime_type.h | 36 +++++++++---------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h index 2d7cdf2..674058b 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h @@ -191,7 +191,7 @@ OH_NN_ReturnCode OH_NNModel_AddTensorToModel(OH_NNModel *model, const NN_TensorD * @param index Index of a tensor. * @param dataBuffer Pointer to real data. * @param length Length of the data buffer. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. * If the operation fails, an error code is returned. For details about the error codes, * see {@link OH_NN_ReturnCode}.
* @since 9 diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h index fed062b..a320790 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h @@ -435,7 +435,7 @@ typedef enum { * The value is an array [height_block, width_block]. * * crops: elements truncated from the spatial dimension of the output. The value is a 2D array * [[crop0_start, crop0_end], [crop1_start, crop1_end]] with the shape of (2, 2). - * + * * * Outputs: * @@ -539,10 +539,10 @@ typedef enum { * * input: input tensor. * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. * The value of inChannel must be exactly divided by the value of group. - * - * * bias: bias of the convolution. It is an array with a length of [outChannel]. - * In quantization scenarios, the bias parameter does not require quantization parameters. - * The quantization version requires data input of the OH_NN_INT32 type. + * + * * bias: bias of the convolution. It is an array with a length of [outChannel]. + * In quantization scenarios, the bias parameter does not require quantization parameters. + * The quantization version requires data input of the OH_NN_INT32 type. * The actual quantization parameters are determined by input and weight. * * Parameters: @@ -652,11 +652,11 @@ typedef enum { * Inputs: * * * input: input tensor. - * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format. + * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format. * outChannel is equal to channelMultiplier multiplied by inChannel. - * * bias: bias of the convolution. It is an array with a length of [outChannel]. - * In quantization scenarios, the bias parameter does not require quantization parameters. - * The quantization version requires data input of the OH_NN_INT32 type. + * * bias: bias of the convolution. It is an array with a length of [outChannel]. + * In quantization scenarios, the bias parameter does not require quantization parameters. + * The quantization version requires data input of the OH_NN_INT32 type. * The actual quantization parameters are determined by input and weight. * * Parameters: @@ -682,10 +682,10 @@ typedef enum { * Inputs: * * * input: input tensor. - * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format. + * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format. * outChannel is equal to channelMultiplier multiplied by inChannel. - * * bias: bias of the convolution. It is an array with a length of [outChannel]. - * In quantization scenarios, the bias parameter does not require quantization parameters. + * * bias: bias of the convolution. It is an array with a length of [outChannel]. + * In quantization scenarios, the bias parameter does not require quantization parameters. * The quantization version requires data input of the OH_NN_INT32 type. * The actual quantization parameters are determined by input and weight. * @@ -805,7 +805,7 @@ typedef enum { * * weight: weight tensor for a full connection. * * bias: full-connection bias. In quantization scenarios, no quantized parameter is required * for this parameter. If quantization is required, the data must be of the OH_NN_INT32 type. - * The actual quantization parameters are determined by input and weight. 
+ * The actual quantization parameters are determined by input and weight. * * Parameters: * @@ -891,7 +891,7 @@ typedef enum { /** * Calculates the maximum of input1 and input2 element-wise. The inputs of input1 and input2 - * comply with the implicit type conversion rules to make the data types consistent. + * comply with the implicit type conversion rules to make the data types consistent. * * The inputs must be two tensors or one tensor and one scalar. * When the inputs are two tensors, their data types cannot be both NN_BOOL. * Their shapes can be broadcast to the same size. @@ -929,7 +929,7 @@ typedef enum { * or 1 (valid). The nearest neighbor value is used for padding. * 0 (same): The height and width of the output are the same as those of the input. The total padding * quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and - * right if possible. Otherwise, the last additional padding will be completed from the bottom and right. + * right if possible. Otherwise, the last additional padding will be completed from the bottom and right. * 1 (valid): The possible maximum height and width of the output will be returned in case of * no padding. The excessive pixels will be discarded. * * activationType is an integer constant which is contained in FuseType. @@ -1545,7 +1545,7 @@ typedef enum { OH_NN_OPS_REDUCE_PROD = 50, /** - * Operates the logical OR in the specified dimension. If keepDims is set to false, + * Operates the logical OR in the specified dimension. If keepDims is set to false, * the number of dimensions is reduced for the input; if keepDims is set to true, * the number of dimensions is retained. * @@ -1653,9 +1653,9 @@ typedef enum { * @brief Enumerates the tensor data types. * * Tensors are usually used to set the input, output, and operator parameters of a model. When a tensor is used - * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}. + * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}. * When the tensor is used as an operator parameter, select an enumerated value other than {@link OH_NN_TENSOR} as the - * tensor type. Assume that the pad parameter of the {@link OH_NN_OPS_CONV2D} operator is being set. + * tensor type. Assume that the pad parameter of the {@link OH_NN_OPS_CONV2D} operator is being set. * You need to set the type attribute of the {@link OH_NN_Tensor} instance to {@link OH_NN_CONV2D_PAD}. * The settings of other operator parameters are similar. The enumerated values are named * in the format OH_NN_{Operator name}_{Attribute name}. 
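The quantization formula documented above for {@link OH_NN_QuantParam} can be made concrete with a short sketch. The function below is illustrative only (the function and variable names are assumptions, not part of this header), and it follows the header's stated bounds literally, including q_max = (1 << (numBits - 1)):

    #include <math.h>
    #include <stdint.h>

    /* Sketch of q = clamp(round(r / s + z), q_min, q_max) as documented above. */
    static int32_t QuantizeElement(double r, double scale, int32_t zeroPoint, uint32_t numBits)
    {
        const int32_t qMin = -(1 << (numBits - 1));
        const int32_t qMax = (1 << (numBits - 1));
        int32_t q = (int32_t)llround(r / scale + (double)zeroPoint);
        if (q > qMax) { return qMax; }   /* clamp as defined in the header */
        if (q < qMin) { return qMin; }
        return q;
    }

For example, with scale = 0.004, zeroPoint = 0 and numBits = 8, an input r = 0.1 quantizes to round(25.0) = 25, which already lies inside [-128, 128], so no clamping occurs.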
-- Gitee From a60c4fa890a67912b7cb2032cf599b674051d00f Mon Sep 17 00:00:00 2001 From: gWX1231951 Date: Wed, 3 Jan 2024 09:25:17 +0800 Subject: [PATCH 3/5] =?UTF-8?q?=E5=91=8A=E8=AD=A6=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: gWX1231951 --- frameworks/native/nncompiler.cpp | 2 +- frameworks/native/nnexecutor.cpp | 2 +- frameworks/native/quant_param.cpp | 3 +- frameworks/native/tensor_desc.h | 4 +- .../neural_network_core.h | 346 ++++++++++-------- 5 files changed, 199 insertions(+), 158 deletions(-) diff --git a/frameworks/native/nncompiler.cpp b/frameworks/native/nncompiler.cpp index 2df5226..54a697d 100644 --- a/frameworks/native/nncompiler.cpp +++ b/frameworks/native/nncompiler.cpp @@ -255,7 +255,7 @@ bool NNCompiler::IsBuild() const } OH_NN_ReturnCode NNCompiler::IsSupportedModel(const std::shared_ptr<mindspore::lite::LiteGraph>& liteGraph, - bool& isSupportedModel) const + bool& isSupportedModel) const { std::vector<bool> supportedList; OH_NN_ReturnCode ret = m_device->GetSupportedOperation(liteGraph, supportedList); diff --git a/frameworks/native/nnexecutor.cpp b/frameworks/native/nnexecutor.cpp index 2ac49b0..08b6627 100644 --- a/frameworks/native/nnexecutor.cpp +++ b/frameworks/native/nnexecutor.cpp @@ -567,7 +567,7 @@ OH_NN_ReturnCode NNExecutor::SetInput(uint32_t index, const OH_NN_Tensor& nnTens } OH_NN_ReturnCode NNExecutor::SetInputFromMemory( - uint32_t index, const OH_NN_Tensor& nnTensor,const OH_NN_Memory& memory) + uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory) { auto nnRet = CheckInputDimRanges(index, nnTensor); if (nnRet == OH_NN_OPERATION_FORBIDDEN) { diff --git a/frameworks/native/quant_param.cpp b/frameworks/native/quant_param.cpp index f5a48e5..b873194 100644 --- a/frameworks/native/quant_param.cpp +++ b/frameworks/native/quant_param.cpp @@ -52,7 +52,8 @@ std::vector<uint32_t> QuantParams::GetNumBits() const OH_NN_ReturnCode QuantParams::CopyToCompat(std::vector<QuantParam>& compatQuantParams) const { if ((m_scales.size() != m_zeroPoints.size()) || (m_zeroPoints.size() != m_numBits.size())) { - LOGE("CopyToCompat failed, the size of scales(%zu), zeroPoints(%zu) and numBits(%zu) are not equal.", m_scales.size(), m_zeroPoints.size(), m_numBits.size()); + LOGE("CopyToCompat failed, the size of scales(%zu), zeroPoints(%zu) and numBits(%zu) are not equal.", + m_scales.size(), m_zeroPoints.size(), m_numBits.size()); return OH_NN_INVALID_PARAMETER; } diff --git a/frameworks/native/tensor_desc.h b/frameworks/native/tensor_desc.h index 6286797..08981a3 100644 --- a/frameworks/native/tensor_desc.h +++ b/frameworks/native/tensor_desc.h @@ -22,7 +22,7 @@ namespace OHOS { namespace NeuralNetworkRuntime { -class TensorDesc{ +class TensorDesc { public: TensorDesc() = default; ~TensorDesc() = default; @@ -42,13 +42,11 @@ public: OH_NN_ReturnCode SetName(const char* name); OH_NN_ReturnCode GetName(const char** name) const; - private: OH_NN_DataType m_dataType {OH_NN_UNKNOWN}; OH_NN_Format m_format {OH_NN_FORMAT_NONE}; std::vector<int32_t> m_shape; std::string m_name; - }; } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_core.h b/interfaces/kits/c/neural_network_runtime/neural_network_core.h index bfa9cb3..be687ed 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_core.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_core.h @@ -77,13 +77,14 @@ OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model); * * This method
conflicts with the way of passing an online built model or an offline model file buffer, * and you have to choose only one of the three construction methods. \n - * - * Offline model is a type of model that is offline compiled by the model converter provided by a device vendor. - * So that the offline model can only be used on the specified device, but the compilation time of offline model is usually - * much less than {@link OH_NNModel}. \n - * - * You should perform the offline compilation during your development and deploy the offline model in your app package. \n - * + * + * Offline model is a type of model that is offline compiled by the model converter provided by a device vendor. + * As a result, the offline model can only be used on the specified device, but the + * compilation time of an offline model is usually much less than that of {@link OH_NNModel}. \n + * + * You should perform the offline compilation during your development + * and deploy the offline model in your app package. \n + * * @param modelPath Offline model file path. * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create. * @since 11 * @version 1.0 */ OH_NNCompilation *OH_NNCompilation_ConstructWithOfflineModelFile(const char *modelPath); /** * @brief Creates a compilation instance based on an offline model file buffer. * - * This method conflicts with the way of passing an online built model or an offline model file path, + * This method conflicts with the way of passing an online built model or an offline model file path, * and you have to choose only one of the three construction methods. \n - * - * Note that the returned {@link OH_NNCompilation} instance only saves the modelBuffer pointer inside, instead of - * copying its data. You should not release modelBuffer before the {@link OH_NNCompilation} instance is destroied. \n + * + * Note that the returned {@link OH_NNCompilation} instance only saves the + * modelBuffer pointer inside, instead of copying its data. + * You should not release modelBuffer before the {@link OH_NNCompilation} instance is destroyed. \n * * @param modelBuffer Offline model file buffer. * @param modelSize Offline model buffer size. * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create. * @since 11 * @version 1.0 */ OH_NNCompilation *OH_NNCompilation_ConstructWithOfflineModelBuffer(const void *modelBuffer, size_t modelSize); /** * @brief Creates an empty compilation instance for restoration from cache later. * * See {@link OH_NNCompilation_SetCache} for the description of cache.\n * * The restoration time from the cache is less than compilation with {@link OH_NNModel}.\n * * You should call {@link OH_NNCompilation_SetCache} or {@link OH_NNCompilation_ImportCacheFromBuffer} first, * and then call {@link OH_NNCompilation_Build} to complete the restoration.\n * * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create. * @since 11 * @version 1.0 */ OH_NNCompilation *OH_NNCompilation_ConstructForCache(); /** * @brief Exports the cache to a given buffer. * * See {@link OH_NNCompilation_SetCache} for the description of cache.\n * * Note that the cache is the result of compilation building {@link OH_NNCompilation_Build}, * so that this method must be called after {@link OH_NNCompilation_Build}.\n * * @param compilation Pointer to the {@link OH_NNCompilation} instance. * @param buffer Pointer to the given buffer. * @param length Buffer length. * @param modelSize Byte size of the model cache. * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned.
For details about the error codes, - * see {@link OH_NN_ReturnCode}. + * If the operation fails, an error code is returned. + * For details about the error codes, see {@link OH_NN_ReturnCode}. * @since 11 * @version 1.0 */ OH_NN_ReturnCode OH_NNCompilation_ExportCacheToBuffer(OH_NNCompilation *compilation, const void *buffer, size_t length, size_t *modelSize); /** * @brief Imports the cache from a given buffer. * * See {@link OH_NNCompilation_SetCache} for the description of cache.\n * * {@link OH_NNCompilation_Build} should be called to complete the restoration after {@link OH_NNCompilation_ImportCacheFromBuffer} is called.\n * * Note that compilation only saves the buffer pointer inside, instead of copying its data. * You should not release buffer before compilation is destroied.\n * * @param compilation Pointer to the {@link OH_NNCompilation} instance. * @param buffer Pointer to the given buffer. * @param modelSize Byte size of the model cache. * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. + * If the operation fails, an error code is returned. + * For details about the error codes, see {@link OH_NN_ReturnCode}. * @since 11 * @version 1.0 */ OH_NN_ReturnCode OH_NNCompilation_ImportCacheFromBuffer(OH_NNCompilation *compilation, const void *buffer, size_t modelSize); /** * @brief Adds an extension config for a custom hardware attribute. * - * Some devices have their own specific attributes which have not been opened in NNRt. This method provides an additional way for you - * to set these custom hardware attributes of the device. You should query their names and values from the device - * vendor's documents, and add them into compilation instance one by one. These attributes will be passed directly to device - * driver, and this method will return error code if the driver cannot parse them. \n - * + * Some devices have their own specific attributes which have not been opened in NNRt. + * This method provides an additional way for you to set these custom hardware attributes of the device. + * You should query their names and values from the device vendor's documents, + * and add them into the compilation instance one by one. These attributes will be passed directly to the device driver, + * and this method will return an error code if the driver cannot parse them. \n + * * After {@link OH_NNCompilation_Build} is called, the configName and configValue can be released. \n * * @param compilation Pointer to the {@link OH_NNCompilation} instance. * @param configName Config name. * @param configValue A byte buffer saving the config value. * @param configValueSize Byte size of the config value. * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned. + * For details about the error codes, see {@link OH_NN_ReturnCode}. * @since 11 * @version 1.0 */ OH_NN_ReturnCode OH_NNCompilation_AddExtensionConfig(OH_NNCompilation *compilation, const char *configName, const void *configValue, const size_t configValueSize); /** * @brief Specifies the device for model compilation and computing. * * In the compilation phase, you need to specify the device for model compilation and computing. * Call {@link OH_NNDevice_GetAllDevicesID} to obtain available device IDs. * Call {@link OH_NNDevice_GetType} and {@link OH_NNDevice_GetName} to obtain device information * and pass target device ID to this method for setting. \n * * @param compilation Pointer to the {@link OH_NNCompilation} instance. * @param deviceID Device id. If it is 0, the first device in the current device list will be used by default. - * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. - * If the operation fails, an error code is returned. + * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. + * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
@@ -215,18 +220,20 @@ OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_
 /**
 * @brief Sets the cache directory and version of the compiled model.
 *
- * On the device that supports caching, a model can be saved as a cache file after being compiled on the device driver.
- * The model can be directly read from the cache file in the next compilation, saving recompilation time.
+ * On the device that supports caching, a model can be saved as a cache file after being compiled on the device driver.
+ * The model can be directly read from the cache file in the next compilation, saving recompilation time.
 * This method performs different operations based on the passed cache directory and version: \n
 *
 * - No file exists in the cache directory:
 * Caches the compiled model to the directory and sets the cache version to version. \n
 *
 * - A complete cache file exists in the cache directory, and its version is version:
- * Reads the cache file in the path and passes the data to the underlying device for conversion into executable model instances. \n
+ * Reads the cache file in the path and passes the data to the underlying
+ * device for conversion into executable model instances. \n
 *
 * - A complete cache file exists in the cache directory, and its version is earlier than version:
- * When model compilation is complete on the underlying device, overwrites the cache file and changes the version number to version. \n
+ * When model compilation is complete on the underlying device,
+ * overwrites the cache file and changes the version number to version. \n
 *
 * - A complete cache file exists in the cache directory, and its version is later than version:
 * Returns the {@link OH_NN_INVALID_PARAMETER} error code without reading the cache file. \n
@@ -238,11 +245,12 @@ OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_
 * Returns the {@link OH_NN_INVALID_PATH} error code. \n
 *
 * @param compilation Pointer to the {@link OH_NNCompilation} instance.
- * @param cachePath Directory for storing model cache files. This method creates directories for different devices in the cachePath directory.
- * You are advised to use a separate cache directory for each model.
+ * @param cachePath Directory for storing model cache files. This method creates directories for different devices in
+ * the cachePath directory. You are advised to use a separate cache directory for each model.
 * @param version Cache version.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
@@ -251,16 +259,20 @@ OH_NN_ReturnCode OH_NNCompilation_SetCache(OH_NNCompilation *compilation, const
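Taken together with OH_NNCompilation_SetDevice above, a typical cache setup might look like the following sketch; the cache directory path and the version number are illustrative assumptions:

    // Sketch: bind the compilation to the first available device and enable caching.
    const size_t *allDevicesID = NULL;
    uint32_t deviceCount = 0;
    if (OH_NNDevice_GetAllDevicesID(&allDevicesID, &deviceCount) == OH_NN_SUCCESS && deviceCount > 0) {
        OH_NNCompilation_SetDevice(compilation, allDevicesID[0]);
        // One separate cache directory per model, as advised above; path is illustrative.
        OH_NNCompilation_SetCache(compilation, "/data/app/cache/model_a", 1);
    }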
 /**
 * @brief Sets the performance mode for model computing.
 *
- * Allows you to set the performance mode for model computing to meet the requirements of low power consumption
- * and ultimate performance. If this method is not called to set the performance mode in the compilation phase, the compilation instance assigns
- * the {@link OH_NN_PERFORMANCE_NONE} mode for the model by default. In this case, the device performs computing in the default performance mode. \n
+ * Allows you to set the performance mode for model computing to meet the requirements of low power consumption
+ * and ultimate performance. If this method is not called to set the performance mode in the compilation phase,
+ * the compilation instance assigns the {@link OH_NN_PERFORMANCE_NONE} mode to the model by default.
+ * In this case, the device performs computing in the default performance mode. \n
 *
- * If this method is called on the device that does not support the setting of the performance mode, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
+ * If this method is called on the device that does not support the setting of the performance mode,
+ * the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
 *
 * @param compilation Pointer to the {@link OH_NNCompilation} instance.
- * @param performanceMode Performance mode. For details about the available performance modes, see {@link OH_NN_PerformanceMode}.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @param performanceMode Performance mode. For details about the available performance modes,
+ * see {@link OH_NN_PerformanceMode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
@@ -270,16 +282,18 @@ OH_NN_ReturnCode OH_NNCompilation_SetPerformanceMode(OH_NNCompilati
 /**
 * @brief Sets the model computing priority.
 *
- * Allows you to set computing priorities for models.
- * The priorities apply only to models created by the process with the same UID.
+ * Allows you to set computing priorities for models.
+ * The priorities apply only to models created by the process with the same UID.
 * The settings will not affect models created by processes with different UIDs on different devices. \n
 *
- * If this method is called on the device that does not support the priority setting, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
+ * If this method is called on the device that does not support the priority setting,
+ * the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
 *
 * @param compilation Pointer to the {@link OH_NNCompilation} instance.
 * @param priority Priority. For details about the optional priorities, see {@link OH_NN_Priority}.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
@@ -288,17 +302,20 @@ OH_NN_ReturnCode OH_NNCompilation_SetPriority(OH_NNCompilation *compilation, OH_
 /**
 * @brief Enables float16 for computing.
 *
- * Float32 is used by default for the model of float type. If this method is called on a device that supports float16,
+ * Float32 is used by default for the model of float type. If this method is called on a device that supports float16,
 * float16 will be used for computing the float32 model to reduce memory usage and execution time. \n
- *
+ *
 * This option has no effect for models of int type, e.g. int8 type. \n
 *
- * If this method is called on the device that does not support float16, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
+ * If this method is called on the device that does not support float16,
+ * the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
 *
 * @param compilation Pointer to the {@link OH_NNCompilation} instance.
- * @param enableFloat16 Indicates whether to enable float16. If this parameter is set to true, float16 inference is performed.
+ * @param enableFloat16 Indicates whether to enable float16. If this parameter is
+ * set to true, float16 inference is performed.
 * If this parameter is set to false, float32 inference is performed.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails, an error code is returned.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
 * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
@@ -308,15 +325,18 @@ OH_NN_ReturnCode OH_NNCompilation_EnableFloat16(OH_NNCompilation *compilation, b
 /**
 * @brief Compiles a model.
 *
- * After the compilation configuration is complete, call this method to return the compilation result. The compilation instance pushes the model and
- * compilation options to the device for compilation. After this method is called, additional compilation operations cannot be performed. \n
- *
- * If the {@link OH_NNCompilation_SetDevice}, {@link OH_NNCompilation_SetCache}, {@link OH_NNCompilation_SetPerformanceMode},
- * {@link OH_NNCompilation_SetPriority}, and {@link OH_NNCompilation_EnableFloat16} methods are called, {@link OH_NN_OPERATION_FORBIDDEN} is returned. \n
+ * After the compilation configuration is complete, call this method to return the compilation result.
+ * The compilation instance pushes the model and compilation options to the device for compilation.
+ * After this method is called, additional compilation operations cannot be performed. \n
+ *
+ * If the {@link OH_NNCompilation_SetDevice}, {@link OH_NNCompilation_SetCache},
+ * {@link OH_NNCompilation_SetPerformanceMode}, {@link OH_NNCompilation_SetPriority}, and
+ * {@link OH_NNCompilation_EnableFloat16} methods are called afterwards, {@link OH_NN_OPERATION_FORBIDDEN} is returned. \n
 *
 * @param compilation Pointer to the {@link OH_NNCompilation} instance.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
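The configuration calls above are optional and must all precede the build; a sketch of the documented order, in which the performance mode, priority, and float16 choices are illustrative:

    // Sketch: configure, then build exactly once; configuration calls after a
    // successful build are rejected with OH_NN_OPERATION_FORBIDDEN.
    OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_HIGH);
    OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_MEDIUM);
    OH_NNCompilation_EnableFloat16(compilation, true);
    OH_NN_ReturnCode buildRet = OH_NNCompilation_Build(compilation);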
@@ -325,13 +345,15 @@ OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation);
 /**
 * @brief Releases the Compilation object.
 *
- * This method needs to be called to release the compilation instance created by {@link OH_NNCompilation_Construct},
- * {@link OH_NNCompilation_ConstructWithOfflineModelFile}, {@link OH_NNCompilation_ConstructWithOfflineModelBuffer} and
+ * This method needs to be called to release the compilation instance created by {@link OH_NNCompilation_Construct},
+ * {@link OH_NNCompilation_ConstructWithOfflineModelFile}, {@link OH_NNCompilation_ConstructWithOfflineModelBuffer} and
 * {@link OH_NNCompilation_ConstructForCache}. Otherwise, a memory leak will occur. \n
 *
- * If compilation or *compilation is a null pointer, this method only prints warning logs and does not execute the release. \n
+ * If compilation or *compilation is a null pointer,
+ * this method only prints warning logs and does not execute the release. \n
 *
- * @param compilation Double pointer to the {@link OH_NNCompilation} instance. After a compilation instance is destroyed,
+ * @param compilation Double pointer to the {@link OH_NNCompilation} instance.
+ * After a compilation instance is destroyed,
 * this method sets *compilation to a null pointer.
 * @since 9
 * @version 1.0
 */
@@ -363,14 +385,16 @@ NN_TensorDesc *OH_NNTensorDesc_Create();
 /**
 * @brief Releases an {@link NN_TensorDesc} instance.
 *
- * When the {@link NN_TensorDesc} instance is no longer used, this method needs to be called to release it. Otherwise,
+ * When the {@link NN_TensorDesc} instance is no longer used, this method needs to be called to release it. Otherwise,
 * the memory leak will occur. \n
- *
- * If tensorDesc or *tensorDesc is a null pointer, this method will return error code and does not execute the release. \n
+ *
+ * If tensorDesc or *tensorDesc is a null pointer,
+ * this method will return an error code and will not execute the release. \n
 *
 * @param tensorDesc Double pointer to the {@link NN_TensorDesc} instance.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -387,8 +411,8 @@ OH_NN_ReturnCode OH_NNTensorDesc_Destroy(NN_TensorDesc **tensorDesc);
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param name The name of the tensor that needs to be set.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -400,7 +424,7 @@ OH_NN_ReturnCode OH_NNTensorDesc_SetName(NN_TensorDesc *tensorDesc, const char *
 *
 * Call this method to obtain the name of the specified {@link NN_TensorDesc} instance.
 * The value of *name is a C-style string ended with '\0'.\n
 *
- * if tensorDesc or name is a null pointer, this method will return error code.
+ * If tensorDesc or name is a null pointer, this method will return an error code.
 * As an output parameter, *name must be a null pointer, otherwise the method will return an error code.
 * For example, you should define char* tensorName = NULL, and pass &tensorName as the argument of name.\n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param name The returned name of the tensor.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -420,13 +444,14 @@ OH_NN_ReturnCode OH_NNTensorDesc_GetName(const NN_TensorDesc *tensorDesc, const
 * @brief Sets the data type of a {@link NN_TensorDesc}.
 *
 * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor data type. \n
- *
+ *
 * If tensorDesc is a null pointer, this method will return an error code. \n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param dataType The data type of the tensor that needs to be set.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -436,13 +461,14 @@ OH_NN_ReturnCode OH_NNTensorDesc_SetDataType(NN_TensorDesc *tensorDesc, OH_NN_Da
 /**
 * @brief Gets the data type of a {@link NN_TensorDesc}.
 *
 * Call this method to obtain the data type of the specified {@link NN_TensorDesc} instance. \n
- *
+ *
 * If tensorDesc or dataType is a null pointer, this method will return an error code. \n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param dataType The returned data type of the tensor.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -452,14 +478,16 @@ OH_NN_ReturnCode OH_NNTensorDesc_GetDataType(const NN_TensorDesc *tensorDesc, OH
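The getters above follow a common out-parameter pattern; a sketch, assuming desc is a valid NN_TensorDesc*:

    // Sketch: the output pointer must start as NULL; the returned storage
    // belongs to the descriptor and must not be freed by the caller.
    const char *tensorName = NULL;
    OH_NN_DataType dataType = OH_NN_UNKNOWN;
    if (OH_NNTensorDesc_GetName(desc, &tensorName) == OH_NN_SUCCESS &&
        OH_NNTensorDesc_GetDataType(desc, &dataType) == OH_NN_SUCCESS) {
        // tensorName stays valid until desc is destroyed.
    }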
 /**
 * @brief Sets the shape of a {@link NN_TensorDesc}.
 *
 * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor shape. \n
- *
- * if tensorDesc or shape is a null pointer, or shapeLength is 0, this method will return error code. \n
+ *
+ * If tensorDesc or shape is a null pointer, or shapeLength is 0,
+ * this method will return an error code. \n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param shape The shape list of the tensor that needs to be set.
 * @param shapeLength The length of the shape list that needs to be set.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -469,18 +497,19 @@ OH_NN_ReturnCode OH_NNTensorDesc_SetShape(NN_TensorDesc *tensorDesc, const int32
 /**
 * @brief Gets the shape of a {@link NN_TensorDesc}.
 *
 * Call this method to obtain the shape of the specified {@link NN_TensorDesc} instance. \n
- *
- * if tensorDesc, shape or shapeLength is a null pointer, this method will return error code.
- * As an output parameter, *shape must be a null pointer, otherwise the method will return an error code.
+ *
+ * If tensorDesc, shape or shapeLength is a null pointer, this method will return an error code.
+ * As an output parameter, *shape must be a null pointer, otherwise the method will return an error code.
 * For example, you should define int32_t* tensorShape = NULL, and pass &tensorShape as the argument of shape. \n
- *
+ *
 * You do not need to release the memory of shape. It will be released when tensorDesc is destroyed. \n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param shape Return the shape list of the tensor.
 * @param shapeLength The returned length of the shape list.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -490,13 +519,14 @@ OH_NN_ReturnCode OH_NNTensorDesc_GetShape(const NN_TensorDesc *tensorDesc, int32
 /**
 * @brief Sets the format of a {@link NN_TensorDesc}.
 *
 * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor format. \n
- *
+ *
 * If tensorDesc is a null pointer, this method will return an error code. \n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param format The format of the tensor that needs to be set.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -506,13 +536,14 @@ OH_NN_ReturnCode OH_NNTensorDesc_SetFormat(NN_TensorDesc *tensorDesc, OH_NN_Form
 /**
 * @brief Gets the format of a {@link NN_TensorDesc}.
 *
 * Call this method to obtain the format of the specified {@link NN_TensorDesc} instance. \n
- *
+ *
 * If tensorDesc or format is a null pointer, this method will return an error code. \n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param format The returned format of the tensor.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
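The same NULL-initialized out-parameter rule applies to shapes; a sketch, assuming desc is a valid NN_TensorDesc* and the dimensions are illustrative:

    // Sketch: set a static shape, then read it back; the returned array is
    // owned by the descriptor and released together with it.
    int32_t shape[4] = {1, 3, 224, 224};
    OH_NNTensorDesc_SetShape(desc, shape, 4);

    int32_t *readShape = NULL;   // must start as NULL
    size_t shapeLength = 0;
    if (OH_NNTensorDesc_GetShape(desc, &readShape, &shapeLength) == OH_NN_SUCCESS) {
        // A value of -1 in readShape marks a dynamic dimension.
    }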
@@ -521,17 +552,18 @@ OH_NN_ReturnCode OH_NNTensorDesc_GetFormat(const NN_TensorDesc *tensorDesc, OH_N
 /**
 * @brief Gets the element count of a {@link NN_TensorDesc}.
 *
 * Call this method to obtain the element count of the specified {@link NN_TensorDesc} instance.
 * If you need to obtain the byte size of the tensor data, call {@link OH_NNTensorDesc_GetByteSize}. \n
- *
+ *
 * If the tensor shape is dynamic, this method will return an error code, and elementCount will be 0. \n
- *
+ *
 * If tensorDesc or elementCount is a null pointer, this method will return an error code. \n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param elementCount The returned element count of the tensor.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -541,17 +573,18 @@ OH_NN_ReturnCode OH_NNTensorDesc_GetElementCount(const NN_TensorDesc *tensorDesc
 /**
 * @brief Gets the byte size of a {@link NN_TensorDesc}.
 *
 * Call this method to obtain the byte size of the specified {@link NN_TensorDesc} instance. \n
- *
+ *
 * If the tensor shape is dynamic, this method will return an error code, and byteSize will be 0. \n
- *
+ *
 * If you need to obtain the element count of the tensor data, call {@link OH_NNTensorDesc_GetElementCount}. \n
- *
+ *
 * If tensorDesc or byteSize is a null pointer, this method will return an error code. \n
 *
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param byteSize The returned byte size of the tensor.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -565,7 +598,7 @@ OH_NN_ReturnCode OH_NNTensorDesc_GetByteSize(const NN_TensorDesc *tensorDesc, si
 *
 * Note that this method will copy the tensorDesc into {@link NN_Tensor}. Therefore you should destroy
 * tensorDesc by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
- *
+ *
 * If the tensor shape is dynamic, this method will return an error code.\n
 *
 * deviceID indicates the selected device. If it is 0, the first device in the current device list will be used
@@ -621,7 +654,7 @@ NN_Tensor *OH_NNTensor_CreateWithSize(size_t deviceID, NN_TensorDesc *tensorDesc
 * tensorDesc by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
 *
 * deviceID indicates the selected device. If it is 0, the first device in the current device list will be used
- * by default.\n
+ * by default.\n
 *
 * tensorDesc must be provided; if it is a null pointer, the method returns an error code.\n
 *
@@ -653,8 +686,8 @@ NN_Tensor *OH_NNTensor_CreateWithFd(size_t deviceID,
 *
 * @param tensor Double pointer to the {@link NN_Tensor} instance.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -688,7 +721,7 @@ NN_TensorDesc *OH_NNTensor_GetTensorDesc(const NN_Tensor *tensor);
 *
 * Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be obtained by
 * {@link OH_NNTensor_GetOffset} and the size can be obtained by {@link OH_NNTensor_GetSize}.\n
- *
+ *
 * If tensor is a null pointer, this method will return a null pointer.\n
 *
 * @param tensor Pointer to the {@link NN_Tensor} instance.
@@ -709,8 +742,8 @@ void *OH_NNTensor_GetDataBuffer(const NN_Tensor *tensor);
 * @param tensor Pointer to the {@link NN_Tensor} instance.
 * @param fd The returned file descriptor of the shared memory.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -734,8 +767,8 @@ OH_NN_ReturnCode OH_NNTensor_GetFd(const NN_Tensor *tensor, int *fd);
 * @param tensor Pointer to the {@link NN_Tensor} instance.
 * @param size The returned size of tensor data.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -755,8 +788,8 @@ OH_NN_ReturnCode OH_NNTensor_GetSize(const NN_Tensor *tensor, size_t *size);
 * @param tensor Pointer to the {@link NN_Tensor} instance.
 * @param offset The returned offset of tensor data.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -767,7 +800,7 @@ OH_NN_ReturnCode OH_NNTensor_GetOffset(const NN_Tensor *tensor, size_t *offset);
 *
 * This method constructs a model inference executor associated with the device based on the passed compilation. \n
 *
- * After the {@link OH_NNExecutor} instance is created, you can release the {@link OH_NNCompilation}
+ * After the {@link OH_NNExecutor} instance is created, you can release the {@link OH_NNCompilation}
 * instance if you do not need to create any other executors. \n
 *
 * @param compilation Pointer to the {@link OH_NNCompilation} instance.
@@ -803,8 +836,8 @@ OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation);
 * in each dimension.
 * @param shapeLength Pointer to the uint32_t type. The number of output dimensions is returned.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
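How the shared-memory accessors above are typically combined when filling an input tensor; a sketch, assuming tensor is a valid NN_Tensor* with a static shape and <string.h> is included:

    // Sketch: write zeros into the tensor's data segment as placeholder input.
    void *data = OH_NNTensor_GetDataBuffer(tensor);
    size_t size = 0;
    if (data != NULL && OH_NNTensor_GetSize(tensor, &size) == OH_NN_SUCCESS) {
        memset(data, 0, size);  // replace with real input data in practice
    }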
@@ -816,10 +849,11 @@ OH_NN_ReturnCode OH_NNExecutor_GetOutputShape(OH_NNExecutor *executor,
 /**
 * @brief Destroys an executor instance to release the memory occupied by the executor.
 *
- * This method needs to be called to release the executor instance created by calling {@link OH_NNExecutor_Construct}. Otherwise,
- * the memory leak will occur. \n
+ * This method needs to be called to release the executor instance created by calling
+ * {@link OH_NNExecutor_Construct}. Otherwise, a memory leak will occur. \n
 *
- * If executor or *executor is a null pointer, this method only prints warning logs and does not execute the release. \n
+ * If executor or *executor is a null pointer,
+ * this method only prints warning logs and does not execute the release. \n
 *
 * @param executor Double pointer to the {@link OH_NNExecutor} instance.
 * @since 9
 * @version 1.0
 */
void OH_NNExecutor_Destroy(OH_NNExecutor **executor);

/**
 * @brief Gets the input tensor count.
 *
- * You can get the input tensor count from the executor, and then create an input tensor descriptor with its index by
+ * You can get the input tensor count from the executor, and then create an input tensor descriptor with its index by
 * {@link OH_NNExecutor_CreateInputTensorDesc}. \n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param inputCount Input tensor count returned.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -845,13 +880,14 @@ OH_NN_ReturnCode OH_NNExecutor_GetInputCount(const OH_NNExecutor *executor, size
 /**
 * @brief Gets the output tensor count.
 *
- * You can get the output tensor count from the executor, and then create an output tensor descriptor with its index by
+ * You can get the output tensor count from the executor, and then create an output tensor descriptor with its index by
 * {@link OH_NNExecutor_CreateOutputTensorDesc}. \n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param outputCount Output tensor count returned.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -912,8 +948,8 @@ NN_TensorDesc *OH_NNExecutor_CreateOutputTensorDesc(const OH_NNExecuto
 * @param maxInputDims Returned pointer to an array containing the maximum dimensions of the input tensor.
 * @param shapeLength Returned length of the shape of input tensor.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
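A sketch of the count/descriptor pairing described above, assuming executor is a valid OH_NNExecutor*:

    // Sketch: walk all inputs, creating and destroying one descriptor per index.
    size_t inputCount = 0;
    if (OH_NNExecutor_GetInputCount(executor, &inputCount) == OH_NN_SUCCESS) {
        for (size_t i = 0; i < inputCount; ++i) {
            NN_TensorDesc *desc = OH_NNExecutor_CreateInputTensorDesc(executor, i);
            // ... inspect the descriptor or create an NN_Tensor from it ...
            OH_NNTensorDesc_Destroy(&desc);
        }
    }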
@@ -930,22 +966,25 @@ OH_NN_ReturnCode OH_NNExecutor_GetInputDimRange(const OH_NNExecutor *executor,
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param onRunDone Callback function handle {@link NN_OnRunDone}.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNExecutor_SetOnRunDone(OH_NNExecutor *executor, NN_OnRunDone onRunDone);

/**
- * @brief Sets the callback function handle for the post-process when the device driver service is dead during asynchronous execution.
+ * @brief Sets the callback function handle for the post-process when the
+ * device driver service is dead during asynchronous execution.
 *
 * The definition of the callback function: {@link NN_OnServiceDied}. \n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param onServiceDied Callback function handle {@link NN_OnServiceDied}.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
@@ -954,21 +993,23 @@ OH_NN_ReturnCode OH_NNExecutor_SetOnServiceDied(OH_NNExecutor *executor, NN_OnSe
 /**
 * @brief Synchronous execution of the model inference.
 *
- * Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize} or
- * {@link OH_NNTensor_CreateWithFd}. And then the input tensors data which is got by {@link OH_NNTensor_GetDataBuffer} must be filled.
- * The executor will then yield out the results by inference execution and fill them into output tensors data for you to read. \n
- *
- * In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape}, or you
- * can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc}, and then read its real shape
- * by {@link OH_NNTensorDesc_GetShape}. \n
+ * Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize}
+ * or {@link OH_NNTensor_CreateWithFd}. Then the input tensor data obtained by
+ * {@link OH_NNTensor_GetDataBuffer} must be filled. The executor will then produce the results
+ * by inference execution and fill them into the output tensor data for you to read. \n
+ *
+ * In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape},
+ * or you can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc},
+ * and then read its real shape by {@link OH_NNTensorDesc_GetShape}. \n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param inputTensor An array of input tensors {@link NN_Tensor}.
 * @param inputCount Number of input tensors.
 * @param outputTensor An array of output tensors {@link NN_Tensor}.
 * @param outputCount Number of output tensors.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
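A sketch of a synchronous run, assuming the tensors were created and the input data filled as described above:

    // Sketch: run inference and read the first output on success.
    OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(executor, inputTensors, inputCount,
                                                 outputTensors, outputCount);
    if (ret == OH_NN_SUCCESS) {
        const void *result = OH_NNTensor_GetDataBuffer(outputTensors[0]);
        // ... consume result ...
    }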
@@ -981,14 +1022,14 @@ OH_NN_ReturnCode OH_NNExecutor_RunSync(OH_NNExecutor *executor,
 /**
 * @brief Asynchronous execution of the model inference.
 *
- * Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize} or
- * {@link OH_NNTensor_CreateWithFd}. And then the input tensors data which is got by {@link OH_NNTensor_GetDataBuffer}
- * must be filled. The executor will yield out the results by inference execution and fill them into output tensors data
- * for you to read.\n
+ * Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize}
+ * or {@link OH_NNTensor_CreateWithFd}. Then the input tensor data obtained by
+ * {@link OH_NNTensor_GetDataBuffer} must be filled. The executor will produce the results by inference execution
+ * and fill them into the output tensor data for you to read.\n
 *
- * In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape}, or
- * you can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc}, and then read its real
- * shape by {@link OH_NNTensorDesc_GetShape}.\n
+ * In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape},
+ * or you can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc},
+ * and then read its real shape by {@link OH_NNTensorDesc_GetShape}.\n
 *
 * The method is non-blocking and will return immediately.\n
 *
 * @param timeout Time limit (millisecond) of the asynchronous execution, e.g. 1000.
 * @param userData Asynchronous execution identifier.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 11
 * @version 1.0
 */
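A sketch of the asynchronous flow: register the completion callback, then submit. The callback parameters shown follow {@link NN_OnRunDone}; the timeout and userData values are illustrative assumptions:

    // Sketch: completion is reported through the NN_OnRunDone callback.
    static void OnRunDone(void *userData, OH_NN_ReturnCode errCode,
                          void *outputTensor[], int32_t outputCount)
    {
        // Read the outputs here when errCode == OH_NN_SUCCESS.
    }

    OH_NNExecutor_SetOnRunDone(executor, OnRunDone);
    OH_NNExecutor_RunAsync(executor, inputTensors, inputCount,
                           outputTensors, outputCount, 1000, (void *)1);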
@@ -1040,8 +1081,8 @@ OH_NN_ReturnCode OH_NNExecutor_RunAsync(OH_NNExecutor *executor,
 * Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
 * @param deviceCount Pointer of the uint32_t type, which is used to return the length of *allDevicesID.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
@@ -1061,8 +1102,8 @@ OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32
 * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
 * @param name The device name returned.
 * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
- * If the operation fails, an error code is returned. For details about the error codes,
- * see {@link OH_NN_ReturnCode}.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
@@ -1071,7 +1112,7 @@ OH_NN_ReturnCode OH_NNDevice_GetName(size_t deviceID, const char **name);
 /**
 * @brief Obtains the type information of the specified device.
 *
- * deviceID specifies the device whose type will be obtained. If it is 0, the first device in the current device
+ * deviceID specifies the device whose type will be obtained. If it is 0, the first device in the current device
 * list will be used. Currently the following device types are supported:
 * - OH_NN_CPU: CPU device.
 * - OH_NN_GPU: GPU device.
 * - OH_NN_ACCELERATOR: machine learning dedicated accelerator.
 * - OH_NN_OTHERS: other hardware types. \n
 *
 * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
 * @param deviceType The device type {@link OH_NN_DeviceType} returned.
- * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned. If the operation fails,
- * an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
+ * @return Execution result of the function. If the operation is successful, OH_NN_SUCCESS is returned.
+ * If the operation fails, an error code is returned.
+ * For details about the error codes, see {@link OH_NN_ReturnCode}.
 * @since 9
 * @version 1.0
 */
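A sketch of enumerating devices with the three query APIs above:

    // Sketch: list every device with its name and type.
    const size_t *ids = NULL;
    uint32_t count = 0;
    if (OH_NNDevice_GetAllDevicesID(&ids, &count) == OH_NN_SUCCESS) {
        for (uint32_t i = 0; i < count; ++i) {
            const char *name = NULL;
            OH_NN_DeviceType type = OH_NN_OTHERS;
            OH_NNDevice_GetName(ids[i], &name);
            OH_NNDevice_GetType(ids[i], &type);
        }
    }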
The current version is %{public}u, " "but the cache files version is %{public}zu.", version, - (size_t)cacheInfo.version); + static_cast(cacheInfo.version)); return OH_NN_INVALID_PARAMETER; } - if ((uint64_t)version < cacheInfo.version) { + if (static_cast(version) < cacheInfo.version) { LOGE("[NNCompiledCache] Restore failed, the current version is lower than the cache files, " "please set a higher version."); return OH_NN_OPERATION_FORBIDDEN; @@ -275,7 +275,7 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfo(NNCompiledCacheInfo& modelCache } int charNumber = NUMBER_CACHE_INFO_MEMBERS * sizeof(uint64_t); - if (!infoCacheFile.read((char*)&(modelCacheInfo), charNumber)) { + if (!infoCacheFile.read(reinterpret_cast(&(modelCacheInfo)), charNumber)) { LOGE("[NNCompiledCache] CheckCacheInfo failed, error happened when reading cache info file."); infoCacheFile.close(); return OH_NN_INVALID_FILE; @@ -297,7 +297,8 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfo(NNCompiledCacheInfo& modelCache std::vector modelCheckSum; modelCheckSum.resize(modelCacheInfo.fileNumber); modelCacheInfo.modelCheckSum.resize(modelCacheInfo.fileNumber); - if (!infoCacheFile.read((char*)&modelCheckSum[0], modelCacheInfo.fileNumber * sizeof(uint64_t))) { + if (!infoCacheFile.read(reinterpret_cast(&modelCheckSum[0]), + modelCacheInfo.fileNumber * sizeof(uint64_t))) { LOGE("[NNCompiledCache] CheckCacheInfo failed. The info cache file has been changed."); infoCacheFile.close(); return OH_NN_INVALID_FILE; -- Gitee From 764649d56afe2c924448a1c898b6df71a44365d0 Mon Sep 17 00:00:00 2001 From: w30052974 Date: Wed, 3 Jan 2024 11:37:11 +0800 Subject: [PATCH 5/5] change 1.0 Signed-off-by: w30052974 --- .../kits/c/neural_network_runtime/neural_network_core.h | 8 ++++---- .../c/neural_network_runtime/neural_network_runtime.h | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_core.h b/interfaces/kits/c/neural_network_runtime/neural_network_core.h index be687ed..659140a 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_core.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_core.h @@ -79,8 +79,8 @@ OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model); * and you have to choose only one of the three construction methods. \n * * Offline model is a type of model that is offline compiled by the model converter provided by a device vendor. - * So that the offline model can only be used on the specified device, but the - * compilation time of offline model is usually much less than {@link OH_NNModel}. \n + * So that the offline model can only be used on the specified device, + * but the compilation time of offline model is usually much less than {@link OH_NNModel}. \n * * You should perform the offline compilation during your development * and deploy the offline model in your app package. \n @@ -385,8 +385,8 @@ NN_TensorDesc *OH_NNTensorDesc_Create(); /** * @brief Releases an {@link NN_TensorDesc} instance. * - * When the {@link NN_TensorDesc} instance is no longer used, this method needs to be called to release it. Otherwise, - * the memory leak will occur. \n + * When the {@link NN_TensorDesc} instance is no longer used, this method needs to be called to release it. + * Otherwise, the memory leak will occur. \n * * If tensorDesc or *tensorDesc is a null pointer, * this method will return error code and does not execute the release. 
\n diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h index 674058b..0ec7f9c 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h @@ -96,7 +96,8 @@ OH_NN_ReturnCode OH_NNQuantParam_SetScales(NN_QuantParam *quantParams, const dou * @version 1.0 */ OH_NN_ReturnCode OH_NNQuantParam_SetZeroPoints(NN_QuantParam *quantParams, - const int32_t *zeroPoints, size_t quantCount); + const int32_t *zeroPoints, + size_t quantCount); /** * @brief Sets the number bits of the {@link NN_QuantParam} instance. -- Gitee