From e9eaf09f96de21d2685bd268090241d4351247f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E5=81=B6=E5=88=A9?= Date: Fri, 24 Jan 2025 15:48:38 +0800 Subject: [PATCH] fix review tips --- .../AclNNInvocation/src/op_runner.cpp | 1 + .../op_kernel/matmul_custom.cpp | 3 --- .../op_kernel/matmul_custom.cpp | 3 --- .../10_matmul_frameworklaunch/README.md | 2 +- .../MatmulInvocationNeo/data_utils.h | 4 ++-- .../MatmulInvocationNeo/main.cpp | 4 ++-- .../MatmulInvocationNeo/matmul_custom_tiling.cpp | 1 - .../AclNNInvocation/inc/op_runner.h | 8 ++++---- .../AclNNInvocation/inc/operator_desc.h | 2 +- .../AclNNInvocation/src/main.cpp | 12 ++++++------ .../AclNNInvocation/src/op_runner.cpp | 3 +-- .../op_kernel/matmul_leakyrelu_custom.cpp | 3 --- .../CppExtensions/matmul_leakyrelu_custom.cpp | 3 --- .../CppExtensions/matmul_leakyrelu_custom_tiling.cpp | 1 - 14 files changed, 18 insertions(+), 32 deletions(-) diff --git a/operator/ascendc/0_introduction/10_matmul_frameworklaunch/AclNNInvocation/src/op_runner.cpp b/operator/ascendc/0_introduction/10_matmul_frameworklaunch/AclNNInvocation/src/op_runner.cpp index d59580f64..b29ef33a7 100644 --- a/operator/ascendc/0_introduction/10_matmul_frameworklaunch/AclNNInvocation/src/op_runner.cpp +++ b/operator/ascendc/0_introduction/10_matmul_frameworklaunch/AclNNInvocation/src/op_runner.cpp @@ -326,6 +326,7 @@ bool OpRunner::RunOp() if (workspaceSize != 0) { if (aclrtMalloc(&workspace_, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS) { ERROR_LOG("Malloc device memory failed"); + return false; } } diff --git a/operator/ascendc/0_introduction/10_matmul_frameworklaunch/MatmulCustomMultiCore/op_kernel/matmul_custom.cpp b/operator/ascendc/0_introduction/10_matmul_frameworklaunch/MatmulCustomMultiCore/op_kernel/matmul_custom.cpp index 8ecded753..52d667d7c 100644 --- a/operator/ascendc/0_introduction/10_matmul_frameworklaunch/MatmulCustomMultiCore/op_kernel/matmul_custom.cpp +++ b/operator/ascendc/0_introduction/10_matmul_frameworklaunch/MatmulCustomMultiCore/op_kernel/matmul_custom.cpp @@ -69,9 +69,6 @@ __aicore__ inline void MatmulKernel::Init(GM_ADDR bGlobal = bGlobal[offsetB]; cGlobal = cGlobal[offsetC]; biasGlobal = biasGlobal[offsetBias]; - if (GetSysWorkSpacePtr() == nullptr) { - return; - } } /** diff --git a/operator/ascendc/0_introduction/10_matmul_frameworklaunch/MatmulCustomSingleCore/op_kernel/matmul_custom.cpp b/operator/ascendc/0_introduction/10_matmul_frameworklaunch/MatmulCustomSingleCore/op_kernel/matmul_custom.cpp index fdde224c1..00bad4b3b 100644 --- a/operator/ascendc/0_introduction/10_matmul_frameworklaunch/MatmulCustomSingleCore/op_kernel/matmul_custom.cpp +++ b/operator/ascendc/0_introduction/10_matmul_frameworklaunch/MatmulCustomSingleCore/op_kernel/matmul_custom.cpp @@ -69,9 +69,6 @@ __aicore__ inline void MatmulKernel::Init(GM_ADDR bGlobal = bGlobal[offsetB]; cGlobal = cGlobal[offsetC]; biasGlobal = biasGlobal[offsetBias]; - if (GetSysWorkSpacePtr() == nullptr) { - return; - } } /** diff --git a/operator/ascendc/0_introduction/10_matmul_frameworklaunch/README.md b/operator/ascendc/0_introduction/10_matmul_frameworklaunch/README.md index 9e7764b92..0b1685e96 100644 --- a/operator/ascendc/0_introduction/10_matmul_frameworklaunch/README.md +++ b/operator/ascendc/0_introduction/10_matmul_frameworklaunch/README.md @@ -54,7 +54,7 @@ C = A * B + Bias ``` CANN软件包中提供了工程创建工具msopgen,MatmulCustom算子工程可通过MatmulCustom.json自动创建,自定义算子工程具体请参考[Ascend C算子开发](https://hiascend.com/document/redirect/CannCommunityOpdevAscendC)>工程化算子开发>创建算子工程 章节。 
-创建完自定义算子工程后,开发者重点需要完成算子host和kernel文件的功能开发。为简化样例运行流程,本样例已在MatmulCustomSingleCore和MatmulCustomMultiCore目录中准备好了必要的算子实现,install.sh脚本会创建一个CustomOp目录,并将算子实现文件复制到对应目录下,再编译算子。install.sh脚本会默认执行MatmulCustomSingleCore实现,即将MatmulCustomSingleCore算子实现文件复制到CustomOp下,对应脚本里执行命令:cp -rf MatmulCustomSingleCore/* CustomOp,如果想执行MatmulCustomMultiCore,执行命令替换为:cp -rf MatmulCustomMultiCore/* CustomOp。 +创建完自定义算子工程后,开发者需要重点完成算子host侧和kernel侧实现文件的功能开发。为简化样例运行流程,本样例已在MatmulCustomSingleCore和MatmulCustomMultiCore目录中准备好了必要的算子实现,install.sh脚本会创建一个CustomOp目录,并将算子实现文件复制到对应目录下,再编译算子。install.sh脚本会默认执行MatmulCustomSingleCore实现,即将MatmulCustomSingleCore算子实现文件复制到CustomOp下,对应脚本里执行命令:cp -rf MatmulCustomSingleCore/* CustomOp,如果想执行MatmulCustomMultiCore,执行命令替换为:cp -rf MatmulCustomMultiCore/* CustomOp。 备注:CustomOp目录为生成目录,每次执行install.sh脚本都会删除该目录并重新生成,切勿在该目录下编码算子,会存在丢失风险。 diff --git a/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/data_utils.h b/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/data_utils.h index b71d2aaf3..9ad791ed7 100644 --- a/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/data_utils.h +++ b/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/data_utils.h @@ -41,7 +41,7 @@ typedef enum { COMPLEX64 = 16, COMPLEX128 = 17, BF16 = 27 -} printDataType; +} PrintDataType; #define INFO_LOG(fmt, args...) fprintf(stdout, "[INFO] " fmt "\n", ##args) #define WARN_LOG(fmt, args...) fprintf(stdout, "[WARN] " fmt "\n", ##args) @@ -151,7 +151,7 @@ void DoPrintHalfData(const aclFloat16 *data, size_t count, size_t elementsPerRow } } -void PrintData(const void *data, size_t count, printDataType dataType, size_t elementsPerRow = 16) +void PrintData(const void *data, size_t count, PrintDataType dataType, size_t elementsPerRow = 16) { if (data == nullptr) { ERROR_LOG("Print data failed. 
data is nullptr"); diff --git a/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/main.cpp b/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/main.cpp index b34849def..2abc7b756 100644 --- a/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/main.cpp +++ b/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/main.cpp @@ -23,8 +23,8 @@ int32_t main(int32_t argc, char *argv[]) { const char *socVersion = SOC_VERSION; auto ascendcPlatform = platform_ascendc::PlatformAscendCManager::GetInstance(socVersion); - size_t aFileSize = 512 * 512 * sizeof(uint16_t); // uint16_t represent half - size_t bFileSize = 512 * 1024 * sizeof(uint16_t); // uint16_t represent half + size_t aFileSize = 512 * 512 * sizeof(uint16_t); // uint16_t represent float16 + size_t bFileSize = 512 * 1024 * sizeof(uint16_t); // uint16_t represent float16 size_t cFileSize = 512 * 1024 * sizeof(float); // matmul TCubeTiling + localMemorySize size_t tilingFileSize = sizeof(TCubeTiling) + sizeof(uint64_t); diff --git a/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/matmul_custom_tiling.cpp b/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/matmul_custom_tiling.cpp index 5b29e31be..2515314b2 100644 --- a/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/matmul_custom_tiling.cpp +++ b/operator/ascendc/0_introduction/11_matmul_kernellaunch/MatmulInvocationNeo/matmul_custom_tiling.cpp @@ -16,7 +16,6 @@ #include "tiling/tiling_api.h" #include "tiling/platform/platform_ascendc.h" using namespace matmul_tiling; -using namespace std; /** * @brief Generate matmul tiling. diff --git a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/inc/op_runner.h b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/inc/op_runner.h index f1b3a6706..cdd48d713 100644 --- a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/inc/op_runner.h +++ b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/inc/op_runner.h @@ -160,9 +160,9 @@ public: bool RunOp(); private: - size_t numInputs_; - size_t numOutputs_; - void *workspace_; + size_t numInputs_ = 0; + size_t numOutputs_ = 0; + void *workspace_ = nullptr; std::vector inputBuffers_; std::vector outputBuffers_; @@ -175,7 +175,7 @@ private: std::vector inputTensor_; std::vector outputTensor_; - OperatorDesc *opDesc_; + OperatorDesc *opDesc_ = nullptr; }; #endif // OP_RUNNER_H diff --git a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/inc/operator_desc.h b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/inc/operator_desc.h index 97a39f187..2be1f63b6 100644 --- a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/inc/operator_desc.h +++ b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/inc/operator_desc.h @@ -22,7 +22,7 @@ struct OperatorDesc { /** * Constructor */ - explicit OperatorDesc(); + OperatorDesc(); /** * Destructor diff --git a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/src/main.cpp b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/src/main.cpp index ba5198a77..36accde43 100644 --- a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/src/main.cpp +++ 
b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/src/main.cpp @@ -19,7 +19,7 @@ #include "op_runner.h" bool g_isDevice = false; -int deviceId = 0; +static constexpr int DEVICE_ID = 0; OperatorDesc CreateOpDesc() { @@ -61,8 +61,8 @@ bool ProcessOutputData(OpRunner &runner) void DestroyResource() { bool flag = false; - if (aclrtResetDevice(deviceId) != ACL_SUCCESS) { - ERROR_LOG("Reset device %d failed", deviceId); + if (aclrtResetDevice(DEVICE_ID) != ACL_SUCCESS) { + ERROR_LOG("Reset device %d failed", DEVICE_ID); flag = true; } INFO_LOG("Reset Device success"); @@ -95,12 +95,12 @@ bool InitResource() return false; } - if (aclrtSetDevice(deviceId) != ACL_SUCCESS) { - ERROR_LOG("Set device failed. deviceId is %d", deviceId); + if (aclrtSetDevice(DEVICE_ID) != ACL_SUCCESS) { + ERROR_LOG("Set device failed. deviceId is %d", DEVICE_ID); (void)aclFinalize(); return false; } - INFO_LOG("Set device[%d] success", deviceId); + INFO_LOG("Set device[%d] success", DEVICE_ID); // runMode is ACL_HOST which represents app is running in host // runMode is ACL_DEVICE which represents app is running in device diff --git a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/src/op_runner.cpp b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/src/op_runner.cpp index 39f2c441b..bd95b4f42 100644 --- a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/src/op_runner.cpp +++ b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/AclNNInvocation/src/op_runner.cpp @@ -16,8 +16,6 @@ #include "aclnn_matmul_leakyrelu_custom.h" #include "common.h" -using namespace std; - extern bool g_isDevice; OpRunner::OpRunner(OperatorDesc *opDesc) : opDesc_(opDesc) @@ -326,6 +324,7 @@ bool OpRunner::RunOp() if (workspaceSize != 0) { if (aclrtMalloc(&workspace_, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS) { ERROR_LOG("Malloc device memory failed"); + return false; } } diff --git a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/MatmulLeakyReluCustom/op_kernel/matmul_leakyrelu_custom.cpp b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/MatmulLeakyReluCustom/op_kernel/matmul_leakyrelu_custom.cpp index bee005b90..7c969179b 100644 --- a/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/MatmulLeakyReluCustom/op_kernel/matmul_leakyrelu_custom.cpp +++ b/operator/ascendc/0_introduction/12_matmulleakyrelu_frameworklaunch/MatmulLeakyReluCustom/op_kernel/matmul_leakyrelu_custom.cpp @@ -78,9 +78,6 @@ MatmulLeakyKernel::Init(GM_ADDR a, GM_ADDR b, GM_ cGlobal = cGlobal[offsetC]; biasGlobal = biasGlobal[offsetBias]; pipe->InitBuffer(reluOutQueue_, 1, tiling.baseM * tiling.baseN * sizeof(cType)); // Init output buffer. 
- if (GetSysWorkSpacePtr() == nullptr) { - return; - } } /** diff --git a/operator/ascendc/0_introduction/13_matmulleakyrelu_kernellaunch/CppExtensions/matmul_leakyrelu_custom.cpp b/operator/ascendc/0_introduction/13_matmulleakyrelu_kernellaunch/CppExtensions/matmul_leakyrelu_custom.cpp index d061bcff3..739fd0ba1 100644 --- a/operator/ascendc/0_introduction/13_matmulleakyrelu_kernellaunch/CppExtensions/matmul_leakyrelu_custom.cpp +++ b/operator/ascendc/0_introduction/13_matmulleakyrelu_kernellaunch/CppExtensions/matmul_leakyrelu_custom.cpp @@ -65,9 +65,6 @@ __aicore__ inline void MatmulLeakyKernel::Init(GM cGlobal = cGlobal[offsetC]; biasGlobal = biasGlobal[offsetBias]; pipe->InitBuffer(reluOutQueue_, 1, tiling.baseM * tiling.baseN * sizeof(cType)); - if (GetSysWorkSpacePtr() == nullptr) { - return; - } } template diff --git a/operator/ascendc/0_introduction/13_matmulleakyrelu_kernellaunch/CppExtensions/matmul_leakyrelu_custom_tiling.cpp b/operator/ascendc/0_introduction/13_matmulleakyrelu_kernellaunch/CppExtensions/matmul_leakyrelu_custom_tiling.cpp index d0b8b19a3..f666facbd 100644 --- a/operator/ascendc/0_introduction/13_matmulleakyrelu_kernellaunch/CppExtensions/matmul_leakyrelu_custom_tiling.cpp +++ b/operator/ascendc/0_introduction/13_matmulleakyrelu_kernellaunch/CppExtensions/matmul_leakyrelu_custom_tiling.cpp @@ -15,7 +15,6 @@ #include "tiling/tiling_api.h" using namespace matmul_tiling; -using namespace std; uint8_t *GetTilingBuf(optiling::TCubeTiling *tilingData) { -- Gitee
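
Note on the op_runner.cpp hunks above: both add a `return false;` so that a failed workspace allocation aborts the run instead of launching the operator with a null workspace pointer. Below is a minimal, self-contained sketch of that pattern using the ACL runtime calls the sample already relies on; the `AllocWorkspace` helper and the local `ERROR_LOG` definition are illustrative only and not part of the patch.

```
#include <cstddef>
#include <cstdio>
#include "acl/acl.h"  // ACL runtime: aclrtMalloc, ACL_MEM_MALLOC_HUGE_FIRST, ACL_SUCCESS

// Local stand-in for the sample's ERROR_LOG macro from common.h / data_utils.h.
#define ERROR_LOG(fmt, args...) fprintf(stderr, "[ERROR] " fmt "\n", ##args)

// Illustrative helper (hypothetical name): allocate the device workspace whose size
// was reported by the aclnn two-phase call, and fail hard on error, which is what
// the added "return false;" achieves inside OpRunner::RunOp.
static bool AllocWorkspace(size_t workspaceSize, void **workspace)
{
    *workspace = nullptr;
    if (workspaceSize == 0) {
        return true;  // nothing to allocate for this launch
    }
    if (aclrtMalloc(workspace, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS) {
        ERROR_LOG("Malloc device memory failed");
        return false;  // caller must stop instead of launching with a null workspace
    }
    return true;
}
```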
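Similarly, the AclNNInvocation main.cpp hunk replaces the mutable global `deviceId` with `static constexpr int DEVICE_ID = 0;`. The following condensed sketch shows the init/teardown flow that constant drives; the real sample also queries the run mode, creates a stream, and runs the operator between these calls, which is omitted here.

```
#include <cstdio>
#include "acl/acl.h"  // ACL runtime: aclInit, aclrtSetDevice, aclrtResetDevice, aclFinalize

static constexpr int DEVICE_ID = 0;  // same constant the patch introduces

int main()
{
    if (aclInit(nullptr) != ACL_SUCCESS) {  // nullptr: no acl.json configuration file
        fprintf(stderr, "acl init failed\n");
        return -1;
    }
    if (aclrtSetDevice(DEVICE_ID) != ACL_SUCCESS) {  // bind the process to device 0
        fprintf(stderr, "Set device failed. deviceId is %d\n", DEVICE_ID);
        (void)aclFinalize();
        return -1;
    }

    // ... create stream, run the custom operator, verify outputs ...

    (void)aclrtResetDevice(DEVICE_ID);  // release device resources
    (void)aclFinalize();                // tear down the ACL runtime
    return 0;
}
```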