diff --git a/debug/accuracy_tools/MANIFEST.in b/debug/accuracy_tools/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..21075694b9b36d38ea5d5fea29e00bfbd6d9e538 --- /dev/null +++ b/debug/accuracy_tools/MANIFEST.in @@ -0,0 +1,8 @@ +recursive-include ptdbg_ascend/src/python/ptdbg_ascend/ *.py +recursive-include ptdbg_ascend/src/python/ptdbg_ascend/ *.yaml +recursive-include ptdbg_ascend/src/python/ptdbg_ascend/ *.template +recursive-include api_accuracy_checker/ *.py +recursive-include api_accuracy_checker/ *.yaml +recursive-include api_accuracy_checker/ *.json +recursive-include atat/ * +recursive-exclude api_accuracy_checker/test * diff --git a/debug/accuracy_tools/README_POC.md b/debug/accuracy_tools/README_POC.md new file mode 100644 index 0000000000000000000000000000000000000000..00b4818832ca26bf49c7144df2ed706ce4e2c150 --- /dev/null +++ b/debug/accuracy_tools/README_POC.md @@ -0,0 +1,69 @@ +# 精度工具 + +本手册主要介绍精度预检工具和ptdbg_ascend精度工具合一软件包的安装和工具命令行使用指导。 + +## 工具安装 + +精度工具合一软件包名称:`ascend_training_accuracy_tools-{version}-py3-none-any.whl` + +1. whl包获取。 + + 请通过下表链接下载ptdbg_ascend精度工具whl包。 + + | 版本 | 发布日期 | 支持PyTorch版本 | 下载链接 | 校验码 | + | ----- | ---------- | --------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | + | 0.0.1 | 2024-03-15 | 1.11.0/2.0/2.1 | [ascend_training_accuracy_tools-0.0.1-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/att/0.0/ascend_training_accuracy_tools-0.0.1-py3-none-any.whl) | 5801510d4e827e4859bc9a5aca021e4d30c2ea42d60a4c8ad0c2baab1b7782c9 | + +2. whl包校验。 + + 1. 根据以上下载链接下载whl包到Linux安装环境。 + + 2. 进入whl包所在目录,执行如下命令。 + + ```bash + sha256sum {name}.whl + ``` + + {name}为whl包名称。 + + 若回显呈现对应版本whl包一致的**校验码**,则表示下载了正确的ptdbg_ascend精度工具whl安装包。示例如下: + + ```bash + sha256sum ascend_training_accuracy_tools-0.0.1-py3-none-any.whl + 5801510d4e827e4859bc9a5aca021e4d30c2ea42d60a4c8ad0c2baab1b7782c9 *ascend_training_accuracy_tools-0.0.1-py3-none-any.whl + ``` + +3. 
执行如下命令进行安装。
+
+   ```bash
+   pip3 install ./ascend_training_accuracy_tools-{version}-py3-none-any.whl
+   ```
+
+   若为覆盖安装,请在命令行末尾增加“--force-reinstall”参数强制安装,例如:
+
+   ```bash
+   pip3 install ./ascend_training_accuracy_tools-{version}-py3-none-any.whl --force-reinstall
+   ```
+
+   提示如下信息则表示安装成功。
+
+   ```bash
+   Successfully installed ascend_training_accuracy_tools-{version}
+   ```
+
+## 工具使用
+
+安装精度工具合一软件包后,精度工具支持使用命令行启动各种功能(ptdbg_ascend工具的dump和精度比对操作除外)。命令格式如下:
+
+```bash
+atat [-h] parse run_ut multi_run_ut benchmark_compare run_overflow_check
+```
+
+| 参数 | 说明 |
+| ------------------ | ------------------------------------------------------------ |
+| parse | ptdbg_ascend.parse数据解析功能入口,执行atat parse命令后进入parse交互式界面,更多参数请参见《[ptdbg_ascend精度工具功能说明](https://gitee.com/ascend/att/tree/master/debug/accuracy_tools/ptdbg_ascend/doc)》的“ptdbg_ascend.parse数据解析功能”。 |
+| run_ut | 预检工具run_ut功能,可以通过atat run_ut命令执行精度预检操作,更多参数请参见《[Ascend模型精度预检工具](https://gitee.com/ascend/att/tree/master/debug/accuracy_tools/api_accuracy_checker)》的“run_ut预检操作”。 |
+| multi_run_ut | 预检工具multi_run_ut功能,可以通过atat multi_run_ut命令执行多线程预检操作,更多参数请参见《[Ascend模型精度预检工具](https://gitee.com/ascend/att/tree/master/debug/accuracy_tools/api_accuracy_checker)》的“使用multi_run_ut.py执行多线程预检”。 |
+| benchmark_compare | 预检工具预检结果比对功能,可以通过atat benchmark_compare命令执行预检结果比对操作,更多参数请参见《[Ascend模型精度预检工具](https://gitee.com/ascend/att/tree/master/debug/accuracy_tools/api_accuracy_checker)》的“预检结果比对”。 |
+| run_overflow_check | 溢出解析工具,可以通过atat run_overflow_check命令执行溢出API解析操作,更多参数请参见《[Ascend模型精度预检工具](https://gitee.com/ascend/att/tree/master/debug/accuracy_tools/api_accuracy_checker)》的“溢出解析工具”。 |
\ No newline at end of file
diff --git a/debug/accuracy_tools/__init__.py b/debug/accuracy_tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..caef4efb9f20e49f9f7abf3831e4a7738fcaf9ef
--- /dev/null
+++ b/debug/accuracy_tools/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2024, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ diff --git a/debug/accuracy_tools/api_accuracy_checker/README.md b/debug/accuracy_tools/api_accuracy_checker/README.md index 9903f0a874e38632a12d88d8cc0014ee2b1206a7..7738501db87b1cacbc9eb96687bf09aed3a5ed68 100644 --- a/debug/accuracy_tools/api_accuracy_checker/README.md +++ b/debug/accuracy_tools/api_accuracy_checker/README.md @@ -1,39 +1,39 @@ # Ascend模型精度预检工具 -Ascend模型精度预检工具能在昇腾NPU上扫描用户训练模型中所有API,输出精度情况的诊断和分析。工具会提取模型中所有的API前反向信息,构造相应的API单元测试,将NPU输出与标杆(CPU高精度)比对,从而检测出精度有问题的API;另外工具还可以通过标杆比对法,从而确认NPU和GPU各自运行时的精度哪一方更接近标杆(CPU高精度)。 +Ascend模型精度预检工具能在昇腾NPU上扫描用户训练模型中所有API,输出精度情况的诊断和分析。工具会提取模型中所有的API前反向信息,构造相应的API单元测试,将NPU输出与标杆(CPU高精度)比对,从而检测出精度有问题的API;另外工具还可以通过新精度标准比对法,从而确认NPU和GPU各自运行时的精度哪一方更接近标杆(CPU高精度)。 -**标杆比对法**:真实数据模式下,将NPU vs CPU高精度(标杆)的预检比对结果和GPU vs CPU高精度(标杆)的预检比对结果进行比对汇总,最终给出判定结果的精度预检方法。当前仅支持torch.float16、torch.bfloat16、torch.float32数据类型的API进行比对。 +**新精度标准比对法**:依据新精度标准,对不同的API采取不同的比对算法进行比对(包括绝对阈值法,标杆比对法和二进制一致法),最终给定预检判定结果。 **真实数据模式**:精度预检工具支持随机生成模式和真实数据模式,即在预检dump时可以选择由工具构造随机数进行输入获得dump数据或选择获取真实输入数据进行预检dump操作;随机生成模式执行效率高,可以快速获得结果,但数据精度低,只能大致判断精度问题;真实数据模式执行效率略低于随机生成模式,但是数据精度高,可以准确判断精度问题。 -工具支持PyTorch版本:1.8.1/1.11.0/2.0/2.1。 +工具支持PyTorch版本:1.11.0/2.0/2.1。 ## 工具特性 1. 落盘数据小。 -2. 不依赖标杆侧GPU训练资源,本地即可完成预检(标杆比对法除外)。 +2. 不依赖标杆侧GPU训练资源,本地即可完成预检(新精度标准比对法除外)。 3. 支持随机生成模式和真实数据模式。 4. 单API测试,排除整网中的累计误差问题。 ## 预检流程 -精度预检可以分为:标准模式(直接进行NPU vs CPU高精度的预检比对操作)和标杆比对法(将NPU vs CPU高精度的预检比对结果和GPU vs CPU高精度的预检比对结果进行比对汇总),两种模式操作流程如下。 +精度预检可以分为:标准模式(直接进行NPU vs CPU高精度的预检比对操作)和新精度标准比对法(将NPU vs CPU高精度的预检比对结果和GPU vs CPU高精度的预检比对结果进行比对汇总),两种模式操作流程如下。 ### 标准模式 1. 在NPU环境下安装预检工具。详见“**工具安装**”。 -2. 在NPU环境下dump预检数据。详见“**预检数据dump**”。 -3. NPU环境下执行run_ut。详见“**执行预检**”。 +2. 在NPU环境下dump预检数据。详见“**dump预检数据**”。 +3. NPU环境下执行run_ut。详见“**run_ut预检操作**”。 4. 查看“**预检结果**”。 -### 标杆比对法 +### 新精度标准比对法 1. 在NPU和GPU环境下分别安装预检工具。详见“**工具安装**”。 -2. 在NPU环境下dump预检数据(使用msCheckerConfig.update_config开启真实数据模式)。详见“**预检数据dump**”。 +2. 在NPU环境下dump预检数据(使用msCheckerConfig.update_config开启真实数据模式)。详见“**dump预检数据**”。 3. 将NPU环境下dump的预检数据拷贝至GPU环境。 -4. 在NPU和GPU环境下分别执行run_ut。详见“**执行预检**”。 +4. 在NPU和GPU环境下分别执行run_ut。详见“**run_ut预检操作**”。 5. 将NPU和GPU执行run_ut生成的`accuracy_checking_details_{timestamp}.csv`结果文件拷贝至同一环境下。 -6. 运行benchmark_compare.py。详见“**预检结果比对**”。 +6. 运行api_precision_compare.py。详见“**预检结果比对**”。 ## 工具安装 @@ -51,38 +51,16 @@ Ascend模型精度预检工具能在昇腾NPU上扫描用户训练模型中所 ## 预检操作 -### 预检数据dump - -在训练脚本(如main.py)中加入以下代码导入工具dump模块,启动训练即可自动抓取网络所有API信息。 - -- 如果训练脚本是通过torch.utils.data.dataloader方式加载数据,就可以在训练脚本中加入以下代码导入工具dump模块,启动训练即可自动抓取网络所有API信息。 - - ```python - import api_accuracy_checker.dump - ``` - - 工具默认抓取训练的**第二个迭代**并且在第二个迭代后会报错退出训练进程,可通过target_iter参数配置。 - - **报错信息如下,这个报错仅用于停止训练,属于正常现象**: +### dump预检数据 - ```bash - Exception: Model pretest: exit after iteration 1. 
- ``` +#### dump操作 - 若报错信息不一致,可能是由于服务器的其他错误信息覆盖导致,可以尝试查找报错信息中的Exception。 +在训练脚本(如main.py)中加入以下代码导入工具dump模块,启动训练即可自动抓取网络所有API信息。 - 若训练脚本中的代码不是通过torch.utils.data.dataloader来加载数据或在部分流水并行、张量并行场景下,工具的开关无法在每张卡上自动打开,导致多卡训练dump结果只有一组json,那么需要在训练代码中添加打开工具开关的调用。 - 首先,需要关闭torch.utils.data.dataloader加载数据,操作如下: - - ```bash - cd att/debug/accuracy_tools/api_accuracy_checker - vi config.yaml - # 修改enable_dataloader参数值为False - ``` - - 其次,在训练代码中添加数据dump操作如下: - + 在训练代码中添加数据dump操作如下: + ```Python import api_accuracy_checker.dump as DP @@ -95,9 +73,34 @@ Ascend模型精度预检工具能在昇腾NPU上扫描用户训练模型中所 DP.dump.stop() # 控制dump结束 DP.dump.step() # 在DP.dump.stop()后加入DP.dump.step()即可指定需要dump的step ``` - - 上述代码要添加在迭代前向的代码段中,或者说是遍历数据集循环的代码段中。如对于GPT-3可以添加在pretrain_gpt.py 的forward_step函数中。之后工具会适配这个场景开关的自动打开。 - + + 上述代码要添加在迭代内,如对于[ModelLink](https://gitee.com/ascend/ModelLink)的LLAMA2-7B可以添加在training.py中train函数的iteration循环内。之后工具会适配这个场景开关的自动打开。 + +- 如果训练脚本是通过torch.utils.data.dataloader方式加载数据。 + + 首先,需要开启torch.utils.data.dataloader加载数据,操作如下: + + ```bash + cd att/debug/accuracy_tools/api_accuracy_checker + vi config.yaml + # 修改enable_dataloader参数值为True + ``` + + 其次,在训练脚本中加入以下代码导入工具dump模块,启动训练即可自动抓取网络所有API信息。 + + ```python + import api_accuracy_checker.dump + ``` + + 工具默认抓取训练的**第二个迭代**并且在第二个迭代后会报错退出训练进程,可通过target_iter参数配置。 + + **报错信息如下,这个报错仅用于停止训练,属于正常现象**: + + ```bash + Exception: Model pretest: exit after iteration 1. + ``` + + 若报错信息不一致,可能是由于服务器的其他错误信息覆盖导致,可以尝试查找报错信息中的Exception。 dump信息默认会存盘到“./step1”路径下(相对于启动训练的路径),包括: @@ -107,49 +110,40 @@ dump信息默认会存盘到“./step1”路径下(相对于启动训练的路 forward_info与stack_info中的key值一一对应,用户可根据forward_info中API的key在stack_info中查询到其调用栈及代码行位置。 -若有需要,用户可以通过msCheckerConfig.update_config来配置dump路径以及开启**真实数据模式**、指定dump某个step或配置API dump白名单,详见“**msCheckerConfig.update_config**”。 +若有需要,用户可以通过msCheckerConfig.update_config来配置dump路径以及开启**真实数据模式**、指定dump某个step或配置**API dump白名单**,详见“**msCheckerConfig.update_config**”。 -### 执行预检 +#### 真实数据模式 -1. 
将API信息输入给run_ut模块运行精度检测并比对,运行如下命令: - - 大模型场景下,为提高预检效率,请使用multi_run_ut工具进行多线程预检,详见“**multi_run_ut多线程预检**”。 +预检工具默认为随机数据模式,如果想要完全复刻整网的API运行情况,可以使用真实数据模式,添加以下代码即可: - ```bash - cd $ATT_HOME/debug/accuracy_tools/api_accuracy_checker/run_ut - python run_ut.py -forward ./forward_info_0.json -backward ./backward_info_0.json - ``` - - 某些场景下(如推理),可以不指定backward_info_0.json,不影响预检功能。 +```python +from api_accuracy_checker.dump import msCheckerConfig +msCheckerConfig.update_config(real_data=True) +``` - | 参数名称 | 说明 | 是否必选 | - | -------------------------------- | ------------------------------------------------------------ | ---------------------------------- | - | -forward或--forward_input_file | 指定前向API信息文件forward_info_{pid}.json。 | 是 | - | -backward或--backward_input_file | 指定反向API信息文件backward_info_{pid}.json。 | 否 | - | -save_error_data | 保存精度未达标的API输入输出数据。 | 否 | - | -o或--out_path | 指指定run_ut执行结果存盘路径,默认“./”(相对于run_ut的路径)。 | 否 | - | -j或--jit_compile | 开启jit编译。 | 否 | - | -d或--device | 指定Device ID,选择UT代码运行所在的卡,默认值为0。 | 否 | - | -csv_path或--result_csv_path | 指定本次运行中断时生成的`accuracy_checking_result_{timestamp}.csv`文件路径,执行run_ut中断时,若想从中断处继续执行,配置此参数即可。需要指定为上次中断的`accuracy_checking_result_{timestamp}.csv`文件。详见“**断点续检**”。 | run_ut操作中断后继续执行场景下必选 | - | -real_data_path | 指定run_ut操作的真实数据路径。真实数据dump模式通过**msCheckerConfig.update_config**接口的real_data参数开启。指定绝对路径为forward_real_data和backward_real_data目录的父目录。 | dump的数据为真实数据下必选 | +#### API dump白名单 - run_ut执行结果包括`accuracy_checking_result_{timestamp}.csv`和`accuracy_checking_details_{timestamp}.csv`两个文件。`accuracy_checking_result_{timestamp}.csv`是API粒度的,标明每个API是否通过测试。建议用户先查看`accuracy_checking_result_{timestamp}.csv`文件,对于其中没有通过测试的或者特定感兴趣的API,根据其API name字段在`accuracy_checking_details_{timestamp}.csv`中查询其各个输出的达标情况以及比较指标。详细介绍请参见“**预检结果**”。 +精度预检工具可以对指定API进行预检操作,可以在dump时的训练脚本中直接添加白名单参数,只dump指定的API数据,示例代码如下: -2. (可选)如果需要保存比对不达标的输入和输出数据,可以在run_ut执行命令结尾添加-save_error_data,例如: +```python +from api_accuracy_checker.dump import msCheckerConfig +msCheckerConfig.update_config(white_list=["conv1d", "conv2d"]) +``` - ```bash - python run_ut.py -forward ./forward_info_0.json -backward ./backward_info_0.json -save_error_data - ``` +配置的API名称须存在于[support_wrap_ops.yaml](./hook_module/support_wrap_ops.yaml)文件下。 - 数据默认会存盘到'./ut_error_data{timestamp}'路径下(相对于启动run_ut的路径),有需要的话,用户可以通过修改att/debug/accuracy_tools/api_accuracy_checker目录下,config.yaml文件的error_data_path参数来配置保存路径。 +#### 工具支持的API列表 -3. 
(可选)如果dump的数据为真实数据,那么需要指定真实数据路径,例如: +预检工具维护固定的API支持列表,若需要删除或增加dump的API,可以在[support_wrap_ops.yaml](./hook_module/support_wrap_ops.yaml)文件内手动修改,如下示例: - ```bash - python run_ut.py -forward ./forward_info_0.json -backward ./backward_info_0.json -real_data_path /home/xxx/ut/real_data - ``` +```bash +functional: # functional为算子类别,找到对应的类别,在该类别下按照下列格式删除或添加API + - conv1d + - conv2d + - conv3d +``` -### msCheckerConfig.update_config +#### msCheckerConfig.update_config **功能说明** @@ -160,61 +154,70 @@ forward_info与stack_info中的key值一一对应,用户可根据forward_info **函数原型** ```python -msCheckerConfig.update_config(dump_path="./", real_data=False, target_iter=[1], white_list=[]) +msCheckerConfig.update_config(dump_path="./", real_data=False, target_iter=[1], white_list=[], enable_dataloader=False) ``` **参数说明** -| 参数名称 | 说明 | 是否必选 | -| ----------- | ------------------------------------------------------------ | -------- | -| dump_path | 设置dump路径,须为已存在目录,默认为当前目录。 | 否 | -| real_data | 真实数据模式,可取值True或False,默认为False,表示随机数据模式,配置为True后开启真实数据模式,dump信息增加forward_real_data和backward_real_data目录,目录下保存每个API输入的具体数值。 | 否 | -| target_iter | 指定dump某个step的数据,默认为[1],须指定为训练脚本中存在的step。target_iter为list格式,可配置逐个step,例如:target_iter=[0,1,2];也可以配置step范围,例如:target_iter=list(range(0,9)),表示dump第0到第8个step。 | 否 | -| white_list | API dump白名单,指定dump具体API数据,也可以直接配置预检的API白名单,详细请参见“**API预检白名单**”。参数示例:white_list=["conv1d", "conv2d"]。默认未配置白名单,即dump全量API数据。 | 否 | +| 参数名称 | 说明 | 是否必选 | +| ----------------- | ------------------------------------------------------------ | -------- | +| dump_path | 设置dump路径,默认为当前目录。若指定目录不存在,则自动创建。 | 否 | +| real_data | 真实数据模式,可取值True或False,默认为False,表示随机数据模式,配置为True后开启真实数据模式,dump信息增加forward_real_data和backward_real_data目录,目录下保存每个API输入的具体数值。 | 否 | +| target_iter | 指定dump某个step的数据,默认为[1],须指定为训练脚本中存在的step。target_iter为list格式,可配置逐个step,例如:target_iter=[0,1,2];也可以配置step范围,例如:target_iter=list(range(0,9)),表示dump第0到第8个step。 | 否 | +| white_list | API dump白名单,指定dump具体API数据,也可以直接配置预检的API白名单,详细请参见“**API预检白名单**”。参数示例:white_list=["conv1d", "conv2d"]。默认未配置白名单,即dump全量API数据。 | 否 | +| enable_dataloader | 自动dump数据开关,可取值True(开启)、False(关闭),默认关闭。 | 否 | -**函数示例** +### run_ut预检操作 -- 示例1:配置dump路径以及开启真实数据模式 +完成“dump预检数据”后,仅仅获取了API的输入数据,为了得到NPU vs CPU高精度(标杆)的预检比对结果和GPU vs CPU高精度(标杆)的预检比对结果,还需要进行run_ut操作。 - ```python - from api_accuracy_checker.dump import msCheckerConfig - msCheckerConfig.update_config(dump_path="my/dump/path", real_data=True) - ``` +run_ut预检操作包括如下场景: -- 示例2:指定dump某个step +- 使用run_ut.py执行预检:run_ut.py适用于数据量较小的单卡场景。 +- 使用multi_run_ut.py执行多线程预检:multi_run_ut.py适用于数据量较大的大模型场景。 - ```python - from api_accuracy_checker.dump import msCheckerConfig - msCheckerConfig.update_config(target_iter=[0,1,2]) - ``` +#### 使用run_ut.py执行预检 -### API预检白名单 +1. 
将API信息输入给run_ut模块运行精度检测并比对,运行如下命令: -精度预检工具可以对指定API进行预检操作,可以使用如下方式: + ```bash + cd $ATT_HOME/debug/accuracy_tools/api_accuracy_checker/run_ut + python run_ut.py -forward ./forward_info_0.json -backward ./backward_info_0.json + ``` -- 方式一: + 某些场景下(如推理),可以不指定backward_info_0.json,不影响预检功能。 - 修改att/debug/accuracy_tools/api_accuracy_checker目录下config.yaml文件的white_list参数,配置需要预检的API名称。 + | 参数名称 | 说明 | 是否必选 | + | -------------------------------- | ------------------------------------------------------------ | ---------------------------------- | + | -forward或--forward_input_file | 指定前向API信息文件forward_info_{pid}.json。 | 是 | + | -backward或--backward_input_file | 指定反向API信息文件backward_info_{pid}.json。 | 否 | + | -save_error_data | 保存精度未达标的API输入输出数据。 | 否 | + | -o或--out_path | 指定run_ut执行结果存盘路径,默认“./”(相对于run_ut的路径)。 | 否 | + | -j或--jit_compile | 开启jit编译。 | 否 | + | -d或--device | 指定Device ID,选择UT代码运行所在的卡,默认值为0。 | 否 | + | -csv_path或--result_csv_path | 指定本次运行中断时生成的`accuracy_checking_result_{timestamp}.csv`文件路径,执行run_ut中断时,若想从中断处继续执行,配置此参数即可。需要指定为上次中断的`accuracy_checking_result_{timestamp}.csv`文件。详见“**断点续检**”。 | run_ut操作中断后继续执行场景下必选 | + | -real_data_path | 指定run_ut操作的真实数据路径。真实数据dump模式通过**msCheckerConfig.update_config**接口的real_data参数开启。指定绝对路径为forward_real_data和backward_real_data目录的父目录。 | dump的数据为真实数据下必选 | + | -f或--filter_api | 过滤模型中除最大值和最小值以外其他参数和结构相同的API。适用于模型较大且重复API较多的场景。 | 否 | -- 方式二: + run_ut执行结果包括`accuracy_checking_result_{timestamp}.csv`和`accuracy_checking_details_{timestamp}.csv`两个文件。`accuracy_checking_result_{timestamp}.csv`是API粒度的,标明每个API是否通过测试。建议用户先查看`accuracy_checking_result_{timestamp}.csv`文件,对于其中没有通过测试的或者特定感兴趣的API,根据其API name字段在`accuracy_checking_details_{timestamp}.csv`中查询其各个输出的达标情况以及比较指标。详细介绍请参见“**预检结果**”。 - 在dump时的训练脚本中直接添加白名单参数,只dump指定的API数据,示例代码如下: +2. (可选)如果需要保存比对不达标的输入和输出数据,可以在run_ut执行命令结尾添加-save_error_data,例如: - ```python - from api_accuracy_checker.dump import msCheckerConfig - msCheckerConfig.update_config(white_list=["conv1d", "conv2d"]) - ``` + ```bash + python run_ut.py -forward ./forward_info_0.json -backward ./backward_info_0.json -save_error_data + ``` -说明: + 数据默认会存盘到'./ut_error_data{timestamp}'路径下(相对于启动run_ut的路径),有需要的话,用户可以通过修改att/debug/accuracy_tools/api_accuracy_checker目录下,config.yaml文件的error_data_path参数来配置保存路径,详见“config.yaml文件说明”。。 -- 配置的API名称须存在于att\debug\accuracy_tools\api_accuracy_checker\hook_module目录下的support_wrap_ops.yaml文件下。 -- 方式一和方式二都可以在dump时设置并控制dump对应的API,默认情况下没有配置白名单,dump所有API数据,若在dump操作时没有配置白名单,那么可以在执行run_ut模块前使用方式一配置白名单。 +3. 
(可选)如果dump的数据为真实数据,那么需要指定真实数据路径,例如: -### multi_run_ut多线程预检 + ```bash + python run_ut.py -forward ./forward_info_0.json -backward ./backward_info_0.json -real_data_path /home/xxx/ut/real_data + ``` -大模型场景下,为提高预检效率,请使用本进行多线程预检。 +#### 使用multi_run_ut.py执行多线程预检 -multi_run_ut为multi_run_ut.py脚本,可以并行执行多个run_ut操作,从而降低预检耗时。 +multi_run_ut.py脚本,可以并行执行多个run_ut操作,从而降低预检耗时。 命令示例如下: @@ -236,8 +239,9 @@ python multi_run_ut.py -forward ./forward_info_0.json -backward ./backward_info_ | -d或--device | 指定Device ID,选择UT代码运行所在的卡,默认值为0,支持同时指定0~7,共8个Device。 | 否 | | -csv_path或--result_csv_path | 指定本次运行中断时生成的`accuracy_checking_result_{timestamp}.csv`文件路径,执行run_ut中断时,若想从中断处继续执行,配置此参数即可。需要指定为上次中断的`accuracy_checking_result_{timestamp}.csv`文件。详见“**断点续检**”。 | run_ut操作中断后继续执行场景下必选 | | -real_data_path | 指定run_ut操作的真实数据路径。真实数据dump模式通过**msCheckerConfig.update_config**接口的real_data参数开启。指定绝对路径为forward_real_data和backward_real_data目录的父目录。 | dump的数据为真实数据下必选 | +| -f或--filter_api | 过滤模型中除最大值和最小值以外其他参数和结构相同的API。适用于模型较大且重复API较多的场景。 | 否 | -### 断点续检 +#### 断点续检 精度预检run_ut过程中,若因环境、数据量过大等原因导致预检进程中断,那么当用户解决这些问题后,重新执行run_ut操作,可以通过断点续检操作继续前面未完成的预检,会在-csv_path指定的`accuracy_checking_result_{timestamp}.csv`文件以及对应的`accuracy_checking_details_{timestamp}.csv`文件中继续写入后续的结果,不会重新创建结果文件。 @@ -249,6 +253,29 @@ python multi_run_ut.py -forward ./forward_info_0.json -backward ./backward_info_ python run_ut.py -forward ./forward_info_0.json -backward ./backward_info_0.json -csv_path /home/xxx/ut/accuracy_checking_result_{timestamp}.csv ``` +#### API预检白名单 + +run_ut过程同样支持API预检白名单,操作方式如下: + +修改att/debug/accuracy_tools/api_accuracy_checker目录下config.yaml文件的white_list参数,配置需要预检的API名称,详见“config.yaml文件说明”。 + +### config.yaml文件说明 + +config.yaml文件可以通过配置参数来控制dump和run_ut操作的真实数据模式以及白名单等功能。 + +文件路径为:att/debug/accuracy_tools/api_accuracy_checker/config.yaml + +| 参数名称 | 说明 | 是否必选 | +| ----------------- | ------------------------------------------------------------ | -------- | +| dump_path | 设置dump路径,默认为当前目录。若指定目录不存在,则自动创建。 | 否 | +| real_data | 真实数据模式,可取值True或False,默认为False,表示随机数据模式,配置为True后开启真实数据模式,dump信息增加forward_real_data和backward_real_data目录,目录下保存每个API输入的具体数值。 | 否 | +| enable_dataloader | 自动dump数据开关,可取值True(开启)、False(关闭),默认关闭。 | 否 | +| target_iter | 指定dump某个step的数据,默认为[1],须指定为训练脚本中存在的step。target_iter为list格式,可配置逐个step,例如:target_iter=[0,1,2];也可以配置step范围,例如:target_iter=list(range(0,9)),表示dump第0到第8个step。 | 否 | +| white_list | API dump白名单,指定dump具体API数据,也可以直接配置预检的API白名单,详细请参见“**API预检白名单**”。参数示例:white_list=["conv1d", "conv2d"]。默认未配置白名单,即dump全量API数据。 | 否 | +| error_data_path | 配置保存精度未达标的API输入输出数据路径。 | 否 | +| jit_compile | 开启jit编译。 | 否 | +| precision | 浮点数表示位数,默认取小数点后14位。 | 否 | + ## 预检结果 精度预检生成的`accuracy_checking_result_{timestamp}.csv`和`accuracy_checking_details_{timestamp}.csv`文件示例如下: @@ -262,8 +289,8 @@ python run_ut.py -forward ./forward_info_0.json -backward ./backward_info_0.json | 字段 | 含义 | | --------------------- | ------------------------------------------------------------ | | API name | API名称。 | -| Forward Test Success | 前向API是否通过测试,TRUE为通过,FALSE为不通过。 | -| Backward Test Success | 反向API是否通过测试,TRUE为通过,FALSE为不通过,N/A表示该API没有反向。 | +| Forward Test Success | 前向API是否通过测试,pass为通过,warning为待观察,error为错误。 | +| Backward Test Success | 反向API是否通过测试,pass为通过,warning为待观察,error为错误,如果是空白的话代表该API没有反向输出。 | | Message | 提示信息。 | Forward Test Success和Backward Test Success是否通过测试是由`accuracy_checking_details_{timestamp}.csv`中的余弦相似度、最大绝对误差、双百双千双万指标判定结果决定的。 @@ -274,25 +301,28 @@ Forward Test Success和Backward Test Success是否通过测试是由`accuracy_ch ![f07237b1_12631423](img/accuracy_checking_details.png) -| 字段 
| 含义 | -| -------------- | ------------------------------------------------------------ | -| API name | NPU或GPU下的API名称。 | -| Bench Dtype | 标杆数据的API数据类型。 | -| Device Dtype | NPU或GPU数据的API数据类型。 | -| Shape | API的Shape信息。 | -| 余弦相似度 | NPU或GPU数据与标杆数据的余弦相似度。 | -| 最大绝对误差 | NPU或GPU数据与标杆数据的最大绝对误差。 | -| 双百指标 | 双百精度指标。是指NPU或GPU的Tensor中的元素逐个与对应的标杆数据对比,相对误差小于百分之一的个数占总元素个数的比例。测试通过标准为相对误差大于百分之一的个数占总元素个数的比例小于百分之一。 | -| 双千指标 | 双千精度指标。是指NPU或GPU的Tensor中的元素逐个与对应的标杆数据对比,相对误差小于千分之一的个数占总元素个数的比例。测试通过标准为相对误差大于千分之一的个数占总元素个数的比例小于千分之一。 | -| 双万指标 | 双万精度指标。是指NPU或GPU的Tensor中的元素逐个与对应的标杆数据对比,相对误差小于万分之一的个数占总元素个数的比例。测试通过标准为相对误差大于万分之一的个数占总元素个数的比例小于万分之一。 | -| 错误率 | NPU或GPU数据中每个Tensor精度不一致的数值的数量与Tensor中数值数量的比值。只有数据是builtin类型(bool、int、float、str)、torch.bool和torch的int类型才会展示。 | -| 误差均衡性 | NPU或GPU数据与标杆数据精度差的的上下浮动情况。 | -| 均方根误差 | NPU或GPU数据与标杆数据的均方根误差。 | -| 小值域错误占比 | NPU或GPU Tensor中与标杆的绝对误差大于错误阈值的小值在小值域(小值的总数量)中的占比。判断为小值以及绝对误差的错误阈值见“**小值域阈值**”。 | -| 相对误差最大值 | NPU或GPU数据与标杆数据相对误差的最大值。 | -| 相对误差平均值 | NPU或GPU数据与标杆数据相对误差的平均值。 | -| Status | API预检通过状态,pass表示通过测试,error表示未通过,warning表示测试未通过双千或双万精度指标。 | -| message | 提示信息。 | +| 字段 | 含义 | +| ---------------- | ------------------------------------------------------------ | +| API name | NPU或GPU下的API名称。 | +| Bench Dtype | 标杆数据的API数据类型。 | +| Device Dtype | NPU或GPU数据的API数据类型。 | +| Shape | API的Shape信息。 | +| 余弦相似度 | NPU或GPU数据与标杆数据的余弦相似度。 | +| 最大绝对误差 | NPU或GPU数据与标杆数据的最大绝对误差。 | +| 双百指标 | 双百精度指标。是指NPU或GPU的Tensor中的元素逐个与对应的标杆数据对比,相对误差小于百分之一的个数占总元素个数的比例。测试通过标准为相对误差大于百分之一的个数占总元素个数的比例小于百分之一。 | +| 双千指标 | 双千精度指标。是指NPU或GPU的Tensor中的元素逐个与对应的标杆数据对比,相对误差小于千分之一的个数占总元素个数的比例。测试通过标准为相对误差大于千分之一的个数占总元素个数的比例小于千分之一。 | +| 双万指标 | 双万精度指标。是指NPU或GPU的Tensor中的元素逐个与对应的标杆数据对比,相对误差小于万分之一的个数占总元素个数的比例。测试通过标准为相对误差大于万分之一的个数占总元素个数的比例小于万分之一。 | +| 二进制一致错误率 | NPU或GPU数据中每个Tensor精度不一致的数值的数量与Tensor中数值数量的比值。只有数据是builtin类型(bool、int、float、str)、torch.bool和torch的int类型才会展示。 | +| 误差均衡性 | NPU或GPU数据与标杆数据精度差的上下浮动情况。 | +| 均方根误差 | NPU或GPU数据与标杆数据的均方根误差。 | +| 小值域错误占比 | NPU或GPU Tensor中与标杆的绝对误差大于错误阈值的小值在小值域(小值的总数量)中的占比。判断为小值以及绝对误差的错误阈值见“**小值域阈值**”。 | +| 相对误差最大值 | NPU或GPU数据与标杆数据相对误差的最大值。 | +| 相对误差平均值 | NPU或GPU数据与标杆数据相对误差的平均值。 | +| inf/nan错误率 | NPU与标杆inf/nan计算不一致的元素个数占总元素的个数比例。 | +| 相对误差错误率 | NPU与标杆的正常值计算相对误差,其大于错误阈值的元素个数占正常值元素个数的比例。 | +| 绝对误差错误率 | NPU与标杆的小值计算绝对误差,其大于错误阈值的元素个数占小值元素个数的比例。 | +| Status | API预检通过状态,pass表示通过测试,error表示未通过,warning表示测试未通过双千或双万精度指标,SKIP表示该API的某个参数的反向不要计算梯度,所以没有任何计算过程,其他信息均为空。 | +| message | 提示信息。 | ### 小值域阈值 @@ -324,53 +354,64 @@ API预检通过测试,则在`accuracy_checking_details_{timestamp}.csv`文件 ## 预检结果比对 -该步骤仅标杆比对法需要执行,需要同时获取NPU和GPU环境下run_ut操作的预检结果`accuracy_checking_details_{timestamp}.csv`文件。执行如下命令进行NPU和GPU预检结果的比对: +该步骤仅新精度标准比对法需要执行,需要同时获取NPU和GPU环境下run_ut操作的预检结果`accuracy_checking_details_{timestamp}.csv`文件。执行如下命令进行NPU和GPU预检结果的比对: ```bash cd $ATT_HOME/debug/accuracy_tools/api_accuracy_checker/compare -python benchmark_compare.py -npu /home/xxx/npu/accuracy_checking_details_{timestamp}.csv -gpu /home/xxx/gpu/accuracy_checking_details_{timestamp}.csv -o /home/xxx/ +python api_precision_compare.py -npu /home/xxx/npu/accuracy_checking_details_{timestamp}.csv -gpu /home/xxx/gpu/accuracy_checking_details_{timestamp}.csv -o /home/xxx/ ``` | 参数名称 | 说明 | 是否必选 | | -------------------- | ------------------------------------------------------------ | -------- | | -npu或--npu_csv_path | NPU预检结果`accuracy_checking_details_{timestamp}.csv`文件路径。默认从当前目录下识别该文件。 | 否 | | -gpu或--gpu_csv_path | GPU预检结果`accuracy_checking_details_{timestamp}.csv`文件路径。默认从当前目录下识别该文件。 | 否 | -| -o或--out_path | 指定benchmark_compare.py执行结果存盘路径,默认为当前目录。 | 否 | +| -o或--out_path | 
指定api_precision_compare.py执行结果存盘路径,默认为当前目录。 | 否 |

-执行完成后输出`benchmark_compare_result_{timestamp}.csv`和`benchmark_compare_details_{timestamp}.csv`文件。文件示例如下:
+执行完成后输出`api_precision_compare_result_{timestamp}.csv`和`api_precision_compare_details_{timestamp}.csv`文件。文件示例如下:

-可以通过先查看`benchmark_compare_result_{timestamp}.csv`文件的Forward Test Success和Backward Test Success,判断是否存在未通过测试的API,再查看`benchmark_compare_details_{timestamp}.csv`文件的API详细达标情况。
+可以通过先查看`api_precision_compare_result_{timestamp}.csv`文件的Forward Test Success和Backward Test Success,判断是否存在未通过测试的API,再查看`api_precision_compare_details_{timestamp}.csv`文件的API详细达标情况。

-`benchmark_compare_result_{timestamp}.csv`
+`api_precision_compare_result_{timestamp}.csv`

-![891a3bd8_12631423](img/benchmark_compare_result.png)
+![api_precision_compare_result](img/api_precision_compare_result.png)

| 字段 | 含义 |
| --------------------- | ------------------------------------------------------------ |
| API name | API名称。 |
-| Forward Test Success | 前向API是否通过测试,TRUE为通过,FALSE为不通过。 |
-| Backward Test Success | 反向API是否通过测试,TRUE为通过,FALSE为不通过,N/A表示该API没有反向。 |
+| Forward Test Success | 前向API是否通过测试,pass为通过,warning为待观察,error为错误,skip表示该API的数据类型不支持使用新精度标准进行比对,如float64。 |
+| Backward Test Success | 反向API是否通过测试,pass为通过,warning为待观察,error为错误,如果是空白的话代表该API没有反向输出,skip表示该API的数据类型不支持使用新精度标准进行比对,如float64。 |
| Message | 提示信息。 |

-Forward Test Success和Backward Test Success是否通过测试是由`benchmark_compare_details_{timestamp}.csv`中的各个指标判定结果决定的。需要注意的是`benchmark_compare_details_{timestamp}.csv`中可能存在一个API的前向(反向)有多个输出,那么每个输出记录一行,而在`benchmark_compare_result_{timestamp}.csv`中的结果需要该API的所有结果均为pass才能标记为TRUE,否则标记FALSE或WARING。
-
-`benchmark_compare_details_{timestamp}.csv`
-
-![f07237b1_12631423](img/benchmark_compare_details.png)
-
-| 字段 | 含义 |
-| ---------------------- | ------------------------------------------------------------ |
-| API name | NPU或GPU下的API名称。 |
-| 小值域错误比值 | NPU与CPU的小值域的错误比率/GPU与CPU的小值域的错误比率。 |
-| 小值域错误判定结果 | 小值域错误比值小于等于1标记为pass,1~2之间标记为waring,大于2标记为error。 |
-| 均方根误差比值 | NPU与CPU的均方根误差/GPU与CPU的均方根误差。 |
-| 均方根误差判定结果 | 均方根误差比值小于等于1标记为pass,1~2之间标记为waring,大于2标记为error。 |
-| 相对误差最大值比值 | NPU与CPU的相对误差最大值/GPU与CPU的相对误差最大值。 |
-| 相对误差最大值判定结果 | 相对误差最大值比值小于等于1标记为pass,1~10之间标记为waring,大于10标记为error。 |
-| 相对误差平均值比值 | NPU与CPU的相对误差的平均值/GPU与CPU的相对误差的平均值。 |
-| 相对误差平均值判定结果 | 相对误差平均值比值小于等于1标记为pass,1~2之间标记为waring,大于2标记为error。 |
-| 误差均衡性比值 | NPU与CPU的误差均衡性/GPU与CPU的误差均衡性。 |
-| 误差均衡性判定结果 | 误差均衡性比值小于等于1标记为pass,1~2之间标记为waring,大于2标记为error。该字段暂不参与benchmark_compare_result的结果判定。 |
+Forward Test Success和Backward Test Success是否通过测试是由`api_precision_compare_details_{timestamp}.csv`中的各个指标判定结果决定的。需要注意的是`api_precision_compare_details_{timestamp}.csv`中可能存在一个API的前向(反向)有多个输出,那么每个输出记录一行,而在`api_precision_compare_result_{timestamp}.csv`中,需要该API的所有输出均为pass才能标记为pass,否则按各输出中最严重的状态标记为error或warning。
+
+`api_precision_compare_details_{timestamp}.csv`
+
+![api_precision_compare_details](img/api_precision_compare_details.png)
+
+| 字段 | 含义 |
+| ------------------------ | ------------------------------------------------------------ |
+| API name | NPU或GPU下的API名称。 |
+| 小值域错误比值 | NPU与CPU的小值域的错误比率/GPU与CPU的小值域的错误比率。 |
+| 小值域错误判定结果 | 小值域错误比值小于等于1标记为pass,1~2之间标记为warning,大于2标记为error。 |
+| 均方根误差比值 | NPU与CPU的均方根误差/GPU与CPU的均方根误差。 |
+| 均方根误差判定结果 | 均方根误差比值小于等于1标记为pass,1~2之间标记为warning,大于2标记为error。 |
+| 相对误差最大值比值 | NPU与CPU的相对误差最大值/GPU与CPU的相对误差最大值。 |
+| 相对误差最大值判定结果 | 相对误差最大值比值小于等于1标记为pass,1~10之间标记为warning,大于10标记为error。 |
+| 相对误差平均值比值 | NPU与CPU的相对误差的平均值/GPU与CPU的相对误差的平均值。 |
+| 相对误差平均值判定结果 | 相对误差平均值比值小于等于1标记为pass,1~2之间标记为warning,大于2标记为error。 |
+| 误差均衡性比值 | NPU与CPU的误差均衡性/GPU与CPU的误差均衡性。 |
+| 误差均衡性判定结果 | 误差均衡性比值小于等于1标记为pass,1~2之间标记为warning,大于2标记为error。该字段暂不参与api_precision_compare_result的结果判定。 |
+| inf/nan错误率 | NPU与标杆inf/nan计算不一致的元素个数占总元素的个数比例。 |
+| inf/nan判定结果 | inf/nan错误率判定结果,等于0标记为pass,其余情况标记为error。 |
+| 相对误差错误率 | NPU与标杆的正常值计算相对误差,其大于错误阈值的元素个数占正常值元素个数的比例。 |
+| 相对误差判定结果 | 相对误差错误率判定结果,等于0标记为pass,其余情况标记为error。 |
+| 绝对误差错误率 | NPU与标杆的小值计算绝对误差,其大于错误阈值的元素个数占小值元素个数的比例。 |
+| 绝对误差判定结果 | 绝对误差错误率判定结果,等于0标记为pass,其余情况标记为error。 |
+| 二进制一致错误率 | NPU或GPU数据中每个Tensor精度不一致的数值的数量与Tensor中数值数量的比值。只有数据是builtin类型(bool、int、float、str)、torch.bool和torch的int类型或者在新精度标准中使用二进制一致算法进行比对的API才会展示。 |
+| 二进制一致错误率判定结果 | 二进制一致错误率判定结果,等于0标记为pass,其余情况标记为error。 |
+| 比对结果 | 综合所有指标的最终结果。如果比对指标中有error,则标记为error;有warning,则标记为warning;否则标记为pass。 |
+| 比对算法 | API使用的比对算法,为标杆比对法、二进制一致法和绝对阈值法中的一种。 |
+| Message | 提示信息。当前提示该API比对结果为error或warning时对应不符合标准的指标。 |

# 溢出解析工具

@@ -412,7 +453,7 @@ Forward Test Success和Backward Test Success是否通过测试是由`benchmark_c

 反向过程溢出的API暂不支持该功能。

-具体参数解释请参见“Ascend模型精度预检工具”。
+具体参数解释请参见“**Ascend模型精度预检工具**”。

# FAQ

@@ -436,7 +477,19 @@ Forward Test Success和Backward Test Success是否通过测试是由`benchmark_c

 答:对于fp16的数据,CPU会上升一个精度fp32去计算,这是和算子那边对齐的精度结论,CPU用更高精度去计算会更接近真实值。

-6. Tensor 魔法函数具体对应什么操作?
+6. 添加预检工具后截取操作报错:`IndexError: too many indices for tensor of dimension x` 或 `TypeError: len() of a 0-d tensor`。
+
+   答:注释工具目录api_accuracy_checker/hook_module/support_wrap_ops.yaml文件中Tensor:下的`- __getitem__`,工具会跳过dump该API。如果是需要dump的关键位置API,也可以考虑根据报错堆栈信息注释引发报错的类型检查。
+
+7. 添加预检工具后F.gelu触发ValueError报错:`activation_func must be F.gelu`等。
+
+   答:注释工具目录api_accuracy_checker/hook_module/support_wrap_ops.yaml文件中functional:下的`- gelu`,工具会跳过dump该API。如果是需要dump的关键位置API,也可以考虑根据报错堆栈信息注释引发报错的类型检查。
+
+8. 添加预检工具后触发AsStrided算子相关的报错,或者编译相关的报错,如:`Failed to compile Op [AsStrided]`。
+
+   答:注释工具目录api_accuracy_checker/hook_module/support_wrap_ops.yaml文件中Tensor:下的`- t`和`- transpose`。
+
+9. Tensor 魔法函数具体对应什么操作?
答:

diff --git a/debug/accuracy_tools/api_accuracy_checker/common/config.py b/debug/accuracy_tools/api_accuracy_checker/common/config.py
index 55b25d25c6d3b0b673596fc22358e573a4e889d0..57f59b0785c5cbb2b3e41903ce7350c414fadf27 100644
--- a/debug/accuracy_tools/api_accuracy_checker/common/config.py
+++ b/debug/accuracy_tools/api_accuracy_checker/common/config.py
@@ -56,12 +56,13 @@ class Config:
     def __str__(self):
         return '\n'.join(f"{key}={value}" for key, value in self.config.items())

-    def update_config(self, dump_path=None, real_data=False, target_iter=None, white_list=None):
+    def update_config(self, dump_path=None, real_data=None, target_iter=None, white_list=None, enable_dataloader=None):
         args = {
             "dump_path": dump_path if dump_path else self.config.get("dump_path", './'),
-            "real_data": real_data,
+            "real_data": real_data if real_data is not None else self.config.get("real_data", False),
             "target_iter": target_iter if target_iter else self.config.get("target_iter", [1]),
-            "white_list": white_list if white_list else self.config.get("white_list", [])
+            "white_list": white_list if white_list else self.config.get("white_list", []),
+            "enable_dataloader": enable_dataloader if enable_dataloader is not None else self.config.get("enable_dataloader", False)
         }
         for key, value in args.items():
             if key in self.config:
diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py b/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
index 394ea9cf0e49158355dfce62c58f65fd1a3722b5..9dd204f5bc9a625170f6f5957cd7d9c3179ee694 100644
--- a/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
+++ b/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
@@ -10,7 +10,7 @@ def cosine_sim(bench_output, device_output):
     msg = ""
     n_value = device_output.reshape(-1)
     b_value = bench_output.reshape(-1)
-    cos = CompareConst.NA
+    cos = CompareConst.SPACE
     np.seterr(divide="ignore", invalid="ignore")
     if n_value.shape != b_value.shape:
         msg = f"Shape of device and bench outputs don't match. device: {n_value.shape}, bench: {b_value.shape}."
@@ -21,13 +21,14 @@
     n_value_max = np.max(np.abs(n_value))
     b_value_max = np.max(np.abs(b_value))
     if n_value_max <= np.finfo(float).eps and b_value_max <= np.finfo(float).eps:
+        msg = "All the data in device and bench outputs are zero."
         return cos, True, msg
     elif n_value_max <= np.finfo(float).eps:
         msg = "All the data is zero in device dump data."
-        return CompareConst.NA, False, msg
+        return CompareConst.SPACE, False, msg
     elif b_value_max <= np.finfo(float).eps:
         msg = "All the data is zero in bench dump data."
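前面config.py的update_config新增了enable_dataloader参数,并用is not None判断使显式传入False也能生效。下面是一个调用示意(参数取值仅为示例,含义见README.md的“msCheckerConfig.update_config”一节):

```python
from api_accuracy_checker.dump import msCheckerConfig

# 开启真实数据模式并指定dump路径(示例值)
msCheckerConfig.update_config(dump_path="./step1", real_data=True)

# 显式关闭dataloader自动dump;传入False不会被回退到config.yaml中的取值
msCheckerConfig.update_config(enable_dataloader=False)
```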
-        return CompareConst.NA, False, msg
+        return CompareConst.SPACE, False, msg
     else:
         n_value = n_value.astype(float) / n_value_max
         b_value = b_value.astype(float) / b_value_max
@@ -119,3 +120,72 @@ def get_small_value_mask(abs_bench, both_finite_mask, small_value_threshold):
     small_value_mask = np.less_equal(abs_bench, small_value_threshold)
     small_value_mask = np.logical_and(small_value_mask, both_finite_mask)
     return small_value_mask
+
+
+def get_abs_bench_with_eps(bench, dtype):
+    abs_bench = np.abs(bench)
+    eps = np.finfo(bench.dtype).eps if dtype != torch.bfloat16 else CompareConst.BFLOAT16_EPS
+    abs_bench_with_eps = abs_bench + eps
+    return abs_bench, abs_bench_with_eps
+
+
+def check_inf_nan_value(inf_nan_mask, bench_output, device_output, dtype, rtol):
+    '''
+    新精度标准的绝对阈值法中,检查npu和golden输出的inf、nan是否一致
+    输入:
+        inf_nan_mask:npu输出和golden输出的inf、nan的mask
+        bench_output:golden输出
+        device_output:npu输出
+        dtype:npu输出的dtype
+        rtol:相对误差阈值
+    输出:
+        inf_nan_err_ratio:npu输出和golden输出的inf、nan不一致的比例
+    '''
+    abs_gpu, abs_gpu_with_eps = get_abs_bench_with_eps(bench_output, dtype)
+    golden_same_dtype = bench_output.astype(device_output.dtype)
+    a_min = np.finfo(device_output.dtype).min if dtype != torch.bfloat16 else CompareConst.BFLOAT16_MIN
+    a_max = np.finfo(device_output.dtype).max if dtype != torch.bfloat16 else CompareConst.BFLOAT16_MAX
+    golden_clip = np.clip(golden_same_dtype, a_min, a_max)
+    npu_clip = np.clip(device_output, a_min, a_max)
+    clipped_abs_ae = np.abs(npu_clip - golden_clip)
+    clipped_re = clipped_abs_ae / abs_gpu_with_eps
+    pass_mask = np.less_equal(clipped_re, rtol)
+    both_nan_mask = np.logical_and(np.isnan(device_output), np.isnan(golden_clip))
+    pass_mask = np.logical_or(pass_mask, both_nan_mask)
+    not_pass_mask = np.logical_not(pass_mask)
+    not_pass_mask = np.logical_and(not_pass_mask, inf_nan_mask)
+
+    inf_nan_err_cnt = np.sum(not_pass_mask)
+    return 0 if np.sum(inf_nan_mask) == 0 else inf_nan_err_cnt / np.sum(inf_nan_mask)
+
+
+def check_small_value(abs_err, small_value_mask, small_value_atol):
+    '''
+    新精度标准的绝对阈值法中,检查npu和golden输出在小值域的绝对误差是否超过阈值
+    输入:
+        abs_err:npu输出和golden输出的绝对误差
+        small_value_mask:小值域元素的mask
+        small_value_atol:小值域的绝对误差阈值
+    输出:
+        abs_err_ratio:小值域中绝对误差超过阈值的元素比例
+    '''
+    greater_mask = np.greater(abs_err, small_value_atol)
+    err_mask = np.logical_and(greater_mask, small_value_mask)
+    err_cnt = np.sum(err_mask)
+    return 0 if np.sum(small_value_mask) == 0 else err_cnt / np.sum(small_value_mask)
+
+
+def check_norm_value(normal_value_mask, rel_err, rtol):
+    '''
+    新精度标准的绝对阈值法中,检查npu和golden输出在正常值域的相对误差是否超过阈值
+    输入:
+        normal_value_mask:正常值元素的mask
+        rel_err:npu输出和golden输出的相对误差
+        rtol:相对误差阈值
+    输出:
+        rel_err_ratio:正常值中相对误差超过阈值的元素比例
+    '''
+    err_mask = np.greater(rel_err, rtol)
+    err_mask = np.logical_and(err_mask, normal_value_mask)
+    err_cnt = np.sum(err_mask)
+    return 0 if np.sum(normal_value_mask) == 0 else err_cnt / np.sum(normal_value_mask)
diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/benchmark_compare.py b/debug/accuracy_tools/api_accuracy_checker/compare/api_precision_compare.py
similarity index 43%
rename from debug/accuracy_tools/api_accuracy_checker/compare/benchmark_compare.py
rename to debug/accuracy_tools/api_accuracy_checker/compare/api_precision_compare.py
index 9357c6c2918daa90ebb5b1c0a87f7d973a09eea0..8c98130ab61d27479817bc9c8c7eed0cc4f38361 100644
--- a/debug/accuracy_tools/api_accuracy_checker/compare/benchmark_compare.py
+++ b/debug/accuracy_tools/api_accuracy_checker/compare/api_precision_compare.py
@@ -9,9 +9,10 @@ import
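上面algorithm.py新增的get_abs_bench_with_eps、check_inf_nan_value、check_small_value、check_norm_value是绝对阈值法的基本构件。下面是一个把它们串起来的最小示意(非仓库代码:掩码的构造方式是按函数名推测的假设,阈值取api_precision_threshold.yaml中torch.float16的配置):

```python
import numpy as np
import torch

from api_accuracy_checker.compare.algorithm import (
    get_abs_bench_with_eps, get_small_value_mask,
    check_inf_nan_value, check_small_value, check_norm_value)

# 模拟标杆(CPU高精度)输出与NPU输出
bench = np.random.rand(1024).astype(np.float32)
device = bench + np.random.randn(1024).astype(np.float32) * 1e-5
dtype = torch.float16  # 假定该API在NPU侧的输出dtype
rtol = small_value = small_value_atol = 0.001  # 取自阈值yaml中torch.float16的配置

abs_bench, abs_bench_with_eps = get_abs_bench_with_eps(bench, dtype)
abs_err = np.abs(device - bench)
rel_err = abs_err / abs_bench_with_eps

# 假设:inf/nan掩码取任一侧为非有限值的位置,小值/正常值按abs_bench划分
both_finite_mask = np.logical_and(np.isfinite(bench), np.isfinite(device))
inf_nan_mask = np.logical_not(both_finite_mask)
small_value_mask = get_small_value_mask(abs_bench, both_finite_mask, small_value)
normal_value_mask = np.logical_and(both_finite_mask, np.logical_not(small_value_mask))

# 三个错误率均为0时,绝对阈值法判定为pass(见下文get_absolute_threshold_result)
print(check_inf_nan_value(inf_nan_mask, bench, device, dtype, rtol))
print(check_small_value(abs_err, small_value_mask, small_value_atol))
print(check_norm_value(normal_value_mask, rel_err, rtol))
```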
pandas as pd from api_accuracy_checker.common.utils import print_info_log, print_warn_log, print_error_log, write_csv, \ CompareException, create_directory from api_accuracy_checker.common.config import msCheckerConfig -from api_accuracy_checker.compare.compare_utils import CompareConst, BENCHMARK_COMPARE_RESULT_FILE_NAME, \ -BENCHMARK_COMPARE_DETAILS_FILE_NAME, result_mapping, Benchmark_Compare_Support_List, Benchmark_Compare_Unsupport_List, \ - BenchmarkCompareColumn +from api_accuracy_checker.compare.compare_utils import CompareConst, API_PRECISION_COMPARE_RESULT_FILE_NAME, \ +API_PRECISION_COMPARE_DETAILS_FILE_NAME, BENCHMARK_COMPARE_SUPPORT_LIST, API_PRECISION_COMPARE_UNSUPPORT_LIST, \ + ApiPrecisionCompareColumn, AbsoluteStandardApi, BinaryStandardApi, BINARY_COMPARE_UNSUPPORT_LIST, convert_str_to_float +from api_accuracy_checker.compare.compare_column import ApiPrecisionOutputColumn from api_accuracy_checker.run_ut.run_ut import get_validated_result_csv_path from ptdbg_ascend.src.python.ptdbg_ascend.common.file_check_util import FileCheckConst, FileChecker, change_mode from ptdbg_ascend.src.python.ptdbg_ascend.common.utils import check_path_before_create @@ -45,6 +46,26 @@ benchmark_algorithms_thresholds = { } +benchmark_message = { + "small_value_err_status": { + CompareConst.ERROR: "ERROR: 小值域错误比值超过阈值\n", + CompareConst.WARNING: "WARNING: 小值域错误比值超过阈值\n" + }, + "rmse_status": { + CompareConst.ERROR: "ERROR: 均方根误差比值超过阈值\n", + CompareConst.WARNING: "WARNING: 均方根误差比值超过阈值\n" + }, + "max_rel_err_status": { + CompareConst.ERROR: "ERROR: 相对误差最大值比值超过阈值\n", + CompareConst.WARNING: "WARNING: 相对误差最大值比值超过阈值\n" + }, + "mean_rel_err_status": { + CompareConst.ERROR: "ERROR: 相对误差平均值比值超过阈值\n", + CompareConst.WARNING: "WARNING: 相对误差平均值比值超过阈值\n" + } +} + + class BenchmarkStandard: def __init__(self, api_name, npu_precision, gpu_precision): self.api_name = api_name @@ -84,19 +105,19 @@ class BenchmarkStandard: def _compare_ratio(self): self.small_value_err_ratio = self._calc_ratio( - self.npu_precision.get(BenchmarkCompareColumn.SMALL_VALUE_ERROR_RATE), - self.gpu_precision.get(BenchmarkCompareColumn.SMALL_VALUE_ERROR_RATE)) - self.rmse_ratio = self._calc_ratio(self.npu_precision.get(BenchmarkCompareColumn.RMSE), - self.gpu_precision.get(BenchmarkCompareColumn.RMSE), 10000.0) - self.max_rel_err_ratio = self._calc_ratio(self.npu_precision.get(BenchmarkCompareColumn.MAX_REL_ERR), - self.gpu_precision.get(BenchmarkCompareColumn.MAX_REL_ERR), 10000.0) - self.mean_rel_err_ratio = self._calc_ratio(self.npu_precision.get(BenchmarkCompareColumn.MEAN_REL_ERR), - self.gpu_precision.get(BenchmarkCompareColumn.MEAN_REL_ERR)) - self.eb_ratio = self._calc_ratio(self.npu_precision.get(BenchmarkCompareColumn.EB), - self.gpu_precision.get(BenchmarkCompareColumn.EB)) + self.npu_precision.get(ApiPrecisionCompareColumn.SMALL_VALUE_ERROR_RATE), + self.gpu_precision.get(ApiPrecisionCompareColumn.SMALL_VALUE_ERROR_RATE)) + self.rmse_ratio = self._calc_ratio(self.npu_precision.get(ApiPrecisionCompareColumn.RMSE), + self.gpu_precision.get(ApiPrecisionCompareColumn.RMSE), 10000.0) + self.max_rel_err_ratio = self._calc_ratio(self.npu_precision.get(ApiPrecisionCompareColumn.MAX_REL_ERR), + self.gpu_precision.get(ApiPrecisionCompareColumn.MAX_REL_ERR), 10000.0) + self.mean_rel_err_ratio = self._calc_ratio(self.npu_precision.get(ApiPrecisionCompareColumn.MEAN_REL_ERR), + self.gpu_precision.get(ApiPrecisionCompareColumn.MEAN_REL_ERR)) + self.eb_ratio = self._calc_ratio(self.npu_precision.get(ApiPrecisionCompareColumn.EB), + 
self.gpu_precision.get(ApiPrecisionCompareColumn.EB)) def to_column_value(self): - return [self.api_name, self.small_value_err_ratio, self.small_value_err_status, self.rmse_ratio, + return [self.small_value_err_ratio, self.small_value_err_status, self.rmse_ratio, self.rmse_status, self.max_rel_err_ratio, self.max_rel_err_status, self.mean_rel_err_ratio, self.mean_rel_err_status, self.eb_ratio, self.eb_status] @@ -112,6 +133,7 @@ class BenchmarkStandard: @staticmethod def _calc_ratio(x, y, default_value=1.0): + x, y = convert_str_to_float(x), convert_str_to_float(y) if math.isclose(y, 0.0): return 1.0 if math.isclose(x, 0.0) else default_value else: @@ -126,8 +148,10 @@ def write_detail_csv(content, save_path): write_csv(rows, save_path) -def benchmark_compare(config): - print_info_log("start benchmark compare task") +def api_precision_compare(config): + print_info_log("Start compare task") + print_info_log(f"Compare task result will be saved in {config.result_csv_path}") + print_info_log(f"Compare task detail will be saved in {config.details_csv_path}") try: npu_data = pd.read_csv(config.npu_csv_path) except Exception as err: @@ -138,8 +162,8 @@ def benchmark_compare(config): except Exception as err: print_error_log(f"Open gpu csv Error: %s" % str(err)) check_csv_columns(gpu_data.columns, "gpu_csv") - detail_csv_title = [BenchmarkCompareColumn.get_detail_csv_title()] - result_csv_title = [BenchmarkCompareColumn.get_result_csv_title()] + detail_csv_title = [ApiPrecisionCompareColumn.get_detail_csv_title()] + result_csv_title = [ApiPrecisionCompareColumn.get_result_csv_title()] write_csv(result_csv_title, config.result_csv_path) write_csv(detail_csv_title, config.details_csv_path) try: @@ -151,93 +175,185 @@ def benchmark_compare(config): def analyse_csv(npu_data, gpu_data, config): - forward_status, backward_status = CompareConst.NA, CompareConst.NA - last_api_name = None - last_api_dtype = None + forward_status, backward_status = [], [] + last_api_name, last_api_dtype = None, None for _, row_npu in npu_data.iterrows(): message = '' - part_api_name = row_npu[BenchmarkCompareColumn.API_NAME] - row_gpu = gpu_data[gpu_data[BenchmarkCompareColumn.API_NAME] == part_api_name] - api_name, direction_status, _, _ = part_api_name.split(".") - binary_consistency_check = False + compare_column = ApiPrecisionOutputColumn() + full_api_name_with_direction_status = row_npu[ApiPrecisionCompareColumn.API_NAME] + row_gpu = gpu_data[gpu_data[ApiPrecisionCompareColumn.API_NAME] == full_api_name_with_direction_status] + full_api_name, direction_status, _, _ = full_api_name_with_direction_status.split(".") if row_gpu.empty: - print_warn_log(f'This API : {part_api_name} does not exist in the GPU data.') + print_warn_log(f'This API : {full_api_name_with_direction_status} does not exist in the GPU data.') continue if len(row_gpu) > 1: - msg = f'This API : {part_api_name} has multiple records in the GPU data.' + msg = f'This API : {full_api_name_with_direction_status} has multiple records in the GPU data.' 
raise CompareException(CompareException.INVALID_DATA_ERROR, msg) row_gpu = row_gpu.iloc[0] - if row_npu[BenchmarkCompareColumn.DEVICE_DTYPE] in Benchmark_Compare_Support_List: - bs = BenchmarkStandard(part_api_name, row_npu, row_gpu) - bs.get_result() - write_detail_csv(bs.to_column_value(), config.details_csv_path) - else: - binary_consistency_check = True - - if last_api_name is not None and api_name != last_api_name: - if last_api_dtype in Benchmark_Compare_Unsupport_List: + #当前API的输出为空(例如反向过程中requires_grad=False),跳过比对 + if row_npu[ApiPrecisionCompareColumn.DEVICE_DTYPE].isspace(): + continue + _, api_name, _ = full_api_name.split("*") + new_status = CompareConst.SPACE + compare_column.api_name = full_api_name_with_direction_status + if row_npu[ApiPrecisionCompareColumn.DEVICE_DTYPE] not in BINARY_COMPARE_UNSUPPORT_LIST or api_name in BinaryStandardApi: + new_status = record_binary_consistency_result(compare_column, row_npu) + elif api_name in AbsoluteStandardApi: + new_status = record_absolute_threshold_result(compare_column, row_npu) + elif row_npu[ApiPrecisionCompareColumn.DEVICE_DTYPE] in BENCHMARK_COMPARE_SUPPORT_LIST: + bs = BenchmarkStandard(full_api_name_with_direction_status, row_npu, row_gpu) + new_status = record_benchmark_compare_result(compare_column, bs) + write_detail_csv(compare_column.to_column_value(), config.details_csv_path) + + if last_api_name is not None and full_api_name != last_api_name: + if last_api_dtype in API_PRECISION_COMPARE_UNSUPPORT_LIST: message = unsupported_message write_csv([[last_api_name, "skip", "skip", message]], config.result_csv_path) - forward_status, backward_status = CompareConst.NA, CompareConst.NA + forward_status, backward_status = [], [] message = '' else: - write_csv([[last_api_name, forward_status, backward_status, message]], config.result_csv_path) - forward_status, backward_status = CompareConst.NA, CompareConst.NA + forward_result = get_api_checker_result(forward_status) + backward_result = get_api_checker_result(backward_status) + write_csv([[last_api_name, forward_result, backward_result, message]], config.result_csv_path) + forward_status, backward_status = [], [] message = '' - is_supported = row_npu[BenchmarkCompareColumn.DEVICE_DTYPE] not in Benchmark_Compare_Unsupport_List - last_api_name = api_name - if pd.isna(row_npu[BenchmarkCompareColumn.DEVICE_DTYPE]): - continue - last_api_dtype = row_npu[BenchmarkCompareColumn.DEVICE_DTYPE] + is_supported = row_npu[ApiPrecisionCompareColumn.DEVICE_DTYPE] not in API_PRECISION_COMPARE_UNSUPPORT_LIST + last_api_name = full_api_name + last_api_dtype = row_npu[ApiPrecisionCompareColumn.DEVICE_DTYPE] if not is_supported: continue - - if binary_consistency_check: - new_status = check_error_rate(row_npu[BenchmarkCompareColumn.ERROR_RATE], - row_gpu[BenchmarkCompareColumn.ERROR_RATE]) - else: - new_status = result_mapping.get(bs.final_result) - + if direction_status == 'forward': - forward_status = update_status(forward_status, new_status) + forward_status.append(new_status) elif direction_status == 'backward': - backward_status = update_status(backward_status, new_status) + backward_status.append(new_status) else: print_error_log(f"Invalid direction status: {direction_status}") if last_api_name is not None: - if last_api_dtype in Benchmark_Compare_Unsupport_List: + if last_api_dtype in API_PRECISION_COMPARE_UNSUPPORT_LIST: message = unsupported_message write_csv([[last_api_name, "skip", "skip", message]], config.result_csv_path) else: - write_csv([[last_api_name, forward_status, 
backward_status, message]], config.result_csv_path) + forward_result = get_api_checker_result(forward_status) + backward_result = get_api_checker_result(backward_status) + write_csv([[last_api_name, forward_result, backward_result, message]], config.result_csv_path) + + +def check_error_rate(npu_error_rate): + return CompareConst.PASS if convert_str_to_float(npu_error_rate) == 0 else CompareConst.ERROR -def check_error_rate(npu_error_rate, gpu_error_rate): - return npu_error_rate == 0 and gpu_error_rate == 0 +def get_absolute_threshold_result(row_npu): + inf_nan_error_ratio = convert_str_to_float(row_npu[ApiPrecisionCompareColumn.INF_NAN_ERROR_RATIO]) + rel_err_ratio = convert_str_to_float(row_npu[ApiPrecisionCompareColumn.REL_ERR_RATIO]) + abs_err_ratio = convert_str_to_float(row_npu[ApiPrecisionCompareColumn.ABS_ERR_RATIO]) + inf_nan_result = CompareConst.PASS if inf_nan_error_ratio == 0 else CompareConst.ERROR + rel_err_result = CompareConst.PASS if rel_err_ratio == 0 else CompareConst.ERROR + abs_err_result = CompareConst.PASS if abs_err_ratio == 0 else CompareConst.ERROR -def update_status(status, new_status): - if status != CompareConst.NA: - return status and new_status + if CompareConst.ERROR in [inf_nan_result, rel_err_result, abs_err_result]: + absolute_threshold_result = CompareConst.ERROR else: - return new_status + absolute_threshold_result = CompareConst.PASS + + return { + "inf_nan_error_ratio": inf_nan_error_ratio, + "inf_nan_result": inf_nan_result, + "rel_err_ratio": rel_err_ratio, + "rel_err_result": rel_err_result, + "abs_err_ratio": abs_err_ratio, + "abs_err_result": abs_err_result, + "absolute_threshold_result": absolute_threshold_result, + } + + +def get_api_checker_result(status): + if not status: + return CompareConst.SPACE + for const in (CompareConst.ERROR, CompareConst.WARNING): + if const in status: + return const + return CompareConst.PASS def check_csv_columns(columns, csv_type): - required_columns = BenchmarkCompareColumn.to_required_columns() + required_columns = ApiPrecisionCompareColumn.to_required_columns() missing_columns = [column for column in required_columns if column not in columns] if missing_columns: msg = f"The followint columns {','.join(missing_columns)} are missing in{csv_type}" raise CompareException(CompareException.INVALID_DATA_ERROR, msg) -def _benchmark_compare(): - parser = argparse.ArgumentParser() - _benchmark_compare_parser(parser) +def record_binary_consistency_result(compare_column, row_npu): + new_status = check_error_rate(row_npu[ApiPrecisionCompareColumn.ERROR_RATE]) + compare_column.error_rate = row_npu[ApiPrecisionCompareColumn.ERROR_RATE] + compare_column.error_rate_status = new_status + compare_column.compare_result = new_status + compare_column.compare_algorithm = "二进制一致法" + message = '' + if compare_column.error_rate_status == CompareConst.ERROR: + message += "ERROR: 二进制一致错误率超过阈值" + compare_column.compare_message = message + return new_status + + +def record_absolute_threshold_result(compare_column, row_npu): + absolute_threshold_result = get_absolute_threshold_result(row_npu) + compare_column.inf_nan_error_ratio = absolute_threshold_result.get("inf_nan_error_ratio") + compare_column.inf_nan_error_ratio_status = absolute_threshold_result.get("inf_nan_result") + compare_column.rel_err_ratio = absolute_threshold_result.get("rel_err_ratio") + compare_column.rel_err_ratio_status = absolute_threshold_result.get("rel_err_result") + compare_column.abs_err_ratio = absolute_threshold_result.get("abs_err_ratio") + 
compare_column.abs_err_ratio_status = absolute_threshold_result.get("abs_err_result") + compare_column.compare_result = absolute_threshold_result.get("absolute_threshold_result") + compare_column.compare_algorithm = "绝对阈值法" + message = '' + if compare_column.inf_nan_error_ratio_status == CompareConst.ERROR: + message += "ERROR: inf/nan错误率超过阈值\n" + if compare_column.rel_err_ratio_status == CompareConst.ERROR: + message += "ERROR: 相对误差错误率超过阈值\n" + if compare_column.abs_err_ratio_status == CompareConst.ERROR: + message += "ERROR: 绝对误差错误率超过阈值\n" + compare_column.compare_message = message + return compare_column.compare_result + + +def record_benchmark_compare_result(compare_column, bs): + bs.get_result() + compare_column.small_value_err_ratio = bs.small_value_err_ratio + compare_column.small_value_err_status = bs.small_value_err_status + compare_column.rmse_ratio = bs.rmse_ratio + compare_column.rmse_status = bs.rmse_status + compare_column.max_rel_err_ratio = bs.max_rel_err_ratio + compare_column.max_rel_err_status = bs.max_rel_err_status + compare_column.mean_rel_err_ratio = bs.mean_rel_err_ratio + compare_column.mean_rel_err_status = bs.mean_rel_err_status + compare_column.eb_ratio = bs.eb_ratio + compare_column.eb_status = bs.eb_status + compare_column.compare_result = bs.final_result + compare_column.compare_algorithm = "标杆比对法" + message = '' + for status_attr, messages in benchmark_message.items(): + status_value = getattr(compare_column, status_attr) + if status_value in messages: + message += messages[status_value] + compare_column.compare_message = message + return compare_column.compare_result + + +def _api_precision_compare(parser=None): + if not parser: + parser = argparse.ArgumentParser() + _api_precision_compare_parser(parser) args = parser.parse_args(sys.argv[1:]) + _api_precision_compare_command(args) + + +def _api_precision_compare_command(args): npu_csv_path = get_validated_result_csv_path(args.npu_csv_path, 'detail') gpu_csv_path = get_validated_result_csv_path(args.gpu_csv_path, 'detail') out_path = os.path.realpath(args.out_path) if args.out_path else "./" @@ -245,13 +361,13 @@ def _benchmark_compare(): create_directory(out_path) out_path_checker = FileChecker(out_path, FileCheckConst.DIR, ability=FileCheckConst.WRITE_ABLE) out_path = out_path_checker.common_check() - result_csv_path = os.path.join(out_path, BENCHMARK_COMPARE_RESULT_FILE_NAME) - details_csv_path = os.path.join(out_path, BENCHMARK_COMPARE_DETAILS_FILE_NAME) + result_csv_path = os.path.join(out_path, API_PRECISION_COMPARE_RESULT_FILE_NAME) + details_csv_path = os.path.join(out_path, API_PRECISION_COMPARE_DETAILS_FILE_NAME) compare_config = CompareConfig(npu_csv_path, gpu_csv_path, result_csv_path, details_csv_path) - benchmark_compare(compare_config) + api_precision_compare(compare_config) -def _benchmark_compare_parser(parser): +def _api_precision_compare_parser(parser): parser.add_argument("-npu", "--npu_csv_path", dest="npu_csv_path", default="", type=str, help=" , Accuracy_checking_details.csv generated on the NPU by using the " "api_accuracy_checker tool.", @@ -261,11 +377,11 @@ def _benchmark_compare_parser(parser): "api_accuracy_checker tool.", required=False) parser.add_argument("-o", "--out_path", dest="out_path", default="", type=str, - help=" The benchmark compare task result out path.", + help=" The api precision compare task result out path.", required=False) if __name__ == '__main__': - _benchmark_compare() - print_info_log("Benchmark compare task completed.") + _api_precision_compare() + 
print_info_log("Compare task completed.") \ No newline at end of file diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/api_precision_standard.yaml b/debug/accuracy_tools/api_accuracy_checker/compare/api_precision_standard.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ceccf65a46bdb757db4079c3e5e5bff71a5625b5 --- /dev/null +++ b/debug/accuracy_tools/api_accuracy_checker/compare/api_precision_standard.yaml @@ -0,0 +1,107 @@ +# Copyright (c) 2024 Huawei Technologies Co., Ltd +# All rights reserved. +# +# Licensed under the BSD 3-Clause License (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +AbsoluteThreshStandard: + - mul + - mul_ + - __mul__ + - __imul__ + - __rmul__ + - add + - add_ + - __add__ + - __iadd__ + - __radd__ + - div + - div_ + - __div__ + - __idiv__ + - divide + - divide_ + - leaky_relu + - leaky_relu_ + - prelu + - reciprocal + - reciprocal_ + - rsqrt + - rsqrt_ + - square + - square_ + - sub + - sub_ + - rsub + - __isub__ + - __sub__ + +BinaryCompareStandard: + - abs + - abs_ + - absolute + - absolute_ + - argmin + - bitwise_and + - bitwise_and_ + - broadcast_to + - ceil + - ceil_ + - equal + - fill_ + - flatten + - floor + - floor_ + - gather + - greater + - greater_ + - greater_equal + - greater_equal_ + - isfinite + - isnan + - less + - less_ + - less_equal + - less_equal_ + - logical_and + - logical_and_ + - logical_not + - logical_not_ + - logical_or + - logical_or_ + - masked_fill + - masked_fill_ + - max_pool3d + - maximum + - minimum + - neg + - neg_ + - nonzero + - not_equal + - not_equal_ + - one_hot + - pad + - relu + - reshape + - round + - round_ + - select + - sign + - sign_ + - sort + - tile + - transpose + - transpose_ + - tril + - tril_ + - triu + - triu_ diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/api_precision_threshold.yaml b/debug/accuracy_tools/api_accuracy_checker/compare/api_precision_threshold.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7565112da3122c636ac1e67a8494bbaed51d17c7 --- /dev/null +++ b/debug/accuracy_tools/api_accuracy_checker/compare/api_precision_threshold.yaml @@ -0,0 +1,390 @@ +mul: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +mul_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__mul__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__imul__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + 
torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__rmul__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +add: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +add_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__add__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__iadd__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__radd__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +div: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +div_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__div__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__idiv__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +divide: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +divide_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +leaky_relu: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +leaky_relu_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + 
torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +prelu: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +reciprocal: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +reciprocal_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +rsqrt: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +rsqrt_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +square: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +square_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +sub: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +sub_: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +rsub: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__isub__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 +__sub__: + torch.float32: + rtol: 0.000001 + small_value: 0.000001 + small_value_atol: 0.000001 + torch.float16: + rtol: 0.001 + small_value: 0.001 + small_value_atol: 0.001 + torch.bfloat16: + rtol: 0.004 + small_value: 0.001 + small_value_atol: 0.001 diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/compare.py b/debug/accuracy_tools/api_accuracy_checker/compare/compare.py index d9c94b78404c28a51f4a4c81aa1632b25aecbaef..15bfb1904c810f756600daef1a90ab1d4176ac0d 100644 --- a/debug/accuracy_tools/api_accuracy_checker/compare/compare.py +++ b/debug/accuracy_tools/api_accuracy_checker/compare/compare.py @@ -7,11 +7,12 @@ from rich.table import Table from rich.console 
import Console from api_accuracy_checker.common.utils import get_json_contents, write_csv from api_accuracy_checker.compare.compare_utils import CompareConst, check_dtype_comparable, DETAIL_TEST_ROWS, \ - precision_configs, Benchmark_Compare_Support_List + precision_configs, BENCHMARK_COMPARE_SUPPORT_LIST, AbsoluteStandardApi, BinaryStandardApi, apis_threshold from api_accuracy_checker.compare.compare_column import CompareColumn from api_accuracy_checker.compare.algorithm import get_rmse, get_error_balance, get_max_rel_err, get_mean_rel_err, \ get_rel_err, get_abs_err, get_max_abs_err, get_rel_err_ratio, cosine_sim, get_rel_err_origin, \ - get_small_value_err_ratio, get_finite_and_infinite_mask, get_small_value_mask + get_small_value_err_ratio, get_finite_and_infinite_mask, get_small_value_mask, check_inf_nan_value, \ + check_small_value, check_norm_value, get_abs_bench_with_eps from api_accuracy_checker.common.config import msCheckerConfig from ptdbg_ascend.src.python.ptdbg_ascend.common.file_check_util import FileOpen @@ -71,7 +72,7 @@ class Comparator: console.print(table_detail) def get_statistics_from_result_csv(self): - checklist = [CompareConst.PASS, CompareConst.ERROR, CompareConst.WARNING, CompareConst.NA, CompareConst.SKIP, "skip"] + checklist = [CompareConst.PASS, CompareConst.ERROR, CompareConst.WARNING, CompareConst.SPACE, CompareConst.SKIP, "skip"] self.test_result_cnt = { "success_num": 0, "warning_num": 0, "error_num": 0, "forward_fail_num": 0, "backward_fail_num": 0, "forward_and_backward_fail_num": 0, @@ -86,7 +87,7 @@ class Comparator: raise ValueError("The number of columns in %s is incorrect" % result_csv_name) if not all(item[i] and item[i] in checklist for i in (1, 2)): raise ValueError( - "The value in the 2nd or 3rd column of %s is wrong, it must be pass, error, warning, skip, or N/A" + "The value in the 2nd or 3rd column of %s is wrong, it must be pass, error, warning, skip, or SPACE" % result_csv_name) column1 = item[1] column2 = item[2] @@ -94,7 +95,7 @@ class Comparator: self.test_result_cnt["total_skip_num"] += 1 continue self.test_result_cnt["total_num"] += 1 - if column1 == CompareConst.PASS and column2 in [CompareConst.PASS, CompareConst.NA]: + if column1 == CompareConst.PASS and column2 in [CompareConst.PASS, CompareConst.SPACE]: self.test_result_cnt['success_num'] += 1 elif column1 == CompareConst.ERROR and column2 == CompareConst.ERROR: self.test_result_cnt['forward_and_backward_fail_num'] += 1 @@ -155,14 +156,15 @@ class Comparator: self.write_summary_csv(args) self.write_detail_csv(args) - def compare_output(self, api_name, bench_output, device_output, bench_grad=None, npu_grad=None): - compare_func = self._compare_dropout if "dropout" in api_name else self._compare_core_wrapper - fwd_success_status, fwd_compare_alg_results = compare_func(bench_output, device_output) - bwd_success_status, bwd_compare_alg_results = (CompareConst.PASS, []) if not (bench_grad and npu_grad) else compare_func(bench_grad[0], npu_grad[0]) if "dropout" in api_name else compare_func(bench_grad, npu_grad) - self.record_results(api_name, fwd_success_status, bwd_success_status if bwd_compare_alg_results is not None else CompareConst.NA, fwd_compare_alg_results, bwd_compare_alg_results) + def compare_output(self, full_api_name, bench_output, device_output, bench_grad=None, npu_grad=None): + _, api_name, _ = full_api_name.split("*") + compare_func = self._compare_dropout if "dropout" in full_api_name else self._compare_core_wrapper + fwd_success_status, fwd_compare_alg_results = 
compare_func(api_name, bench_output, device_output) + bwd_success_status, bwd_compare_alg_results = (CompareConst.PASS, []) if not (bench_grad and npu_grad) else compare_func(api_name, bench_grad[0], npu_grad[0]) if "dropout" in full_api_name else compare_func(api_name, bench_grad, npu_grad) + self.record_results(full_api_name, fwd_success_status, bwd_success_status if bwd_compare_alg_results is not None else CompareConst.SPACE, fwd_compare_alg_results, bwd_compare_alg_results) return fwd_success_status == CompareConst.PASS, bwd_success_status == CompareConst.PASS - def _compare_core_wrapper(self, bench_output, device_output): + def _compare_core_wrapper(self, api_name, bench_output, device_output): detailed_result_total = [] test_final_success = CompareConst.PASS if isinstance(bench_output, (list, tuple)): @@ -172,12 +174,12 @@ class Comparator: message = ["bench and npu output structure is different."] else: for b_out_i, n_out_i in zip(bench_output, device_output): - status_i, compare_result_i, message_i = self._compare_core(b_out_i, n_out_i) + status_i, compare_result_i, message_i = self._compare_core(api_name, b_out_i, n_out_i) status.append(status_i) compare_result.append(compare_result_i) message.append(message_i) else: - status, compare_result, message = self._compare_core(bench_output, device_output) + status, compare_result, message = self._compare_core(api_name, bench_output, device_output) if not isinstance(status, list): detailed_result_total.append(compare_result.to_column_value(status, message)) if status == CompareConst.ERROR: @@ -193,7 +195,7 @@ class Comparator: test_final_success = CompareConst.WARNING return test_final_success, detailed_result_total - def _compare_core(self, bench_output, device_output): + def _compare_core(self, api_name, bench_output, device_output): compare_column = CompareColumn() if not isinstance(bench_output, type(device_output)): return CompareConst.ERROR, compare_column, "bench and npu output type is different." @@ -202,7 +204,7 @@ class Comparator: if b_keys != n_keys: return CompareConst.ERROR, compare_column, "bench and npu output dict keys are different." else: - status, compare_result, message = self._compare_core(list(bench_output.values()), + status, compare_result, message = self._compare_core(api_name, list(bench_output.values()), list(device_output.values())) elif isinstance(bench_output, torch.Tensor): copy_bench_out = bench_output.detach().clone() @@ -210,21 +212,21 @@ class Comparator: compare_column.bench_type = str(copy_bench_out.dtype) compare_column.npu_type = str(copy_device_output.dtype) compare_column.shape = tuple(device_output.shape) - status, compare_result, message = self._compare_torch_tensor(copy_bench_out, copy_device_output, + status, compare_result, message = self._compare_torch_tensor(api_name, copy_bench_out, copy_device_output, compare_column) elif isinstance(bench_output, (bool, int, float, str)): compare_column.bench_type = str(type(bench_output)) compare_column.npu_type = str(type(device_output)) status, compare_result, message = self._compare_builtin_type(bench_output, device_output, compare_column) elif bench_output is None: - return CompareConst.PASS, compare_column, "Output is None." + return CompareConst.SKIP, compare_column, "Bench output is None, skip this test." 
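For readers tracing the new `api_name` plumbing above: `compare_output` now receives the full dump name and keeps only the middle field, which is what gets matched against the precision-standard lists. A minimal sketch of that naming convention; the concrete value and the reading of the third field as a call index are our assumptions, not confirmed by the diff:

```python
# Hypothetical dump entry; the checker's names look like "{api_type}*{api_name}*{index}".
full_api_name = "Tensor*mul*0"
_, api_name, _ = full_api_name.split("*")
assert api_name == "mul"  # the key later looked up in the standard/threshold YAMLs
```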
else: return CompareConst.PASS, compare_column, "Unexpected output type in compare_core: {}".format(type(bench_output)) return status, compare_result, message - def _compare_torch_tensor(self, bench_output, device_output, compare_column): + def _compare_torch_tensor(self, api_name, bench_output, device_output, compare_column): cpu_shape = bench_output.shape npu_shape = device_output.shape npu_dtype = device_output.dtype @@ -242,75 +244,57 @@ class Comparator: message = "" if bench_output.dtype in [bool, np.uint8, np.int8, np.int16, np.uint16, np.uint32, np.int32, np.int64, np.uint64]: - message += f"Compare algorithm cosine_sim is not supported for {bench_output.dtype} data. " \ + message += f"Compare algorithm is not supported for {bench_output.dtype} data. " \ f"Only judged by Error Rate." err_rate, status, msg = self._compare_bool_tensor(bench_output, device_output) message += msg + "\n" compare_column.error_rate = err_rate return status, compare_column, message else: - status, compare_column, message = self._compare_float_tensor(bench_output, device_output, + status, compare_column, message = self._compare_float_tensor(api_name, bench_output, device_output, compare_column, npu_dtype) return status, compare_column, message - @staticmethod - def _compare_dropout(bench_output, device_output): - tensor_num = bench_output.numel() - if tensor_num >= 100: - if abs((bench_output == 0).sum() - (device_output == 0).cpu().sum()) / tensor_num < 0.1: - return CompareConst.PASS, 1 - else: - return CompareConst.ERROR, 0 - else: - return CompareConst.PASS, 1 - - @staticmethod - def _compare_builtin_type(bench_output, device_output, compare_column): - if not isinstance(bench_output, (bool, int, float, str)): - return CompareConst.PASS, compare_column, "" - if bench_output != device_output: - return CompareConst.ERROR, compare_column, "" - compare_column.error_rate = 0 - return CompareConst.PASS, compare_column, "" - - - @staticmethod - def _compare_bool_tensor(bench_output, device_output): - error_nums = (bench_output != device_output).sum() - if bench_output.size == 0: - return CompareConst.NAN, CompareConst.ERROR, "There is not bench calculation result." 
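Worth noting from `_compare_torch_tensor` above: integer and bool outputs never reach the float metrics and are judged by error rate alone. A self-contained sketch of that guard, with the dtype list copied from the diff and the helper name ours:

```python
import numpy as np

# Dtypes that _compare_torch_tensor routes to the error-rate-only path.
INT_BOOL_DTYPES = (bool, np.uint8, np.int8, np.int16, np.uint16, np.uint32,
                   np.int32, np.int64, np.uint64)

def needs_error_rate_only(bench_output: np.ndarray) -> bool:
    return bench_output.dtype in INT_BOOL_DTYPES

assert needs_error_rate_only(np.array([1, 0, 1], dtype=np.int32))
```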
- error_rate = float(error_nums / bench_output.size) - result = CompareConst.PASS if error_rate == 0 else CompareConst.ERROR - return error_rate, result, "" - - @staticmethod - def _compare_float_tensor(bench_output, device_output, compare_column, dtype): + def _compare_float_tensor(self, api_name, bench_output, device_output, compare_column, dtype): message = "" - eps = np.finfo(bench_output.dtype).eps - abs_bench = np.abs(bench_output) - abs_bench_with_eps = abs_bench + eps + abs_bench, abs_bench_with_eps = get_abs_bench_with_eps(bench_output, dtype) abs_err = get_abs_err(bench_output, device_output) - if str(dtype) in Benchmark_Compare_Support_List: - dtype_config = precision_configs.get(dtype) + if str(dtype) in BENCHMARK_COMPARE_SUPPORT_LIST: both_finite_mask, inf_nan_mask = get_finite_and_infinite_mask(bench_output, device_output) - small_value_mask = get_small_value_mask(abs_bench, both_finite_mask, dtype_config['small_value'][0]) - abs_err_greater_mask = np.greater(abs_err, dtype_config['small_value_atol'][0]) - compare_column.small_value_err_ratio = get_small_value_err_ratio(small_value_mask, abs_err_greater_mask) - rel_err = get_rel_err(abs_err, abs_bench_with_eps, small_value_mask, inf_nan_mask) - compare_column.RMSE = get_rmse(abs_err, np.logical_or(inf_nan_mask, small_value_mask)) - compare_column.EB = get_error_balance(bench_output, device_output) - compare_column.Max_rel_error = get_max_rel_err(rel_err) - compare_column.Mean_rel_error = get_mean_rel_err(rel_err) + if api_name in BinaryStandardApi: + err_rate, _, _ = self._compare_bool_tensor(bench_output, device_output) + compare_column.error_rate = err_rate + elif api_name in AbsoluteStandardApi: + small_value_threshold, small_value_atol, rtol = self._get_absolute_threshold_attribute( + api_name, str(dtype)) + rel_err = abs_err / abs_bench_with_eps + small_value_mask = get_small_value_mask(abs_bench, both_finite_mask, small_value_threshold) + normal_value_mask = np.logical_and(both_finite_mask, np.logical_not(small_value_mask)) + compare_column.inf_nan_error_ratio = check_inf_nan_value(inf_nan_mask, bench_output, device_output, dtype, rtol) + compare_column.rel_err_ratio = check_norm_value(normal_value_mask, rel_err, rtol) + compare_column.abs_err_ratio = check_small_value(abs_err, small_value_mask, small_value_atol) + else: + dtype_config = precision_configs.get(dtype) + small_value_mask = get_small_value_mask(abs_bench, both_finite_mask, dtype_config['small_value'][0]) + abs_err_greater_mask = np.greater(abs_err, dtype_config['small_value_atol'][0]) + compare_column.small_value_err_ratio = get_small_value_err_ratio(small_value_mask, abs_err_greater_mask) + rel_err = get_rel_err(abs_err, abs_bench_with_eps, small_value_mask, inf_nan_mask) + compare_column.RMSE = get_rmse(abs_err, np.logical_or(inf_nan_mask, small_value_mask)) + compare_column.EB = get_error_balance(bench_output, device_output) + compare_column.Max_rel_error = get_max_rel_err(rel_err) + compare_column.Mean_rel_error = get_mean_rel_err(rel_err) cos_res, cos_status, msg = cosine_sim(bench_output, device_output) compare_column.cosine_sim = cos_res message += msg + "\n" if not cos_status: + message += "Cosine similarity is less than 0.99, consider as error, skip other check and set to SPACE.\n" return CompareConst.ERROR, compare_column, message max_abs_res, max_abs_status = get_max_abs_err(abs_err) compare_column.max_abs_err = max_abs_res if max_abs_status: + message += "Max abs error is less than 0.001, consider as pass, skip other check and set to SPACE.\n" 
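The heart of this hunk is the three-way routing at the top of the new `_compare_float_tensor`: binary-standard APIs are judged purely by bit-exact error rate, absolute-standard APIs by per-dtype thresholds, and everything else falls through to the existing benchmark metrics. A condensed, runnable sketch; the two sets are small samples from `api_precision_standard.yaml`, not the full lists, and `pick_algorithm` is illustrative only:

```python
BINARY_STANDARD_API = {"abs", "ceil", "sort"}      # sample of BinaryCompareStandard
ABSOLUTE_STANDARD_API = {"mul", "add", "div"}      # sample of AbsoluteThreshStandard

def pick_algorithm(api_name: str) -> str:
    if api_name in BINARY_STANDARD_API:
        return "binary consistency (error rate only)"
    if api_name in ABSOLUTE_STANDARD_API:
        return "absolute thresholds (inf/nan, rel err, small-value abs err)"
    return "benchmark metrics (RMSE, EB, max/mean rel err)"

assert pick_algorithm("mul").startswith("absolute")
```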
return CompareConst.PASS, compare_column, message rel_err_orign = get_rel_err_origin(abs_err, abs_bench_with_eps) @@ -318,18 +302,61 @@ class Comparator: hundred_res, hundred_status = get_rel_err_ratio(rel_err_orign, 0.01) compare_column.rel_err_hundredth = hundred_res if not hundred_status: + message += "Relative error is greater than 0.01, consider as error, skip other check and set to SPACE.\n" return CompareConst.ERROR, compare_column, message thousand_res, thousand_status = get_rel_err_ratio(rel_err_orign, 0.001) compare_column.rel_err_thousandth = thousand_res if dtype in [torch.float16, torch.bfloat16]: if thousand_status: + message += "Relative error is less than 0.001, consider as pass, skip other check and set to SPACE.\n" return CompareConst.PASS, compare_column, message + message += "Relative error is greater than 0.001, consider as warning, skip other check and set to SPACE.\n" return CompareConst.WARNING, compare_column, message ten_thousand_res, ten_thousand_status = get_rel_err_ratio(rel_err_orign, 0.0001) compare_column.rel_err_ten_thousandth = ten_thousand_res if dtype in [torch.float32, torch.float64]: if not thousand_status: + message += "Relative error is greater than 0.001, consider as error, skip other check and set to SPACE.\n" return CompareConst.ERROR, compare_column, message if not ten_thousand_status: + message += "Relative error is greater than 0.0001, consider as warning, skip other check and set to SPACE.\n" return CompareConst.WARNING, compare_column, message + message += "Relative error is less than 0.0001, consider as pass.\n" return CompareConst.PASS, compare_column, message + + @staticmethod + def _compare_dropout(api_name, bench_output, device_output): + tensor_num = bench_output.numel() + if tensor_num >= 100: + if abs((bench_output == 0).sum() - (device_output == 0).cpu().sum()) / tensor_num < 0.1: + return CompareConst.PASS, 1 + else: + return CompareConst.ERROR, 0 + else: + return CompareConst.PASS, 1 + + @staticmethod + def _compare_builtin_type(bench_output, device_output, compare_column): + if not isinstance(bench_output, (bool, int, float, str)): + return CompareConst.PASS, compare_column, "" + if bench_output != device_output: + return CompareConst.ERROR, compare_column, "" + compare_column.error_rate = 0 + return CompareConst.PASS, compare_column, "" + + + @staticmethod + def _compare_bool_tensor(bench_output, device_output): + error_nums = (bench_output != device_output).sum() + if bench_output.size == 0: + return CompareConst.NAN, CompareConst.ERROR, "There is not bench calculation result." 
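The tiered relative-error fallback above is easier to audit in isolation. A hedged distillation, where the helper name and the precomputed booleans are ours but the decisions mirror the diff:

```python
import torch

def judge_by_rel_err(dtype, hundredth_ok, thousandth_ok, ten_thousandth_ok):
    if not hundredth_ok:                          # failing the 1e-2 tier is always an error
        return "error"
    if dtype in (torch.float16, torch.bfloat16):  # half precision is held to the 1e-3 tier only
        return "pass" if thousandth_ok else "warning"
    if not thousandth_ok:                         # float32/float64 must clear 1e-3
        return "error"
    return "pass" if ten_thousandth_ok else "warning"  # 1e-4 decides pass vs warning

assert judge_by_rel_err(torch.float32, True, True, False) == "warning"
```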
+ error_rate = float(error_nums / bench_output.size) + result = CompareConst.PASS if error_rate == 0 else CompareConst.ERROR + return error_rate, result, "" + + @staticmethod + def _get_absolute_threshold_attribute(api_name, dtype): + small_value_threshold = apis_threshold.get(api_name).get(dtype).get('small_value') + small_value_atol = apis_threshold.get(api_name).get(dtype).get('small_value_atol') + rtol = apis_threshold.get(api_name).get(dtype).get('rtol') + return small_value_threshold, small_value_atol, rtol diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/compare_column.py b/debug/accuracy_tools/api_accuracy_checker/compare/compare_column.py index 9579f6a915910533ddc14d60cdc41afb00f8ee67..961fce6811efd34789cb06f19d894244da681c33 100644 --- a/debug/accuracy_tools/api_accuracy_checker/compare/compare_column.py +++ b/debug/accuracy_tools/api_accuracy_checker/compare/compare_column.py @@ -3,22 +3,61 @@ from api_accuracy_checker.compare.compare_utils import CompareConst class CompareColumn: def __init__(self): - self.bench_type = CompareConst.NA - self.npu_type = CompareConst.NA - self.shape = CompareConst.NA - self.cosine_sim = CompareConst.NA - self.max_abs_err = CompareConst.NA - self.rel_err_hundredth = CompareConst.NA - self.rel_err_thousandth = CompareConst.NA - self.rel_err_ten_thousandth = CompareConst.NA - self.error_rate = CompareConst.NA - self.EB = CompareConst.NA - self.RMSE = CompareConst.NA - self.small_value_err_ratio = CompareConst.NA - self.Max_rel_error = CompareConst.NA - self.Mean_rel_error = CompareConst.NA + self.bench_type = CompareConst.SPACE + self.npu_type = CompareConst.SPACE + self.shape = CompareConst.SPACE + self.cosine_sim = CompareConst.SPACE + self.max_abs_err = CompareConst.SPACE + self.rel_err_hundredth = CompareConst.SPACE + self.rel_err_thousandth = CompareConst.SPACE + self.rel_err_ten_thousandth = CompareConst.SPACE + self.error_rate = CompareConst.SPACE + self.EB = CompareConst.SPACE + self.RMSE = CompareConst.SPACE + self.small_value_err_ratio = CompareConst.SPACE + self.Max_rel_error = CompareConst.SPACE + self.Mean_rel_error = CompareConst.SPACE + self.inf_nan_error_ratio = CompareConst.SPACE + self.rel_err_ratio = CompareConst.SPACE + self.abs_err_ratio = CompareConst.SPACE def to_column_value(self, is_pass, message): return [self.bench_type, self.npu_type, self.shape, self.cosine_sim, self.max_abs_err, self.rel_err_hundredth, self.rel_err_thousandth, self.rel_err_ten_thousandth, self.error_rate, self.EB, self.RMSE, - self.small_value_err_ratio, self.Max_rel_error, self.Mean_rel_error, is_pass, message] \ No newline at end of file + self.small_value_err_ratio, self.Max_rel_error, self.Mean_rel_error, self.inf_nan_error_ratio, + self.rel_err_ratio, self.abs_err_ratio, is_pass, message] + + +class ApiPrecisionOutputColumn: + def __init__(self): + self.api_name = CompareConst.SPACE + self.small_value_err_ratio = CompareConst.SPACE + self.small_value_err_status = CompareConst.SPACE + self.rmse_ratio = CompareConst.SPACE + self.rmse_status = CompareConst.SPACE + self.max_rel_err_ratio = CompareConst.SPACE + self.max_rel_err_status = CompareConst.SPACE + self.mean_rel_err_ratio = CompareConst.SPACE + self.mean_rel_err_status = CompareConst.SPACE + self.eb_ratio = CompareConst.SPACE + self.eb_status = CompareConst.SPACE + self.inf_nan_error_ratio = CompareConst.SPACE + self.inf_nan_error_ratio_status = CompareConst.SPACE + self.rel_err_ratio = CompareConst.SPACE + self.rel_err_ratio_status = CompareConst.SPACE + self.abs_err_ratio = 
CompareConst.SPACE + self.abs_err_ratio_status = CompareConst.SPACE + self.error_rate = CompareConst.SPACE + self.error_rate_status = CompareConst.SPACE + self.compare_result = CompareConst.SPACE + self.compare_algorithm = CompareConst.SPACE + self.compare_message = CompareConst.SPACE + + def to_column_value(self): + return [self.api_name, self.small_value_err_ratio, self.small_value_err_status, self.rmse_ratio, + self.rmse_status, self.max_rel_err_ratio, self.max_rel_err_status, self.mean_rel_err_ratio, + self.mean_rel_err_status, self.eb_ratio, self.eb_status, self.inf_nan_error_ratio, + self.inf_nan_error_ratio_status, self.rel_err_ratio, self.rel_err_ratio_status, self.abs_err_ratio, + self.abs_err_ratio_status, self.error_rate, self.error_rate_status, self.compare_result, + self.compare_algorithm, self.compare_message] + \ No newline at end of file diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/compare_utils.py b/debug/accuracy_tools/api_accuracy_checker/compare/compare_utils.py index d96944bcec613009deafc1a5b4128511778e2730..d711265cc781c634caa189692b5e0141eee2f83e 100644 --- a/debug/accuracy_tools/api_accuracy_checker/compare/compare_utils.py +++ b/debug/accuracy_tools/api_accuracy_checker/compare/compare_utils.py @@ -1,19 +1,31 @@ import time +import os import numpy as np import torch +import yaml from api_accuracy_checker.common.utils import Const, print_warn_log +from ptdbg_ascend.src.python.ptdbg_ascend.common.file_check_util import FileOpen current_time = time.strftime("%Y%m%d%H%M%S") -BENCHMARK_COMPARE_RESULT_FILE_NAME = "benchmark_compare_result_" + current_time + ".csv" -BENCHMARK_COMPARE_DETAILS_FILE_NAME = "benchmark_compare_details_" + current_time + ".csv" -Benchmark_Compare_Support_List = ['torch.float16', 'torch.bfloat16', 'torch.float32'] -Benchmark_Compare_Unsupport_List = ['torch.float64'] -result_mapping = { - 'pass' : True, - 'warning': False, - 'error' : False -} +API_PRECISION_COMPARE_RESULT_FILE_NAME = "api_precision_compare_result_" + current_time + ".csv" +API_PRECISION_COMPARE_DETAILS_FILE_NAME = "api_precision_compare_details_" + current_time + ".csv" +BENCHMARK_COMPARE_SUPPORT_LIST = ['torch.float16', 'torch.bfloat16', 'torch.float32'] +API_PRECISION_COMPARE_UNSUPPORT_LIST = ['torch.float64', 'torch.complex64', 'torch.complex128'] +BINARY_COMPARE_UNSUPPORT_LIST = BENCHMARK_COMPARE_SUPPORT_LIST + API_PRECISION_COMPARE_UNSUPPORT_LIST + + +cur_path = os.path.dirname(os.path.realpath(__file__)) +standard_yaml_path = os.path.join(cur_path, "api_precision_standard.yaml") +with FileOpen(standard_yaml_path, 'r') as f: + Apis = yaml.safe_load(f) + AbsoluteStandardApi = Apis.get('AbsoluteThreshStandard') + BinaryStandardApi = Apis.get('BinaryCompareStandard') + + +threshold_yaml_path = os.path.join(cur_path, "api_precision_threshold.yaml") +with FileOpen(threshold_yaml_path, 'r') as f: + apis_threshold = yaml.safe_load(f) DETAIL_TEST_ROWS = [[ @@ -23,12 +35,15 @@ DETAIL_TEST_ROWS = [[ "双百指标", "双千指标", "双万指标", - "错误率", + "二进制一致错误率", "误差均衡性", "均方根误差", "小值域错误占比", "相对误差最大值", "相对误差平均值", + "inf/nan错误率", + "相对误差错误率", + "绝对误差错误率", "Status", "Message" ]] @@ -71,9 +86,13 @@ class CompareConst: SKIP = 'SKIP' TRUE = 'TRUE' FALSE = 'FALSE' + BFLOAT16_MIN = -3.3895313892515355e+38 + BFLOAT16_MAX = 3.3895313892515355e+38 + BFLOAT16_EPS = 2 ** -8 + SPACE = " " -class BenchmarkCompareColumn: +class ApiPrecisionCompareColumn: API_NAME = 'API Name' DEVICE_DTYPE = 'DEVICE Dtype' SMALL_VALUE_ERROR_RATE = '小值域错误占比' @@ -91,31 +110,46 @@ class BenchmarkCompareColumn: 
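The module-level YAML loading added to compare_utils.py above pairs with `_get_absolute_threshold_attribute` in compare.py. A runnable approximation of that round trip, with plain `open` standing in for the repo's `FileOpen` wrapper and `get_thresholds` as an illustrative name:

```python
import os
import yaml

cur_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cur_path, "api_precision_threshold.yaml")) as f:
    apis_threshold = yaml.safe_load(f)

def get_thresholds(api_name, dtype):
    cfg = apis_threshold.get(api_name).get(dtype)
    return cfg.get('small_value'), cfg.get('small_value_atol'), cfg.get('rtol')

# e.g. get_thresholds("mul", "torch.float32") -> (1e-06, 1e-06, 1e-06)
```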
MEAN_REL_ERR_STATUS = '相对误差平均值判定结果' EB_RATIO = '误差均衡性比值' EB_STATUS = '误差均衡性判定结果' - ERROR_RATE = '错误率' + ERROR_RATE = '二进制一致错误率' + ERROR_RATE_STATUS = '二进制一致错误率判定结果' + INF_NAN_ERROR_RATIO = 'inf/nan错误率' + INF_NAN_ERROR_RATIO_STATUS = 'inf/nan判定结果' + REL_ERR_RATIO = '相对误差错误率' + REL_ERR_RATIO_STATUS = '相对误差判定结果' + ABS_ERR_RATIO = '绝对误差错误率' + ABS_ERR_RATIO_STATUS = '绝对误差判定结果' + FINAL_RESULT = '比对结果' + ALGORITHM = '比对算法' FORWWARD_STATUS = 'Forward Test Success' BACKWARD_STATUS = 'Backward Test Success' MESSAGE = 'Message' @staticmethod def to_required_columns(): - return [BenchmarkCompareColumn.API_NAME, BenchmarkCompareColumn.DEVICE_DTYPE, - BenchmarkCompareColumn.SMALL_VALUE_ERROR_RATE, BenchmarkCompareColumn.RMSE, - BenchmarkCompareColumn.MAX_REL_ERR, BenchmarkCompareColumn.MEAN_REL_ERR, BenchmarkCompareColumn.EB, - BenchmarkCompareColumn.ERROR_RATE] - + return [ApiPrecisionCompareColumn.API_NAME, ApiPrecisionCompareColumn.DEVICE_DTYPE, + ApiPrecisionCompareColumn.SMALL_VALUE_ERROR_RATE, ApiPrecisionCompareColumn.RMSE, + ApiPrecisionCompareColumn.MAX_REL_ERR, ApiPrecisionCompareColumn.MEAN_REL_ERR, ApiPrecisionCompareColumn.EB, + ApiPrecisionCompareColumn.ERROR_RATE, ApiPrecisionCompareColumn.INF_NAN_ERROR_RATIO, + ApiPrecisionCompareColumn.REL_ERR_RATIO, ApiPrecisionCompareColumn.ABS_ERR_RATIO] + @staticmethod def get_detail_csv_title(): - return [BenchmarkCompareColumn.API_NAME, - BenchmarkCompareColumn.SMALL_VALUE_ERROR_RATIO, BenchmarkCompareColumn.SMALL_VALUE_ERROR_STATUS, - BenchmarkCompareColumn.RMSE_RATIO, BenchmarkCompareColumn.RMSE_STATUS, - BenchmarkCompareColumn.MAX_REL_ERR_RATIO, BenchmarkCompareColumn.MAX_REL_ERR_STATUS, - BenchmarkCompareColumn.MEAN_REL_ERR_RATIO, BenchmarkCompareColumn.MEAN_REL_ERR_STATUS, - BenchmarkCompareColumn.EB_RATIO, BenchmarkCompareColumn.EB_STATUS] + return [ApiPrecisionCompareColumn.API_NAME, + ApiPrecisionCompareColumn.SMALL_VALUE_ERROR_RATIO, ApiPrecisionCompareColumn.SMALL_VALUE_ERROR_STATUS, + ApiPrecisionCompareColumn.RMSE_RATIO, ApiPrecisionCompareColumn.RMSE_STATUS, + ApiPrecisionCompareColumn.MAX_REL_ERR_RATIO, ApiPrecisionCompareColumn.MAX_REL_ERR_STATUS, + ApiPrecisionCompareColumn.MEAN_REL_ERR_RATIO, ApiPrecisionCompareColumn.MEAN_REL_ERR_STATUS, + ApiPrecisionCompareColumn.EB_RATIO, ApiPrecisionCompareColumn.EB_STATUS, + ApiPrecisionCompareColumn.INF_NAN_ERROR_RATIO, ApiPrecisionCompareColumn.INF_NAN_ERROR_RATIO_STATUS, + ApiPrecisionCompareColumn.REL_ERR_RATIO, ApiPrecisionCompareColumn.REL_ERR_RATIO_STATUS, + ApiPrecisionCompareColumn.ABS_ERR_RATIO, ApiPrecisionCompareColumn.ABS_ERR_RATIO_STATUS, + ApiPrecisionCompareColumn.ERROR_RATE, ApiPrecisionCompareColumn.ERROR_RATE_STATUS, + ApiPrecisionCompareColumn.FINAL_RESULT, ApiPrecisionCompareColumn.ALGORITHM, ApiPrecisionCompareColumn.MESSAGE] @staticmethod def get_result_csv_title(): - return [BenchmarkCompareColumn.API_NAME, BenchmarkCompareColumn.FORWWARD_STATUS, - BenchmarkCompareColumn.BACKWARD_STATUS, BenchmarkCompareColumn.MESSAGE] + return [ApiPrecisionCompareColumn.API_NAME, ApiPrecisionCompareColumn.FORWWARD_STATUS, + ApiPrecisionCompareColumn.BACKWARD_STATUS, ApiPrecisionCompareColumn.MESSAGE] def check_dtype_comparable(x, y): @@ -133,3 +167,19 @@ def check_dtype_comparable(x, y): return False print_warn_log(f"Compare: Unexpected dtype {x.dtype}, {y.dtype}") return False + + +def convert_str_to_float(input_data): + if isinstance(input_data, str) and input_data.strip() == "": + msg = 'ERROR: Input data is an empty string' + raise 
CompareException(CompareException.INVALID_DATA_ERROR, msg) + try: + float_data = float(input_data) + if str(float_data) in ('inf', '-inf', 'nan'): + msg = 'ERROR: Input data is either "inf", "-inf", "nan"' + raise CompareException(CompareException.INVALID_DATA_ERROR, msg) + return float_data + except ValueError as e: + msg = 'ERROR: Input data cannot be converted to float' + raise CompareException(CompareException.INVALID_DATA_ERROR, msg) from e + \ No newline at end of file diff --git a/debug/accuracy_tools/api_accuracy_checker/config.yaml b/debug/accuracy_tools/api_accuracy_checker/config.yaml index ff3b91de7e07ce0055bf14ee3265f49dde2cc4df..a6e70c57ebaec9141434499cfebe2aed6c21a7be 100644 --- a/debug/accuracy_tools/api_accuracy_checker/config.yaml +++ b/debug/accuracy_tools/api_accuracy_checker/config.yaml @@ -1,6 +1,6 @@ dump_path: './' real_data: False -enable_dataloader: True +enable_dataloader: False target_iter: [1] white_list: [] error_data_path: './' diff --git a/debug/accuracy_tools/api_accuracy_checker/dump/api_info.py b/debug/accuracy_tools/api_accuracy_checker/dump/api_info.py index b3fe1249486756d6709f1d9963e5353c5eebffa7..e14405fe143e0a39030b6cca18a615c48e802682 100644 --- a/debug/accuracy_tools/api_accuracy_checker/dump/api_info.py +++ b/debug/accuracy_tools/api_accuracy_checker/dump/api_info.py @@ -17,10 +17,11 @@ def get_tensor_extremum(data, operator): return True in data elif operator == 'min': return False not in data + data_clone = data.clone().detach() if operator == 'max': - return torch._C._VariableFunctionsClass.max(data.float()).item() + return torch._C._VariableFunctionsClass.max(data_clone.float()).item() else: - return torch._C._VariableFunctionsClass.min(data.float()).item() + return torch._C._VariableFunctionsClass.min(data_clone.float()).item() def get_type_name(name): diff --git a/debug/accuracy_tools/api_accuracy_checker/dump/info_dump.py b/debug/accuracy_tools/api_accuracy_checker/dump/info_dump.py index c71ac649c3e3f78f87c72d551c3a30f72bd08781..c73058e4f3058a9d1bf10b0a14046845f78440ee 100644 --- a/debug/accuracy_tools/api_accuracy_checker/dump/info_dump.py +++ b/debug/accuracy_tools/api_accuracy_checker/dump/info_dump.py @@ -2,6 +2,7 @@ import fcntl import json import os import threading +import multiprocessing from api_accuracy_checker.dump.api_info import ForwardAPIInfo, BackwardAPIInfo from api_accuracy_checker.common.utils import check_file_or_directory_path, initialize_save_path, create_directory @@ -12,6 +13,7 @@ from api_accuracy_checker.common.config import msCheckerConfig from ptdbg_ascend.src.python.ptdbg_ascend.common.file_check_util import FileOpen, FileCheckConst, FileChecker, change_mode lock = threading.Lock() +proc_lock = multiprocessing.Lock() def write_api_info_json(api_info): @@ -36,27 +38,26 @@ def write_api_info_json(api_info): def write_json(file_path, data, indent=None): check_file_or_directory_path(os.path.dirname(file_path), True) - if not os.path.exists(file_path): - with FileOpen(file_path, 'w') as f: - f.write("{\n}") - change_mode(file_path, FileCheckConst.DATA_FILE_AUTHORITY) - lock.acquire() - with FileOpen(file_path, 'a+') as f: + with proc_lock, lock, FileOpen(file_path, 'a+') as f: fcntl.flock(f, fcntl.LOCK_EX) try: f.seek(0, os.SEEK_END) - f.seek(f.tell() - 1, os.SEEK_SET) - f.truncate() - if f.tell() > 3: - f.seek(f.tell() - 1, os.SEEK_SET) + current_position = f.tell() + if current_position > 0: + f.seek(current_position - 1, os.SEEK_SET) f.truncate() - f.write(',\n') - f.write(json.dumps(data, indent=indent)[1:-1] + 
'\n}') + if f.tell() > 3: + f.seek(f.tell() - 1, os.SEEK_SET) + f.truncate() + f.write(',\n') + f.write(json.dumps(data, indent=indent)[1:-1] + '\n}') + else: + change_mode(file_path, FileCheckConst.DATA_FILE_AUTHORITY) + f.write('{\n' + json.dumps(data, indent=indent)[1:] + '\n') except Exception as e: raise ValueError(f"Json save failed:{e}") from e finally: fcntl.flock(f, fcntl.LOCK_UN) - lock.release() def initialize_output_json(): diff --git a/debug/accuracy_tools/api_accuracy_checker/img/accuracy_checking_details.png b/debug/accuracy_tools/api_accuracy_checker/img/accuracy_checking_details.png index 55024831faf41b206424b0fea91c9617c25c0a3a..ddc4fb348ee55197459c7303b0817853e201ace4 100644 Binary files a/debug/accuracy_tools/api_accuracy_checker/img/accuracy_checking_details.png and b/debug/accuracy_tools/api_accuracy_checker/img/accuracy_checking_details.png differ diff --git a/debug/accuracy_tools/api_accuracy_checker/img/accuracy_checking_result.png b/debug/accuracy_tools/api_accuracy_checker/img/accuracy_checking_result.png index d7f60d0916690d871f7a2eca37ca919e42596c15..aa0b29d8d057ff806d5f5e82a35c5ce085dee1f3 100644 Binary files a/debug/accuracy_tools/api_accuracy_checker/img/accuracy_checking_result.png and b/debug/accuracy_tools/api_accuracy_checker/img/accuracy_checking_result.png differ diff --git a/debug/accuracy_tools/api_accuracy_checker/img/api_precision_compare_details.png b/debug/accuracy_tools/api_accuracy_checker/img/api_precision_compare_details.png new file mode 100644 index 0000000000000000000000000000000000000000..c3fd909a8d187fd6a725c7f3cc6798989d3fa0cf Binary files /dev/null and b/debug/accuracy_tools/api_accuracy_checker/img/api_precision_compare_details.png differ diff --git a/debug/accuracy_tools/api_accuracy_checker/img/api_precision_compare_result.png b/debug/accuracy_tools/api_accuracy_checker/img/api_precision_compare_result.png new file mode 100644 index 0000000000000000000000000000000000000000..2b95897031441408f6a88185e3cda36e4fea8049 Binary files /dev/null and b/debug/accuracy_tools/api_accuracy_checker/img/api_precision_compare_result.png differ diff --git a/debug/accuracy_tools/api_accuracy_checker/img/benchmark_compare_details.png b/debug/accuracy_tools/api_accuracy_checker/img/benchmark_compare_details.png deleted file mode 100644 index 6b6cac265b1433e92caff8333088b20cfcc25aab..0000000000000000000000000000000000000000 Binary files a/debug/accuracy_tools/api_accuracy_checker/img/benchmark_compare_details.png and /dev/null differ diff --git a/debug/accuracy_tools/api_accuracy_checker/img/benchmark_compare_result.png b/debug/accuracy_tools/api_accuracy_checker/img/benchmark_compare_result.png deleted file mode 100644 index 1cd77cd52f4352e298f5fb1e54a73342bf377893..0000000000000000000000000000000000000000 Binary files a/debug/accuracy_tools/api_accuracy_checker/img/benchmark_compare_result.png and /dev/null differ diff --git a/debug/accuracy_tools/api_accuracy_checker/run_ut/data_generate.py b/debug/accuracy_tools/api_accuracy_checker/run_ut/data_generate.py index 65aa264ab28a3f887ed2d5cde8dd1c8755ea94ca..ec3c539f7f86149b9a8c1f26864bbcb5751748bf 100644 --- a/debug/accuracy_tools/api_accuracy_checker/run_ut/data_generate.py +++ b/debug/accuracy_tools/api_accuracy_checker/run_ut/data_generate.py @@ -210,7 +210,7 @@ def gen_kwargs(api_info, convert_type=None, real_data_path=None): if isinstance(value, (list, tuple)): kwargs_params[key] = gen_list_kwargs(value, convert_type, real_data_path) elif value.get('type') in TENSOR_DATA_LIST or 
value.get('type').startswith("numpy"): - kwargs_params[key] = gen_data(value, False, convert_type, real_data_path) + kwargs_params[key] = gen_data(value, True, convert_type, real_data_path) elif value.get('type') in TORCH_TYPE: gen_torch_kwargs(kwargs_params, key, value) else: diff --git a/debug/accuracy_tools/api_accuracy_checker/run_ut/multi_run_ut.py b/debug/accuracy_tools/api_accuracy_checker/run_ut/multi_run_ut.py index 20535ed9a251c8d78ea052bf686a0cee242d50b1..760e088eb38a26ba01fd25ac579130240a76a9e8 100644 --- a/debug/accuracy_tools/api_accuracy_checker/run_ut/multi_run_ut.py +++ b/debug/accuracy_tools/api_accuracy_checker/run_ut/multi_run_ut.py @@ -116,6 +116,7 @@ def run_parallel_ut(config): except subprocess.TimeoutExpired: process.kill() for file in config.forward_files: + check_link(file) try: os.remove(file) except FileNotFoundError: @@ -129,6 +130,8 @@ def run_parallel_ut(config): except Exception as e: print_error_log(f"An unexpected error occurred: {e}") finally: + if progress_bar.n < config.total_items: + print_warn_log("The UT task has not been completed. The parameter '-csv_path' along with the path to the result CSV file will be utilized to resume the UT task.") clean_up() progress_bar_thread.join() try: diff --git a/debug/accuracy_tools/api_accuracy_checker/run_ut/run_overflow_check.py b/debug/accuracy_tools/api_accuracy_checker/run_ut/run_overflow_check.py index 9f968c1d87064d81c0e6b723a1754bb6dbdcf9d0..2e8a12231ed31c499648f12ee93486dfed47e00c 100644 --- a/debug/accuracy_tools/api_accuracy_checker/run_ut/run_overflow_check.py +++ b/debug/accuracy_tools/api_accuracy_checker/run_ut/run_overflow_check.py @@ -84,7 +84,7 @@ def run_torch_api(api_full_name, api_info_dict): return -def _run_ut_parser(parser): +def _run_overflow_check_parser(parser): parser.add_argument("-forward", "--forward_input_file", dest="forward_input_file", default="", help=" The api param tool forward result file: generate from api param tool, " "a json file.", @@ -95,10 +95,15 @@ def _run_ut_parser(parser): default=0, required=False) -def _run_overflow_check(): - parser = argparse.ArgumentParser() - _run_ut_parser(parser) +def _run_overflow_check(parser=None): + if not parser: + parser = argparse.ArgumentParser() + _run_overflow_check_parser(parser) args = parser.parse_args(sys.argv[1:]) + _run_overflow_check_command(args) + + +def _run_overflow_check_command(args): torch.npu.set_compile_mode(jit_compile=args.jit_compile) npu_device = "npu:" + str(args.device_id) check_link(args.forward_input_file) diff --git a/debug/accuracy_tools/api_accuracy_checker/run_ut/run_ut.py b/debug/accuracy_tools/api_accuracy_checker/run_ut/run_ut.py index 642ca949de0a20b46aacab2f2b29dbf930d8a062..856cb237ca7e1ce71da45938be2284c2e6133a2b 100644 --- a/debug/accuracy_tools/api_accuracy_checker/run_ut/run_ut.py +++ b/debug/accuracy_tools/api_accuracy_checker/run_ut/run_ut.py @@ -1,443 +1,460 @@ -import argparse -import os -import csv -import re -import sys -import time -import gc -from collections import namedtuple -try: - import torch_npu -except ImportError: - is_gpu = True - current_device = "cuda" -else: - is_gpu = False - current_device = "npu" -import torch -from tqdm import tqdm -from api_accuracy_checker.run_ut.data_generate import gen_api_params, gen_args -from api_accuracy_checker.common.utils import print_info_log, print_warn_log, get_json_contents, api_info_preprocess, \ - print_error_log, initialize_save_path, Const, create_directory -from api_accuracy_checker.compare.compare import Comparator -from 
api_accuracy_checker.hook_module.wrap_tensor import TensorOPTemplate -from api_accuracy_checker.hook_module.wrap_functional import FunctionalOPTemplate -from api_accuracy_checker.hook_module.wrap_torch import TorchOPTemplate -from api_accuracy_checker.common.config import msCheckerConfig -from api_accuracy_checker.dump.api_info import APIInfo -from ptdbg_ascend.src.python.ptdbg_ascend.common.utils import check_path_before_create - - -from ptdbg_ascend.src.python.ptdbg_ascend.common.file_check_util import FileOpen, FileCheckConst, FileChecker, \ - change_mode, check_file_suffix, check_link - -current_time = time.strftime("%Y%m%d%H%M%S") -UT_ERROR_DATA_DIR = 'ut_error_data' + current_time -RESULT_FILE_NAME = "accuracy_checking_result_" + current_time + ".csv" -DETAILS_FILE_NAME = "accuracy_checking_details_" + current_time + ".csv" -RunUTConfig = namedtuple('RunUTConfig', ['forward_content', 'backward_content', 'result_csv_path', 'details_csv_path', - 'save_error_data', 'is_continue_run_ut', 'real_data_path']) -not_backward_list = ['repeat_interleave'] -not_detach_set = {'resize_', 'resize_as_', 'set_', 'transpose_', 't_', 'squeeze_', 'unsqueeze_'} - -tqdm_params = { - 'smoothing': 0, # 平滑进度条的预计剩余时间,取值范围0到1 - 'desc': 'Processing', # 进度条前的描述文字 - 'leave': True, # 迭代完成后保留进度条的显示 - 'ncols': 75, # 进度条的固定宽度 - 'mininterval': 0.1, # 更新进度条的最小间隔秒数 - 'maxinterval': 1.0, # 更新进度条的最大间隔秒数 - 'miniters': 1, # 更新进度条之间的最小迭代次数 - 'ascii': None, # 根据环境自动使用ASCII或Unicode字符 - 'unit': 'it', # 迭代单位 - 'unit_scale': True, # 自动根据单位缩放 - 'dynamic_ncols': True, # 动态调整进度条宽度以适应控制台 - 'bar_format': '{l_bar}{bar}| {n}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]' # 自定义进度条输出格式 -} - - -def exec_api(api_type, api_name, args, kwargs): - if api_type == "Functional": - functional_api = FunctionalOPTemplate(api_name, str, False) - out = functional_api.forward(*args, **kwargs) - if api_type == "Tensor": - tensor_api = TensorOPTemplate(api_name, str, False) - out = tensor_api.forward(*args, **kwargs) - if api_type == "Torch": - torch_api = TorchOPTemplate(api_name, str, False) - out = torch_api.forward(*args, **kwargs) - return out - - -def deal_detach(arg, to_detach=True): - return arg.detach() if to_detach else arg - - -def deal_dtype(arg, raise_dtype=None): - if raise_dtype is None or arg.dtype not in Const.RAISE_PRECISION or raise_dtype == arg.dtype: - return arg - return arg.type(raise_dtype) - - -def generate_device_params(input_args, input_kwargs, need_backward, api_name): - def recursive_arg_to_device(arg_in, to_detach): - if isinstance(arg_in, (list, tuple)): - return type(arg_in)(recursive_arg_to_device(arg, to_detach) for arg in arg_in) - elif isinstance(arg_in, torch.Tensor): - if need_backward and arg_in.requires_grad: - arg_in = deal_detach(arg_in.clone(), to_detach).to(current_device).requires_grad_() - temp_arg_in = arg_in * 1 - arg_in = temp_arg_in.type_as(arg_in) - arg_in.retain_grad() - return arg_in - else: - return deal_detach(arg_in.clone(), to_detach).to(current_device) - else: - return arg_in - - is_detach = api_name not in not_detach_set - device_args = recursive_arg_to_device(input_args, is_detach) - device_kwargs = \ - {key: recursive_arg_to_device(value, key != "out" and is_detach) for key, value in input_kwargs.items()} - return device_args, device_kwargs - - -def generate_cpu_params(input_args, input_kwargs, need_backward, api_name): - def recursive_arg_to_cpu(arg_in, to_detach, raise_dtype=None): - if isinstance(arg_in, (list, tuple)): - return type(arg_in)(recursive_arg_to_cpu(arg, to_detach, 
raise_dtype=raise_dtype) for arg in arg_in) - elif isinstance(arg_in, torch.Tensor): - if need_backward and arg_in.requires_grad: - arg_in = deal_detach(deal_dtype(arg_in.clone(), raise_dtype), to_detach).requires_grad_() - temp_arg_in = arg_in * 1 - arg_in = temp_arg_in.type_as(arg_in) - arg_in.retain_grad() - return arg_in - else: - return deal_detach(deal_dtype(arg_in.clone(), raise_dtype=raise_dtype), to_detach) - else: - return arg_in - - def recursive_find_dtypes(arg_in): - if isinstance(arg_in, (list, tuple)): - return set().union(*tuple(recursive_find_dtypes(arg) for arg in arg_in)) - elif isinstance(arg_in, torch.Tensor) and arg_in.dtype in Const.RAISE_PRECISION: - return set([arg_in.dtype]) - return set() - - raise_dtype = None - need_raise_dtypes = recursive_find_dtypes(input_args) - if len(need_raise_dtypes) == 1: - raise_dtype = Const.RAISE_PRECISION.get(need_raise_dtypes.pop()) - elif len(need_raise_dtypes) >= 2: - raise_dtype = torch.float32 - - is_detach = api_name not in not_detach_set - cpu_args = recursive_arg_to_cpu(input_args, is_detach, raise_dtype=raise_dtype) - cpu_kwargs = {key: recursive_arg_to_cpu(value, key != "out" and is_detach) for key, value in input_kwargs.items()} - return cpu_args, cpu_kwargs - - -def run_ut(config): - print_info_log("start UT test") - print_info_log(f"UT task result will be saved in {config.result_csv_path}") - print_info_log(f"UT task details will be saved in {config.details_csv_path}") - if config.save_error_data: - error_data_path = os.path.abspath(os.path.join(msCheckerConfig.error_data_path, UT_ERROR_DATA_DIR)) - print_info_log(f"UT task error_datas will be saved in {error_data_path}") - compare = Comparator(config.result_csv_path, config.details_csv_path, config.is_continue_run_ut) - with FileOpen(config.result_csv_path, 'r') as file: - csv_reader = csv.reader(file) - next(csv_reader) - api_name_set = {row[0] for row in csv_reader} - for i, (api_full_name, api_info_dict) in enumerate(tqdm(config.forward_content.items(), **tqdm_params)): - if api_full_name in api_name_set: - continue - try: - if msCheckerConfig.white_list: - [_, api_name, _] = api_full_name.split("*") - if api_name not in set(msCheckerConfig.white_list): - continue - data_info = run_torch_api(api_full_name, config.real_data_path, config.backward_content, api_info_dict) - is_fwd_success, is_bwd_success = compare.compare_output(api_full_name, - data_info.bench_out, - data_info.device_out, - data_info.bench_grad_out, - data_info.device_grad_out) - if config.save_error_data: - do_save_error_data(api_full_name, data_info, is_fwd_success, is_bwd_success) - except Exception as err: - [_, api_name, _] = api_full_name.split("*") - if "expected scalar type Long" in str(err): - print_warn_log(f"API {api_name} not support int32 tensor in CPU, please add {api_name} to CONVERT_API " - f"'int32_to_int64' list in accuracy_tools/api_accuracy_check/common/utils.py file.") - else: - print_error_log(f"Run {api_full_name} UT Error: %s" % str(err)) - compare.write_summary_csv((api_full_name, "SKIP", "SKIP", str(err))) - finally: - if is_gpu: - torch.cuda.empty_cache() - else: - torch.npu.empty_cache() - gc.collect() - change_mode(compare.save_path, FileCheckConst.DATA_FILE_AUTHORITY) - change_mode(compare.detail_save_path, FileCheckConst.DATA_FILE_AUTHORITY) - compare.print_pretest_result() - - -def do_save_error_data(api_full_name, data_info, is_fwd_success, is_bwd_success): - if not is_fwd_success or not is_bwd_success: - api_full_name = api_full_name.replace("*", ".") - for element in 
data_info.in_fwd_data_list: - UtAPIInfo(api_full_name + '.forward.input', element) - UtAPIInfo(api_full_name + '.forward.output.bench', data_info.bench_out) - UtAPIInfo(api_full_name + '.forward.output.device', data_info.device_out) - UtAPIInfo(api_full_name + '.backward.input', data_info.grad_in) - UtAPIInfo(api_full_name + '.backward.output.bench', data_info.bench_grad_out) - UtAPIInfo(api_full_name + '.backward.output.device', data_info.device_grad_out) - - -def run_torch_api(api_full_name, real_data_path, backward_content, api_info_dict): - in_fwd_data_list = [] - [api_type, api_name, _] = api_full_name.split("*") - args, kwargs, need_grad = get_api_info(api_info_dict, api_name, real_data_path) - in_fwd_data_list.append(args) - in_fwd_data_list.append(kwargs) - need_backward = api_full_name in backward_content - if not need_grad: - print_warn_log("%s function with out=... arguments don't support automatic differentiation, skip backward." - % api_full_name) - if api_name in not_backward_list: - need_grad = False - print_warn_log( - "%s function backward result is None, skip backward." % api_full_name) - need_backward = need_backward and need_grad - if kwargs.get("device"): - del kwargs["device"] - cpu_args, cpu_kwargs = generate_cpu_params(args, kwargs, need_backward, api_name) - device_args, device_kwargs = generate_device_params(args, kwargs, need_backward, api_name) - bench_grad_out, device_grad_out = None, None - out = exec_api(api_type, api_name, cpu_args, cpu_kwargs) - device_out = exec_api(api_type, api_name, device_args, device_kwargs) - api_setting_dict = get_json_contents("torch_ut_setting.json") - grad_input_index = api_setting_dict.get(api_name) - grad_index = None - grad, bench_grad = None, None - if grad_input_index is not None: - grad_index = grad_input_index.get('grad_index') - - if need_backward: - backward_args = backward_content[api_full_name] - grad = gen_args(backward_args, real_data_path=real_data_path)[0] - bench_grad, _ = generate_cpu_params(grad, {}, False, api_name) - bench_grad_out = run_backward(cpu_args, bench_grad, grad_index, out) - device_grad = grad.clone().detach().to(current_device) - device_grad_out = run_backward(device_args, device_grad, grad_index, device_out) - - if grad_index is not None: - return UtDataInfo(bench_grad_out, device_grad_out, device_out[grad_index], out[grad_index], bench_grad, - in_fwd_data_list) - return UtDataInfo(bench_grad_out, device_grad_out, device_out, out, bench_grad, in_fwd_data_list) - - -def get_api_info(api_info_dict, api_name, real_data_path): - convert_type, api_info_dict = api_info_preprocess(api_name, api_info_dict) - need_grad = True - if api_info_dict.get("kwargs") and "out" in api_info_dict.get("kwargs"): - need_grad = False - args, kwargs = gen_api_params(api_info_dict, need_grad, convert_type, real_data_path) - return args, kwargs, need_grad - - -def run_backward(args, grad, grad_index, out): - - if grad_index is not None: - out[grad_index].backward(grad) - elif isinstance(out, (list, tuple)): - raise NotImplementedError("Multiple backward is not supported.") - else: - out.backward(grad) - args_grad = [] - for arg in args: - if isinstance(arg, torch.Tensor): - args_grad.append(arg.grad) - grad_out = args_grad - - return grad_out - - -def initialize_save_error_data(): - error_data_path = msCheckerConfig.error_data_path - check_path_before_create(error_data_path) - create_directory(error_data_path) - error_data_path_checker = FileChecker(msCheckerConfig.error_data_path, FileCheckConst.DIR, - 
ability=FileCheckConst.WRITE_ABLE) - error_data_path = error_data_path_checker.common_check() - initialize_save_path(error_data_path, UT_ERROR_DATA_DIR) - - -def get_validated_result_csv_path(result_csv_path, mode): - if mode not in ['result', 'detail']: - raise ValueError("The csv mode must be result or detail") - result_csv_path_checker = FileChecker(result_csv_path, FileCheckConst.FILE, ability=FileCheckConst.READ_WRITE_ABLE, - file_type=FileCheckConst.CSV_SUFFIX) - validated_result_csv_path = result_csv_path_checker.common_check() - if mode == 'result': - result_csv_name = os.path.basename(validated_result_csv_path) - pattern = r"^accuracy_checking_result_\d{14}\.csv$" - if not re.match(pattern, result_csv_name): - raise ValueError("When continue run ut, please do not modify the result csv name.") - return validated_result_csv_path - - -def get_validated_details_csv_path(validated_result_csv_path): - result_csv_name = os.path.basename(validated_result_csv_path) - details_csv_name = result_csv_name.replace('result', 'details') - details_csv_path = os.path.join(os.path.dirname(validated_result_csv_path), details_csv_name) - details_csv_path_checker = FileChecker(details_csv_path, FileCheckConst.FILE, - ability=FileCheckConst.READ_WRITE_ABLE, file_type=FileCheckConst.CSV_SUFFIX) - validated_details_csv_path = details_csv_path_checker.common_check() - return validated_details_csv_path - - -def _run_ut_parser(parser): - parser.add_argument("-forward", "--forward_input_file", dest="forward_input_file", default="", type=str, - help=" The api param tool forward result file: generate from api param tool, " - "a json file.", - required=True) - parser.add_argument("-backward", "--backward_input_file", dest="backward_input_file", default="", type=str, - help=" The api param tool backward result file: generate from api param tool, " - "a json file.", - required=False) - parser.add_argument("-o", "--out_path", dest="out_path", default="", type=str, - help=" The ut task result out path.", - required=False) - parser.add_argument('-save_error_data', dest="save_error_data", action="store_true", - help=" Save compare failed api output.", required=False) - parser.add_argument("-j", "--jit_compile", dest="jit_compile", action="store_true", - help=" whether to turn on jit compile", required=False) - - class UniqueDeviceAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - unique_values = set(values) - if len(values) != len(unique_values): - parser.error("device id must be unique") - for device_id in values: - if not 0 <= device_id: - parser.error("device id must be greater than or equal to 0") - setattr(namespace, self.dest, values) - - parser.add_argument("-d", "--device", dest="device_id", nargs='+', type=int, - help=" set device id to run ut, must be unique and in range 0-7", - default=[0], required=False, action=UniqueDeviceAction) - parser.add_argument("-csv_path", "--result_csv_path", dest="result_csv_path", default="", type=str, - help=" The path of accuracy_checking_result_{timestamp}.csv, " - "when run ut is interrupted, enter the file path to continue run ut.", - required=False) - parser.add_argument("-real_data_path", dest="real_data_path", nargs="?", const="", default="", type=str, - help=" In real data mode, the root directory for storing real data " - "must be configured.", - required=False) - parser.add_argument("-f", "--filter_api", dest="filter_api", action="store_true", - help=" Whether to filter the api in the forward_input_file.", required=False) 
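One behavior in `get_validated_result_csv_path` above deserves a callout: resuming an interrupted run via `-csv_path` only works when the timestamped result filename is left untouched. A quick check against the regex taken verbatim from the diff; the timestamp value is invented:

```python
import re

pattern = r"^accuracy_checking_result_\d{14}\.csv$"
assert re.match(pattern, "accuracy_checking_result_20240315123000.csv")
assert not re.match(pattern, "my_renamed_result.csv")
```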
- - -def preprocess_forward_content(forward_content): - processed_content = {} - base_keys_variants = {} - for key, value in forward_content.items(): - base_key = key.rsplit('*', 1)[0] - new_args = value['args'] - new_kwargs = value['kwargs'] - filtered_new_args = [{k: v for k, v in arg.items() if k not in ['Max', 'Min']} for arg in new_args if isinstance(arg, dict)] - if base_key in base_keys_variants: - is_duplicate = False - for variant in base_keys_variants.get(base_key, []): - try: - existing_args = processed_content[variant].get('args', []) - existing_kwargs = processed_content[variant].get('kwargs', {}) - filtered_existing_args = [{k: v for k, v in arg.items() if k not in ['Max', 'Min']} for arg in existing_args if isinstance(arg, dict)] - except KeyError as e: - print_error_log(f"KeyError: {e} when processing {key}") - if filtered_existing_args == filtered_new_args and existing_kwargs == new_kwargs: - is_duplicate = True - break - if not is_duplicate: - processed_content[key] = value - base_keys_variants[base_key].append(key) - else: - processed_content[key] = value - base_keys_variants[base_key] = [key] - return processed_content - - -def _run_ut(): - parser = argparse.ArgumentParser() - _run_ut_parser(parser) - args = parser.parse_args(sys.argv[1:]) - if not is_gpu: - torch.npu.set_compile_mode(jit_compile=args.jit_compile) - used_device = current_device + ":" + str(args.device_id[0]) - try: - if is_gpu: - torch.cuda.set_device(used_device) - else: - torch.npu.set_device(used_device) - except Exception as error: - print_error_log(f"Set device id failed. device id is: {args.device_id}") - raise NotImplementedError from error - check_link(args.forward_input_file) - forward_file = os.path.realpath(args.forward_input_file) - check_file_suffix(forward_file, FileCheckConst.JSON_SUFFIX) - out_path = os.path.realpath(args.out_path) if args.out_path else "./" - check_path_before_create(out_path) - create_directory(out_path) - out_path_checker = FileChecker(out_path, FileCheckConst.DIR, ability=FileCheckConst.WRITE_ABLE) - out_path = out_path_checker.common_check() - save_error_data = args.save_error_data - forward_content = get_json_contents(forward_file) - if args.filter_api: - forward_content = preprocess_forward_content(forward_content) - backward_content = {} - if args.backward_input_file: - check_link(args.backward_input_file) - backward_file = os.path.realpath(args.backward_input_file) - check_file_suffix(backward_file, FileCheckConst.JSON_SUFFIX) - backward_content = get_json_contents(backward_file) - result_csv_path = os.path.join(out_path, RESULT_FILE_NAME) - details_csv_path = os.path.join(out_path, DETAILS_FILE_NAME) - if args.result_csv_path: - result_csv_path = get_validated_result_csv_path(args.result_csv_path, 'result') - details_csv_path = get_validated_details_csv_path(result_csv_path) - if save_error_data: - if args.result_csv_path: - time_info = result_csv_path.split('.')[0].split('_')[-1] - global UT_ERROR_DATA_DIR - UT_ERROR_DATA_DIR = 'ut_error_data' + time_info - initialize_save_error_data() - run_ut_config = RunUTConfig(forward_content, backward_content, result_csv_path, details_csv_path, save_error_data, - args.result_csv_path, args.real_data_path) - run_ut(run_ut_config) - - -class UtDataInfo: - def __init__(self, bench_grad_out, device_grad_out, device_out, bench_out, grad_in, in_fwd_data_list): - self.bench_grad_out = bench_grad_out - self.device_grad_out = device_grad_out - self.device_out = device_out - self.bench_out = bench_out - self.grad_in = grad_in - 
self.in_fwd_data_list = in_fwd_data_list
-
-
-class UtAPIInfo(APIInfo):
-    def __init__(self, api_name, element):
-        super().__init__(api_name,
-                         save_path=self.get_full_save_path(msCheckerConfig.error_data_path, UT_ERROR_DATA_DIR),
-                         is_save_data=True)
-        self.analyze_element(element)
-
-
-if __name__ == '__main__':
-    _run_ut()
-    print_info_log("UT task completed.")
+import argparse
+import os
+import csv
+import re
+import sys
+import time
+import gc
+from collections import namedtuple
+try:
+    import torch_npu
+except ImportError:
+    is_gpu = True
+    current_device = "cuda"
+else:
+    is_gpu = False
+    current_device = "npu"
+import torch
+from tqdm import tqdm
+from api_accuracy_checker.run_ut.data_generate import gen_api_params, gen_args
+from api_accuracy_checker.common.utils import print_info_log, print_warn_log, get_json_contents, api_info_preprocess, \
+    print_error_log, initialize_save_path, Const, create_directory
+from api_accuracy_checker.compare.compare import Comparator
+from api_accuracy_checker.hook_module.wrap_tensor import TensorOPTemplate
+from api_accuracy_checker.hook_module.wrap_functional import FunctionalOPTemplate
+from api_accuracy_checker.hook_module.wrap_torch import TorchOPTemplate
+from api_accuracy_checker.common.config import msCheckerConfig
+from api_accuracy_checker.dump.api_info import APIInfo
+from ptdbg_ascend.src.python.ptdbg_ascend.common.utils import check_path_before_create
+
+
+from ptdbg_ascend.src.python.ptdbg_ascend.common.file_check_util import FileOpen, FileCheckConst, FileChecker, \
+    change_mode, check_file_suffix, check_link
+
+current_time = time.strftime("%Y%m%d%H%M%S")
+UT_ERROR_DATA_DIR = 'ut_error_data' + current_time
+RESULT_FILE_NAME = "accuracy_checking_result_" + current_time + ".csv"
+DETAILS_FILE_NAME = "accuracy_checking_details_" + current_time + ".csv"
+RunUTConfig = namedtuple('RunUTConfig', ['forward_content', 'backward_content', 'result_csv_path', 'details_csv_path',
+                                         'save_error_data', 'is_continue_run_ut', 'real_data_path'])
+not_backward_list = ['repeat_interleave']
+not_detach_set = {'resize_', 'resize_as_', 'set_', 'transpose_', 't_', 'squeeze_', 'unsqueeze_'}
+
+tqdm_params = {
+    'smoothing': 0,  # smoothing factor for the ETA estimate; 0 averages over the whole run (range 0 to 1)
+    'desc': 'Processing',  # text shown in front of the progress bar
+    'leave': True,  # keep the bar on screen after the iteration finishes
+    'ncols': 75,  # fixed width of the bar
+    'mininterval': 0.1,  # minimum refresh interval of the bar, in seconds
+    'maxinterval': 1.0,  # maximum refresh interval of the bar, in seconds
+    'miniters': 1,  # minimum number of iterations between bar updates
+    'ascii': None,  # let tqdm pick ASCII or Unicode characters depending on the environment
+    'unit': 'it',  # iteration unit label
+    'unit_scale': True,  # automatically scale the counter for the unit
+    'dynamic_ncols': True,  # adapt the bar width to the console dynamically
+    'bar_format': '{l_bar}{bar}| {n}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]'  # custom bar layout
+}
+
+
+def exec_api(api_type, api_name, args, kwargs):
+    if api_type == "Functional":
+        functional_api = FunctionalOPTemplate(api_name, str, False)
+        out = functional_api.forward(*args, **kwargs)
+    elif api_type == "Tensor":
+        tensor_api = TensorOPTemplate(api_name, str, False)
+        out = tensor_api.forward(*args, **kwargs)
+    elif api_type == "Torch":
+        torch_api = TorchOPTemplate(api_name, str, False)
+        out = torch_api.forward(*args, **kwargs)
+    else:
+        # guard against unknown api types instead of returning an undefined value
+        raise ValueError(f"Unsupported api type: {api_type}")
+    return out
+
+
+def deal_detach(arg, to_detach=True):
+    return arg.detach() if to_detach else arg
+
+
+def deal_dtype(arg, raise_dtype=None):
+    if raise_dtype is None or arg.dtype not in Const.RAISE_PRECISION or raise_dtype == arg.dtype:
+        return arg
+    return arg.type(raise_dtype)
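+
+
+# Illustrative dispatch (a sketch, not called by the tool itself): run_torch_api
+# splits a dumped key such as "Functional*relu*0" into its type and name, then
+# calls, e.g.:
+#     out = exec_api("Functional", "relu", (torch.randn(2, 2),), {})
+# deal_dtype raises low-precision dtypes according to Const.RAISE_PRECISION
+# before the CPU baseline run.
+def generate_device_params(input_args, input_kwargs, need_backward, api_name):
+    def recursive_arg_to_device(arg_in, to_detach):
+        if isinstance(arg_in, 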
(list, tuple)): + return type(arg_in)(recursive_arg_to_device(arg, to_detach) for arg in arg_in) + elif isinstance(arg_in, torch.Tensor): + if need_backward and arg_in.requires_grad: + arg_in = deal_detach(arg_in.clone(), to_detach).to(current_device).requires_grad_() + temp_arg_in = arg_in * 1 + arg_in = temp_arg_in.type_as(arg_in) + arg_in.retain_grad() + return arg_in + else: + return deal_detach(arg_in.clone(), to_detach).to(current_device) + else: + return arg_in + + is_detach = api_name not in not_detach_set + device_args = recursive_arg_to_device(input_args, is_detach) + device_kwargs = \ + {key: recursive_arg_to_device(value, key != "out" and is_detach) for key, value in input_kwargs.items()} + return device_args, device_kwargs + + +def generate_cpu_params(input_args, input_kwargs, need_backward, api_name): + def recursive_arg_to_cpu(arg_in, to_detach, raise_dtype=None): + if isinstance(arg_in, (list, tuple)): + return type(arg_in)(recursive_arg_to_cpu(arg, to_detach, raise_dtype=raise_dtype) for arg in arg_in) + elif isinstance(arg_in, torch.Tensor): + if need_backward and arg_in.requires_grad: + arg_in = deal_detach(deal_dtype(arg_in.clone(), raise_dtype), to_detach).requires_grad_() + temp_arg_in = arg_in * 1 + arg_in = temp_arg_in.type_as(arg_in) + arg_in.retain_grad() + return arg_in + else: + return deal_detach(deal_dtype(arg_in.clone(), raise_dtype=raise_dtype), to_detach) + else: + return arg_in + + def is_tensor_with_raise_precision(arg_in, check_kwargs=False): + if arg_in.dtype in Const.RAISE_PRECISION: + return True + if check_kwargs and arg_in.dtype in [torch.half, torch.bfloat16]: + return True + return False + + def recursive_find_dtypes(arg_in, kwargs=None, check_kwargs=False): + if isinstance(arg_in, (list, tuple)): + return set().union(*tuple(recursive_find_dtypes(arg, kwargs, check_kwargs=check_kwargs) for arg in arg_in)) + elif isinstance(arg_in, torch.Tensor) and is_tensor_with_raise_precision(arg_in, check_kwargs): + return set([arg_in.dtype]) + elif isinstance(arg_in, dict) and check_kwargs: + return set().union(*tuple(recursive_find_dtypes(v, kwargs, check_kwargs=True) for v in arg_in.values())) + return set() + + raise_dtype = None + need_raise_dtypes = recursive_find_dtypes(input_args) + need_raise_dtypes.update(recursive_find_dtypes(input_kwargs, check_kwargs=True)) + if len(need_raise_dtypes) == 1: + raise_dtype = Const.RAISE_PRECISION.get(need_raise_dtypes.pop(), torch.float32) + elif len(need_raise_dtypes) >= 2: + raise_dtype = torch.float32 + + is_detach = api_name not in not_detach_set + cpu_args = recursive_arg_to_cpu(input_args, is_detach, raise_dtype=raise_dtype) + cpu_kwargs = {key: recursive_arg_to_cpu(value, key != "out" and is_detach, raise_dtype=raise_dtype) for key, value in input_kwargs.items()} + return cpu_args, cpu_kwargs + + +def run_ut(config): + print_info_log("start UT test") + print_info_log(f"UT task result will be saved in {config.result_csv_path}") + print_info_log(f"UT task details will be saved in {config.details_csv_path}") + if config.save_error_data: + error_data_path = os.path.abspath(os.path.join(msCheckerConfig.error_data_path, UT_ERROR_DATA_DIR)) + print_info_log(f"UT task error_datas will be saved in {error_data_path}") + compare = Comparator(config.result_csv_path, config.details_csv_path, config.is_continue_run_ut) + with FileOpen(config.result_csv_path, 'r') as file: + csv_reader = csv.reader(file) + next(csv_reader) + api_name_set = {row[0] for row in csv_reader} + for i, (api_full_name, api_info_dict) in 
enumerate(tqdm(config.forward_content.items(), **tqdm_params)):
+        if api_full_name in api_name_set:
+            continue
+        try:
+            if msCheckerConfig.white_list:
+                [_, api_name, _] = api_full_name.split("*")
+                if api_name not in set(msCheckerConfig.white_list):
+                    continue
+            data_info = run_torch_api(api_full_name, config.real_data_path, config.backward_content, api_info_dict)
+            is_fwd_success, is_bwd_success = compare.compare_output(api_full_name,
+                                                                    data_info.bench_out,
+                                                                    data_info.device_out,
+                                                                    data_info.bench_grad_out,
+                                                                    data_info.device_grad_out)
+            if config.save_error_data:
+                do_save_error_data(api_full_name, data_info, is_fwd_success, is_bwd_success)
+        except Exception as err:
+            [_, api_name, _] = api_full_name.split("*")
+            if "expected scalar type Long" in str(err):
+                print_warn_log(f"API {api_name} does not support int32 tensors on CPU; please add {api_name} to the "
+                               f"CONVERT_API 'int32_to_int64' list in accuracy_tools/api_accuracy_checker/common/utils.py.")
+            else:
+                print_error_log(f"Run {api_full_name} UT Error: {str(err)}")
+                compare.write_summary_csv((api_full_name, "SKIP", "SKIP", str(err)))
+        finally:
+            if is_gpu:
+                torch.cuda.empty_cache()
+            else:
+                torch.npu.empty_cache()
+            gc.collect()
+    change_mode(compare.save_path, FileCheckConst.DATA_FILE_AUTHORITY)
+    change_mode(compare.detail_save_path, FileCheckConst.DATA_FILE_AUTHORITY)
+    compare.print_pretest_result()
+
+
+def do_save_error_data(api_full_name, data_info, is_fwd_success, is_bwd_success):
+    if not is_fwd_success or not is_bwd_success:
+        api_full_name = api_full_name.replace("*", ".")
+        for element in data_info.in_fwd_data_list:
+            UtAPIInfo(api_full_name + '.forward.input', element)
+        UtAPIInfo(api_full_name + '.forward.output.bench', data_info.bench_out)
+        UtAPIInfo(api_full_name + '.forward.output.device', data_info.device_out)
+        UtAPIInfo(api_full_name + '.backward.input', data_info.grad_in)
+        UtAPIInfo(api_full_name + '.backward.output.bench', data_info.bench_grad_out)
+        UtAPIInfo(api_full_name + '.backward.output.device', data_info.device_grad_out)
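+
+
+# Note (illustrative key, not a fixed API): for a failed API whose dumped key
+# is, e.g., "Tensor*add*0", do_save_error_data above saves the tensors under
+# names like "Tensor.add.0.forward.input" and
+# "Tensor.add.0.backward.output.device" inside the ut_error_data{timestamp}
+# directory.
+def run_torch_api(api_full_name, real_data_path, backward_content, api_info_dict):
+    in_fwd_data_list = []
+    [api_type, api_name, _] = api_full_name.split("*")
+    args, kwargs, need_grad = get_api_info(api_info_dict, api_name, real_data_path)
+    in_fwd_data_list.append(args)
+    in_fwd_data_list.append(kwargs)
+    need_backward = api_full_name in backward_content
+    if not need_grad:
+        print_warn_log("%s function with out=... arguments does not support automatic differentiation, skip backward."
+                       % api_full_name)
+    if api_name in not_backward_list:
+        need_grad = False
+        print_warn_log(
+            "%s function backward result is None, skip backward." 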
% api_full_name) + need_backward = need_backward and need_grad + if kwargs.get("device"): + del kwargs["device"] + cpu_args, cpu_kwargs = generate_cpu_params(args, kwargs, need_backward, api_name) + device_args, device_kwargs = generate_device_params(args, kwargs, need_backward, api_name) + bench_grad_out, device_grad_out = None, None + out = exec_api(api_type, api_name, cpu_args, cpu_kwargs) + device_out = exec_api(api_type, api_name, device_args, device_kwargs) + current_path = os.path.dirname(os.path.realpath(__file__)) + ut_setting_path = os.path.join(current_path, "torch_ut_setting.json") + api_setting_dict = get_json_contents(ut_setting_path) + grad_input_index = api_setting_dict.get(api_name) + grad_index = None + grad, bench_grad = None, None + if grad_input_index is not None: + grad_index = grad_input_index.get('grad_index') + + if need_backward: + backward_args = backward_content[api_full_name] + grad = gen_args(backward_args, real_data_path=real_data_path)[0] + bench_grad, _ = generate_cpu_params(grad, {}, False, api_name) + bench_grad_out = run_backward(cpu_args, bench_grad, grad_index, out) + device_grad = grad.clone().detach().to(current_device) + device_grad_out = run_backward(device_args, device_grad, grad_index, device_out) + + if grad_index is not None: + return UtDataInfo(bench_grad_out, device_grad_out, device_out[grad_index], out[grad_index], bench_grad, + in_fwd_data_list) + return UtDataInfo(bench_grad_out, device_grad_out, device_out, out, bench_grad, in_fwd_data_list) + + +def get_api_info(api_info_dict, api_name, real_data_path): + convert_type, api_info_dict = api_info_preprocess(api_name, api_info_dict) + need_grad = True + if api_info_dict.get("kwargs") and "out" in api_info_dict.get("kwargs"): + need_grad = False + args, kwargs = gen_api_params(api_info_dict, need_grad, convert_type, real_data_path) + return args, kwargs, need_grad + + +def run_backward(args, grad, grad_index, out): + + if grad_index is not None: + out[grad_index].backward(grad) + elif isinstance(out, (list, tuple)): + raise NotImplementedError("Multiple backward is not supported.") + else: + out.backward(grad) + args_grad = [] + for arg in args: + if isinstance(arg, torch.Tensor): + args_grad.append(arg.grad) + grad_out = args_grad + + return grad_out + + +def initialize_save_error_data(): + error_data_path = msCheckerConfig.error_data_path + check_path_before_create(error_data_path) + create_directory(error_data_path) + error_data_path_checker = FileChecker(msCheckerConfig.error_data_path, FileCheckConst.DIR, + ability=FileCheckConst.WRITE_ABLE) + error_data_path = error_data_path_checker.common_check() + initialize_save_path(error_data_path, UT_ERROR_DATA_DIR) + + +def get_validated_result_csv_path(result_csv_path, mode): + if mode not in ['result', 'detail']: + raise ValueError("The csv mode must be result or detail") + result_csv_path_checker = FileChecker(result_csv_path, FileCheckConst.FILE, ability=FileCheckConst.READ_WRITE_ABLE, + file_type=FileCheckConst.CSV_SUFFIX) + validated_result_csv_path = result_csv_path_checker.common_check() + if mode == 'result': + result_csv_name = os.path.basename(validated_result_csv_path) + pattern = r"^accuracy_checking_result_\d{14}\.csv$" + if not re.match(pattern, result_csv_name): + raise ValueError("When continue run ut, please do not modify the result csv name.") + return validated_result_csv_path + + +def get_validated_details_csv_path(validated_result_csv_path): + result_csv_name = os.path.basename(validated_result_csv_path) + details_csv_name 
= result_csv_name.replace('result', 'details')
+    details_csv_path = os.path.join(os.path.dirname(validated_result_csv_path), details_csv_name)
+    details_csv_path_checker = FileChecker(details_csv_path, FileCheckConst.FILE,
+                                           ability=FileCheckConst.READ_WRITE_ABLE, file_type=FileCheckConst.CSV_SUFFIX)
+    validated_details_csv_path = details_csv_path_checker.common_check()
+    return validated_details_csv_path
+
+
+def _run_ut_parser(parser):
+    parser.add_argument("-forward", "--forward_input_file", dest="forward_input_file", default="", type=str,
+                        help="The forward result file generated by the api param tool, a json file.",
+                        required=True)
+    parser.add_argument("-backward", "--backward_input_file", dest="backward_input_file", default="", type=str,
+                        help="The backward result file generated by the api param tool, a json file.",
+                        required=False)
+    parser.add_argument("-o", "--out_path", dest="out_path", default="", type=str,
+                        help="The output path for the ut task results.",
+                        required=False)
+    parser.add_argument('-save_error_data', dest="save_error_data", action="store_true",
+                        help="Save the output of APIs that fail the comparison.", required=False)
+    parser.add_argument("-j", "--jit_compile", dest="jit_compile", action="store_true",
+                        help="Whether to turn on jit compile.", required=False)
+
+    class UniqueDeviceAction(argparse.Action):
+        def __call__(self, parser, namespace, values, option_string=None):
+            unique_values = set(values)
+            if len(values) != len(unique_values):
+                parser.error("device id must be unique")
+            for device_id in values:
+                if not 0 <= device_id:
+                    parser.error("device id must be greater than or equal to 0")
+            setattr(namespace, self.dest, values)
+
+    parser.add_argument("-d", "--device", dest="device_id", nargs='+', type=int,
+                        help="Set the device ids to run ut on; ids must be unique and greater than or equal to 0.",
+                        default=[0], required=False, action=UniqueDeviceAction)
+    parser.add_argument("-csv_path", "--result_csv_path", dest="result_csv_path", default="", type=str,
+                        help="The path of accuracy_checking_result_{timestamp}.csv; when run ut is interrupted, "
+                             "pass this file path to resume the run.",
+                        required=False)
+    parser.add_argument("-real_data_path", dest="real_data_path", nargs="?", const="", default="", type=str,
+                        help="In real data mode, the root directory for storing real data must be configured.",
+                        required=False)
+    parser.add_argument("-f", "--filter_api", dest="filter_api", action="store_true",
+                        help="Whether to filter the apis in the forward_input_file.", required=False)
+
+
+def preprocess_forward_content(forward_content):
+    processed_content = {}
+    base_keys_variants = {}
+    for key, value in forward_content.items():
+        base_key = key.rsplit('*', 1)[0]
+        new_args = value['args']
+        new_kwargs = value['kwargs']
+        filtered_new_args = [{k: v for k, v in arg.items() if k not in ['Max', 'Min']} for arg in new_args if isinstance(arg, dict)]
+        if base_key in base_keys_variants:
+            is_duplicate = False
+            for variant in base_keys_variants.get(base_key, []):
+                try:
+                    existing_args = processed_content[variant].get('args', [])
+                    existing_kwargs = processed_content[variant].get('kwargs', {})
+                except KeyError as e:
+                    print_error_log(f"KeyError: {e} when processing {key}")
+                    # Skip this variant: without its args/kwargs the duplicate
+                    # check below would compare stale or undefined values.
+                    continue
+                filtered_existing_args = [{k: v for k, v in arg.items() if k not in ['Max', 'Min']} for arg in existing_args if isinstance(arg, dict)]
+                if filtered_existing_args == filtered_new_args and existing_kwargs == new_kwargs:
+                    is_duplicate = True
+                    break
+            if not is_duplicate:
+                processed_content[key] 
= value + base_keys_variants[base_key].append(key) + else: + processed_content[key] = value + base_keys_variants[base_key] = [key] + return processed_content + + +def _run_ut(parser=None): + if not parser: + parser = argparse.ArgumentParser() + _run_ut_parser(parser) + args = parser.parse_args(sys.argv[1:]) + run_ut_command(args) + + +def run_ut_command(args): + if not is_gpu: + torch.npu.set_compile_mode(jit_compile=args.jit_compile) + used_device = current_device + ":" + str(args.device_id[0]) + try: + if is_gpu: + torch.cuda.set_device(used_device) + else: + torch.npu.set_device(used_device) + except Exception as error: + print_error_log(f"Set device id failed. device id is: {args.device_id}") + raise NotImplementedError from error + check_link(args.forward_input_file) + forward_file = os.path.realpath(args.forward_input_file) + check_file_suffix(forward_file, FileCheckConst.JSON_SUFFIX) + out_path = os.path.realpath(args.out_path) if args.out_path else "./" + check_path_before_create(out_path) + create_directory(out_path) + out_path_checker = FileChecker(out_path, FileCheckConst.DIR, ability=FileCheckConst.WRITE_ABLE) + out_path = out_path_checker.common_check() + save_error_data = args.save_error_data + forward_content = get_json_contents(forward_file) + if args.filter_api: + forward_content = preprocess_forward_content(forward_content) + backward_content = {} + if args.backward_input_file: + check_link(args.backward_input_file) + backward_file = os.path.realpath(args.backward_input_file) + check_file_suffix(backward_file, FileCheckConst.JSON_SUFFIX) + backward_content = get_json_contents(backward_file) + result_csv_path = os.path.join(out_path, RESULT_FILE_NAME) + details_csv_path = os.path.join(out_path, DETAILS_FILE_NAME) + if args.result_csv_path: + result_csv_path = get_validated_result_csv_path(args.result_csv_path, 'result') + details_csv_path = get_validated_details_csv_path(result_csv_path) + if save_error_data: + if args.result_csv_path: + time_info = result_csv_path.split('.')[0].split('_')[-1] + global UT_ERROR_DATA_DIR + UT_ERROR_DATA_DIR = 'ut_error_data' + time_info + initialize_save_error_data() + run_ut_config = RunUTConfig(forward_content, backward_content, result_csv_path, details_csv_path, save_error_data, + args.result_csv_path, args.real_data_path) + run_ut(run_ut_config) + + +class UtDataInfo: + def __init__(self, bench_grad_out, device_grad_out, device_out, bench_out, grad_in, in_fwd_data_list): + self.bench_grad_out = bench_grad_out + self.device_grad_out = device_grad_out + self.device_out = device_out + self.bench_out = bench_out + self.grad_in = grad_in + self.in_fwd_data_list = in_fwd_data_list + + +class UtAPIInfo(APIInfo): + def __init__(self, api_name, element): + super().__init__(api_name, + save_path=self.get_full_save_path(msCheckerConfig.error_data_path, UT_ERROR_DATA_DIR), + is_save_data=True) + self.analyze_element(element) + + +if __name__ == '__main__': + _run_ut() + print_info_log("UT task completed.") diff --git a/debug/accuracy_tools/api_accuracy_checker/test/ut/compare/test_compare.py b/debug/accuracy_tools/api_accuracy_checker/test/ut/compare/test_compare.py index 242db8584af15a8e3d78c09078a9cd09a7fbb109..4ce73ce550dfc5d5cd21246dbc2756a6024f6fea 100644 --- a/debug/accuracy_tools/api_accuracy_checker/test/ut/compare/test_compare.py +++ b/debug/accuracy_tools/api_accuracy_checker/test/ut/compare/test_compare.py @@ -33,32 +33,38 @@ class TestCompare(unittest.TestCase): dummmy_input = torch.randn(100, 100) bench_out = 
torch.nn.functional.dropout2d(dummmy_input, 0.3) npu_out = torch.nn.functional.dropout2d(dummmy_input, 0.3) - self.assertTrue(self.compare._compare_dropout(bench_out, npu_out)) + self.assertTrue(self.compare._compare_dropout("api", bench_out, npu_out)) def test_compare_core_wrapper(self): dummy_input = torch.randn(100, 100) bench_out, npu_out = dummy_input, dummy_input - test_final_success, detailed_result_total = self.compare._compare_core_wrapper(bench_out, npu_out) + test_final_success, detailed_result_total = self.compare._compare_core_wrapper("api", bench_out, npu_out) actual_cosine_similarity = detailed_result_total[0][3] # 设置一个小的公差值 tolerance = 1e-4 # 判断实际的余弦相似度值是否在预期值的公差范围内 self.assertTrue(np.isclose(actual_cosine_similarity, 1.0, atol=tolerance)) # 对其他值进行比较,确保它们符合预期 - self.assertEqual(detailed_result_total, [['torch.float32', 'torch.float32', (100, 100), - actual_cosine_similarity, 0.0, 'N/A', 'N/A', - 'N/A', 'N/A', 0.0, 0.0, 0, 0.0, 0.0, 'pass', '\n']]) + detailed_result_total[0][3] = 1.0 + self.assertEqual(detailed_result_total, [['torch.float32', 'torch.float32', (100, 100), 1.0, 0.0, ' ', ' ', ' ', + ' ', 0.0, 0.0, 0, 0.0, 0.0, ' ', ' ', ' ', 'pass', + '\nMax abs error is less than 0.001, consider as pass, skip other check and set to SPACE.\n']]) self.assertTrue(test_final_success) bench_out, npu_out = [dummy_input, dummy_input], [dummy_input, dummy_input] - test_final_success, detailed_result_total = self.compare._compare_core_wrapper(bench_out, npu_out) + test_final_success, detailed_result_total = self.compare._compare_core_wrapper("api", bench_out, npu_out) + actual_cosine_similarity = detailed_result_total[0][3] + self.assertTrue(np.isclose(actual_cosine_similarity, 1.0, atol=tolerance)) + actual_cosine_similarity = detailed_result_total[1][3] + self.assertTrue(np.isclose(actual_cosine_similarity, 1.0, atol=tolerance)) + detailed_result_total[0][3] = 1.0 + detailed_result_total[1][3] = 1.0 self.assertTrue(test_final_success) - self.assertEqual(detailed_result_total, [['torch.float32', 'torch.float32', (100, 100), - actual_cosine_similarity, 0.0, 'N/A', 'N/A', - 'N/A', 'N/A', 0.0, 0.0, 0, 0.0, 0.0, 'pass', '\n'], - ['torch.float32', 'torch.float32', (100, 100), - actual_cosine_similarity, 0.0, 'N/A', 'N/A', - 'N/A', 'N/A', 0.0, 0.0, 0, 0.0, 0.0, 'pass', '\n']]) + self.assertEqual(detailed_result_total, [['torch.float32', 'torch.float32', (100, 100), 1.0, 0.0, ' ', ' ', ' ', + ' ', 0.0, 0.0, 0, 0.0, 0.0, ' ', ' ', ' ', 'pass', + '\nMax abs error is less than 0.001, consider as pass, skip other check and set to SPACE.\n'], + ['torch.float32', 'torch.float32', (100, 100), 1.0, 0.0, ' ', ' ', ' ', ' ', 0.0, 0.0, 0, 0.0, 0.0, ' ', ' ', + ' ', 'pass', '\nMax abs error is less than 0.001, consider as pass, skip other check and set to SPACE.\n']]) def test_compare_output(self): bench_out, npu_out = torch.randn(100, 100), torch.randn(100, 100) @@ -89,7 +95,7 @@ class TestCompare(unittest.TestCase): cpu_output = torch.Tensor([1.0, 2.0, 3.0]) npu_output = torch.Tensor([1.0, 2.0, 3.0]) compare_column = CompareColumn() - status, compare_column, message = self.compare._compare_torch_tensor(cpu_output, npu_output, compare_column) + status, compare_column, message = self.compare._compare_torch_tensor("api", cpu_output, npu_output, compare_column) self.assertEqual(status, "pass") def test_compare_bool_tensor(self): diff --git a/debug/accuracy_tools/atat/__init__.py b/debug/accuracy_tools/atat/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/debug/accuracy_tools/atat/atat.py b/debug/accuracy_tools/atat/atat.py new file mode 100644 index 0000000000000000000000000000000000000000..4f69afd2349f211d1c6e17ab9386de3c8fcd6909 --- /dev/null +++ b/debug/accuracy_tools/atat/atat.py @@ -0,0 +1,63 @@ +# Copyright (c) 2024, Huawei Technologies Co., Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import sys +from api_accuracy_checker.run_ut.run_ut import _run_ut_parser, run_ut_command +from ptdbg_ascend.src.python.ptdbg_ascend.parse_tool.cli import parse as cli_parse +from api_accuracy_checker.run_ut.multi_run_ut import prepare_config, run_parallel_ut +from api_accuracy_checker.compare.api_precision_compare import _api_precision_compare_parser, _api_precision_compare_command +from api_accuracy_checker.run_ut.run_overflow_check import _run_overflow_check_parser, _run_overflow_check_command + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description="atat(ascend training accuracy tools), [Powered by MindStudio].\n" + "Providing one-site accuracy difference debugging toolkit for training on Ascend Devices.\n" + f"For any issue, refer README.md first", + ) + parser.set_defaults(print_help=parser.print_help) + subparsers = parser.add_subparsers() + subparsers.add_parser('parse') + run_ut_cmd_parser = subparsers.add_parser('run_ut') + multi_run_ut_cmd_parser = subparsers.add_parser('multi_run_ut') + api_precision_compare_cmd_parser = subparsers.add_parser('api_precision_compare') + run_overflow_check_cmd_parser = subparsers.add_parser('run_overflow_check') + _run_ut_parser(run_ut_cmd_parser) + _run_ut_parser(multi_run_ut_cmd_parser) + multi_run_ut_cmd_parser.add_argument('-n', '--num_splits', type=int, choices=range(1, 65), default=8, + help='Number of splits for parallel processing. Range: 1-64') + _api_precision_compare_parser(api_precision_compare_cmd_parser) + _run_overflow_check_parser(run_overflow_check_cmd_parser) + if len(sys.argv) == 1: + parser.print_help() + sys.exit(0) + args = parser.parse_args(sys.argv[1:]) + if sys.argv[1] == "run_ut": + run_ut_command(args) + elif sys.argv[1] == "parse": + cli_parse() + elif sys.argv[1] == "multi_run_ut": + config = prepare_config(args) + run_parallel_ut(config) + elif sys.argv[1] == "api_precision_compare": + _api_precision_compare_command(args) + elif sys.argv[1] == "run_overflow_check": + _run_overflow_check_command(args) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/debug/accuracy_tools/grad_tool/README.md b/debug/accuracy_tools/grad_tool/README.md index 80de6a51487262d28fae4eac5b90ece174872f95..78acffbe685f4b724dd1cff82a48236276e8b16f 100644 --- a/debug/accuracy_tools/grad_tool/README.md +++ b/debug/accuracy_tools/grad_tool/README.md @@ -1,12 +1,15 @@ # Ascend模型梯度状态监测工具 -Ascend模型梯度状态检测工具能监控一个模型的梯度数据,并且将梯度数据导出。 +训练状态监控工具提供了两种能力 + +1. 
将模型的梯度数据导出。这种功能可以将模型权重的梯度值以统计量的形式采集出来,用以分析问题。 +2. 将两份梯度数据进行相似度对比。这种功能可以发现训练过程中问题出现的step,以及抓取反向过程中的问题。 工具支持PyTorch版本:1.11.0/2.0/2.1。 ## 工具特性 -1. 使用便捷,不需要去训练流程里插入代码 +1. 使用便捷,修改处少 2. 可配置多种过滤项 ## 工具安装 @@ -20,11 +23,13 @@ Ascend模型梯度状态检测工具能监控一个模型的梯度数据,并 2. 安装依赖pandas、pyyaml ```bash - pip3 install pandas pyyaml + pip3 install pandas pyyaml tqdm ``` ## 使用方式 +### 梯度数据导出 + 1. 自己写一个配置文件 config.yaml,样例如下: ```python @@ -61,7 +66,8 @@ Ascend模型梯度状态检测工具能监控一个模型的梯度数据,并 |-----| -- | -- | | L0 | ("param_name", "MD5", "max", "min", "norm", "shape") | 无 | | L1 | ("param_name", "MD5", *intervals, "=0", "max", "min", "norm", "shape") | 无 | - | L2 | ("param_name", "MD5", *intervals, "=0", "max", "min", "norm", "shape") | 有 | + | L2 | ("param_name", "MD5", "max", "min", "norm", "shape") | 有 | + | L3 | ("param_name", "MD5", *intervals, "=0", "max", "min", "norm", "shape") | 有 | intervals 就是根据值分布划分出的区间 @@ -71,25 +77,36 @@ Ascend模型梯度状态检测工具能监控一个模型的梯度数据,并 -2. 在训练流程执行之前传入 config.yaml 的路径实例化一个 GradientDumper 对象,并用该对象的 monitor 方法对需要监控的模型挂上钩子。示例代码如下: +2. 在训练流程执行之前传入 config.yaml 的路径实例化一个 GradientDumper 对象。实例代码如下: ```python from grad_tool.grad_monitor import GradientMonitor gm = GradientMonitor("config_path") - gm.monitor(model) ``` +3. 插入代码监控模型,有两种方法,选择其一即可: + + 推荐:在训练流程中,反向执行之后梯度清零之前的位置,调用 gm.save_grad 并将模型作为参数传入 + + ```python + gm.save_grad(model) + ``` + + 另一种:在训练开始前,调用 gm.monitor 并将模型作为参数传入。这种方式目前不稳定。 + + ```python + gm.monitor(model) + ``` -## 输出结果 +### 输出结果 **输出目录结构**(level 为 L2) ```bash {output_path} - └── {timestamp} - ├── rank_{rank_id} - │ ├── grad_summary_{step}.csv - │ ├── step_{step} - │ │ ├── {param_name}.pt + ├── rank_{rank_id} + │ ├── grad_summary_{step}.csv + │ ├── step_{step} + │ │ ├── {param_name}.pt ``` + {timestamp}:梯度工具导出数据的时候会在 output_path 下生成一个时间戳目录,然后在这个时间戳目录下输出结果 + rank_{rank_id}:在分布式场景下,会记录卡的 rank_id。非分布式场景下,如果是 cpu 则记录进程号,如果是 cpu/gpu 则记录卡号 @@ -116,6 +133,60 @@ Ascend模型梯度状态检测工具能监控一个模型的梯度数据,并 | Norm | L2norm值 | | Shape | 形状 | +### 梯度相似度比对 + +会根据所导出的权重,分step比对梯度相似度,输出每个权重的梯度相似度和总的梯度相似度。单个权重的梯度相似度为两份方向数据的重合度,总的梯度相似度为每个权重的梯度相似度按元素个数加权. + +#### 前提条件 + +1. 需要两份以相同配置导出的梯度数据。 +2. 两份数据导出时都需要将 config.yaml 的 level 设置为 L2 或者 L3,因为比对功能需要方向数据。 + +#### 使用方式 + + 1. 单卡比对。新写一个 python 脚本,里面调用 grad_tool.grad_comparator 的 GradComparator.compare 函数,传入的前两个参数分别为梯度数据的 rank 层目录,顺序无所谓,第三个参数为输出目录。如下所示: + + ```python + from grad_tool.grad_comparator import GradComparator + GradComparator.compare("需要对比的rank_id级目录", + "需要对比的rank_id级目录", + "比对结果输出目录") + ``` + + 2. 
多卡比对。新写一个 python 脚本,里面调用 grad_tool.grad_comparator 的 GradComparator.compare_distributed 函数,传入的前两个参数分别为两份梯度数据的输出目录(即配置文件里配置的 output_path,其下包含各 rank_{rank_id} 子目录),顺序无所谓,第三个参数为比对结果输出目录。如下所示:
+
+     ```python
+     from grad_tool.grad_comparator import GradComparator
+     GradComparator.compare_distributed("配置文件里写的输出目录",
+                               "配置文件里写的输出目录",
+                               "比对结果输出目录")
+     ```
+
+### 比对结果
+
+**输出目录结构**(多卡比对结果,单卡则没有 rank_{rank_id} 这一级目录)
+
+```bash
+比对结果输出目录
+    ├── rank_{rank_id}
+    │   ├── similarities.csv
+    │   └── similarities_picture
+    │       ├── {param_name}.png
+    │       └── summary_similarities.png
+```
+
+**similarities.csv示例**
+
+![Alt text](img/image-2.png)
+
+这份文件记录了所有权重在每一步的梯度相似度和总的梯度相似度。
+
+**summary_similarities.png示例**
+
+![Alt text](img/image-3.png)
+
+这是梯度相似度随 step 变化的曲线图。
+
 ## 公开接口
 
 ```python
@@ -128,6 +199,16 @@ GradientMonitor.monitor(model)
 | ----------- | ------------------------------------------------------------ | -------- |
 | model       | 设置需要监测的模型                                           | 是       |
 
+```python
+GradientMonitor.save_grad(model)
+```
+
+**参数说明**
+
+| 参数名称 | 说明 | 是否必选 |
+| ----------- | ------------------------------------------------------------ | -------- |
+| model | 设置需要监测的模型 | 是 |
+
 ```python
 GradientMonitor.__init__(config_path)
 ```
@@ -138,5 +219,7 @@ GradientMonitor.__init__(config_path)
 | ----------- | ------------------------------------------------------------ | -------- |
 | config_path | 配置文件路径,需要以.yaml结尾 | 是 |
 
+
+
 # FAQ
diff --git a/debug/accuracy_tools/grad_tool/grad_comparator.py b/debug/accuracy_tools/grad_tool/grad_comparator.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff6302d4fc29d76796cdb533dc6ba3622bcc25a5
--- /dev/null
+++ b/debug/accuracy_tools/grad_tool/grad_comparator.py
@@ -0,0 +1,127 @@
+import os
+import torch
+from tqdm import tqdm
+import matplotlib.pyplot as plt
+from grad_tool.utils import write_csv, path_check, print_info_log, create_directory
+
+
+class GradComparator:
+    @staticmethod
+    def compare_distributed(path1: str, path2: str, output_dir):
+        ranks = GradComparator._get_matched_dirs(path1, path2, "rank")
+        print_info_log(f"the following ranks will be compared: {ranks}")
+        if not ranks:
+            raise Exception("no matched ranks for comparison, please dump data in the same configuration")
+        if not os.path.isdir(output_dir):
+            create_directory(output_dir)
+        for rank in tqdm(ranks, desc="rank"):
+            print_info_log(f"now comparing rank {rank}:")
+            GradComparator.compare(os.path.join(path1, f"rank_{rank}"),
+                                   os.path.join(path2, f"rank_{rank}"),
+                                   os.path.join(output_dir, f"rank_{rank}"))
+
+    @staticmethod
+    def compare(path1: str, path2: str, output_dir):
+        steps = GradComparator._get_matched_dirs(path1, path2, "step")
+        if not steps:
+            raise Exception("no matched steps for comparison, please dump data in the same configuration")
+        similarities = GradComparator._calculate_separated_similarities(path1, path2, steps)
+        if not os.path.isdir(output_dir):
+            create_directory(output_dir)
+        GradComparator._save_similarities(similarities, steps, output_dir)
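+
+    # Illustrative arithmetic (example values, not executed): for direction
+    # tensors t1 = [True, False, True, True] and t2 = [True, True, True, False],
+    # same_count is 2 and total_count is 4, so the per-parameter similarity is
+    # 0.5; the "summary" entry weights every parameter by its element count,
+    # i.e. sum(same_count) / sum(total_count) over all parameters of a step.
+    @staticmethod
+    def _calculate_separated_similarities(path1, path2, steps):
+        similarities = {}
+        print_info_log(f"{len(steps)} steps will be compared")
+        for step in tqdm(steps, desc="calculate similarities (by step)"):
+            pt_files = GradComparator._get_matched_pt_files(path1, path2, step)
+            same_count_summary = 0
+            total_count_summary = 0
+            for pt_file in pt_files:
+                pt1 = os.path.join(path1, f"step_{step}", pt_file)
+                pt2 = os.path.join(path2, f"step_{step}", pt_file)
+                same_count, total_count = GradComparator._calculate_similarity(pt1, pt2)
+                same_count_summary += same_count
+                total_count_summary += 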
total_count
+                param_name = pt_file[:-3]
+                if param_name not in similarities:
+                    similarities[param_name] = []
+                if total_count == 0:
+                    similarities[param_name].append(0)
+                else:
+                    similarities[param_name].append(same_count / total_count)
+            if "summary" not in similarities:
+                similarities["summary"] = []
+            if total_count_summary == 0:
+                similarities["summary"].append(0)
+            else:
+                similarities["summary"].append(same_count_summary / total_count_summary)
+        return similarities
+
+    @staticmethod
+    def _get_matched_dirs(path1: str, path2: str, dir_prefix):
+        path_check(path1, isdir=True)
+        path_check(path2, isdir=True)
+        dirs = []
+        for dirname in os.listdir(path1):
+            # expect names like "rank_0" / "step_3"; anything else is skipped
+            splits = dirname.split('_')
+            if len(splits) != 2 or splits[0] != dir_prefix or not splits[1].isdigit():
+                continue
+
+            folder2 = os.path.join(path2, dirname)
+            if not os.path.isdir(folder2):
+                continue
+            dirs.append(int(splits[1]))
+        dirs = sorted(dirs)
+        return dirs
+
+    @staticmethod
+    def _get_matched_pt_files(path1: str, path2: str, step: int):
+        path1 = os.path.join(path1, f"step_{step}")
+        path2 = os.path.join(path2, f"step_{step}")
+        path_check(path1, isdir=True)
+        path_check(path2, isdir=True)
+        pt_files = []
+        for dirname in os.listdir(path1):
+            splits = dirname.split('.')
+            if len(splits) < 2 or splits[-1] != 'pt':
+                continue
+            folder2 = os.path.join(path2, dirname)
+            if not os.path.exists(folder2):
+                continue
+            pt_files.append(dirname)
+        return sorted(pt_files)
+
+    @staticmethod
+    def _save_similarities(similarities: dict, steps: list, output_dir: str):
+        if not similarities:
+            raise Exception("similarities is empty")
+        for key, value in tqdm(similarities.items(), desc="save similarities (by param)"):
+            if len(value) != len(steps):
+                raise Exception(f"similarities length of {key}:{len(value)} not equal steps:{len(steps)}")
+            plt.plot(steps, value)
+            plt.xlabel('steps')
+            plt.ylabel('similarities')
+            plt.title(f'{key}_similarities')
+            picture_dir = os.path.join(output_dir, "similarities_picture")
+            if not os.path.isdir(picture_dir):
+                create_directory(picture_dir)
+            plt.savefig(os.path.join(picture_dir, f"{key}_similarities.png"))
+            plt.close()
+            head_tuple = tuple(['step'] + [str(step) for step in steps])
+            write_csv(os.path.join(output_dir, "similarities.csv"), [[key] + value], head_tuple)
+
+    @staticmethod
+    def _calculate_similarity(pt_file1: str, pt_file2: str):
+        tensor1 = torch.load(pt_file1)
+        tensor2 = torch.load(pt_file2)
+        if tensor1.shape != tensor2.shape:
+            raise Exception(f"tensor shape is not equal: {pt_file1}, {pt_file2}")
+        if tensor1.dtype != torch.bool:
+            raise Exception(f"tensor type is not bool: {pt_file1}")
+        if tensor2.dtype != torch.bool:
+            raise Exception(f"tensor type is not bool: {pt_file2}")
+        same_count = (tensor1 == tensor2).sum().item()
+        total_count = tensor1.numel()
+        return same_count, total_count
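+
+
+# Minimal usage sketch (paths are illustrative placeholders):
+#     from grad_tool.grad_comparator import GradComparator
+#     GradComparator.compare_distributed("./dump_a", "./dump_b", "./cmp_out")
+# dump_a and dump_b are output_path directories produced by two runs of
+# GradientMonitor with the same config.yaml (level L2 or L3, so that the
+# direction tensors needed for comparison exist).
diff --git a/debug/accuracy_tools/grad_tool/grad_monitor.py b/debug/accuracy_tools/grad_tool/grad_monitor.py
index 1abbc63bc4cda47e488fe7409a82318fc14b54e7..bf1a6da0ba31af41d6a7e43b6b3b4bacba75b4b8 100644
--- a/debug/accuracy_tools/grad_tool/grad_monitor.py
+++ b/debug/accuracy_tools/grad_tool/grad_monitor.py
@@ -1,10 +1,9 @@
 import os
 import torch
-
 from grad_tool.level_adapter import Level, LevelAdapter
 from grad_tool.grad_stat_csv import GradStatCsv
 from grad_tool.utils import get_config, check_numeral_list_ascend, ListCache, data_in_list_target,\
-    write_csv, make_localtime_dir, get_rank_id
+    write_csv, get_rank_id, print_info_log, create_directory, print_warn_log
 
 
 class GradientMonitor: 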
@@ -15,41 +14,53 @@ class GradientMonitor: self._level_adp: Level = LevelAdapter.level_adapter(config.get("level")) self._param_list = config.get('param_list') self._target_ranks = config.get("rank") + print_info_log(f"target rank {self._target_ranks}") self._target_step = config.get("step") + print_info_log(f"target step {self._target_step}") self._bounds = config.get("bounds") if not self._bounds or len(self._bounds) == 0: self._bounds = GradientMonitor.default_bounds check_numeral_list_ascend(self._bounds) - self._output_path = make_localtime_dir(config.get("output_path")) + self._output_path = config.get("output_path") + if not os.path.isdir(self._output_path): + create_directory(self._output_path) + else: + print_warn_log(f"the file in {self._output_path} will be recoverd") self._step = -1 self._list_cache = ListCache() @staticmethod - def hook_fun(param_name, f): + def _hook_fun(param_name, f): def backward_hook(grad): f(param_name, grad) return backward_hook - def model_backward_hook(self, module, gin, gout): + def _rank_in_targets(self): + if not hasattr(self, "_rank"): + raise AttributeError("grad monitor need attribute {_rank}") + return not torch.distributed.is_initialized() or data_in_list_target(getattr(self, "_rank"), self._target_ranks) + + def _model_backward_hook(self, module, gin, gout): + self._step += 1 if not hasattr(self, "_rank"): setattr(self, "_rank", get_rank_id(gout)) - if torch.distributed.is_initialized() and not data_in_list_target(getattr(self, "_rank"), self._target_ranks): + print_info_log(f"rank_{self._rank} exists") + if not self._rank_in_targets(): return self._list_cache.flush() - self._step += 1 if not data_in_list_target(self._step, self._target_step): return - output_path = f'{self._output_path}/rank_{self._rank}/grad_summary_{self._step}.csv' + print_info_log(f"result generate: rank_{self._rank} step_{self._step}") + output_path = os.path.join(self._output_path, f"rank_{getattr(self, '_rank')}", f"grad_summary_{self._step}.csv") write_csv(output_path, [], GradStatCsv.generate_csv_header(level=self._level_adp, bounds=self._bounds)) self._list_cache.set_output_file(output_path) - def save_grad_stat(self, param_name, grad): - if not hasattr(self, "_rank"): - raise AttributeError("grad monitor need attribute {_rank} when save grad stat") - if torch.distributed.is_initialized() and not data_in_list_target(getattr(self, "_rank"), self._target_ranks): + def _save_grad_stat(self, param_name, grad): + if not self._rank_in_targets(): return if not data_in_list_target(self._step, self._target_step): return + print_info_log(f"param result: rank{self._rank} step{self._step} {param_name}") grad_info = GradStatCsv.generate_csv_line( level=self._level_adp, param_name=param_name, @@ -58,10 +69,39 @@ class GradientMonitor: self._list_cache.append(grad_info) self._level_adp.save_grad_direction(param_name, grad, f'{self._output_path}/rank_{self._rank}/step_{self._step}') - def monitor(self, model): - model.register_full_backward_hook(self.model_backward_hook) + last_module = None + for name, module in model.named_modules(): + last_module = module + last_module.register_backward_hook(self._model_backward_hook) for param_name, param in model.named_parameters(): if not data_in_list_target(param_name, self._param_list): continue - param.register_hook(GradientMonitor.hook_fun(param_name, self.save_grad_stat)) \ No newline at end of file + if param is None or param.requires_grad == False: + continue + param.register_hook(GradientMonitor._hook_fun(param_name, 
self._save_grad_stat)) + + def save_grad(self, model): + self._step += 1 + if not hasattr(self, "_rank"): + setattr(self, "_rank", get_rank_id(next(model.parameters()))) + if not self._rank_in_targets(): + return + if not data_in_list_target(self._step, self._target_step): + return + print_info_log(f"save grad rank_{getattr(self, '_rank')} step_{self._step}") + output_path = os.path.join(self._output_path, f"rank_{getattr(self, '_rank')}", f"grad_summary_{self._step}.csv") + write_csv(output_path, [], GradStatCsv.generate_csv_header(level=self._level_adp, bounds=self._bounds)) + self._list_cache.set_output_file(output_path) + for param_name, param in model.named_parameters(): + if not data_in_list_target(param_name, self._param_list): + continue + if param.grad is not None: + grad = param.grad + elif hasattr(param, "main_grad") and param.main_grad is not None: + grad = param.main_grad + else: + continue + self._save_grad_stat(param_name, grad) + print_info_log(f"{param_name} is saved") + self._list_cache.flush() diff --git a/debug/accuracy_tools/grad_tool/img/image-2.png b/debug/accuracy_tools/grad_tool/img/image-2.png new file mode 100644 index 0000000000000000000000000000000000000000..75b6a8e073c52f439602e89b4871813b76ff6357 Binary files /dev/null and b/debug/accuracy_tools/grad_tool/img/image-2.png differ diff --git a/debug/accuracy_tools/grad_tool/img/image-3.png b/debug/accuracy_tools/grad_tool/img/image-3.png new file mode 100644 index 0000000000000000000000000000000000000000..92a91f110aba9448df053471a1a0431588ce8648 Binary files /dev/null and b/debug/accuracy_tools/grad_tool/img/image-3.png differ diff --git a/debug/accuracy_tools/grad_tool/level_adapter.py b/debug/accuracy_tools/grad_tool/level_adapter.py index d27a79e2867ca38fa6b79bdf58f80e37e2271e2a..51e6717d941f3d64fc085f0da39eb9927693e54f 100644 --- a/debug/accuracy_tools/grad_tool/level_adapter.py +++ b/debug/accuracy_tools/grad_tool/level_adapter.py @@ -1,6 +1,7 @@ import os from abc import ABC, abstractmethod import torch +from grad_tool.utils import print_info_log class LevelOps: @@ -29,6 +30,15 @@ class LevelOps: return_list = [x / element_num if element_num != 0 else 0 for x in interval_nums] return return_list + @staticmethod + def save_grad_direction(param_name, grad, save_path): + if not os.path.exists(save_path): + os.makedirs(save_path) + param_grad = torch.Tensor(grad.clone().cpu()) + is_positive = param_grad > 0 + torch.save(is_positive, f'{save_path}/{param_name}.pt') + print_info_log(f'Save {param_name} bool tensor, it has {is_positive.sum()}/{is_positive.numel()} positive elements') + class Level(ABC): @abstractmethod @@ -68,12 +78,18 @@ class Level_1(Level): class Level_2(Level): def save_grad_direction(self, param_name, grad, save_path): - if not os.path.exists(save_path): - os.makedirs(save_path) - param_grad = torch.Tensor(grad.clone().cpu()) - is_positive = param_grad > 0 - torch.save(is_positive, f'{save_path}/{param_name}.pt') - print(f'Save {param_name} bool tensor, it has {is_positive.sum()}/{is_positive.numel()} positive elements') + LevelOps.save_grad_direction(param_name, grad, save_path) + + def count_grad_distribution(self, grad, bounds): + return [] + + def intervals_header(self, bounds): + return [] + + +class Level_3(Level): + def save_grad_direction(self, param_name, grad, save_path): + LevelOps.save_grad_direction(param_name, grad, save_path) def count_grad_distribution(self, grad, bounds): return LevelOps.count_grad_distribution(grad, bounds) @@ -83,7 +99,7 @@ class Level_2(Level): class 
LevelAdapter: - levels = {"L0": Level_0, "L1": Level_1, "L2": Level_2} + levels = {"L0": Level_0, "L1": Level_1, "L2": Level_2, "L3": Level_3} @staticmethod def level_adapter(level): diff --git a/debug/accuracy_tools/grad_tool/test/resources/test_grad_monitor.yaml b/debug/accuracy_tools/grad_tool/test/resources/test_grad_monitor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a0c895e78f2092a0312c08a55c315085a2272ee4 --- /dev/null +++ b/debug/accuracy_tools/grad_tool/test/resources/test_grad_monitor.yaml @@ -0,0 +1,6 @@ +level: L2 +param_list: +rank: +step: +bounds: +output_path: ./output/test_grad_monitor \ No newline at end of file diff --git a/debug/accuracy_tools/grad_tool/test/resources/test_save_grad.yaml b/debug/accuracy_tools/grad_tool/test/resources/test_save_grad.yaml new file mode 100644 index 0000000000000000000000000000000000000000..136803d26e96f4871f2c96137cf731b2b1366009 --- /dev/null +++ b/debug/accuracy_tools/grad_tool/test/resources/test_save_grad.yaml @@ -0,0 +1,6 @@ +level: L2 +param_list: +rank: +step: +bounds: +output_path: ./output/test_save_grad \ No newline at end of file diff --git a/debug/accuracy_tools/grad_tool/test/run_test.sh b/debug/accuracy_tools/grad_tool/test/run_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..ef21e1bb028e6691cbf585fad1fb318e738295c3 --- /dev/null +++ b/debug/accuracy_tools/grad_tool/test/run_test.sh @@ -0,0 +1,36 @@ +#!/bin/bash +CUR_DIR=$(dirname $(readlink -f $0)) +TOP_DIR=${CUR_DIR}/.. +TEST_DIR=${TOP_DIR}/"test" +SRC_DIR=${TOP_DIR}/../ + +clean() { + cd ${TEST_DIR} + + if [ -e ${TEST_DIR}/"report" ]; then + rm -r ${TEST_DIR}/"report" + echo "remove last ut_report successfully." + fi + + if [ -e ${TEST_DIR}/"output" ]; then + rm -r ${TEST_DIR}/"output" + echo "remove last output successfully." 
+ fi + +} + +run_ut() { + export PYTHONPATH=${SRC_DIR}:${PYTHONPATH} + python3 run_ut.py +} + +main() { + clean + if [ "$1"x == "clean"x ]; then + return 0 + fi + + cd ${TEST_DIR} && run_ut +} + +main $@ diff --git a/debug/accuracy_tools/grad_tool/test/run_ut.py b/debug/accuracy_tools/grad_tool/test/run_ut.py new file mode 100644 index 0000000000000000000000000000000000000000..c73949697941d84782c4983aa484c06b1a7cbcc2 --- /dev/null +++ b/debug/accuracy_tools/grad_tool/test/run_ut.py @@ -0,0 +1,41 @@ +import os +import shutil +import subprocess +import sys + +def run_ut(): + cur_dir = os.path.realpath(os.path.dirname(__file__)) + top_dir = os.path.realpath(os.path.dirname(cur_dir)) + ut_path = os.path.join(cur_dir, "ut/") + src_dir = top_dir + report_dir = os.path.join(cur_dir, "report") + + if os.path.exists(report_dir): + shutil.rmtree(report_dir) + + os.makedirs(report_dir) + + cmd = ["python3", "-m", "pytest", ut_path, "--junitxml=" + report_dir + "/final.xml", + "--cov=" + src_dir, "--cov-branch", "--cov-report=xml:" + report_dir + "/coverage.xml"] + + result_ut = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + while result_ut.poll() is None: + line = result_ut.stdout.readline().strip() + if line: + print(line) + + ut_flag = False + if result_ut.returncode == 0: + ut_flag = True + print("run ut successfully.") + else: + print("run ut failed.") + + return ut_flag + +if __name__=="__main__": + if run_ut(): + sys.exit(0) + else: + sys.exit(1) diff --git a/debug/accuracy_tools/grad_tool/test/ut/test_grad_csv.py b/debug/accuracy_tools/grad_tool/test/ut/test_grad_csv.py new file mode 100644 index 0000000000000000000000000000000000000000..a4b6d9dd6aee13b0797a7696c92bc76c1746186b --- /dev/null +++ b/debug/accuracy_tools/grad_tool/test/ut/test_grad_csv.py @@ -0,0 +1,63 @@ +# coding=utf-8 +import unittest +import os +import torch +from grad_tool.grad_stat_csv import GradStatCsv +from grad_tool.level_adapter import LevelAdapter + + +grad_tensor = torch.tensor([[-2, 2], [0.2, 0.3]]) + + +class TestGradCSV(unittest.TestCase): + def test_level_L0_header(self): + self.assertEqual(['param_name', 'MD5', 'max', 'min', 'norm', 'shape'], + GradStatCsv.generate_csv_header(level=LevelAdapter.level_adapter("L0"), bounds=[-1, 0, 1])) + + def test_level_L1_header(self): + self.assertEqual(['param_name', 'MD5', '(-inf, -1]', '(-1, 0]', '(0, 1]', '(1, inf)', '=0', 'max', 'min', 'norm', 'shape'], + GradStatCsv.generate_csv_header(level=LevelAdapter.level_adapter("L1"), bounds=[-1, 0, 1])) + + def test_level_L2_header(self): + self.assertEqual(['param_name', 'MD5', 'max', 'min', 'norm', 'shape'], + GradStatCsv.generate_csv_header(level=LevelAdapter.level_adapter("L2"), bounds=[-1, 0, 1])) + + def test_level_L3_header(self): + self.assertEqual(['param_name', 'MD5', '(-inf, -1]', '(-1, 0]', '(0, 1]', '(1, inf)', '=0', 'max', 'min', 'norm', 'shape'], + GradStatCsv.generate_csv_header(level=LevelAdapter.level_adapter("L3"), bounds=[-1, 0, 1])) + + def test_level_L0_content(self): + generated_csv_line = GradStatCsv.generate_csv_line( + level=LevelAdapter.level_adapter("L0"), + param_name="model.conv2d", + grad=grad_tensor, + bounds=[-1, 0, 1]) + self.assertEqual(['model.conv2d', '678a6c7d9d9716682b56fda097d0936c', 2.0, -2.0, 2.851315498352051, [2, 2]], + generated_csv_line) + + def test_level_L1_content(self): + generated_csv_line = GradStatCsv.generate_csv_line( + level=LevelAdapter.level_adapter("L1"), + param_name="model.conv2d", + grad=grad_tensor, + bounds=[-1, 0, 1]) + 
self.assertEqual(['model.conv2d', '678a6c7d9d9716682b56fda097d0936c', 0.25, 0.0, 0.5, 0.25, 0.0, 2.0, -2.0, 2.851315498352051, [2, 2]], + generated_csv_line) + + def test_level_L2_content(self): + generated_csv_line = GradStatCsv.generate_csv_line( + level=LevelAdapter.level_adapter("L2"), + param_name="model.conv2d", + grad=grad_tensor, + bounds=[-1, 0, 1]) + self.assertEqual(['model.conv2d', '678a6c7d9d9716682b56fda097d0936c', 2.0, -2.0, 2.851315498352051, [2, 2]], + generated_csv_line) + + def test_level_L3_content(self): + generated_csv_line = GradStatCsv.generate_csv_line( + level=LevelAdapter.level_adapter("L3"), + param_name="model.conv2d", + grad=grad_tensor, + bounds=[-1, 0, 1]) + self.assertEqual(['model.conv2d', '678a6c7d9d9716682b56fda097d0936c', 0.25, 0.0, 0.5, 0.25, 0.0, 2.0, -2.0, 2.851315498352051, [2, 2]], + generated_csv_line) diff --git a/debug/accuracy_tools/grad_tool/test/ut/test_grad_monitor.py b/debug/accuracy_tools/grad_tool/test/ut/test_grad_monitor.py new file mode 100644 index 0000000000000000000000000000000000000000..f233997e73e66f46104db6eeff7fa722e83ccbb7 --- /dev/null +++ b/debug/accuracy_tools/grad_tool/test/ut/test_grad_monitor.py @@ -0,0 +1,83 @@ +import os +import random +import unittest +import hashlib +import torch +import numpy as np +import torch.nn as nn +from grad_tool.grad_monitor import GradientMonitor +from grad_tool.grad_comparator import GradComparator + + +def seed_all(seed=1234, mode=False): + random.seed(seed) + os.environ['PYTHONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.use_deterministic_algorithms(mode) + +seed_all() + +base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +inputs = [torch.rand(10, 10) for _ in range(10)] +labels = [torch.randint(0, 5, (10,)) for _ in range(10)] + + +class TestModule(nn.Module): + def __init__(self): + super().__init__() + self.linear = nn.Linear(10, 5) + self.relu = nn.ReLU() + + def forward(self, x): + x1 = self.linear(x) + x2 = self.relu(x1) + return x2 + + +def test_grad_monitor(): + gm = GradientMonitor(os.path.join(base_dir, "resources/test_grad_monitor.yaml")) + loss_fun = nn.CrossEntropyLoss() + test_module = TestModule() + nn.init.constant_(test_module.linear.weight, 1.0) + nn.init.constant_(test_module.linear.bias, 1.0) + gm.monitor(test_module) + optimizer = torch.optim.SGD(test_module.parameters(), lr=1e-2) + for input_data, label in zip(inputs, labels): + output = test_module(input_data) + loss = loss_fun(output, label) + optimizer.zero_grad() + loss.backward() + optimizer.step() + return gm + + +def test_save_grad(): + gm = GradientMonitor(os.path.join(base_dir, "resources/test_save_grad.yaml")) + loss_fun = nn.CrossEntropyLoss() + test_module = TestModule() + nn.init.constant_(test_module.linear.weight, 1.0) + nn.init.constant_(test_module.linear.bias, 1.0) + optimizer = torch.optim.SGD(test_module.parameters(), lr=1e-2) + for input_data, label in zip([x + 0.1 for x in inputs], labels): + output = test_module(input_data) + loss = loss_fun(output, label) + optimizer.zero_grad() + loss.backward() + gm.save_grad(test_module) + optimizer.step() + return gm + + +class TestGradMonitor(unittest.TestCase): + def test_compare(self): + gm1 = test_grad_monitor() + gm2 = test_save_grad() + compare_output_path = os.path.join(os.path.dirname(gm1._output_path), "grad_compare") + GradComparator.compare_distributed(gm1._output_path, gm2._output_path, compare_output_path) + items = os.listdir(compare_output_path) + self.assertEqual(len(items), 1) + with 
open(os.path.join(compare_output_path, items[0], "similarities.csv"), 'r') as f: + data = f.read() + self.assertEqual(hashlib.md5(data.encode("utf-8")).hexdigest(), "20441d98b8c8d14ee6f896ea29d01b14") diff --git a/debug/accuracy_tools/grad_tool/utils.py b/debug/accuracy_tools/grad_tool/utils.py index 4e563d9928112bb95017ba7b8ec7bd281cb0832d..8d0ed36ce30bdb502b26a6c5d7d0c1276ee8cc74 100644 --- a/debug/accuracy_tools/grad_tool/utils.py +++ b/debug/accuracy_tools/grad_tool/utils.py @@ -1,10 +1,11 @@ import os -import time import yaml import torch import pandas as pd from ptdbg_ascend.src.python.ptdbg_ascend.common.file_check_util import FileOpen, create_directory, \ - check_link, FileChecker, FileCheckConst + FileChecker, FileCheckConst +from ptdbg_ascend.src.python.ptdbg_ascend.common.utils import check_file_or_directory_path, print_info_log, \ + print_warn_log class ListCache(list): @@ -21,9 +22,9 @@ class ListCache(list): if len(self) == 0: return if not self._output_file: - print("dumpfile path is not setted") + print_warn_log("dumpfile path is not setted") write_csv(self._output_file, self, []) - print(f"write {len(self)} items to {self._output_file} the {self._dump_count} time") + print_info_log(f"write {len(self)} items to {self._output_file} the {self._dump_count} time") self.clear() def append(self, data): @@ -55,7 +56,7 @@ def write_csv(filepath, content_list, header): def make_file_safety(file_path: str, permission=0o640): if os.path.islink(file_path): - raise RuntimeError("Invalid soft link path: {}".format(file_path)) + raise RuntimeError(f"Invalid soft link path: {file_path}") file_real_path = os.path.realpath(file_path) if os.path.exists(file_real_path): return @@ -63,7 +64,7 @@ def make_file_safety(file_path: str, permission=0o640): if not os.path.exists(parent_path): create_directory(parent_path) if not os.access(parent_path, os.W_OK): - raise PermissionError("The path {} is not writable!".format(parent_path)) + raise PermissionError(f"The path {parent_path} is not writable!") try: os.close(os.open(file_real_path, os.O_WRONLY | os.O_CREAT, permission)) except OSError as e: @@ -82,18 +83,6 @@ def check_numeral_list_ascend(lst): raise Exception("The input list should be ascending") -def localtime_str(): - return time.strftime("%Y%m%d%H%M%S", time.localtime()) - - -def make_localtime_dir(path): - if not os.path.isdir(path): - create_directory(path) - localtime_dir = os.path.join(path, localtime_str()) - create_directory(localtime_dir) - return localtime_dir - - def get_tensor_rank(x): if isinstance(x, (list, tuple)): if len(x) > 0: @@ -115,3 +104,7 @@ def get_rank_id(tensor): if rank is not None: return rank return os.getpid() + + +def path_check(path, isdir=False): + check_file_or_directory_path(path, isdir) diff --git a/debug/accuracy_tools/ptdbg_ascend/CMakeLists.txt b/debug/accuracy_tools/ptdbg_ascend/CMakeLists.txt index 258a5dbe9292c10aa04bbaece447ee375897be52..582dd0c8e5a34409cf33e5563738f5292eeeafea 100644 --- a/debug/accuracy_tools/ptdbg_ascend/CMakeLists.txt +++ b/debug/accuracy_tools/ptdbg_ascend/CMakeLists.txt @@ -16,4 +16,4 @@ add_custom_target(ptdbg_ascend ALL VERBATIM ) -install(CODE "execute_process(COMMAND ${PYTHON_BIN_PATH} -m pip install ${CMAKE_BINARY_DIR}/ptdbg_ascend/dist/ptdbg_ascend-5.0.T3-py3-none-any.whl --upgrade)") +install(CODE "execute_process(COMMAND ${PYTHON_BIN_PATH} -m pip install ${CMAKE_BINARY_DIR}/ptdbg_ascend/dist/ptdbg_ascend-5.0.T4-py3-none-any.whl --upgrade)") diff --git a/debug/accuracy_tools/ptdbg_ascend/README.md 
b/debug/accuracy_tools/ptdbg_ascend/README.md index 103e40d2f009c2f45cf9e587bd4728aa65b7902a..3f71267650fd8197d043a48923a8e1e65a888d2c 100644 --- a/debug/accuracy_tools/ptdbg_ascend/README.md +++ b/debug/accuracy_tools/ptdbg_ascend/README.md @@ -10,6 +10,7 @@ | ptdbg_ascend版本 | 发布日期 | 支持PyTorch版本 | 下载链接 | 参考指南 | 校验码 | | ---------------- | ---------- | -------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | + | 5.0.T4 | 2024-03-15 | 1.11.0/2.0/2.1 | [ptdbg_ascend-v5.0.T4-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/package/ptdbg_ascend/5.0/ptdbg_ascend-5.0.T4-py3-none-any.whl) | [ptdbg_ascend精度工具功能说明_v5.0.T4](doc/ptdbg_ascend精度工具功能说明_v5.0.T4.md) | e350da21be8836dad7db4e7bc797f7aae93337b7fc743ab195595516b6251970 | | 5.0.T3 | 2024-02-23 | 1.11.0/2.0/2.1 | [ptdbg_ascend-v5.0.T3-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/package/ptdbg_ascend/5.0/ptdbg_ascend-5.0.T3-py3-none-any.whl) | [ptdbg_ascend精度工具功能说明_v5.0.T3](doc/ptdbg_ascend精度工具功能说明_v5.0.T3.md) | 6cbc38046eb0eaefdf1e698db2a8d9710e79b26b835f4c72fd399fee5817504c | | 5.0.T2 | 2024-02-07 | 1.11.0/2.0/2.1 | [ptdbg_ascend-v5.0.T2-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/package/ptdbg_ascend/5.0/ptdbg_ascend-5.0.T2-py3-none-any.whl) | [ptdbg_ascend精度工具功能说明_v5.0.T2](doc/ptdbg_ascend精度工具功能说明_v5.0.T2.md) | 088173a5815070ec4d0b0b9391c823793e11e8cbd9751ac58ceaf02b004ac1e5 | | 5.0.T1 | 2024-01-24 | 1.11.0/2.0/2.1 | [ptdbg_ascend-v5.0.T1-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/package/ptdbg_ascend/5.0/ptdbg_ascend-5.0.T1-py3-none-any.whl) | [ptdbg_ascend精度工具功能说明_v5.0.T1](doc/ptdbg_ascend精度工具功能说明_v5.0.T1.md) | ba9aee303d4342889c1988026be37526dedf25c2de64ad12e10ee2a02b7112bd | @@ -24,7 +25,7 @@ 2. 
进入whl包所在目录,执行如下命令。 - ``` + ```bash sha256sum {name}.whl ``` @@ -32,7 +33,7 @@ 若回显呈现对应版本whl包一致的**校验码**,则表示下载了正确的ptdbg_ascend精度工具whl安装包。示例如下: - ``` + ```bash sha256sum ptdbg_ascend-4.0-py3-none-any.whl ba7ff7a1acffb1a2fab02fea76b6f957b2868bc6b66d72365622f6a8950406c6 *ptdbg_ascend-4.0-py3-none-any.whl ``` @@ -120,6 +121,7 @@ ptdbg_ascend精度工具的安装方式包括:**下载whl包安装**和**源 | ptdbg_ascend版本 | 发布日期 | 支持PyTorch版本 | 下载链接 | 参考指南 | 校验码 | | ---------------- | ---------- | -------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | + | 5.0.T4 | 2024-03-15 | 1.11.0/2.0/2.1 | [ptdbg_ascend-v5.0.T4-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/package/ptdbg_ascend/5.0/ptdbg_ascend-5.0.T4-py3-none-any.whl) | [ptdbg_ascend精度工具功能说明_v5.0.T4](doc/ptdbg_ascend精度工具功能说明_v5.0.T4.md) | e350da21be8836dad7db4e7bc797f7aae93337b7fc743ab195595516b6251970 | | 5.0.T3 | 2024-02-23 | 1.11.0/2.0/2.1 | [ptdbg_ascend-v5.0.T3-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/package/ptdbg_ascend/5.0/ptdbg_ascend-5.0.T3-py3-none-any.whl) | [ptdbg_ascend精度工具功能说明_v5.0.T3](doc/ptdbg_ascend精度工具功能说明_v5.0.T3.md) | 6cbc38046eb0eaefdf1e698db2a8d9710e79b26b835f4c72fd399fee5817504c | | 5.0.T2 | 2024-02-07 | 1.11.0/2.0/2.1 | [ptdbg_ascend-v5.0.T2-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/package/ptdbg_ascend/5.0/ptdbg_ascend-5.0.T2-py3-none-any.whl) | [ptdbg_ascend精度工具功能说明_v5.0.T2](doc/ptdbg_ascend精度工具功能说明_v5.0.T2.md) | 088173a5815070ec4d0b0b9391c823793e11e8cbd9751ac58ceaf02b004ac1e5 | | 5.0.T1 | 2024-01-24 | 1.11.0/2.0/2.1 | [ptdbg_ascend-v5.0.T1-py3-none-any.whl](https://ptdbg.obs.myhuaweicloud.com/package/ptdbg_ascend/5.0/ptdbg_ascend-5.0.T1-py3-none-any.whl) | [ptdbg_ascend精度工具功能说明_v5.0.T1](doc/ptdbg_ascend精度工具功能说明_v5.0.T1.md) | ba9aee303d4342889c1988026be37526dedf25c2de64ad12e10ee2a02b7112bd | diff --git a/debug/accuracy_tools/ptdbg_ascend/RELEASE.md b/debug/accuracy_tools/ptdbg_ascend/RELEASE.md index 1917bec65a1f79b51057a89c49056f367ae96ba7..f4725220f1e418343f4f3424446a3b1b732714d4 100644 --- a/debug/accuracy_tools/ptdbg_ascend/RELEASE.md +++ b/debug/accuracy_tools/ptdbg_ascend/RELEASE.md @@ -1,4 +1,4 @@ -# Release 5.0.T3 +# Release 5.0.T4 This is the initial release of Pytorch precision compare tools which was designed by the researchers and engineers in Huawei Technologies Co.,Ltd. \ No newline at end of file diff --git a/debug/accuracy_tools/ptdbg_ascend/__init__.py b/debug/accuracy_tools/ptdbg_ascend/__init__.py index 8400fd5ecd1246eaee795cebfccfacc80a94f08c..92fb62c7566d9b3da72e9dfb87486dc361a3919e 100644 --- a/debug/accuracy_tools/ptdbg_ascend/__init__.py +++ b/debug/accuracy_tools/ptdbg_ascend/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, Huawei Technologies Co., Ltd. +# Copyright (c) 2023-2024, Huawei Technologies Co., Ltd. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,3 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +import torch +from ptdbg_ascend.src.python.ptdbg_ascend import * diff --git a/debug/accuracy_tools/ptdbg_ascend/configure.py b/debug/accuracy_tools/ptdbg_ascend/configure.py index b914b32e9f9ffbe0028dbd05184970c38d388b9d..7a3f87c08ef8973715ac016a312db1fffef4c359 100644 --- a/debug/accuracy_tools/ptdbg_ascend/configure.py +++ b/debug/accuracy_tools/ptdbg_ascend/configure.py @@ -50,7 +50,7 @@ def setup_python(env_path): """Get python install path.""" default_python_bin_path = sys.executable ask_python_bin_path = ('Please specify the location of python with valid ' - 'pytorch 1.8/1.11/2.0/2.1 site-packages installed. [Default ' + 'pytorch 1.11/2.0/2.1 site-packages installed. [Default ' 'is %s]\n(You can make this quiet by set env ' '[ADAPTER_TARGET_PYTHON_PATH]): ') % default_python_bin_path custom_python_bin_path = env_path @@ -82,7 +82,7 @@ def setup_python(env_path): (not compile_args[0].startswith(_PYTORCH_VERSION_2_0)) and \ (not compile_args[0].startswith(_PYTORCH_VERSION_2_1)): - print('Currently supported Pytorch version is %s/%s, we got %s.' - % (_PYTORCH_VERSION_1_8, _PYTORCH_VERSION_1_11, _PYTORCH_VERSION_2_0, _PYTORCH_VERSION_2_1, compile_args[0])) + print('Currently supported Pytorch version is %s/%s/%s, we got %s.' + % (_PYTORCH_VERSION_1_11, _PYTORCH_VERSION_2_0, _PYTORCH_VERSION_2_1, compile_args[0])) continue except subprocess.CalledProcessError: print('Pytorch is not installed or does not work properly.') diff --git "a/debug/accuracy_tools/ptdbg_ascend/doc/ptdbg_ascend\347\262\276\345\272\246\345\267\245\345\205\267\345\212\237\350\203\275\350\257\264\346\230\216_v5.0.T3.md" "b/debug/accuracy_tools/ptdbg_ascend/doc/ptdbg_ascend\347\262\276\345\272\246\345\267\245\345\205\267\345\212\237\350\203\275\350\257\264\346\230\216_v5.0.T3.md" index 7cfed0192c27fd96839e40d17173c8bc242642f2..5385d74583888bd98b2f8aaf778e100d24cba5b2 100644 --- "a/debug/accuracy_tools/ptdbg_ascend/doc/ptdbg_ascend\347\262\276\345\272\246\345\267\245\345\205\267\345\212\237\350\203\275\350\257\264\346\230\216_v5.0.T3.md" +++ "b/debug/accuracy_tools/ptdbg_ascend/doc/ptdbg_ascend\347\262\276\345\272\246\345\267\245\345\205\267\345\212\237\350\203\275\350\257\264\346\230\216_v5.0.T3.md" @@ -347,7 +347,7 @@ PyTorch训练场景的精度问题分析建议参考以下思路进行精度比 ```python from ptdbg_ascend import * - compare_distributed('./npu_dump/ptdbg_dump_v4.0', './gpu_dump/ptdbg_dump_v4.0', './output') + compare_distributed('./npu_dump/ptdbg_dump_v4.0/step0', './gpu_dump/ptdbg_dump_v4.0/step0', './output') ``` 2. 
执行比对: @@ -1754,8 +1754,8 @@ compare_distributed(npu_dump_dir, bench_dump_dir, output_path, **kwargs) | 参数名 | 说明 | 是否必选 | | -------------- | ------------------------------------------------------------ | -------- | -| npu_dump_dir | 配置NPU环境下的dump目录。参数示例:'./npu_dump/ptdbg_dump_v4.0'。register_hook方式可通过set_dump_path函数的dump_tag参数修改该目录名称。 | 是 | -| bench_dump_dir | 配置CPU、GPU或NPU环境下的dump目录。参数示例:'./gpu_dump/ptdbg_dump_v4.0'。register_hook方式可通过set_dump_path函数的dump_tag参数修改该目录名称。 | 是 | +| npu_dump_dir | 配置NPU环境下的dump目录。dump数据目录须指定到step级。参数示例:'./npu_dump/ptdbg_dump_v4.0/step0'。register_hook方式可通过set_dump_path函数的dump_tag参数修改该目录名称。 | 是 | +| bench_dump_dir | 配置CPU、GPU或NPU环境下的dump目录。参数示例:'./gpu_dump/ptdbg_dump_v4.0/step0'。register_hook方式可通过set_dump_path函数的dump_tag参数修改该目录名称。 | 是 | | output_path | 配置比对结果csv文件存盘目录。需要预先创建output_path目录。参数示例:'./output'。文件名称基于时间戳自动生成,格式为:`compare_result_rank{npu_ID}-rank{cpu/gpu/npu_ID}_{timestamp}.csv`。 | 是 | | **kwargs | 支持compare的所有可选参数。 | 否 | @@ -1765,7 +1765,7 @@ compare_distributed(npu_dump_dir, bench_dump_dir, output_path, **kwargs) ```python from ptdbg_ascend import * -compare_distributed('./npu_dump/ptdbg_dump_v4.0', './gpu_dump/ptdbg_dump_v4.0', './output') +compare_distributed('./npu_dump/ptdbg_dump_v4.0/step0', './gpu_dump/ptdbg_dump_v4.0/step0', './output') ``` ### compare diff --git "a/debug/accuracy_tools/ptdbg_ascend/doc/ptdbg_ascend\347\262\276\345\272\246\345\267\245\345\205\267\345\212\237\350\203\275\350\257\264\346\230\216_v5.0.T4.md" "b/debug/accuracy_tools/ptdbg_ascend/doc/ptdbg_ascend\347\262\276\345\272\246\345\267\245\345\205\267\345\212\237\350\203\275\350\257\264\346\230\216_v5.0.T4.md" new file mode 100644 index 0000000000000000000000000000000000000000..aa88541c3ed65ad8f31a333f4a759b46d465ad82 --- /dev/null +++ "b/debug/accuracy_tools/ptdbg_ascend/doc/ptdbg_ascend\347\262\276\345\272\246\345\267\245\345\205\267\345\212\237\350\203\275\350\257\264\346\230\216_v5.0.T4.md" @@ -0,0 +1,2178 @@ +# **PyTorch精度工具使用指南** + +本文主要介绍PyTorch精度工具ptdbg_ascend的使用以及精度比对场景示例。 + +ptdbg_ascend工具的原理及安装请参见《[PyTorch精度工具](https://gitee.com/ascend/att/blob/master/debug/accuracy_tools/ptdbg_ascend/README.md)》。 + +ptdbg_ascend工具主要支持PyTorch API精度数据dump、溢出检测、精度比对以及parse数据解析功能。其中dump和溢出检测功能支持使用debugger和register_hook方式进行精度数据的dump和溢出检测,推荐使用debugger方式。 + +## PyTorch精度比对总体流程 + +1. 准备CPU或GPU训练工程。 + +2. 在环境下安装ptdbg_ascend工具。 + +3. 在训练脚本内插入ptdbg_ascend工具dump接口。 + +4. 执行训练dump数据。 + +5. 将CPU或GPU训练工程迁移为NPU训练工程。 + + 请参见《[PyTorch模型迁移和训练指南](https://www.hiascend.com/document/detail/zh/canncommercial/63RC1/modeldevpt/ptmigr/ptmigr_0001.html)》。 + +6. 在NPU环境下安装ptdbg_ascend工具。 + +7. 在NPU训练脚本内插入ptdbg_ascend工具dump接口。 + +8. NPU环境下执行训练dump数据。 + +9. 创建并配置精度比对脚本,例如compare.py。 + +10. 执行CPU或GPU dump与NPU dump数据的精度比对。 + +11. 比对结果分析。 + +## 快速入门(debugger方式) + +本章节主要介绍通过ptdbg_ascend工具进行精度比对和分析,主要使用“**debugger方式dump和溢出检测**”和“**CPU或GPU与NPU精度数据比对**”章节中介绍的ptdbg_ascend工具接口。 + +### 单卡场景精度比对 + +**精度分析建议** + +PyTorch训练场景的精度问题分析建议参考以下思路进行精度比对和比对结果分析: + +1. 整网比对:dump整网数据并进行精度比对,初步定位异常范围。 +2. 缩小范围:根据Accuracy Reached or Not找出不符合精度标准的API。 +3. 范围比对:对不符合精度标准的API重新dump详细信息。 +4. 分析原因并优化:分析API精度不符合标准的原因并进行优化调整。 +5. 整网比对:重新进行整网比对,判断优化后的API是否已符合精度标准以及是否出现新的精度问题。 +6. 重复1~5步,直到不存在精度问题为止。 + +**精度分析示例** + +1. 
dump整网数据。 + + 分别dump CPU或GPU以及NPU数据,在PyTorch训练脚本插入dump接口,示例代码如下(下面以NPU为例,CPU或GPU dump基本相同): + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./npu_dump", hook_name="dump", step=[0]) + debugger.configure_hook(mode="api_stack") + # 请勿将以上初始化流程插入到循环代码中 + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + +2. 比对整网数据。 + + 第1步中的NPU dump数据目录为npu_dump,假设GPU dump数据目录为gpu_dump;dump将生成pkl数据文件api_stack_dump.pkl和npy数据目录api_stack_dump。 + + 创建并配置精度比对脚本,以创建compare.py为例,示例代码如下: + + ```python + from ptdbg_ascend import * + dump_result_param={ + "npu_pkl_path": "./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl", + "bench_pkl_path": "./gpu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl", + "npu_dump_data_dir": "./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump", + "bench_dump_data_dir": "./gpu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump", + "is_print_compare_log": True + } + compare(dump_result_param, "./output", stack_mode=True) + ``` + + 执行比对: + + ```bash + python3 compare.py + ``` + + 在output目录下生成结果文件,包括:`compare_result_{timestamp}.csv`和`advisor_{timestamp}.txt` + +3. 找出存在问题的API。 + + 1. 根据`advisor_{timestamp}.txt`或打屏信息的提示,可找到存在精度问题的算子(Suspect Nodes)和专家建议(Expert Advice) + + ![auto_analyze_log](img/auto_analyze_log.png) + + 2. 根据第2步结果文件`compare_result_{timestamp}.csv`中的Accuracy Reached or Not字段显示为NO的API,针对该API执行后续比对操作,分析该API存在的精度问题(不达标API的筛选可借助本节末尾的示意脚本)。 + +4. (可选)提取指定API的堆栈信息和dump数据统计信息。 + + 通过parse接口可以清晰地显示特定API的堆栈信息和dump数据统计信息,结合堆栈信息分析代码中可能存在的精度问题。 + + 创建并配置提取脚本,以创建parse.py为例,示例代码如下: + + ```python + from ptdbg_ascend import * + + # 提取dump信息中第1次调用的API:Torch_batch_normal的堆栈信息及数据统计信息 + parse("./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl", "Torch_batch_normal_1_forward") + ``` + + 执行提取: + + ```bash + python3 parse.py + ``` + + + +5. (可选)指定API对其底层ACL数据进行dump。 + + - dump指定前向API的ACL级别数据 + + ```python + debugger = PrecisionDebugger(dump_path="./npu_dump", hook_name="dump", step=[0]) + debugger.configure_hook(mode="acl", scope=["Tensor_permute_1_forward"], acl_config='./dump.json') + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + + - dump指定反向API的ACL级别数据 + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./npu_dump", hook_name="dump", step=[0]) + # dump指定反向API的ACL级别数据、bool和整型的tensor以及浮点、bool和整型的标量 + debugger.configure_hook(mode="acl", scope=["Functional_conv2d_1_backward"], acl_config="./dump.json", backward_input=["./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump/Functional_conv2d_1_backward_input.0.npy"]) + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + +6. (可选)重新比对。 + + 根据第4或5步的dump数据重新配置compare.py并执行比对,可以对单API模型进行问题复现。 + +**注意事项** + +* dump_mode="acl"场景下,会增加npu的内存消耗,请谨慎开启。 +* 部分API存在调用嵌套关系,比如functional.batch_norm实际调用torch.batch_norm,该场景会影响acl init初始化多次,导致功能异常。
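+
+上述第3步中筛选Accuracy Reached or Not字段为No的API,可借助如下示意脚本完成(假设环境已安装pandas;文件名中的{timestamp}与列名请以实际生成的比对结果csv为准):
+
+```python
+# 筛选比对结果中精度不达标API的示意脚本(列名以实际结果文件为准)
+import pandas as pd
+
+df = pd.read_csv("./output/compare_result_{timestamp}.csv")  # 替换为实际生成的文件名
+failed = df[df["Accuracy Reached or Not"].astype(str).str.upper() == "NO"]
+print(failed[["NPU Name", "Bench Name"]])
+```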
### 溢出检测场景 + +溢出检测是针对NPU的PyTorch API,检测是否存在溢出的情况。当前仅支持识别aicore浮点溢出。 + +溢出检测原理:针对溢出阶段,开启acl dump模式,重新对溢出阶段执行,落盘数据。 + +建议按照如下步骤操作: + +1. 在NPU环境下安装ptdbg_ascend工具。 + +2. 在NPU训练脚本内插入ptdbg_ascend工具溢出检测接口。 + + - 示例1:全量溢出检测 + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./overflow_dump", hook_name="overflow_check", step=[0]) + debugger.configure_hook(overflow_nums=-1) + # 请勿将以上初始化流程插入到循环代码中 + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + + 多卡使用时各卡单独计算溢出次数。 + + - 示例2:dump指定前向API的ACL级别溢出数据 + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./overflow_dump", hook_name="overflow_check", step=[0]) + debugger.configure_hook(mode="acl", acl_config="./dump.json") + # 请勿将以上初始化流程插入到循环代码中 + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + + - 示例3:dump指定反向API的ACL级别的溢出数据 + + 1. 进行全量溢出检测 + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./overflow_dump", hook_name="overflow_check", step=[0]) + debugger.configure_hook(overflow_nums=-1) + # 请勿将以上初始化流程插入到循环代码中 + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + + + + 2. dump指定反向API的ACL级别的溢出数据 + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./overflow_dump", hook_name="dump", step=[0]) + debugger.configure_hook(mode="acl", scope=["Functional_conv2d_1_backward"], acl_config="./dump.json", backward_input=["./overflow_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump/Functional_conv2d_1_backward_input.0.npy"]) + # 请勿将以上初始化流程插入到循环代码中 + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + + 针对前向溢出API,可以通过overflow_nums配置允许的溢出次数,并将每次溢出API的全部ACL数据dump下来,到达指定溢出次数后停止,停止后会看到堆栈打印包含如下字段。 + + ```bash + ValueError: [overflow xxx times]: dump file is saved in 'xxxxx.pkl'. + ``` + + 其中xxx times为用户设置的次数,xxxxx.pkl为文件生成路径。 + +3. NPU环境下执行训练dump溢出数据。 + + 针对输入正常但输出存在溢出的API,会在训练执行目录下将溢出的API信息dump并保存为`forward_info_{pid}.json`和`backward_info_{pid}.json`,通过[Ascend模型精度预检工具](https://gitee.com/ascend/att/tree/master/debug/accuracy_tools/api_accuracy_checker)对json文件进行解析,输出溢出API为正常溢出还是非正常溢出,从而帮助用户快速判断。 + + 精度预检工具执行命令如下: + + ```bash + # 下载att代码仓后执行如下命令 + export PYTHONPATH=$PYTHONPATH:$ATT_HOME/debug/accuracy_tools/ + cd $ATT_HOME/debug/accuracy_tools/api_accuracy_checker/run_ut + python run_overflow_check.py -forward ./forward_info_0.json + ``` + + 反向过程溢出的API暂不支持精度预检功能。 + + 当重复执行溢出检测dump操作时,需要删除上一次dump目录下的溢出检测dump数据,否则将因重名而报错。 + +**注意事项** + +* dump_mode="acl"场景下,会增加npu的内存消耗,请谨慎开启。 +* 部分API存在调用嵌套关系,比如functional.batch_norm实际调用torch.batch_norm,该场景会影响acl init初始化多次,导致功能异常。 +* 混合精度动态loss scale场景下,正常训练会有"Gradient overflow. 
Skipping step"日志,添加溢出检测后日志消失,可以通过设置环境变量export OVERFLOW_DEBUG_MODE_ENABLE=1,并将register_hook位置调整到amp.initialize之前解决。此功能需要cann包配套支持,不支持版本执行报错EZ3003。 + +## 场景化示例 + +本章节主要介绍通过ptdbg_ascend工具进行精度比对和分析,主要使用“**CPU或GPU及NPU精度数据dump**”和“**CPU或GPU与NPU精度数据比对**”章节中介绍的ptdbg_ascend工具接口。 + +### 多卡场景精度比对 + +精度工具支持多卡场景的精度比对,多卡场景的dump步骤与单卡场景完全一致,请参见“**单卡场景精度比对**”章节,不同的是多卡数据精度比对时需要使用“compare_distributed”函数进行比对。 + +**大模型场景下dump推荐使用debugger方式的手动模式。** + +如下示例: + +说明:多机多卡场景需要每个设备单独执行比对操作。 + +假设NPU dump npy数据目录为npu_dump/ptdbg_dump_v4.0,GPU dump npy数据目录为gpu_dump/ptdbg_dump_v4.0。 + +1. 创建比对脚本,例如compare_distributed.py,拷贝如下代码。 + + ```python + from ptdbg_ascend import * + compare_distributed('./npu_dump/ptdbg_dump_v4.0/step0', './gpu_dump/ptdbg_dump_v4.0/step0', './output') + ``` + + dump数据目录须指定到step级。 + +2. 执行比对: + + ```bash + python3 compare_distributed.py + ``` + +两次运行须用相同数量的卡,传入`compare_distributed`的两个文件夹下须有相同个数的rank文件夹,且不包含其他无关文件,否则将无法比对。 + +**多卡set_dump_path注意事项** + +多卡一般为多进程,须保证每个进程都正确调用PrecisionDebugger或set_dump_path,或把PrecisionDebugger或set_dump_path插入到import语句后,如: + +```python +from ptdbg_ascend import * +debugger = PrecisionDebugger(dump_path="./npu_dump", hook_name="dump", step=[0]) +``` + +或 + +```python +from ptdbg_ascend import * +seed_all() +set_dump_path('./dump_resnet') +``` + +如此可保证set_dump_path在每个进程都被调用。 + +**多卡register_hook注意事项** + +register_hook需要在set_dump_path之后调用,也需要在每个进程上被调用,建议在搬运模型数据到卡之后调用。识别方法如下: + +- 找到训练代码中遍历epoch的for循环或遍历数据集的for循环,把register_hook放到循环开始前即可。 +- 找到训练代码中调用DDP或者DistributedDataParallel的代码行,把register_hook放到该代码行所在的代码块之后。 +- 若代码中均无以上两种情况,需要保证register_hook在模型定义之后插入,并配置rank参数。rank参数获取rank_id请参见“**[rank_id获取方法](https://gitee.com/ascend/att/blob/master/debug/accuracy_tools/ptdbg_ascend/doc/rank_id获取方法.md)**”。
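+
+结合上述识别方法,以下为register_hook在DDP场景下插入位置的示意片段(假设通过环境变量获取local_rank,Net为示意模型,其余细节以实际训练脚本为准):
+
+```python
+# register_hook插入位置示意:在模型搬运到卡并完成DDP封装之后、迭代循环开始之前注册
+import os
+import torch
+from ptdbg_ascend import *
+
+seed_all()
+set_dump_path("./npu_dump", dump_tag='ddp_dump')
+
+local_rank = int(os.environ.get("LOCAL_RANK", 0))
+model = Net().npu()  # Net为示意的nn.Module子类
+model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])
+register_hook(model, acc_cmp_dump)  # DDP封装完成后、循环开始前注册dump钩子
+```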
### NPU vs NPU精度比对 + +NPU vs NPU场景,针对的是同一模型进行迭代(模型、API版本升级或设备硬件升级)时存在的精度下降问题,通过对比相同模型在迭代前后版本的API计算数值进行问题定位。 + +一般情况下迭代涉及NPU自定义算子,因此,可以仅dump NPU自定义算子进行比对。比对精度问题分析请参见“**单卡场景精度比对**”章节。 + +工具当前支持dump NPU自定义算子如下: + +| 序号 | NPU自定义算子 | +| :--- | ----------------------------------------------- | +| 1 | torch_npu.one_ | +| 2 | torch_npu.npu_sort_v2 | +| 3 | torch_npu.npu_transpose | +| 4 | torch_npu.npu_broadcast | +| 5 | torch_npu.npu_dtype_cast | +| 6 | torch_npu.empty_with_format | +| 7 | torch_npu.npu_one_hot | +| 8 | torch_npu.npu_stride_add | +| 9 | torch_npu.npu_ps_roi_pooling | +| 10 | torch_npu.npu_roi_align | +| 11 | torch_npu.npu_nms_v4 | +| 12 | torch_npu.npu_iou | +| 13 | torch_npu.npu_nms_with_mask | +| 14 | torch_npu.npu_pad | +| 15 | torch_npu.npu_bounding_box_encode | +| 16 | torch_npu.npu_bounding_box_decode | +| 17 | torch_npu.npu_batch_nms | +| 18 | torch_npu.npu_slice | +| 19 | torch_npu._npu_dropout | +| 20 | torch_npu.npu_indexing | +| 21 | torch_npu.npu_ifmr | +| 22 | torch_npu.npu_max | +| 23 | torch_npu.npu_scatter | +| 24 | torch_npu.npu_layer_norm_eval | +| 25 | torch_npu.npu_alloc_float_status | +| 26 | torch_npu.npu_confusion_transpose | +| 27 | torch_npu.npu_bmmV2 | +| 28 | torch_npu.fast_gelu | +| 29 | torch_npu.npu_sub_sample | +| 30 | torch_npu.npu_deformable_conv2d | +| 31 | torch_npu.npu_mish | +| 32 | torch_npu.npu_anchor_response_flags | +| 33 | torch_npu.npu_yolo_boxes_encode | +| 34 | torch_npu.npu_grid_assign_positive | +| 35 | torch_npu.npu_normalize_batch | +| 36 | torch_npu.npu_masked_fill_range | +| 37 | torch_npu.npu_linear | +| 38 | torch_npu.npu_bert_apply_adam | +| 39 | torch_npu.npu_giou | +| 40 | torch_npu.npu_ciou | +| 41 | torch_npu.npu_diou | +| 42 | torch_npu.npu_sign_bits_pack | +| 43 | torch_npu.npu_sign_bits_unpack | +| 44 | torch_npu.npu_flash_attention | +| 45 | torch_npu.npu_scaled_masked_softmax | +| 46 | torch_npu.npu_rotary_mul | +| 47 | torch_npu.npu_roi_align | +| 48 | torch_npu.npu_roi_alignbk | +| 49 | torch_npu.npu_ptiou | +| 50 | torch_npu.npu_fusion_attention | +| 51 | torch_npu.npu_dropout_with_add_softmax | +| 52 | torch_npu.npu_random_choice_with_mask | +| 53 | torch_npu.npu_rotated_iou | +| 54 | torch_npu.npu_conv2d | +| 55 | torch_npu.npu_conv3d | +| 56 | torch_npu.npu_softmax_cross_entropy_with_logits | +| 57 | torch_npu.npu_all_gather_base_mm | +| 58 | torch_npu.npu_swiglu | +| 59 | torch_npu.npu_rms_norm | +| 60 | torch_npu.npu_mm_reduce_scatter_base | +| 61 | torch_npu.npu_mm_all_reduce_base | +| 62 | torch_npu.npu_conv_transpose2d | +| 63 | torch_npu.npu_convolution | +| 64 | torch_npu.npu_convolution_transpose | +| 65 | torch_npu.npu_min | +| 66 | torch_npu.npu_nms_rotated | +| 67 | torch_npu.npu_reshape | +| 68 | torch_npu.npu_rotated_box_decode | +| 69 | torch_npu.npu_rotated_box_encode | +| 70 | torch_npu.npu_rotated_overlaps | +| 71 | torch_npu.npu_silu | +| 72 | torch_npu.npu_fused_attention_score | +| 73 | torch_npu.npu_multi_head_attention | +| 74 | torch_npu.npu_gru | +| 75 | torch_npu.npu_incre_flash_attention | +| 76 | torch_npu.npu_prompt_flash_attention | +| 77 | torch_npu.npu_lstm | +| 78 | torch_npu.npu_apply_adam | + +### 通信API的数据dump + +通信类API数据可以使用全量dump方式获取,若只dump通信类API数据,可以使用如下示例: + +```python +debugger.configure_hook(mode="api_list", api_list=["distributed"]) +``` + +或 + +```python +set_dump_switch("ON", mode="api_list", api_list=["distributed"]) +``` + +通信类API支持列表: + +| 序号 | Distributed | +| :--- | -------------------- | +| 1 | send | +| 2 | recv | +| 3 | broadcast | +| 4 | all_reduce | +| 5 | reduce | +| 6 | all_gather | +| 7 | gather | +| 8 | isend | +| 9 | irecv | +| 10 | scatter | +| 11 | reduce_scatter | +| 12 | _reduce_scatter_base | +| 13 | _all_gather_base |
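+
+在上述一行配置的基础上,以下为debugger方式仅dump通信类API数据的完整示意片段(训练脚本的其余部分以实际代码为准):
+
+```python
+# 仅dump通信类API数据的示意:mode="api_list"且api_list=["distributed"]
+from ptdbg_ascend import *
+
+debugger = PrecisionDebugger(dump_path="./comm_dump", hook_name="dump", step=[0])
+debugger.configure_hook(mode="api_list", api_list=["distributed"])
+
+debugger.start()
+# 包含torch.distributed.all_reduce等通信操作的代码片段
+debugger.stop()
+debugger.step()
+```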
### 单卡场景精度比对(register_hook方式) + +**精度分析建议** + +PyTorch训练场景的精度问题分析建议参考以下思路进行精度比对和比对结果分析: + +1. 整网比对:dump整网数据并进行精度比对,初步定位异常范围。 +2. 缩小范围:根据Accuracy Reached or Not找出不符合精度标准的API。 +3. 范围比对:对不符合精度标准的API重新dump。 +4. 分析原因并优化:分析API精度不符合标准的原因并进行优化调整。 +5. 整网比对:重新进行整网比对,判断优化后的API是否已符合精度标准以及是否出现新的精度问题。 +6. 重复1~5步,直到不存在精度问题为止。 + +**精度分析示例** + +1. dump整网数据。 + + 分别dump CPU或GPU以及NPU数据,在PyTorch训练脚本插入dump接口,示例代码如下(下面以NPU为例,CPU或GPU dump基本相同): + + ```python + from ptdbg_ascend import * + + # 在main函数开始前固定随机数 + seed_all() + + # 配置dump数据目录路径和名称 + set_dump_path("./npu_dump", dump_tag='all') + + # 注册dump回调函数 + register_hook(model, acc_cmp_dump) + + ... + + # 在第一个迭代开始的位置开启dump和堆栈模式,同时为保证数据完整性开启dump bool和整型的tensor以及浮点、bool和整型的标量 + set_dump_switch("ON", mode="api_stack", filter_switch="OFF") + + ... + + # 在第一个迭代结束的位置关闭dump + set_dump_switch("OFF") + ``` + +2. 比对整网数据。 + + 第1步中的NPU dump数据文件为npu_dump.pkl,假设NPU dump npy数据目录为npu_dump,GPU dump数据文件为gpu_dump.pkl,GPU dump npy数据目录为gpu_dump。 + + 创建并配置精度比对脚本,以创建compare.py为例,示例代码如下: + + ```python + from ptdbg_ascend import * + dump_result_param={ + "npu_pkl_path": "./npu_dump/all_v4.0/step0/rank0/api_stack_dump.pkl", + "bench_pkl_path": "./gpu_dump/all_v4.0/step0/rank0/api_stack_dump.pkl", + "npu_dump_data_dir": "./npu_dump/all_v4.0/step0/rank0/api_stack_dump", + "bench_dump_data_dir": "./gpu_dump/all_v4.0/step0/rank0/api_stack_dump", + "is_print_compare_log": True + } + compare(dump_result_param, "./output", stack_mode=True) + ``` + + 执行比对: + + ```bash + python3 compare.py + ``` + + 在output目录下生成结果文件,包括:`compare_result_{timestamp}.csv`和`advisor_{timestamp}.txt` + +3. 找出存在问题的API。 + + 1. 根据`advisor_{timestamp}.txt`或打屏信息的提示,可找到存在精度问题的算子(Suspect Nodes)和专家建议(Expert Advice) + + ![auto_analyze_log](img/auto_analyze_log.png) + + 2. 根据第2步结果文件`compare_result_{timestamp}.csv`中的Accuracy Reached or Not字段显示为NO的API,针对该API执行后续比对操作,分析该API存在的精度问题。 + +4. (可选)提取指定API的堆栈信息和dump数据统计信息。 + + 通过parse接口可以清晰地显示特定API的堆栈信息和dump数据统计信息,结合堆栈信息分析代码中可能存在的精度问题。 + + 创建并配置提取脚本,以创建parse.py为例,示例代码如下: + + ```python + from ptdbg_ascend import * + + # 提取dump信息中第1次调用的API:Torch_batch_normal的堆栈信息及数据统计信息 + parse("./npu_dump/all_v4.0/step0/rank0/api_stack_dump.pkl", "Torch_batch_normal_1_forward") + ``` + + 执行提取: + + ```bash + python3 parse.py + ``` + + + +5. (可选)指定API对其底层ACL数据进行dump。 + + - dump指定前向API的ACL级别数据 + + ```python + from ptdbg_ascend import * + + # 固定随机数,开启确定性计算 + seed_all(mode=True) + set_dump_path("./dump_path", dump_tag='forward') + register_hook(model, acc_cmp_dump, dump_mode='acl', dump_config='./dump.json') + + # dump指定前向API的ACL级别数据、bool和整型的tensor以及浮点、bool和整型的标量 + set_dump_switch("ON", mode="acl", scope=["Tensor_permute_1_forward"], filter_switch="OFF") + + ... + + set_dump_switch("OFF") + ``` + + - dump指定反向API的ACL级别数据 + + ```python + from ptdbg_ascend import * + + # 固定随机数,开启确定性计算 + seed_all(mode=True) + set_dump_path("./dump_path", dump_tag='backward') + register_hook(model, acc_cmp_dump, dump_mode='acl', dump_config='./dump.json') + + # dump指定反向API的ACL级别数据、bool和整型的tensor以及浮点、bool和整型的标量 + set_dump_switch("ON", mode="acl", scope=["Functional_conv2d_1_backward"], filter_switch="OFF") + set_backward_input(["./npu_dump/all_v4.0/step0/rank0/api_stack_dump/Functional_conv2d_1_backward_input.0.npy"]) + + ... + + set_dump_switch("OFF") + ``` + +6. (可选)重新比对。 + + 根据第4或5步的dump数据重新配置compare.py并执行比对,可以对单API模型进行问题复现。 + +**注意事项** + +* dump_mode="acl"场景下,会增加npu的内存消耗,请谨慎开启。 +* 部分API存在调用嵌套关系,比如functional.batch_norm实际调用torch.batch_norm,该场景会影响acl init初始化多次,导致功能异常。 + +### 溢出检测场景(register_hook方式) + +溢出检测是针对NPU的PyTorch API,检测是否存在溢出的情况。当前仅支持识别aicore浮点溢出。 + +溢出检测原理:针对溢出阶段,开启acl dump模式,重新对溢出阶段执行,落盘数据。 + +建议按照如下步骤操作: + +1. 在NPU环境下安装ptdbg_ascend工具。 + +2. 在NPU训练脚本内插入ptdbg_ascend工具溢出检测接口。 + + - 示例1:全量溢出检测 + + ```python + from ptdbg_ascend import * + seed_all() + # 配置溢出数据目录路径和名称 + set_dump_path("./overflow_dump") + ... + # 设置检测到3次溢出后退出训练 + register_hook(model, overflow_check, overflow_nums=3) + + ... + ``` + + 多卡使用时各卡单独计算溢出次数。 + + - 示例2:dump指定API的ACL级别溢出数据 + + ```python + from ptdbg_ascend import * + seed_all() + # 配置溢出数据目录路径和名称 + set_dump_path("./overflow_dump") + ... + # dump指定API的ACL级别溢出数据 + register_hook(model, overflow_check, dump_mode='acl', dump_config='./dump.json') + + # 在期望溢出检测的step位置开始前打开溢出检测开关 + set_overflow_check_switch("ON") + + ... + + # 在step结束的位置关闭溢出检测开关 + set_overflow_check_switch("OFF") + + ... + ``` + + - 示例3:dump指定反向API的ACL级别的溢出数据 + + 1. 进行全量溢出检测 + + ```python + from ptdbg_ascend import * + seed_all() + # 配置溢出数据目录路径和名称 + set_dump_path("./overflow_dump") + ... + # 默认检测到1次溢出后退出训练 + register_hook(model, overflow_check) + + ... + ``` + + 2. dump指定反向API的ACL级别的溢出数据 + + ```python + from ptdbg_ascend import * + seed_all() + # 配置溢出数据目录路径和名称 + set_dump_path("./overflow_dump") + ... 
+ # dump指定反向API的ACL级别溢出数据 + register_hook(model, acc_cmp_dump, dump_mode='acl', dump_config='./dump.json') + set_dump_switch("ON", mode="acl", scope=["Functional_conv2d_1_backward"]) + set_backward_input(["./npu_dump/ptdbg_dump_v4.0/step0/rank0/dump/Functional_conv2d_1_backward_input.0.npy"]) + ``` + + 针对前向溢出API,可以通过overflow_nums配置允许的溢出次数,并将每次溢出API的全部ACL数据dump下来,到达指定溢出次数后停止,停止后会看到堆栈打印包含如下字段。 + + ```bash + ValueError: [overflow xxx times]: dump file is saved in 'xxxxx.pkl'. + ``` + + 其中xxx times为用户设置的次数,xxxxx.pkl为文件生成路径。 + +3. NPU环境下执行训练dump溢出数据。 + + 针对输入正常但输出存在溢出的API,会在训练执行目录下将溢出的API信息dump并保存为`forward_info_{pid}.json`和`backward_info_{pid}.json`,通过 [Ascend模型精度预检工具](https://gitee.com/ascend/att/tree/master/debug/accuracy_tools/api_accuracy_checker)对json文件进行解析,输出溢出API为正常溢出还是非正常溢出,从而帮助用户快速判断。 + + 精度预检工具执行命令如下: + + ```bash + # 下载att代码仓后执行如下命令 + export PYTHONPATH=$PYTHONPATH:$ATT_HOME/debug/accuracy_tools/ + cd $ATT_HOME/debug/accuracy_tools/api_accuracy_checker/run_ut + python run_overflow_check.py -forward ./forward_info_0.json + ``` + + 反向过程溢出的API暂不支持精度预检功能。 + + 当重复执行溢出检测dump操作时,需要删除上一次dump目录下的溢出检测dump数据,否则将因重名而报错。 + +**注意事项** + +* dump_mode="acl"场景下,会增加npu的内存消耗,请谨慎开启。 +* 部分API存在调用嵌套关系,比如functional.batch_norm实际调用torch.batch_norm,该场景会影响acl init初始化多次,导致功能异常。 +* 混合精度动态loss scale场景下,正常训练会有"Gradient overflow. Skipping step"日志,添加溢出检测后日志消失,可以通过设置环境变量export OVERFLOW_DEBUG_MODE_ENABLE=1,并将register_hook位置调整到amp.initialize之前解决。此功能需要cann包配套支持,不支持版本执行报错EZ3003。
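+
+针对上述混合精度场景的注意事项,以下为设置环境变量并调整register_hook位置的示意片段(假设使用apex.amp,opt_level等参数以实际脚本为准):
+
+```python
+# 混合精度动态loss scale场景下保留溢出日志的示意写法
+import os
+os.environ["OVERFLOW_DEBUG_MODE_ENABLE"] = "1"  # 也可在拉起训练前通过export设置
+
+from apex import amp
+from ptdbg_ascend import *
+
+register_hook(model, overflow_check)  # 按上述说明,将register_hook调整到amp.initialize之前
+model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
+```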
## debugger方式dump和溢出检测(推荐) + +### PrecisionDebugger模块 + +**功能说明** + +PrecisionDebugger模块包含dump和溢出检测功能的总体配置项。可以指定dump目录,设置dump或溢出检测功能,指定dump的卡和迭代。 + +可以在from ptdbg_ascend import *和模型初始化之间的任意位置添加该模块。 + +**原型** + +```python +PrecisionDebugger(dump_path=None, hook_name=None, rank=None, step=[], enable_dataloader=False, model=None): +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| ----------------- | ------------------------------------------------------------ | -------- | +| dump_path | 设置dump数据目录路径,参数示例:"./dump_path"。默认在dump_path目录下生成`ptdbg_dump_{version}`目录,并在该目录下生成`dump.pkl`文件以及`dump`数据文件保存目录。
当**configure_hook**函数配置了mode参数时,`dump.pkl`文件以及`dump`数据文件保存目录名称会以mode参数值作为前缀,详情请参见“**dump数据存盘说明**”。
未配置dump_path时,也可以通过环境变量ASCEND_WORK_PATH配置dump路径,此时dump数据将落盘在${ASCEND_WORK_PATH}/dump_data下,自定义配置dump_path优先级高于环境变量,dump_path和环境变量需要二选一。 | 否 | +| hook_name | dump模式,可取值dump和overflow_check,表示dump和溢出检测功能,二选一。 | 是 | +| rank | 指定对某张卡上的数据进行dump或溢出检测,默认未配置(表示dump所有卡的数据)。应配置为大于等于0的整数,且须根据实际卡的Rank ID配置,若所配置的值大于实际训练所运行的卡的Rank ID,则dump数据为空,比如当前环境Rank ID为0~7,实际训练运行0~3卡,此时若配置Rank ID为4或不存在的10等其他值,dump数据为空。 | 否 | +| step | 指定dump某个step的数据,默认未配置,表示dump所有step数据。dump特定step时,须指定为训练脚本中存在的step。step为list格式,可配置逐个step,例如:step=[0,1,2];也可以配置step范围,例如:step=list(range(0,9)),表示dump第0到第8个step。 | 否 | +| enable_dataloader | 自动控制开关,可取值True(开启)或False(关闭),默认为False。配置为True后自动识别dump step参数指定的迭代,并在该迭代执行完成后退出训练,此时start和stop函数可不配置,开启该开关要求训练脚本是通过torch.utils.data.dataloader方式加载数据;配置为False则需要配置start和stop函数,并在最后一个stop函数后或一个step结束的位置添加debugger.step()。 | 否 | +| model | 开启init dump模式,传入网络模型实例化的对象,配置该参数后,dump操作仅dump网络中init方法里调用的方法(nn.Module类),不会对所有API进行dump。参数示例: model=net,net为网络模型实例化的对象名称。默认未配置。
配置该参数时,PrecisionDebugger模块请在模型实例化之后调用。
该模式不支持“溢出检测”、“ACL级别数据dump”和“模块级精度数据dump”。此模式下dump文件名前缀为网络中定义的模块名或层名。 | 否 | + +#### init dump模式示例代码和数据落盘说明 + +**示例代码** + +```python +import os +import torch +import torch.nn as nn +import torch_npu +from ptdbg_ascend import * + +torch.npu.set_device("npu:0") + + +class Net(nn.Module): + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2) + self.relu1 = nn.ReLU() + self.bn1 = nn.BatchNorm2d(16) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + output = self.relu1(x) + return output + +if __name__ == "__main__": + net = Net().npu() + # model参数传入net, 开启init dump 功能 + debugger = PrecisionDebugger(dump_path="./dump", hook_name="dump", model=net) + debugger.configure_hook(mode="api_stack") + debugger.start() + x = torch.randn(1, 1, 28, 28).npu() + out = net(x) + loss = out.sum() + loss.backward() + debugger.stop() +``` + +**落盘数据说明** + +该模式下dump数据命名格式为:`{Layer_name}_{Module_name}_{call_num}_{forward/backward}_{input/output}.npy` + +``` +# 按照上述用例代码进行dump,落盘数据命名示例如下: +conv1_Conv2d_0_forward_input.0.npy +conv1_Conv2d_0_forward_output.npy +relu1_ReLU_0_forward_input.0.npy +....... +bn1_BatchNorm2d_0_backward_output.2.npy +``` + +### configure_hook函数(可选) + +**功能说明** + +设置dump范围。 + +建议在**PrecisionDebugger**模块与模型初始化之间的任意位置添加,不添加此函数时默认使用mode="api_stack" dump整网数据。 + +**原型** + +dump: + +```python +debugger.configure_hook(mode="api_stack", scope=[], api_list=[], filter_switch="OFF", acl_config=None, backward_input=[], input_output_mode=["all"], summary_only=False) +``` + +溢出检测: + +```python +debugger.configure_hook(mode=None, acl_config=None, overflow_nums=1, need_replicate=False) +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| ----------------- | ------------------------------------------------------------ | -------- | +| mode | dump模式。可取值"all"、"list"、"range"、"stack"、"acl"、"api_list"、"api_stack",各参数含义请参见本节的“**函数示例**”。参数示例:mode="list"。默认为api_stack。该参数配置值将作为dump数据文件名的前缀,详情请参见“**dump数据存盘说明**”。 | 否 | +| scope或api_list | dump范围。根据mode配置的模式选择dump的API范围,mode="api_list"时,需要配置api_list=[],其他模式有需要时配置scope=[]。参数示例:scope=["Tensor_permute_1_forward", "Tensor_transpose_2_forward"]、api_list=["relu"]。默认为空。 | 否 | +| filter_switch | dump bool和整型的tensor以及浮点、bool和整型的标量的过滤开关。可取值"ON"(表示开启过滤,即不dump)或"OFF"(表示关闭过滤)。参数示例:filter_switch="ON"。默认不配置,即filter_switch="OFF",表示dump上述数据。 | 否 | +| acl_config | acl dump的配置文件。mode="acl"时,该参数必选;mode为其他值时,该参数不选。参数示例:acl_config='./dump.json'。dump.json配置文件详细介绍请参见“**dump.json配置文件说明**”。 | 否 | +| backward_input | 该输入文件为首次运行训练dump得到反向API输入的.npy文件。例如若需要dump Functional_conv2d_1 API的反向过程的输入输出,则需要在dump目录下查找命名包含Functional_conv2d_1、backward和input字段的.npy文件。 | 否 | +| input_output_mode | dump数据过滤。可取值"all"、"forward"、"backward"、"input"和"output",表示仅保存dump的数据中文件名包含"forward"、"backward"、"input"和"output"的前向、反向、输入或输出的.npy文件。参数示例input_output_mode=["backward"]或input_output_mode=["forward", "backward"]。默认为all,即保存所有dump的数据。除了all参数只能单独配置外,其他参数可以自由组合。 | 否 | +| summary_only | dump npy文件过滤,可取值True或False,配置为True后仅dump保存API统计信息的pkl文件,参数示例:summary_only=False,默认为False。 | 否 | +| summary_mode | 控制dump文件输出的模式,可取值md5(dump仅输出包含md5值的pkl文件,用于验证数据的完整性)、summary(dump仅输出包含API统计信息的pkl文件)、all(dump输出包含API统计信息的pkl文件以及具体的npy文件),参数示例:summary_mode=md5,默认为all。summary_only=True时,不允许配置该参数。 | 否 | +| overflow_nums | 控制溢出次数,表示第N次溢出时,停止训练,过程中检测到溢出API对应ACL数据均dump。参数示例:overflow_nums=3。配置overflow_check时可配置,默认不配置,即检测到1次溢出,训练停止,配置为-1时,表示持续检测溢出直到训练结束。 | 否 | +| need_replicate | 
过程dump数据生成开关,执行溢出检测时,dump目录下会生成forward_real_data和backward_real_data的过程dump数据目录,可取值True(生成)或False(不生成),默认不生成。 | 否 | + +**函数示例** + +configure_hook可配置多种dump模式,示例如下: + +说明: + +以下均以dump部分API数据为例,API名可以从首次dump整网数据的结果csv文件中的NPU Name或Bench Name列获取。 + +以下仅为该函数配置示例,完整代码请参见“**示例代码**”章节。 + +- 示例1:dump指定API列表 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(mode="list", scope=["Tensor_permute_1_forward", "Tensor_transpose_2_forward", "Torch_relu_3_backward"]) + ``` + +- 示例2:dump指定范围 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(mode="range", scope=["Tensor_abs_1_forward", "Tensor_transpose_3_forward"]) + ``` + +- 示例3:STACK模式,只dump堆栈信息 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(mode="stack", scope=["Tensor_abs_1_forward", "Tensor_transpose_3_forward"]) + ``` + +- 示例4:dump指定前向API的ACL级别数据 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(mode="acl", scope=["Tensor_permute_1_forward"], acl_config="./dump.json") + ``` + +- 示例5:dump指定反向API的ACL级别数据 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(mode="acl", scope=["Functional_conv2d_1_backward"], acl_config="./dump.json", backward_input=["./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump/Functional_conv2d_1_backward_input.0.npy"]) + ``` + +- 示例6:dump指定某一类API的API级别输入输出数据 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(mode="api_list", api_list=["relu"]) + ``` + + mode="api_list"时不配置scope。 + +- 示例7:dump全部API级别输入输出数据以及相应堆栈信息 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(mode="api_stack") + ``` + + mode="api_stack"时不配置scope。 + +- 示例8: dump全部API级别输入输出数据并包含bool和整型的tensor以及浮点、bool和整型的标量,配置为OFF,会dump bool和整型数据 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(filter_switch="OFF") + ``` + + 配置filter_switch="OFF"时也可以同时配置mode、scope和api_list(dump ACL级别数据场景除外)。 + +- 示例9:仅保存dump的数据文件名包含“backward”的反向.npy文件 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(input_output_mode=["backward"]) + ``` + +- 示例10:仅dump pkl文件 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + debugger.configure_hook(summary_only=True) + ``` + +- 示例11:溢出检测dump + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="overflow_check", step=[0]) + debugger.configure_hook(overflow_nums=1) + ``` + + dump执行时会在**PrecisionDebugger**模块的dump_path参数指定的目录下生成ptdbg_dump_{version}目录,保存溢出数据。 + + 多卡场景时,需要检测到至少有一张卡溢出次数达到overflow_nums时,训练结束。 + + 仅支持NPU环境。 + +- 示例12:dump溢出API的ACL级别数据 + + ```python + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="overflow_check", step=[0]) + debugger.configure_hook(mode="acl", acl_config="./dump.json") + ``` + + 该场景会在原有数据基础上,额外在dump.json文件配置的dump_path目录下生成一份ACL算子数据,该数据可通过“**ptdbg_ascend.parse**”工具进行解析。 + + 仅支持NPU环境。 + +### start函数(可选) + +**功能说明** + +dump或溢出检测启动函数。 + +在模型初始化之后的任意位置添加。 + +**原型** + +```python +debugger.start() +``` + +该函数为类函数,可以使用debugger.start()也可以使用PrecisionDebugger.start()。 + +### stop函数(可选) + +**功能说明** + 
+dump或溢出检测停止函数。 + +在**start**函数之后的任意位置添加。 + +**原型** + +```python +debugger.stop() +``` + +该函数为类函数,可以使用debugger.stop()也可以使用PrecisionDebugger.stop()。 + +### 示例代码(自动模式) + +**需要保证用户训练代码是通过torch.utils.data.dataloader方式加载数据。** + +- 示例1:开启dump + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0,2], enable_dataloader=True) + # 请勿将以上初始化流程插入到循环代码中 + ``` + +- 示例2:开启溢出检测dump + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="overflow_check", step=[0,2], enable_dataloader=True) + # 请勿将以上初始化流程插入到循环代码中 + ``` + +### 示例代码(手动模式) + +一般情况下使用自动模式可以快速方便进行dump操作,但个别大模型可能在部分卡的训练操作中没有调用dataloader,这会导致自动模式无法dump指定迭代的数据,此时需要关闭自动模式手动在迭代前后插入start()和stop()函数,并在最后一个stop函数后或一个step结束的位置添加debugger.step()以标识dump结束。 + +- 示例1:开启dump + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="dump", step=[0]) + # 请勿将以上初始化流程插入到循环代码中 + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + +- 示例2:开启溢出检测dump + + ```python + from ptdbg_ascend import * + debugger = PrecisionDebugger(dump_path="./dump_path", hook_name="overflow_check", step=[0]) + # 请勿将以上初始化流程插入到循环代码中 + + # 模型初始化 + # 下面代码也可以用PrecisionDebugger.start()和PrecisionDebugger.stop() + debugger.start() + + # 需要dump的代码片段1 + + debugger.stop() + debugger.start() + + # 需要dump的代码片段2 + + debugger.stop() + debugger.step() + ``` + +## register_hook方式dump和溢出检测 + +### 总体说明 + +- 本节主要介绍CPU或GPU及NPU精度数据dump和溢出检测所需要的函数以及示例。 + +- ptdbg_ascend工具默认情况下仅dump PyTorch模型的API输入输出数据进行精度比对,若在比对结果中发现某个API下可能存在ACL的精度问题,那么可以选择dump该API的ACL级别数据进行精度分析。 + +- 某些torch api的输出不是Tensor类型的数据。对于此类API的反向过程进行ACL dump,工具会在运行日志中给出对应的Warning(is not of tensor type and cannot be automatically derived)提示。如若想要进行该类API反向ACL dump,可以通过手动构建单API用例的方式进行ACL dump,具体用例可参见“**[反向ACL dump用例说明](https://gitee.com/ascend/att/blob/master/debug/accuracy_tools/ptdbg_ascend/doc/%E5%8F%8D%E5%90%91ACL%20dump%E7%94%A8%E4%BE%8B%E8%AF%B4%E6%98%8E.md)**”。 + +- 工具性能:dump数据量较小时(小于5G),参考dump速度0.1GB/s;dump数据量较大时,参考dump速度0.2GB/s。 + 推荐环境配置:独占环境,CPU核心数192,固态硬盘(IO速度参考:固态硬盘 > 500MB/s,机械硬盘60 ~ 170MB/s)。 + + 用户环境性能弱于标准约束或非独占使用的比对速度酌情向下浮动。Dump速度的计算方式:Dump数据量/(单个step添加Dump耗时-原始单个step耗时)。 + +### 约束 +- 进行CPU或GPU数据dump时,请安装torch包而非torch_npu包,避免工具无法识别使用场景,导致失败。 + +- TASK_QUEUE_ENABLE环境变量会导致API下发和执行异步进行,因此在ACL dump前需要将TASK_QUEUE_ENABLE关闭,即export TASK_QUEUE_ENABLE=0。 + +- 不建议在PyTorch训练脚本中同时添加dump接口和性能数据采集(如Ascend Python Profiler)接口,二者可能相互影响导致数据不准确。 + +### seed_all + +**功能说明** + +固定随机数。通过固定随机数保证模型的输入或输出一致。在训练主函数开始前调用,避免随机数固定不全。 + +使用from ptdbg_ascend import *后自动导入该函数,代码无需再次添加,若需要修改随机数种子和确定性计算模式,则需要通过添加该函数修改。 + +**函数原型** + +```python +seed_all(seed=1234, mode=False) +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| ------ | ------------------------------------------------------------ | -------- | +| seed | 随机数种子。参数示例:seed=1000。默认值为:1234。 | 否 | +| mode | 确定性计算模式。可配置True或False。参数示例:mode=True。默认为False。
即使在相同的硬件和输入下,API多次执行的结果也可能不同,开启确定性计算是为了保证在相同的硬件和输入下,API多次执行的结果相同。
确定性计算会导致API执行性能降低,建议在发现模型多次执行结果不同的情况下开启。
rnn类算子、ReduceSum、ReduceMean等算子可能与确定性计算存在冲突,若开启确定性计算后多次执行的结果不相同,则考虑存在这些算子。 | 否 | + +**函数示例** + +seed_all函数的随机数种子,取默认值即可,无须配置;第二个参数默认关闭,不开启确定性计算时也无须配置。 + +- 示例1:仅固定随机数,不开启确定性计算 + + ```python + seed_all() + ``` + +- 示例2:固定随机数,开启确定性计算 + + ```python + seed_all(mode=True) + ``` + +**固定随机数范围** + +seed_all函数可固定随机数的范围如下表。 + +| API | 固定随机数 | +| ---------------------------------------- | --------------------------- | +| os.environ['PYTHONHASHSEED'] = str(seed) | 禁止Python中的hash随机化 | +| random.seed(seed) | 设置random随机生成器的种子 | +| np.random.seed(seed) | 设置numpy中随机生成器的种子 | +| torch.manual_seed(seed) | 设置当前CPU的随机种子 | +| torch.cuda.manual_seed(seed) | 设置当前GPU的随机种子 | +| torch.cuda.manual_seed_all(seed) | 设置所有GPU的随机种子 | +| torch_npu.npu.manual_seed(seed) | 设置当前NPU的随机种子 | +| torch_npu.npu.manual_seed_all(seed) | 设置所有NPU的随机种子 | +| torch.backends.cudnn.enabled=False | 关闭cuDNN | +| torch.backends.cudnn.benchmark=False | cuDNN确定性地选择算法 | +| torch.backends.cudnn.deterministic=True | cuDNN仅使用确定性的卷积算法 | + +需要保证CPU或GPU以及NPU的模型输入完全一致,dump数据的比对才有意义,seed_all并不能保证模型输入完全一致,如下表所示场景需要保证输入的一致性。 + +| 场景 | 固定方法 | +| --------------- | ------------- | +| 数据集的shuffle | 关闭shuffle。 | +| dropout | 关闭dropout。 | + +关闭shuffle示例: + +```python +train_loader = torch.utils.data.DataLoader( + train_dataset, + batch_size = batch_size, + shuffle = False, + num_workers = num_workers +) +``` + +关闭dropout: + +在使用from ptdbg_ascend import *后,工具会自动将torch.nn.functional.dropout、torch.nn.functional.dropout2d、torch.nn.functional.dropout3d、torch.nn.Dropout、torch.nn.Dropout2d、torch.nn.Dropout3d的接口参数p置为0。 + +### set_dump_path + +**功能说明** + +设置数据保存目录。建议在seed_all函数之后调用且需要保证训练进程能够调用该函数;多卡时须保证每个进程都能调用该函数。 + +**函数原型** + +```python +set_dump_path(fpath=None, dump_tag='ptdbg_dump') +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| -------- | ------------------------------------------------------------ | -------- | +| fpath | 设置数据目录路径。参数示例:'./dump_path'。
默认在dump_path目录下生成`ptdbg_dump_{version}`目录,并在该目录下生成`dump.pkl`文件以及`dump`数据文件保存目录。
当set_dump_switch函数配置了mode参数时,`dump.pkl`文件以及`dump`数据文件保存目录名称会以mode参数值作为前缀,详情请参见“**dump数据存盘说明**”。
未配置fpath时,也可以通过环境变量ASCEND_WORK_PATH配置dump路径,此时数据将落盘在${ASCEND_WORK_PATH}/dump_data下,自定义配置dump_path优先级高于环境变量,fpath和环境变量需要二选一。 | 否 | +| dump_tag | 设置数据目录名称。参数示例:dump_tag='dump_conv2d'。默认数据目录命名为ptdbg_dump_{version}。
{version}为当前安装ptdbg_ascend工具版本。目录结构参见“**dump数据存盘说明**”。
配置该参数会将生成的`ptdbg_dump_{version}`目录名称变更为dump_tag配置的值,如`dump_conv2d_{version}`。 | 否 | + +**函数示例** + +- 示例1:设置数据目录路径 + + ```python + set_dump_path('./dump_path') + ``` + +- 示例2:设置数据目录名称 + + ```python + set_dump_path('./dump_path', dump_tag='dump_conv2d') + ``` + + +若以相同的数据目录多次dump,则会因同名导致覆盖;多次dump建议配置不同的dump_tag。 + +### register_hook + +**功能说明** + +注册工具钩子函数。在set_dump_path之后调用。 + +dump操作必选。 + +**函数原型** + +```python +register_hook(model, hook, overflow_nums=overflow_nums, dump_mode=dump_mode, dump_config=dump_config_file) +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| ------------- | ------------------------------------------------------------ | -------- | +| model | 传入网络模型实例化的对象。参数示例: model=net,net为网络模型实例化的对象名称。 | 是 | +| hook | 注册工具的dump和溢出检测钩子。可取值overflow_check(表示溢出检测)和acc_cmp_dump(表示dump数据),二选一。 | 是 | +| overflow_nums | 控制溢出次数,表示第N次溢出时,停止训练,过程中检测到溢出API对应ACL数据均dump。参数示例:overflow_nums=3。配置overflow_check时可配置,默认不配置,即检测到1次溢出,训练停止,配置为-1时,表示持续检测溢出直到训练结束。 | 否 | +| dump_mode | 控制针对溢出API的dump模式,可取值"acl"或"api"。配置acl时,表示dump ACL级别的溢出数据,此时set_dump_path参数不生效,dump数据目录由dump_config的.json文件配置。参数示例:dump_mode="acl"。默认不配置,即dump API级别的溢出数据。 | 否 | +| dump_config | acl dump的配置文件。dump_mode="acl"时,该参数必选;dump_mode="api"时,该参数不选。参数示例:dump_config='./dump.json'。 | 否 | + +**函数示例** + +- 示例1:注册工具钩子函数 + + ```python + register_hook(model, acc_cmp_dump) + ``` + +- 示例2:dump指定API的ACL级别数据 + + ```python + register_hook(model, acc_cmp_dump, dump_mode='acl', dump_config='./dump.json') + ``` + + 需要配置set_dump_switch的mode="acl"以及scope指定为前向或反向API,请参见“**set_dump_switch”**的示例。 + + 该场景set_dump_path不生效,由dump_config中的dump.json文件配置dump数据目录。 + +- 示例3:溢出检测dump + + ```python + register_hook(model, overflow_check, overflow_nums=3) + ``` + + dump执行时会在set_dump_path的fpath参数指定的目录下生成ptdbg_dump_{version}目录,保存溢出数据。 + + 多卡场景时,需要检测到至少有一张卡溢出次数达到overflow_nums时,训练结束。 + + 仅支持NPU环境。 + +- 示例4:dump指定API的ACL级别溢出数据 + + ```python + register_hook(model, overflow_check, dump_mode='acl', dump_config='./dump.json') + ``` + + 该场景会在原有数据基础上,额外在dump.json文件配置的dump_path目录下生成一份ACL算子数据,该数据可通过“**ptdbg_ascend.parse**”工具进行解析。 + + 仅支持NPU环境。 + +### set_dump_switch + +**功能说明** + +设置dump范围。建议在register_hook函数之后的脚本内任意位置插入,但进行精度问题排查建议参照“场景化示例 > 单卡场景精度比对”章节的顺序,先从第一个迭代开始的位置调用并dump整网数据。 + +dump操作必选。 + +**函数原型** + +```python +def set_dump_switch(switch, mode="all", scope=[], api_list=[], filter_switch="OFF", dump_mode=["all"], summary_only=False): +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| --------------- | ------------------------------------------------------------ | -------- | +| switch | dump开关。可取值"ON"或"OFF"。须在选定dump开始的位置配置set_dump_switch("ON");dump结束的位置设置set_dump_switch("OFF")。 | 是 | +| mode | dump模式。可取值"all"、"list"、"range"、"stack"、"acl"、"api_list"、"api_stack",各参数含义请参见本节的“**函数示例**”。参数示例:mode="list"。默认为all。该参数配置值将作为dump数据文件名的前缀,详情请参见“**dump数据存盘说明**”。 | 否 | +| scope或api_list | dump范围。根据mode配置的模式选择dump的API范围。参数示例:scope=["Tensor_permute_1_forward", "Tensor_transpose_2_forward"]、api_list=["relu"]。默认为空。 | 否 | +| filter_switch | dump bool和整型的tensor以及浮点、bool和整型的标量的过滤开关。可取值"ON"或"OFF"。参数示例:filter_switch="ON"。默认不配置,即filter_switch="OFF",表示dump上述数据。 | 否 | +| dump_mode | dump数据过滤。可取值"all"、"forward"、"backward"、"input"和"output",表示仅保存dump的数据中文件名包含"forward"、"backward"、"input"和"output"的前向、反向、输入或输出的.npy文件。参数示例dump_mode=["backward"]或dump_mode=["forward", "backward"]。默认为all,即保存所有dump的数据。除了all参数只能单独配置外,其他参数可以自由组合。 | 否 | +| summary_only | dump npy文件过滤,可取值True或False,配置为True后仅dump保存API统计信息的pkl文件,参数示例:summary_only=False,默认为False。 | 否 | + +**推荐配置** + +```python +set_dump_switch("ON", mode="api_stack", 
filter_switch="OFF") +``` + +开启dump数据和堆栈模式,同时为保证数据完整性开启dump bool和整型的tensor以及浮点、bool和整型的标量。 + +**函数示例** + +set_dump_switch可配置多种dump模式,示例如下: + +说明:以下均以dump部分API数据为例,API名可以从首次dump整网数据的结果csv文件中的NPU Name或Bench Name列获取。 + +- 示例1:dump指定API列表 + + ```python + set_dump_switch("ON", mode="list", scope=["Tensor_permute_1_forward", "Tensor_transpose_2_forward", "Torch_relu_3_backward"]) + ``` + +- 示例2:dump指定范围 + + ```python + set_dump_switch("ON", mode="range", scope=["Tensor_abs_1_forward", "Tensor_transpose_3_forward"]) + ``` + +- 示例3:STACK模式,只dump堆栈信息 + + ```python + set_dump_switch("ON", mode="stack", scope=["Tensor_abs_1_forward", "Tensor_transpose_3_forward"]) + ``` + +- 示例4:dump指定前向API的ACL级别数据 + + ```python + register_hook(model, acc_cmp_dump, dump_mode='acl', dump_config='./dump.json') + set_dump_switch("ON", mode="acl", scope=["Tensor_permute_1_forward"]) + ``` + + 需要配置register_hook的dump_mode='acl'和dump_config配置文件。 + +- 示例4:dump指定反向API的ACL级别数据 + + ```python + register_hook(model, acc_cmp_dump, dump_mode='acl', dump_config='./dump.json') + set_dump_switch("ON", mode="acl", scope=["Functional_conv2d_1_backward"]) + set_backward_input(["./npu_dump/dump_conv2d_v4.0/step0/rank0/dump/Functional_conv2d_1_backward_input.0.npy"]) + ``` + + 需要配置register_hook的dump_mode='acl'和dump_config配置文件,并通过set_backward_input设置反向API输入的.npy文件。 + +- 示例5:dump指定某一类API的API级别输入输出数据 + + ```python + set_dump_switch("ON", mode="api_list", api_list=["relu"]) + ``` + + mode="api_list"时不配置scope。 + +- 示例6:dump全部API级别输入输出数据以及相应堆栈信息 + + ```python + set_dump_switch("ON", mode="api_stack") + ``` + + mode="api_stack"时不配置scope。 + +- 示例7: dump全部API级别输入输出数据并包含bool和整型的tensor以及浮点、bool和整型的标量,配置为OFF,会dump bool和整型数据 + + ```python + set_dump_switch("ON", filter_switch="OFF") + ``` + + 配置filter_switch="OFF"同时也可以配置mode、scope和api_list,除dump ACL级别数据。 + +- 示例8:仅保存dump的数据文件名包含“backward”的反向.npy文件 + + ```python + set_dump_switch("ON", dump_mode=["backward"]) + ``` + +- 示例9:仅dump pkl文件 + + ```python + set_dump_switch("ON", summary_only=True) + ``` + +以上示例均需要在结束dump的位置插入set_dump_switch("OFF")。 + +set_dump_switch配置mode为all或api_stack时,结束dump后,在dump目录下会自动生成compare_data.py比对脚本模板,示例如下: + +```python +from ptdbg_ascend import compare + +pkl_path = "%s" +dump_data_dir = "%s" + +dump_path_param = { + "npu_pkl_path": , + "bench_pkl_path": , + "npu_dump_data_dir": , + "bench_dump_data_dir": , + "is_print_compare_log": True +} + +compare(dump_path_param, output_path="", stack_mode="%s") +``` + +pkl_path和dump_data_dir字段会自动识别pkl和dump目录的路径,用户需要判断当前dump的环境是NPU、CPU或GPU,并将pkl_path和dump_data_dir字段填入下方dump_path_param函数对应的字段中,例如当前设备为NPU,那么填写方式如下: + +```python +from ptdbg_ascend import compare + +pkl_path = "%s" +dump_data_dir = "%s" + +dump_path_param = { + "npu_pkl_path": pkl_path, + "bench_pkl_path": , + "npu_dump_data_dir": dump_data_dir, + "bench_dump_data_dir": , + "is_print_compare_log": True +} + +compare(dump_path_param, output_path="", stack_mode="%s") +``` + +此时,另一侧数据的路径,需要用户另外识别并填入。 + +### set_overflow_check_switch + +**功能说明** + +置溢出检测范围。默认不配置该函数,全量进行溢出检测。 + +仅支持NPU环境。 + +**函数原型** + +```python +set_overflow_check_switch(switch, filter_switch='OFF') +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| ------------- | ------------------------------------------------------------ | -------- | +| switch, | 检测开关。可取值"ON"或"OFF"。如果只在特定的step溢出检测,则在期望溢出检测的step位置开始前插入set_overflow_check_switch("ON"),在step结束的位置插入set_overflow_check_switch("OFF")。 | 是 | +| filter_switch | dump bool和整型的tensor以及浮点、bool和整型的标量的过滤开关。可取值"ON"或"OFF"。参数示例:filter_switch="ON"。默认不配置,即filter_switch="OFF",表示dump上述数据。 | 
否 | + +**函数示例** + +- 示例1:指定范围溢出检测 + + ```python + register_hook(model, overflow_check) + set_overflow_check_switch("ON") + + ... + + set_overflow_check_switch("OFF") + ``` + + 该场景set_dump_path不生效,dump执行时会在当前目录自动生成ptdbg_dump_{version}目录,保存溢出数据。 + +- 示例2:前向API的ACL级别范围溢出检测 + + ```python + register_hook(model, overflow_check, dump_mode='acl', dump_config='./dump.json') + set_overflow_check_switch("ON") + + ... + + set_overflow_check_switch("OFF") + ``` + + 该场景set_dump_path不生效,由dump_config中的dump.json文件配置溢出数据目录。 + +### set_backward_input + +**功能说明** + +设置反向ACL级别dump时需要的反向输入的.npy文件。 + +**函数原型** + +```python +set_backward_input(backward_input) +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| -------------- | ------------------------------------------------------------ | -------- | +| backward_input | 该输入文件为首次运行训练dump得到反向API输入的.npy文件。例如若需要dump Functional_conv2d_1 API的反向过程的输入输出,则需要在dump目录下查找命名包含Functional_conv2d_1、backward和input字段的.npy文件。 | 是 | + +**函数示例** + +```python +register_hook(model, acc_cmp_dump, dump_mode='acl', dump_config='./dump.json') +set_dump_switch("ON", mode="acl", scope=["Functional_conv2d_1_backward"]) +set_backward_input(["./npu_dump/dump_conv2d_v4.0/step0/rank0/dump/Functional_conv2d_1_backward_input.0.npy"]) +``` + +## dump.json配置文件说明 + +**dump.json配置示例** + +```json +{ + "dump": + { + "dump_list":[], + "dump_path":"./dump/output", + "dump_mode":"all", + "dump_op_switch":"on" + } +} +``` + +**dump.json参数说明** + +| 字段名 | 说明 | +| -------------- | ------------------------------------------------------------ | +| dump_list | 待dump数据的API模型。为空,无需配置。 | +| dump_path | dump数据文件存储到运行环境的目录,主要用于指定ACL dump数据路径。支持配置绝对路径或相对路径。dump_path须为已存在目录。 | +| dump_mode | dump数据模式,配置如下:
- output:dump API的输出数据。默认值。
- input:dump API的输入数据。
- all:dump API的输入、输出数据。 | +| dump_op_switch | 单API模型dump数据开关,配置如下: - off:关闭单API模型dump,默认值。 - on:开启单API模型dump。 | + +**dump目录说明** + +配置register_hook的dump_config后,采集的dump数据会在{dump_path}/{time}/{deviceid}/{model_id}目录下生成,例如“/home/HwHiAiUser/output/20200808163566/0/0”。 + +```bash +├── 20230131172437 +│   └── 1 +│   ├── 0 +│   │   ├── Add.Add.45.0.1675157077183551 +│   │   ├── Cast.trans_Cast_0.31.0.1675157077159449 +│   │   ├── Cast.trans_Cast_5.43.0.1675157077180129 +│   │   ├── MatMul.MatMul.39.0.1675157077172961 +│   │   ├── Mul.Mul.29.0.1675157077155731 +│   │   ├── NPUAllocFloatStatus.NPUAllocFloatStatus.24.0.1675157077145262 +│   │   ├── TransData.trans_TransData_1.33.0.1675157077162791 +│   │   └── TransData.trans_TransData_4.41.0.1675157077176648 +│   ├── 1701737061 +│   │   └── Cast.trans_Cast_2.35.0.1675157077166214 +│   ├── 25 +│   │   └── NPUClearFloatStatus.NPUClearFloatStatus.26.0.1675157077150342 +│   └── 68 +│   └── TransData.trans_TransData_3.37.0.1675157077169473 +``` + +## 模块级精度数据dump + +### 总体说明 + +大模型场景下,通常不是简单的利用自动迁移能力实现GPU到NPU的训练脚本迁移,而是会对NPU网络进行一系列针对性的适配,因此,常常会造成迁移后的NPU模型存在部分子结构不能与GPU原始模型完全对应。模型结构不一致导致API调用类型及数量不一致,若直接按照API粒度进行精度数据dump和比对,则无法完全比对所有的API。 + +本节介绍的功能是对模型中的大粒度模块进行数据dump,使其比对时,对于无法以API粒度比对的模块可以直接以模块粒度进行比对。 + +模块指的是继承自nn.Module类模块,通常情况下这类模块就是一个小模型,可以被视为一个整体,dump数据时以模块为粒度进行dump。 + +### module_dump + +**功能说明** + +开启模块级精度数据dump。 + +模块级精度数据dump时必选。 + +**函数原型** + +```python +module_dump(module, module_name) +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| ----------- | ------------------------------------------------------------ | -------- | +| module | 网络中实例化好的nn.Module类模块的model对象。 | 是 | +| module_name | 用户自定义的该model名称。主要用于dump数据文件的命名,便于在比对时识别模块级数据。 | 是 | + +### module_dump_end + +**功能说明** + +结束模块级精度数据dump。 + +模块级精度数据dump时必选。 + +**函数原型** + +```python +module_dump_end() +``` + +### 示例代码 + +```python +# 根据需要import包 +import os +import torch +import torch.nn as nn +import torch_npu +import torch.nn.functional as F +from ptdbg_ascend import * + +torch.npu.set_device("npu:0") +# 定义一个简单的网络 +class ModuleOP(nn.Module): + def __init__(self) -> None: + super().__init__() + self.linear_1 = nn.Linear(in_features=8, out_features=4) + self.linear_2 = nn.Linear(in_features=4, out_features=2) + def forward(self, x): + x1 = self.linear_1(x) + x2 = self.linear_2(x1) + r1 = F.relu(x2) + return r1 + +if __name__ == "__main__": + module = ModuleOP() + + # 注册工具 + pdbg = PrecisionDebugger("./dump_data/npu", hook_name="dump") + pdbg.start() + + x = torch.randn(10, 8) + module_dump(module, "MyModuleOP") # 开启模块级精度数据dump + out = module(x) + module_dump_end() # 结束模块级精度数据dump + loss = out.sum() + loss.backward() + pdbg.stop() +``` + +## dump数据存盘说明 + +dump结果目录结构示例如下: + +```bash +├── dump_path +│ └── ptdbg_dump_{version} +│ ├── step0 +│ | ├── rank0 +│ | │ ├── dump +| | | | ├── Tensor_permute_1_forward.npy +| | | | ├── MyModule_0_forward_input.npy # 开启模块级精度数据dump时存在模块级的dump数据文件 +| | | | ... +| | | | └── Function_linear_5_backward_output.npy +│ | │ └── dump.pkl +│ | ├── rank1 +| | | ├── dump +| | | | └── ... +| | | └── dump.pkl +│ | ├── ... +│ | | +| | └── rank7 +│ ├── step1 +│ | ├── ... 
+│ ├── step2 +``` + +dump过程中,npy文件在对应算子或者模块被执行后就会落盘,而pkl文件则需要在正常执行PrecisionDebugger.stop()或set_dump_switch("OFF")后才会被落盘保存,异常的程序终止会保存终止前被执行算子的相关npy文件,但是不会生成pkl文件。 + +其中`ptdbg_dump_{version}`为默认命名,debugger方式dump不支持修改该文件夹名称,使用set_dump_path函数则支持通过dump_tag参数修改文件夹名称;rank为设备上各卡的ID,每张卡上dump的数据会生成对应dump目录。 + +**精度比对dump场景** + +精度比对dump场景的结果如下: + +* dump.pkl文件:包含dump数据的API名称(命名格式为:`{api_type}_{api_name}_{API调用次数}_{前向反向}_{input/output}.{参数序号}`)、dtype、shape、各数据的max、min、mean、L2norm统计信息以及当配置summary_mode=md5时的md5数据。 + + 其中,“参数序号”表示该API下的第n个参数,例如1,则为第一个参数,若该参数为list格式,则根据list继续排序,例如1.1,表示该API的第1个参数的第1个子参数;L2norm表示2范数(平方根)。 + +* dump目录:目录下为npy格式的dump数据。 + + npy文件保存的前缀和PyTorch对应关系如下 + + | 前缀 | Torch模块 | + | ----------- | ------------------- | + | Tensor | torch.Tensor | + | Torch | torch | + | Functional | torch.nn.functional | + | NPU | NPU亲和算子 | + | VF | torch._VF | + | Aten | torch.ops.aten | + | Distributed | torch.distributed | + +当configure_hook或set_dump_switch配置mode参数(例如:mode="api_stack" )时,dump结果的文件名会添加api_stack前缀,dump结果如下: + +* api_stack_dump.pkl +* api_stack_dump目录 + +**溢出检测dump场景** + +PrecisionDebugger模块的hook_name参数或register_hook函数设置了overflow_check时,检测API溢出,dump结果的文件名格式为:`{api_type}_{api_name}_{API调用次数}_{前向反向}_{当前溢出次数}`,dump结果示例如下: + +* `Tensor_add_1_forward_1.pkl` +* `Tensor_add_1_forward_1`目录
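+
+无论是精度比对还是溢出检测场景,dump目录下的npy数据文件均为标准numpy格式。若需快速查看某个数据文件的dtype、shape与统计量,可参考如下示意脚本(文件名与路径请以实际dump结果为准):
+
+```python
+# 读取单个dump数据文件并打印统计量的示意脚本
+import numpy as np
+
+data = np.load("./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump/Tensor_permute_1_forward_output.npy")
+print(data.dtype, data.shape)
+print("max:", data.max(), "min:", data.min(), "mean:", data.mean(), "L2norm:", np.linalg.norm(data))
+```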
## 工具支持的API列表 + +ptdbg_ascend工具维护固定的API支持列表,若需要删除或增加dump的API,可以在[support_wrap_ops.yaml](../src/python/ptdbg_ascend/hook_module/support_wrap_ops.yaml)文件内手动修改,如下示例: + +```bash +functional: # functional为算子类别,找到对应的类别,在该类别下按照下列格式删除或添加API + - conv1d + - conv2d + - conv3d +``` + +## CPU或GPU与NPU精度数据比对 + +### 总体说明 + +- 本节主要介绍CPU或GPU与NPU精度数据比对的函数以及示例。 + +- 比对函数均通过单独创建精度比对脚本执行,可支持单卡和多卡场景的精度数据比对。 + +- 工具性能:比对数据量较小时(参考值单份文件小于10GB),参考比对速度0.1GB/s;比对数据量较大时,参考比对速度0.3GB/s。 + 推荐环境配置:独占环境,CPU核心数192,固态硬盘(IO速度参考:固态硬盘 > 500MB/s,机械硬盘60 ~ 170MB/s)。 + + 用户环境性能弱于标准约束或非独占使用的比对速度酌情向下浮动。比对速度的计算方式:两份比对文件大小/比对耗时。 + +### 约束 + +- NPU自研API,在CPU或GPU若没有对应的API,该API的dump数据不比对。 + +- NPU与CPU或GPU的计算结果误差可能会随着模型的执行不断累积,最终会出现同一个API因为输入的数据差异较大而无法比对的情况。 + +- CPU或GPU与NPU中两个相同的API会因为调用次数不同导致无法比对或比对到错误的API,不影响整体运行,该API忽略。 + +### compare_distributed + +**功能说明** + +将CPU或GPU与NPU的dump文件进行比对,支持单卡和多卡,可同时比对多卡的dump数据。多机场景需要每个设备单独执行比对操作。可自动检索和匹配对应卡和进程所dump的数据文件,再调用compare进行比对。单机单卡时与compare函数二选一。 + +**函数原型** + +```python +compare_distributed(npu_dump_dir, bench_dump_dir, output_path, **kwargs) +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| -------------- | ------------------------------------------------------------ | -------- | +| npu_dump_dir | 配置NPU环境下的dump目录。dump数据目录须指定到step级。参数示例:'./npu_dump/ptdbg_dump_v4.0/step0'。register_hook方式可通过set_dump_path函数的dump_tag参数修改该目录名称。 | 是 | +| bench_dump_dir | 配置CPU、GPU或NPU环境下的dump目录。参数示例:'./gpu_dump/ptdbg_dump_v4.0/step0'。register_hook方式可通过set_dump_path函数的dump_tag参数修改该目录名称。 | 是 | +| output_path | 配置比对结果csv文件存盘目录。需要预先创建output_path目录。参数示例:'./output'。文件名称基于时间戳自动生成,格式为:`compare_result_rank{npu_ID}-rank{cpu/gpu/npu_ID}_{timestamp}.csv`。 | 是 | +| **kwargs | 支持compare的所有可选参数。 | 否 | + +**函数示例** + +创建比对脚本,例如compare_distributed.py,拷贝如下代码,具体参数请根据实际环境修改。 + +```python +from ptdbg_ascend import * +compare_distributed('./npu_dump/ptdbg_dump_v4.0/step0', './gpu_dump/ptdbg_dump_v4.0/step0', './output') +``` + +dump数据目录须指定到step级。 + +### compare + +**功能说明** + +将CPU或GPU与NPU的dump文件进行比对,仅支持单机单卡。 + +**函数原型** + +```python +compare(input_param, output_path, stack_mode=False, auto_analyze=True, fuzzy_match=False) +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| ------------ | ------------------------------------------------------------ | -------- | +| input_param | 配置dump数据文件及目录。配置参数包括:
- "npu_pkl_path":指定NPU dump目录下的.pkl文件。参数示例:"npu_pkl_path": "./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl"。必选。
- "bench_pkl_path":指定CPU、GPU或NPU dump目录下的.pkl文件。参数示例:"bench_pkl_path": "./gpu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl"。必选。
- "npu_dump_data_dir":"指定NPU dump目录下的dump数据目录。参数示例:"npu_dump_data_dir": "./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump"。可选,仅比对pkl文件时不选。
- "bench_dump_data_dir":"指定CPU、GPU或NPU dump目录下的dump数据目录。参数示例:"npu_dump_data_dir": "./gpu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump"。可选,仅比对pkl文件时不选。
- "is_print_compare_log":配置是否开启日志打屏。可取值True或False。可选。 | 是 | +| output_path | 配置比对结果csv文件存盘目录。参数示例:'./output'。文件名称基于时间戳自动生成,格式为:`compare_result_{timestamp}.csv`。 | 是 | +| stack_mode | 配置stack_mode的开关。仅当dump数据时配置debugger.configure_hook或set_dump_switch的mode="api_stack"时需要开启。参数示例:stack_mode=True,默认为False。 | 否 | +| auto_analyze | 自动精度分析,开启后工具自动针对比对结果进行分析,识别到第一个精度不达标节点(在比对结果文件中的“Accuracy Reached or Not”列显示为No),并给出问题可能产生的原因(打屏展示并生成advisor_{timestamp}.txt文件)。可取值True或False,参数示例:auto_analyze=False,默认为True。 | 否 | +| fuzzy_match | 模糊匹配。开启后,对于网络中同一层级且命名仅调用次数不同的API,可匹配并进行比对。可取值True或False,参数示例:fuzzy_match=True,默认为False。 | 否 | + +**函数示例** + +单机单卡场景下创建比对脚本,例如compare.py,拷贝如下代码,具体参数请根据实际环境修改。 + +```python +from ptdbg_ascend import * +dump_result_param={ +"npu_pkl_path": "./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl", +"bench_pkl_path": "./gpu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl", +"npu_dump_data_dir": "./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump", +"bench_dump_data_dir": "./gpu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump", +"is_print_compare_log": True +} +compare(dump_result_param, output_path="./output", stack_mode=True) +``` + +### pkl文件比对 + +若使用**compare**或**compare_distributed**函数创建的比对脚本中,input_param参数只配置了npu_pkl_path和bench_pkl_path或使用summary_only方式dump时,可以进行pkl文件的比对,此时比对dump.pkl文件中的统计值,开启后的比对结果文件生成Max diff、Min diff和Mean diff,表示NPU dump数据中API的输入或输出与标杆数据输入或输出的最大最小平均值的差。可以通过该值判断API是否存在精度问题:当某个API的输入和输出的Max diff、Min diff、Mean diff和L2norm diff均为0或无限趋于0,那么可以判断该API无精度问题,反之则可能存在精度问题。 + +**比对脚本示例** + +```python +from ptdbg_ascend import compare +dump_result_param={ +"npu_pkl_path": "./npu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl", +"bench_pkl_path": "./gpu_dump/ptdbg_dump_v4.0/step0/rank0/api_stack_dump.pkl", +"is_print_compare_log": True +} +compare(dump_result_param, output_path="./output", stack_mode=True) +``` + +### parse + +**功能说明** + +解析并提取dump信息中的堆栈信息及数据统计信息。 + +**函数原型** + +```python +parse(pkl_file, moudule_name_prefix) +``` + +**参数说明** + +| 参数名 | 说明 | 是否必选 | +| ------------------- | ------------------------------------------------------------ | -------- | +| pkl_file | 指定dump数据文件中的pkl文件名。参数示例:"./npu_dump/ptdbg_dump_v4.0/step0/rank0/dump.pkl"。 | 是 | +| moudule_name_prefix | 指定待提取的API接口前缀。参数示例:"Torch_norm_1_forward"。 | 是 | + +**函数示例** + +创建堆栈信息及数据统计信息提取脚本,例如parse.py,拷贝如下代码,具体参数请根据实际环境修改。 + +```python +from ptdbg_ascend import * +parse("./npu_dump/ptdbg_dump_v4.0/step0/rank0/dump.pkl", "Torch_batch_normal_1_forward") +``` + +### 计算精度评价指标 + +PyTorch精度比对是以CPU或GPU的计算结果为标杆,计算Cosine(余弦相似度)、MaxAbsErr(最大绝对误差)和MaxRelativeErr(最大相对误差),根据这两个结果判断API在运行时是否存在精度问题。 + +计算精度评价指标: + +1. Cosine:通过计算两个向量的余弦值来判断其相似度,数值越接近于1说明计算出的两个张量越相似,实际可接受阈值为大于0.99。在计算中可能会存在nan,主要由于可能会出现其中一个向量为0。 + +2. MaxAbsErr:当最大绝对误差越接近0表示其计算的误差越小,实际可接受阈值为小于0.001。 + +3. MaxRelativeErr:当最大相对误差越接近0表示其计算的误差越小。 + + 当dump数据中存在0或Nan时,比对结果中最大相对误差则出现inf或Nan的情况,属于正常现象。 + +4. One Thousandth Err Ratio(双千分之一)、Five Thousandths Err Ratio(双千分之五)精度指标:是指NPU的Tensor中的元素逐个与对应的标杆数据对比,相对误差大于千分之一、千分之五的比例占总元素个数的比例小于千分之一、千分之五。该数据仅作为精度下降趋势的参考,并不参与计算精度是否通过的判定。 + +精度比对结果csv文件中只需要通过Accuracy Reached or Not来判断计算精度是否达标,判断标准如下: + +1. Cosine < 0.99 且 MaxAbsError > 0.001时,精度不达标,标记为“No”。 +2. Cosine < 0.9,精度不达标,标记为“No”。 +3. MaxAbsError > 1,精度不达标,标记为“No”。 +5. 
+
+## ptdbg_ascend.parse数据解析功能
+
+ptdbg_ascend.parse为命令行交互式界面解析工具,提供更多的数据解析功能并展示解析结果。
+
+使用场景:本工具主要用于比对前后两次NPU ACL层级dump数据的一致性。
+
+### 进入parse交互式界面
+
+安装ptdbg_ascend工具后,可以通过使用命令 **python -m ptdbg_ascend.parse** 进入交互式界面,如下所示:
+
+```bash
+python -m ptdbg_ascend.parse
+Parse >>>
+```
+
+可在parse的界面中执行Shell命令,以及如下场景的相关解析命令:
+
+- 支持指定ACL层级算子数据比对。
+- 支持指定ACL层级算子数据转换及展示。
+- 支持交互式指定pkl文件中API对应dump数据查看。
+- 支持API进行可选层级比对和打印(统计级和像素级)。
+
+Ctrl+C可以退出parse交互式界面。若不退出parse交互式界面,又需要执行与parse内置命令冲突的Shell命令,则可使用run命令,即在相关命令前加上run前缀,如下示例:
+
+```bash
+python -m ptdbg_ascend.parse
+Parse >>> run vim cli.py
+Parse >>> vim cli.py
+```
+
+以上各场景详细介绍请参见下文章节。
+
+### ACL层级算子数据批量转换
+
+本功能会将原有待比对dump数据目录下的dump数据按照算子名和时间戳进行梳理并分类,之后再将dump数据转换为npy文件。
+
+依赖:CANN包中的msaccucmp工具,需要安装Ascend-CANN-toolkit,详见《[CANN 软件安装指南](https://gitee.com/link?target=https%3A%2F%2Fwww.hiascend.com%2Fdocument%2Fdetail%2Fzh%2Fcanncommercial%2F700%2Fenvdeployment%2Finstg%2Finstg_0001.html)》。
+
+输入以下命令进行数据转换。
+
+```bash
+cad -m my_dump_path [-out output_path] [-asc msaccucmp_path]
+```
+
+| 参数名称 | 说明                                                         | 是否必选 |
+| -------- | ------------------------------------------------------------ | -------- |
+| -m       | 待转换ACL dump数据目录。需要指定到ACL dump数据的deviceid级目录。 | 是       |
+| -out     | 结果输出目录,须指定已存在的目录,默认为./parse_data/acl_batch_convert。未指定时保存在默认路径下,转换结束后会打印log提示输出结果存放路径。 | 否       |
+| -asc     | 指定msaccucmp路径,默认路径为:/usr/local/Ascend/ascend-toolkit/latest/tools/operator_cmp/compare/msaccucmp.py。 | 否       |
+
+**示例**
+
+```bash
+# 传入待比对数据目录
+Parse >>> cad -m /home/xxx/my_dump_path/20000124003856/0
+# 转换结果打印
+......
+╭──────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+# 转换前的dump文件
+│ SrcFile: /home/xxx/my_dump_path/20000124003856/0/272/TransData.trans_TransData_22.112.21.948645536672764 │
+# 转换后的npy文件
+│  - TransData.trans_TransData_22.112.21.948645536672764.output.0.npy                                      │
+│  - TransData.trans_TransData_22.112.21.948645536672764.input.0.npy                                       │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+......
+[INFO] The comparison result have been written to "./parse_data/acl_batch_convert".
+```
+
+输出结果:
+
+原dump数据目录:
+
+```bash
+├── /home/xxx/my_dump_path/20000124003856/0/
+│   ├── 272
+│   │   ├── {op_type}.{op_name}.{task_id}.{stream_id}.{timestamp}
+│   │   ...
+│   ├── 512
+│   ...
+```
+
+转换后:
+
+```bash
+├── ./parse_data/acl_batch_convert/{timestamp}
+│   ├── {op_name1}
+│   │   ├── {timestamp1}
+│   │   |   ├── {op_type}.{op_name}.{task_id}.{stream_id}.{timestamp}.{input/output}.{参数序号}.npy
+│   │   |   │ ...
+│   │   ├── {timestamp2}
+│   │   |   ...
+│   ├── {op_name2}
+│   ├── ...
+```
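+
+转换完成后,生成的npy文件可直接用NumPy加载做快速检查。下面是一个示意脚本(非parse内置功能,目录为上文cad命令的默认输出路径,仅供参考),遍历转换结果并打印每个npy文件的基本统计信息:
+
+```python
+import os
+
+import numpy as np
+
+root = "./parse_data/acl_batch_convert"  # cad命令的默认输出目录
+for dirpath, _, filenames in os.walk(root):
+    for name in sorted(filenames):
+        if not name.endswith(".npy"):
+            continue
+        data = np.load(os.path.join(dirpath, name))
+        # 打印与pt命令类似的统计信息:shape、dtype、max、min、mean
+        print(f"{name}: shape={data.shape}, dtype={data.dtype}, "
+              f"max={data.max()}, min={data.min()}, mean={data.mean()}")
+```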
+
+### ACL层级算子数据比对
+
+本功能主要用于比对前后两次NPU ACL层级dump数据的一致性。
+
+本功能支持批量比对。若需要进行批量比对,需要先将两份待比对的NPU ACL层级dump数据进行“**ACL层级算子数据批量转换**”,可以使两份数据更好地匹配;若直接进行dump数据的比对,建议只比对单个dump数据文件。
+
+输入以下比对命令进行数据比对。
+
+```bash
+vc -m my_dump_path -g golden_dump_path [-out output_path]
+```
+
+| 参数名称 | 说明                                                         | 是否必选 |
+| -------- | ------------------------------------------------------------ | -------- |
+| -m       | 待比对ACL dump数据目录。如果比对单个算子,需要指定到ACL dump数据的model_id级目录;如果批量比对,则指定到cad转换后的timestamp级目录。 | 是       |
+| -g       | 标杆ACL dump数据目录。如果比对单个算子,需要指定到ACL dump数据的model_id级目录;如果批量比对,则指定到cad转换后的timestamp级目录。 | 是       |
+| -out     | 结果输出目录,须指定已存在的目录,默认为./parse_data/acl_batch_comapre。未指定时保存在默认路径下,比对结束后会打印log提示输出结果存放路径。 | 否       |
+
+输出结果:batch_compare_{timestamp}.csv文件。
+
+**示例**
+
+```bash
+# 传入待比对数据目录以及标杆数据目录
+Parse >>> vc -m ./my_dump_path -g ./golden_data_path
+[INFO]Compare result is saved in : parse_data/acl_batch_comapre/batch_compare_1707271118.csv
+```
+
+### ACL算子数据的npy转换
+
+依赖:CANN包中的msaccucmp工具,需要安装Ascend-CANN-toolkit,详见《[CANN 软件安装指南](https://gitee.com/link?target=https%3A%2F%2Fwww.hiascend.com%2Fdocument%2Fdetail%2Fzh%2Fcanncommercial%2F700%2Fenvdeployment%2Finstg%2Finstg_0001.html)》。
+
+输入以下转换命令,将ACL层级dump数据转换为npy文件。
+
+```bash
+dc -n file_name/file_path [-f format] [-out output_path]
+```
+
+| 参数名称  | 说明                                                         | 是否必选 |
+| --------- | ------------------------------------------------------------ | -------- |
+| -n        | 需转换的dump数据文件或dump数据文件目录。 | 是 |
+| -f        | 开启format转换,指定该参数时需要配置format格式。当前内置的Format转换支持如下类型:
FRACTAL_NZ转换成NCHW
FRACTAL_NZ转换成NHWC
FRACTAL_NZ转换成ND
HWCN转换成FRACTAL_Z
HWCN转换成NCHW
HWCN转换成NHWC
NC1HWC0转换成HWCN
NC1HWC0转换成NCHW
NC1HWC0转换成NHWC
NCHW转换成FRACTAL_Z
NCHW转换成NHWC
NHWC转换成FRACTAL_Z
NHWC转换成HWCN
NHWC转换成NCHW
NDC1HWC0转换成NCDHW | 否 |
+| -out      | 结果输出目录。 | 否 |
+| -cmp_path | 指定msaccucmp路径,默认路径为:/usr/local/Ascend/ascend-toolkit/latest/tools/operator_cmp/compare/msaccucmp.py | 否 |
+
+若传入单个dump文件,则转换单个文件;若传入dump文件目录,则转换目录下所有dump文件。
+
+- 输出结果:npy文件。
+- 若指定-out参数需要用户传入输出路径,并且路径需要已存在。
+- 若未指定输出目录,则转换结束后将结果保存在默认目录“./parse_data/convert_result”中,并打印log提示输出结果存放路径及转换结果。
+
+- 输入以下命令,展示npy数据统计信息。
+
+  ```bash
+  pt -n file_path
+  ```
+
+  | 参数名称 | 说明          | 是否必选 |
+  | -------- | ------------- | -------- |
+  | -n       | npy文件路径。 | 是       |
+
+  打印统计信息:shape、dtype、max、min和mean。默认在npy文件路径下将该数据保存为txt文件。
+
+**示例1**
+
+```bash
+# 传入需转换的dump文件目录
+Parse >>> dc -n ./dump_data/
+......
+# 转换结果
+╭──────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ SrcFile: ./dump_data/
+│  - Add.fp32_vars_add_2fp32_vars_Relu_9.31.5.1636595794731103.input.0.npy                              │
+│  - Add.fp32_vars_add_1fp32_vars_Relu_6.24.5.1636595794631347.output.0.npy                             │
+│  - Add.fp32_vars_add_2fp32_vars_Relu_9.31.5.1636595794731103.input.1.npy                              │
+│  - Add.fp32_vars_add_1fp32_vars_Relu_6.24.5.1636595794631347.input.1.npy                              │
+│  - Add.fp32_vars_add_3fp32_vars_Relu_12.40.5.1636595794846124.input.1.npy                             │
+│  - Add.fp32_vars_add_1fp32_vars_Relu_6.24.5.1636595794631347.input.0.npy                              │
+│  - Add.fp32_vars_add_3fp32_vars_Relu_12.40.5.1636595794846124.input.0.npy                             │
+│  - Add.fp32_vars_add_2fp32_vars_Relu_9.31.5.1636595794731103.output.0.npy                             │
+│  - Add.fp32_vars_add_3fp32_vars_Relu_12.40.5.1636595794846124.output.0.npy                            │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
+```
+
+**示例2**
+
+```bash
+# 查看某个dump数据块的数据信息
+# 默认会将数据中的tensor保存成 txt
+Parse >>> pt -n ./parse_data/dump_convert/Add.fp32_vars_add_1fp32_vars_Relu_6.24.5.1636595794631347.output.0.npy
+......
+# 打印统计信息
+[Shape: (1, 16, 56, 56, 16)] [Dtype: float16] [Max: 452.0] [Min: -408.5] [Mean: -3.809]
+Path: ./parse_data/dump_convert/Add.fp32_vars_add_1fp32_vars_Relu_6.24.5.1636595794631347.output.0.npy
+TextFile:./parse_data/dump_convert/Add.fp32_vars_add_1fp32_vars_Relu_6.24.5.1636595794631347.output.0.npy.txt
+```
+
+### pkl文件中指定API的dump数据信息查看
+
+输入以下命令,解析并输出pkl文件中指定API的统计信息。
+
+```bash
+pk -f pkl_path -n api_name
+```
+
+| 参数名称 | 说明              | 是否必选 |
+| -------- | ----------------- | -------- |
+| -f       | 指定pkl文件路径。 | 是       |
+| -n       | 指定API名称。     | 是       |
+
+- 输出结果:打印统计信息(shape、dtype、max、min和mean)。
+- 若pkl文件中存在相应的堆栈信息,则会打印堆栈信息。
+
+**示例**
+
+```bash
+# 传入pkl文件及api名称
+Parse >>> pk -f ./torch_dump/ptdbg_v3.2/rank0/api_stack_dump.pkl -n Functional_conv2d_0_forward
+......
+# 打印统计信息及堆栈(pkl文件不包含堆栈则不会打印堆栈)
+
+Statistic Info:
+  [Functional_conv2d_0_forward_input.0][dtype: torch.float32][shape: [2, 1, 2, 2]][max: 1.576936960220337][min: -0.9757485389709473][mean: 0.4961632490158081]
+  [Functional_conv2d_0_forward_input.1][dtype: torch.float32][shape: [2, 1, 2, 2]][max: 0.20064473152160645][min: -0.47102075815200806][mean: -0.20796933770179749]
+  [Functional_conv2d_0_forward_input.2][dtype: torch.float32][shape: [2]][max: 0.17380613088607788][min: -0.16853803396224976][mean: 0.0026340484619140625]
+  [Functional_conv2d_0_forward_output][dtype: torch.float32][shape: [2, 2, 1, 1]][max: 0.02364911139011383][min: -1.762906551361084][mean: -0.6710853576660156]
+```
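+
+除pk命令外,也可以直接用Python粗略查看pkl文件内容:dump生成的pkl实际为逐行JSON文本,每行对应一条API数据记录(本PR中is_md5_compare即按行json.loads读取)。下面是一个示意脚本(非工具源码,记录内各字段的具体顺序与含义以实际文件为准),按API名称前缀过滤并打印原始记录:
+
+```python
+import json
+
+pkl_path = "./torch_dump/ptdbg_v3.2/rank0/api_stack_dump.pkl"  # 上文示例中的pkl路径
+api_prefix = "Functional_conv2d_0_forward"
+
+with open(pkl_path, "r") as f:
+    for raw_line in f:
+        record = json.loads(raw_line)
+        # 每条记录的第一个元素为API数据名称,如 Functional_conv2d_0_forward_input.0
+        if isinstance(record, list) and record and str(record[0]).startswith(api_prefix):
+            print(record)
+```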
+
+### API可选层级比对
+
+输入以下命令,进行统计级和像素级比对。
+
+```bash
+cn -m my_data*.npy -g golden*.npy [-p num] [-al atol] [-rl rtol]
+```
+
+- 统计级比对:对tensor整体进行余弦值及相对误差的计算。
+- 像素级比对:对输入的两个npy文件进行逐元素比对。若两个tensor对应元素的相对误差或绝对误差大于**误差阈值**(-al和-rl配置)则被标记为错误数据。
+
+| 参数名称 | 说明                                            | 是否必选 |
+| -------- | ----------------------------------------------- | -------- |
+| -m       | 待比对数据。                                    | 是       |
+| -g       | 标杆数据。                                      | 是       |
+| -p       | 设置比对结束后打印错误元素的个数,默认值20。    | 否       |
+| -al      | 判定数据存在精度问题的绝对误差阈值,默认0.001。 | 否       |
+| -rl      | 判定数据存在精度问题的相对误差阈值,默认0.001。 | 否       |
+| -s       | 将npy文件保存成txt文件,用于查看,默认开启。    | 否       |
+
+输出结果:
+
+- 统计级比对结果。
+- 两个文件的统计信息(shape、dtype、max、min和mean)。
+- 错误数据打印表格。
+
+**示例**
+
+```bash
+# 对比两个tensor的数据
+Parse >>> cn -m Add.InceptionV3_InceptionV3_Mixed_7a_Branch_0_add_3.323.1619494134703053.output.0.npy -g InceptionV3_InceptionV3_Mixed_7a_Branch_0_add_3.0.1619492699305998.npy -p 10 -s -al 0.002 -rl 0.005
+                         Error Item Table                                                      Top Item Table
+┏━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┓ ┏━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
+┃ Index ┃ Left          ┃ Right        ┃ Diff         ┃ ┃ Index ┃ Left        ┃ Right       ┃ Diff          ┃
+┡━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━┩ ┡━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
+│ 155   │ 0.024600908   │ 0.022271132  │ 0.002329776  │ │ 0     │ -0.9206961  │ -0.9222216  │ 0.0015255213  │
+│ 247   │ 0.015752593   │ 0.017937578  │ 0.0021849852 │ │ 1     │ -0.6416973  │ -0.64051837 │ 0.0011789203  │
+│ 282   │ -0.0101207765 │ -0.007852031 │ 0.0022687456 │ │ 2     │ -0.35383835 │ -0.35433492 │ 0.0004965663  │
+│ 292   │ 0.019581757   │ 0.02240482   │ 0.0028230622 │ │ 3     │ -0.18851271 │ -0.18883198 │ 0.00031927228 │
+│ 640   │ -0.06593232   │ -0.06874806  │ 0.0028157383 │ │ 4     │ -0.43508735 │ -0.43534422 │ 0.00025686622 │
+│ 1420  │ 0.09293677    │ 0.09586689   │ 0.0029301196 │ │ 5     │ 1.4447614   │ 1.4466647   │ 0.0019032955  │
+│ 1462  │ -0.085207745  │ -0.088047795 │ 0.0028400496 │ │ 6     │ -0.3455438  │ -0.3444429  │ 0.0011008978  │
+│ 1891  │ -0.03433288   │ -0.036525503 │ 0.002192624  │ │ 7     │ -0.6560242  │ -0.6564579  │ 0.0004336834  │
+│ 2033  │ 0.06828873    │ 0.07139922   │ 0.0031104907 │ │ 8     │ -2.6964858  │ -2.6975214  │ 0.0010356903  │
+│ 2246  │ -0.06376442   │ -0.06121233  │ 0.002552092  │ │ 9     │ -0.73746175 │ -0.73650354 │ 0.00095820427 │
+└───────┴───────────────┴──────────────┴──────────────┘ └───────┴─────────────┴─────────────┴───────────────┘
+╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ Left:                                                                                                                                   |
+│  |- NpyFile: ./dump/temp/decode/Add.InceptionV3_InceptionV3_Mixed_7a_Branch_0_add_3.323.1619494134703053.output.0.npy                  |
+│  |- TxtFile: ./dump/temp/decode/Add.InceptionV3_InceptionV3_Mixed_7a_Branch_0_add_3.323.1619494134703053.output.0.npy.txt              |
+│  |- NpySpec: [Shape: (32, 8, 8, 320)] [Dtype: float32] [Max: 5.846897] [Min: -8.368301] [Mean: -0.72565556]                            |
+│ DstFile:                                                                                                                                │
+│  |- NpyFile: ./dump/cpu/InceptionV3_InceptionV3_Mixed_7a_Branch_0_add_3.0.1619492699305998.npy                                        |
+│  |- TxtFile: ./dump/cpu/InceptionV3_InceptionV3_Mixed_7a_Branch_0_add_3.0.1619492699305998.npy.txt                                    |
+│  |- NpySpec: [Shape: (32, 8, 8, 320)] [Dtype: float32] [Max: 5.8425903] [Min: -8.374472] [Mean: -0.7256237]                           │
+│ NumCnt: 655360                                                                                                                         │
+│ AllClose: False                                                                                                                        │
+│ CosSim: 0.99999493                                                                                                                     │
+│ ErrorPer: 0.023504638671875 (rl= 0.005, al= 0.002)                                                                                     │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+```
+
+## FAQ
+
+[FAQ](https://gitee.com/ascend/att/blob/master/debug/accuracy_tools/ptdbg_ascend/doc/FAQ.md)
diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/common/file_check_util.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/common/file_check_util.py
index aaffc38c7af9acdfe486a945cbee73b145da6470..61fc4ddf94c8e295b08c395f21776ac0f05f5c61 100644
--- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/common/file_check_util.py
+++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/common/file_check_util.py
@@ -247,8 +247,8 @@ def _user_interactive_confirm(message):
 def check_path_owner_consistent(path):
     file_owner = os.stat(path).st_uid
     if file_owner != os.getuid():
-        _user_interactive_confirm('The file path %s may be insecure because is does not belong to you.'
-                                  'Do you want to continue?' % path)
+        print_error_log('The file path %s may be insecure because it does not belong to you.' % path)
+        raise FileCheckException(FileCheckException.INVALID_PERMISSION_ERROR)
 
 
 def check_path_pattern_vaild(path):
@@ -300,13 +300,12 @@ def create_directory(dir_path):
         when invalid data throw exception
     """
     dir_path = os.path.realpath(dir_path)
-    if not os.path.exists(dir_path):
-        try:
-            os.makedirs(dir_path, mode=FileCheckConst.DATA_DIR_AUTHORITY)
-        except OSError as ex:
-            print_error_log(
-                'Failed to create {}.Please check the path permission or disk space .{}'.format(dir_path, str(ex)))
-            raise FileCheckException(FileCheckException.INVALID_PATH_ERROR) from ex
+    try:
+        os.makedirs(dir_path, mode=FileCheckConst.DATA_DIR_AUTHORITY, exist_ok=True)
+    except OSError as ex:
+        print_error_log(
+            'Failed to create {}. Please check the path permission or disk space. {}'.format(dir_path, str(ex)))
+        raise FileCheckException(FileCheckException.INVALID_PATH_ERROR) from ex
 
 
 def change_mode(path, mode):
diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/common/utils.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/common/utils.py
index f732a38513e26d9f9de404d9243fe4b2274c41c4..01759056764959d8ac9bbef3b2b19f41ddb961fa 100644
--- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/common/utils.py
+++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/common/utils.py
@@ -25,6 +25,7 @@ import sys
 import time
 import zlib
 import json
+from json.decoder import JSONDecodeError
 from datetime import datetime, timezone
 from functools import wraps
 from pathlib import Path
@@ -103,6 +104,8 @@ class Const:
     FILE_NAME_LENGTH = 255
     DIRECTORY_LENGTH = 4096
     DISTRIBUTED_PREFIX_LENGTH = 60
+    SUMMARY_COLUMN_NUM = 6
+    STACK_COLUMN_NUM = 2
     # env dump path
     ASCEND_WORK_PATH = "ASCEND_WORK_PATH"
     DUMP_DIR = "dump_data"
@@ -396,7 +399,11 @@ def is_summary_compare(input_param):
 
 def is_md5_compare(input_parma):
     with FileOpen(input_parma.get("npu_pkl_path"), "r") as npu_pkl:
-        line = json.loads(npu_pkl.readline())
+        pkl_lines = npu_pkl.readline()
+        try:
+            line = json.loads(pkl_lines)
+        except 
JSONDecodeError as err: + raise CompareException(CompareException.INVALID_FILE_ERROR) from err if len(line) < 3: return False if line[2]: @@ -698,7 +705,7 @@ def parameter_adapter(func): indices = getattr(torch._C._VariableFunctionsClass, "nonzero")(indices, as_tuple=True) return getattr(torch._C._TensorBase, "__getitem__")(input_tensor, indices) elif indices.dtype != torch.bool: - if len(indices.shape) == 1: + if not indices.shape or len(indices.shape) == 1: return func(self, input_tensor, indices.tolist()) elif len(indices.shape) == 2: result = [func(self, input_tensor, index) for index in indices.tolist()] diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/compare/acc_compare.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/compare/acc_compare.py index bb39809891611ea0b3d17e660f5841849195b1ff..68fe6c0d7ea6442bc01dfea19cbc97295e174016 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/compare/acc_compare.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/compare/acc_compare.py @@ -237,9 +237,15 @@ def merge_tensor(tensor_list): for tensor in tensor_list: if tensor[0].find("stack_info") != -1: + if len(tensor) != Const.STACK_COLUMN_NUM: + print_error_log(f"This stack_info data is not complete. {tensor}") + raise CompareException(CompareException.INVALID_DATA_ERROR) op_dict["stack_info"].append(tensor[1]) break op_dict["op_name"].append(tensor[0]) + if len(tensor) != Const.SUMMARY_COLUMN_NUM: + print_error_log(f"This summary data is not complete. {tensor}") + raise CompareException(CompareException.INVALID_DATA_ERROR) if tensor[0].find("input") != -1: op_dict["input_struct"].append((tensor[3], tensor[4], tensor[2])) elif tensor[0].find("output") != -1: diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/debugger/debugger_config.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/debugger/debugger_config.py index b08b7d661117a4341f6480d1d92cc5094f7a5d60..59d41bea94ca1fd8b3fd1289d28747bb979fe513 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/debugger/debugger_config.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/debugger/debugger_config.py @@ -7,7 +7,7 @@ class DebuggerConfig: self.dump_path = dump_path self.hook_name = hook_name self.rank = rank - self.step = step or [] + self.step = step if step is not None else [] self.check() if self.step: self.step.sort() @@ -35,4 +35,3 @@ class DebuggerConfig: for s in self.step: if not isinstance(s, int): raise ValueError(f"step element {s} should be int") - diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/debugger/precision_debugger.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/debugger/precision_debugger.py index f71a9d4c545fed7138c88eb87e91bf19d83609b1..d9bbf6f4060e29fab0164c66ccbd121f22944c31 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/debugger/precision_debugger.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/debugger/precision_debugger.py @@ -1,8 +1,9 @@ import os +from concurrent.futures import ThreadPoolExecutor import torch from ..common.utils import Const, check_switch_valid, generate_compare_script, check_is_npu, print_error_log, \ CompareException, print_warn_log -from ..dump.dump import DumpUtil, acc_cmp_dump, write_to_disk, get_pkl_file_path, reset_module_count, GLOBAL_THREAD_POOL +from ..dump.dump import DumpUtil, acc_cmp_dump, write_to_disk, get_pkl_file_path, reset_module_count from ..dump.utils import 
set_dump_path, set_dump_switch_print_info, generate_dump_path_str, \ set_dump_switch_config, set_backward_input from ..overflow_check.utils import OverFlowUtil @@ -32,7 +33,6 @@ class PrecisionDebugger: err_msg = "You must provide hook_name argument to PrecisionDebugger\ when config is not provided." raise Exception(err_msg) - step = step or [] self.config = DebuggerConfig(dump_path, hook_name, rank, step) self.configure_hook = self.get_configure_hook(self.config.hook_name) self.configure_hook() @@ -58,9 +58,9 @@ class PrecisionDebugger: if mode == "acl" and self.model is not None: print_error_log("Init dump does not support ACL dump mode.") raise CompareException(CompareException.INVALID_DUMP_MODE) - scope = scope or [] - api_list = api_list or [] - backward_input = backward_input or [] + scope = scope if scope is not None else [] + api_list = api_list if api_list is not None else [] + backward_input = backward_input if backward_input is not None else [] if summary_only: if summary_mode is not None: @@ -108,6 +108,7 @@ class PrecisionDebugger: register_hook_core(instance.hook_func, instance.model) instance.first_start = False DumpUtil.dump_switch = "ON" + DumpUtil.dump_thread_pool = ThreadPoolExecutor() OverFlowUtil.overflow_check_switch = "ON" dump_path_str = generate_dump_path_str() set_dump_switch_print_info("ON", DumpUtil.dump_switch_mode, dump_path_str) @@ -130,8 +131,8 @@ class PrecisionDebugger: dump_path_str = generate_dump_path_str() set_dump_switch_print_info("OFF", DumpUtil.dump_switch_mode, dump_path_str) write_to_disk() - if DumpUtil.is_single_rank: - GLOBAL_THREAD_POOL.shutdown(wait=True) + if DumpUtil.is_single_rank and DumpUtil.dump_thread_pool: + DumpUtil.dump_thread_pool.shutdown(wait=True) if check_is_npu() and DumpUtil.dump_switch_mode in [Const.ALL, Const.API_STACK, Const.LIST, Const.RANGE, Const.API_LIST]: generate_compare_script(DumpUtil.dump_data_dir, get_pkl_file_path(), DumpUtil.dump_switch_mode) diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/dump/dump.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/dump/dump.py index a6b769ff2a41955aa6363f08d086a091335bf5f1..2e49a9743b6a317ee401b2b8f0b31fb2ea68c07a 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/dump/dump.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/dump/dump.py @@ -47,7 +47,6 @@ pkl_name = "" rank = os.getpid() + 100000 multi_output_apis = ["_sort_", "npu_flash_attention"] module_count = {} -GLOBAL_THREAD_POOL = ThreadPoolExecutor() class APIList(list): @@ -151,14 +150,15 @@ def dump_tensor(x, prefix, dump_step): if x.is_meta: print_info_log(f"Meta tensor {prefix} is skipped.") return - if x.numel() == 0 or len(x.shape) == 0 or not x.is_floating_point(): + x_clone = x.clone().detach() + if x_clone.numel() == 0 or len(x_clone.shape) == 0 or not x_clone.is_floating_point(): if DumpUtil.dump_filter_switch == Const.OFF: - data_info = get_not_float_tensor_info(x) + data_info = get_not_float_tensor_info(x_clone) dump_data_by_rank_count(dump_step, prefix, data_info) else: return else: - data_info = get_float_tensor_info(x) + data_info = get_float_tensor_info(x_clone) dump_data_by_rank_count(dump_step, prefix, data_info) elif DumpUtil.dump_filter_switch == Const.OFF: @@ -186,12 +186,12 @@ def dump_data(prefix, data_info): def thread_dump_data(prefix, data_info): - GLOBAL_THREAD_POOL.submit(dump_data, prefix, data_info) + DumpUtil.dump_thread_pool.submit(dump_data, prefix, data_info) def dump_data_by_rank_count(dump_step, prefix, 
data_info): print_info_log(f"ptdbg is analyzing rank{rank} api: {prefix}" + " " * 10, end='\r') - if DumpUtil.is_single_rank: + if DumpUtil.is_single_rank and DumpUtil.dump_thread_pool: thread_dump_data(prefix, data_info) else: dump_data(prefix, data_info) @@ -338,6 +338,7 @@ def forward_acl_dump(module, module_name): global backward_init_status if not forward_init_status and not backward_init_status: forward_init_status = True + torch_npu.npu.synchronize() torch_npu.npu.init_dump() torch_npu.npu.set_dump(DumpUtil.dump_config) torch_npu.npu.synchronize() @@ -347,6 +348,7 @@ def forward_acl_dump(module, module_name): module.forward(*module.input_args, **module.input_kwargs) torch_npu.npu.synchronize() torch_npu.npu.finalize_dump() + torch_npu.npu.synchronize() del module.input_args del module.input_kwargs forward_init_status = False @@ -404,7 +406,12 @@ def module_count_func(name, name_template): module_count[module_name][-1].append(module_count[module_name][0]) index = module_count[module_name][0] else: - index = module_count[module_name][-1].pop() + backward_stack = module_count[module_name][-1] if module_name in module_count else [] + if not backward_stack: + print_warn_log("The backward stack of {} is empty.".format(module_name)) + index = "abnormal" + else: + index = backward_stack.pop() return index diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/dump/utils.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/dump/utils.py index bb0ae82c4110d568e31cd21a261f5875548c8672..bea18501aefaba4916cba7ad3b7e455d0c29033c 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/dump/utils.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/dump/utils.py @@ -87,6 +87,7 @@ class DumpUtil(object): need_replicate = False summary_mode = "all" is_single_rank = None + dump_thread_pool = None @staticmethod diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/hook_module/wrap_distributed.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/hook_module/wrap_distributed.py index ed28dbe5d6712a76e5d0fc249a24911cf3e50b63..48e92faa1b9294c7905be86e391fa54a1f7b153f 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/hook_module/wrap_distributed.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/hook_module/wrap_distributed.py @@ -16,7 +16,7 @@ """ import os - +from functools import wraps import torch.distributed as dist import yaml @@ -60,9 +60,11 @@ class DistributedOPTemplate(HOOKModule): def wrap_distributed_op(op_name, hook): + @wraps(DistributedOPTemplate) def distributed_op_template(*args, **kwargs): return DistributedOPTemplate(op_name, hook)(*args, **kwargs) + distributed_op_template.__name__ = op_name return distributed_op_template diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/online_dispatch/utils.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/online_dispatch/utils.py index 57f9258feb50a8f609ae2ee207824afcf02a4cdb..82fc275da3b53fd4796a2e7b9cd74d5f5dd9df1a 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/online_dispatch/utils.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/online_dispatch/utils.py @@ -113,6 +113,8 @@ def data_to_cpu(data, deep, data_cpu): elif isinstance(data, torch._C.device): return cpu_device else: + if deep == 0: + data_cpu.append(data) return data diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/overflow_check/info_dump.py 
b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/overflow_check/info_dump.py index 31a207a3f80a9e1b090c8c78bb55c8d1c95392c9..ab83c3f3e560a3a036629368f7a9f9c2371000c7 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/overflow_check/info_dump.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/overflow_check/info_dump.py @@ -8,7 +8,7 @@ import numpy as np import torch from ..common.utils import print_error_log, get_time -from ..common.file_check_util import FileOpen +from ..common.file_check_util import FileOpen, FileCheckConst, change_mode special_torch_object = ["memory_format"] @@ -210,6 +210,7 @@ def write_json(file_path, data, indent=None): if not os.path.exists(file_path): with FileOpen(file_path, 'w') as f: f.write("{\n}") + change_mode(file_path, FileCheckConst.DATA_FILE_AUTHORITY) lock.acquire() with FileOpen(file_path, 'a+') as f: fcntl.flock(f, fcntl.LOCK_EX) diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/parse_tool/lib/utils.py b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/parse_tool/lib/utils.py index aa69a4780c96d942853696d4a4cc63f2447d393f..fffe022e622dac941a0de254adb871c9e6c4eaf2 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/parse_tool/lib/utils.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/ptdbg_ascend/parse_tool/lib/utils.py @@ -31,6 +31,7 @@ from ...common.file_check_util import change_mode, check_other_user_writable,\ check_path_executable, check_path_owner_consistent from ...common.file_check_util import FileCheckConst from ...common.file_check_util import FileOpen +from ...common.utils import check_file_or_directory_path try: from rich.traceback import install @@ -297,8 +298,7 @@ class Util: def check_npy_files_valid_in_dir(self, dir_path): for file_name in os.listdir(dir_path): file_path = os.path.join(dir_path, file_name) - if not self.check_path_valid(file_path): - return False + check_file_or_directory_path(file_path) _, file_extension = os.path.splitext(file_path) if not file_extension == '.npy': return False @@ -310,9 +310,14 @@ class Util: return md5_hash.hexdigest() def write_csv(self, data, filepath): + need_change_mode = False + if not os.path.exists(filepath): + need_change_mode = True with FileOpen(filepath, 'a') as f: writer = csv.writer(f) writer.writerows(data) + if need_change_mode: + change_mode(filepath, FileCheckConst.DATA_FILE_AUTHORITY) def deal_with_dir_or_file_inconsistency(self, output_path): if os.path.exists(output_path): diff --git a/debug/accuracy_tools/ptdbg_ascend/src/python/setup.py b/debug/accuracy_tools/ptdbg_ascend/src/python/setup.py index b357d97c986b2e167341b480fe394fc9656a73ea..fb5b8ff0007ca9e25b99720c0a30d6c5f01975cf 100644 --- a/debug/accuracy_tools/ptdbg_ascend/src/python/setup.py +++ b/debug/accuracy_tools/ptdbg_ascend/src/python/setup.py @@ -20,7 +20,7 @@ import stat from pathlib import Path import setuptools -VERSION = '5.0.T3' +VERSION = '5.0.T4' def generate_ptdbg_ascend_version(): diff --git a/debug/accuracy_tools/setup.py b/debug/accuracy_tools/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..886d230906476909b7e88eade5424e8d20aa883a --- /dev/null +++ b/debug/accuracy_tools/setup.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +# Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+
+from setuptools import setup, find_packages
+
+setup(
+    name='ascend_training_accuracy_tools',
+    version='0.0.1',
+    description='PyTorch precision comparison tools',
+    long_description='PyTorch precision comparison tools, including ptdbg_ascend and api_accuracy_checker',
+    packages=find_packages(),
+    install_requires=[
+        "wheel",
+        "numpy",
+        "pandas >= 1.3.5",
+        "pyyaml",
+        "rich",
+        "tqdm"
+    ],
+    include_package_data=True,
+    ext_modules=[],
+    zip_safe=False,
+    entry_points={
+        'console_scripts': ['atat=atat.atat:main'],
+    },)