From b9a0ae17970e5799113dd6275a3cb2ef080026f9 Mon Sep 17 00:00:00 2001 From: zcr1997110 <1412618721@qq.com> Date: Mon, 24 Jun 2024 15:03:25 +0800 Subject: [PATCH 1/3] update ads to mxDriving, update 2.1.0 to 6.0.RC2 --- .../built-in/autonoumous_driving/BEVFormer/README.md | 2 +- .../mmdet3d_plugin/bevformer/modules/decoder.py | 4 ++-- .../bevformer/modules/spatial_cross_attention.py | 4 ++-- .../bevformer/modules/temporal_self_attention.py | 4 ++-- .../built-in/autonoumous_driving/OpenPCDet/README.md | 6 +++--- .../pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py | 4 ++-- .../built-in/autonoumous_driving/SurroundOcc/README.md | 2 +- .../surroundocc/modules/spatial_cross_attention.py | 2 +- PyTorch/built-in/autonoumous_driving/UniAD/README.md | 2 +- .../UniAD/mmcv_need/multi_scale_deform_attn.py | 4 ++-- .../motion_head_plugin/motion_deformable_attn.py | 4 ++-- .../projects/mmdet3d_plugin/uniad/modules/decoder.py | 4 ++-- .../uniad/modules/spatial_cross_attention.py | 4 ++-- .../uniad/modules/temporal_self_attention.py | 4 ++-- .../cv/detection/GroundingDINO_for_Pytorch/README.md | 10 +++++----- .../groundingdino_npu/adapter_groundingdino.py | 4 ++-- .../groundingdino_npu/finetune_refcoco.sh | 4 ++-- .../groundingdino_npu/inference.sh | 4 ++-- .../autonoumous_driving/BEVDet_for_PyTorch/README.md | 6 +++--- .../mmdet3d/models/necks/view_transformer.py | 2 +- .../TPVFormer_for_PyTorch/README.md | 6 +++--- .../tpvformer04/modules/cross_view_hybrid_attention.py | 4 ++-- .../tpvformer04/modules/image_cross_attention.py | 4 ++-- .../tpvformer10/modules/cross_view_hybrid_attention.py | 4 ++-- .../tpvformer10/modules/image_cross_attention.py | 4 ++-- 25 files changed, 51 insertions(+), 51 deletions(-) diff --git a/PyTorch/built-in/autonoumous_driving/BEVFormer/README.md b/PyTorch/built-in/autonoumous_driving/BEVFormer/README.md index 538f630904..fad2b53bb0 100644 --- a/PyTorch/built-in/autonoumous_driving/BEVFormer/README.md +++ b/PyTorch/built-in/autonoumous_driving/BEVFormer/README.md @@ -74,7 +74,7 @@ BEVFormer 通过提取环视相机采集到的图像特征,并将提取的环 ``` pip install -r requirements.txt ``` - 6. 安装ADS加速库,并将环境变量添加至 test/env_npu.sh 文件中 + 6. 
安装mxDriving加速库,并将环境变量添加至 test/env_npu.sh 文件中 ## 准备数据集 diff --git a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/decoder.py b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/decoder.py index b6ae75e9f3..edd4a12c93 100644 --- a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/decoder.py +++ b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/decoder.py @@ -23,7 +23,7 @@ import math from mmcv.runner.base_module import BaseModule, ModuleList, Sequential from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning, to_2tuple) -import ads.common +import mx_driving.common def inverse_sigmoid(x, eps=1e-5): @@ -319,7 +319,7 @@ class CustomMSDeformableAttention(BaseModule): f' 2 or 4, but get {reference_points.shape[-1]} instead.') if torch.cuda.is_available() and value.is_cuda: - output = ads.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, + output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, sampling_locations, attention_weights) else: output = multi_scale_deformable_attn_pytorch( diff --git a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py index d55b8c9066..d3fefb8cac 100644 --- a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py +++ b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py @@ -20,7 +20,7 @@ import math from mmcv.runner import force_fp32 from mmcv.runner.base_module import BaseModule, ModuleList, Sequential from projects.mmdet3d_plugin.models.utils.bricks import run_time -import ads.common +import mx_driving.common bev_mask_global = torch.tensor([]).npu() indexes_global = None @@ -387,7 +387,7 @@ class MSDeformableAttention3D(BaseModule): if torch.cuda.is_available() and value.is_cuda: - output = ads.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, + output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, sampling_locations, attention_weights) else: output = multi_scale_deformable_attn_pytorch( diff --git a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py index 75c6216d2a..731702f61c 100644 --- a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py +++ b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py @@ -17,7 +17,7 @@ import math from mmcv.runner.base_module import BaseModule, ModuleList, Sequential from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning, to_2tuple) -import ads.common +import mx_driving.common @ATTENTION.register_module() @@ -236,7 +236,7 @@ class TemporalSelfAttention(BaseModule): f' 2 or 4, but get {reference_points.shape[-1]} instead.') if torch.cuda.is_available() and value.is_cuda: - output = ads.common.npu_multi_scale_deformable_attn_function(value, 
spatial_shapes, level_start_index, + output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, sampling_locations, attention_weights) else: diff --git a/PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md b/PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md index 40af6f9ded..681fb93ad9 100644 --- a/PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md +++ b/PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md @@ -137,12 +137,12 @@ cd ../ && pip install pytorch_scatter/dist/torch_scatter-*.whl python -c "import torch_scatter" ``` -#### 2.5 编译安装ads -参考ADS官方gitee仓README安装编译构建并安装ADS包:[参考链接](https://gitee.com/ascend/ads) +#### 2.5 编译安装mxDriving +参考mxDriving官方gitee仓README安装编译构建并安装mxDriving包:[参考链接](https://gitee.com/ascend/mxDriving) 【注意】安装完毕后建议运行以下命令,如无报错,证明安装无误,可继续安装流程 ```shell -python -c "import ads" +python -c "import mx_driving" ``` #### 2.6 编译安装OpenPCDet diff --git a/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py b/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py index 94f541d562..eef47a691f 100644 --- a/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py +++ b/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py @@ -37,8 +37,8 @@ def points_in_boxes_gpu(points, boxes): assert boxes.shape[2] == 7 and points.shape[2] == 3 batch_size, num_points, _ = points.shape - import ads.common - box_idxs_of_pts = ads.common.npu_points_in_box(boxes.contiguous().npu(), points.contiguous().npu()) + import mx_driving.common + box_idxs_of_pts = mx_driving.common.npu_points_in_box(boxes.contiguous().npu(), points.contiguous().npu()) return box_idxs_of_pts diff --git a/PyTorch/built-in/autonoumous_driving/SurroundOcc/README.md b/PyTorch/built-in/autonoumous_driving/SurroundOcc/README.md index 9f5f8f2c7e..669ead7ee1 100644 --- a/PyTorch/built-in/autonoumous_driving/SurroundOcc/README.md +++ b/PyTorch/built-in/autonoumous_driving/SurroundOcc/README.md @@ -110,7 +110,7 @@ bash replace_patch.sh --packages_path=location_path ``` -- 安装ADS加速库,安装方法参考[原仓](https://gitee.com/ascend/ads),安装后手动source环境变量或将其配置在test/env_npu.sh中。 +- 安装mxDriving加速库,安装方法参考[原仓](https://gitee.com/ascend/mxDriving),安装后手动source环境变量或将其配置在test/env_npu.sh中。 ### 准备数据集 diff --git a/PyTorch/built-in/autonoumous_driving/SurroundOcc/projects/mmdet3d_plugin/surroundocc/modules/spatial_cross_attention.py b/PyTorch/built-in/autonoumous_driving/SurroundOcc/projects/mmdet3d_plugin/surroundocc/modules/spatial_cross_attention.py index 92eda9d074..8bd0ee3539 100644 --- a/PyTorch/built-in/autonoumous_driving/SurroundOcc/projects/mmdet3d_plugin/surroundocc/modules/spatial_cross_attention.py +++ b/PyTorch/built-in/autonoumous_driving/SurroundOcc/projects/mmdet3d_plugin/surroundocc/modules/spatial_cross_attention.py @@ -40,7 +40,7 @@ from projects.mmdet3d_plugin.models.utils.bricks import run_time ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) import pdb -from ads.common import npu_multi_scale_deformable_attn_function +from mx_driving.common import npu_multi_scale_deformable_attn_function @ATTENTION.register_module() class SpatialCrossAttention(BaseModule): diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/README.md b/PyTorch/built-in/autonoumous_driving/UniAD/README.md index d0ebfd89c1..1f8226f7bf 100644 --- 
a/PyTorch/built-in/autonoumous_driving/UniAD/README.md +++ b/PyTorch/built-in/autonoumous_driving/UniAD/README.md @@ -122,7 +122,7 @@ pip install -r requirements.txt ``` -- 安装ADS加速库,安装方法参考[原仓](https://gitee.com/ascend/ads),安装后手动source环境变量或将其配置在test/env_npu.sh中。 +- 安装mxDriving加速库,安装方法参考[原仓](https://gitee.com/ascend/mxDriving),安装后手动source环境变量或将其配置在test/env_npu.sh中。 ### 准备数据集 diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/mmcv_need/multi_scale_deform_attn.py b/PyTorch/built-in/autonoumous_driving/UniAD/mmcv_need/multi_scale_deform_attn.py index 9141bf6ba6..dfa111e74b 100644 --- a/PyTorch/built-in/autonoumous_driving/UniAD/mmcv_need/multi_scale_deform_attn.py +++ b/PyTorch/built-in/autonoumous_driving/UniAD/mmcv_need/multi_scale_deform_attn.py @@ -16,7 +16,7 @@ from mmcv.cnn.bricks.registry import ATTENTION from mmcv.runner import BaseModule from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE, IS_NPU_AVAILABLE from ..utils import ext_loader -import ads.common +import mx_driving.common ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) @@ -365,7 +365,7 @@ class MultiScaleDeformableAttention(BaseModule): if ((IS_CUDA_AVAILABLE and value.is_cuda) or (IS_MLU_AVAILABLE and value.is_mlu) or (IS_NPU_AVAILABLE and value.device.type == 'npu')): - output = ads.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, + output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, sampling_locations, attention_weights) else: output = multi_scale_deformable_attn_pytorch( diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/motion_deformable_attn.py b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/motion_deformable_attn.py index 4835d2ade0..b07883f4a0 100644 --- a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/motion_deformable_attn.py +++ b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/motion_deformable_attn.py @@ -19,7 +19,7 @@ from mmcv.cnn.bricks.drop import build_dropout from mmcv.runner.base_module import BaseModule, ModuleList, Sequential from mmcv.utils import ConfigDict, deprecated_api_warning from projects.mmdet3d_plugin.uniad.modules.multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32 -import ads.common +import mx_driving.common @TRANSFORMER_LAYER.register_module() @@ -454,7 +454,7 @@ class MotionDeformableAttention(BaseModule): f' 2 or 4, but get {reference_trajs.shape[-1]} instead.') if torch.cuda.is_available() and value.is_cuda: - output = ads.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, + output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, sampling_locations, attention_weights) else: output = multi_scale_deformable_attn_pytorch( diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py index fe2206da76..b95eb0acbb 100644 --- a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py +++ b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py @@ -26,7 +26,7 @@ from mmcv.utils 
import (ConfigDict, build_from_cfg, deprecated_api_warning, from mmcv.utils import ext_loader from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32, \ MultiScaleDeformableAttnFunction_fp16 -import ads.common +import mx_driving.common ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) @@ -325,7 +325,7 @@ class CustomMSDeformableAttention(BaseModule): f' 2 or 4, but get {reference_points.shape[-1]} instead.') if torch.cuda.is_available() and value.is_cuda: - output = ads.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, + output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, sampling_locations, attention_weights) else: output = multi_scale_deformable_attn_pytorch( diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/spatial_cross_attention.py b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/spatial_cross_attention.py index 0aa8d2fc52..5714fa2db8 100644 --- a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/spatial_cross_attention.py +++ b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/spatial_cross_attention.py @@ -23,7 +23,7 @@ from mmcv.runner.base_module import BaseModule, ModuleList, Sequential from mmcv.utils import ext_loader from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32, \ MultiScaleDeformableAttnFunction_fp16 -import ads.common +import mx_driving.common ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) @@ -384,7 +384,7 @@ class MSDeformableAttention3D(BaseModule): # if torch.cuda.is_available() and value.is_cuda: - output = ads.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, + output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, sampling_locations, attention_weights) else: output = multi_scale_deformable_attn_pytorch( diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/temporal_self_attention.py b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/temporal_self_attention.py index 65a69adba0..5a6b997947 100644 --- a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/temporal_self_attention.py +++ b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/temporal_self_attention.py @@ -17,7 +17,7 @@ from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning, to_2tuple) from mmcv.utils import ext_loader -import ads.common +import mx_driving.common ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) @@ -238,7 +238,7 @@ class TemporalSelfAttention(BaseModule): f' 2 or 4, but get {reference_points.shape[-1]} instead.') if torch.cuda.is_available() and value.is_cuda: - output = ads.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, + output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index, sampling_locations, attention_weights) else: diff --git a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/README.md b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/README.md index 16df9643e2..2c26c2662a 100644 --- 
a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/README.md +++ b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/README.md @@ -66,7 +66,7 @@ Grounding DINO是一个开放集目标检测模型,可以根据输入文本检 | CANN | 8.0.RC1 | | 昇腾NPU固件 | 24.1.RC1 | | 昇腾NPU驱动 | 24.1.RC1 | -| ADS | v2.1.0 | +| Mx_Driving | 6.0.RC2 | ### 安装模型环境 @@ -88,8 +88,8 @@ Grounding DINO是一个开放集目标检测模型,可以根据输入文本检 pip install torch-2.1.0-cp38-cp38m-linux_aarch64.whl pip install torch_npu-2.1.0.XXX-cp38-cp38m-linux_aarch64.whl - # 安装 ads-accelerator - pip install ads_accelerator-*-cp38-cp38-linux_aarch64.whl + # 安装 mx_driving-accelerator + pip install mx_driving_accelerator-*-cp38-cp38-linux_aarch64.whl # 修改 ascend-toolkit 路径 source /usr/local/Ascend/ascend-toolkit/set_env.sh @@ -249,8 +249,8 @@ python tools/dataset_converters/refcoco2odvg.py refcoco/mdetr_annotations # 修改 Python 路径 PYTHON_PATH="Python Env Path" - export ADS_PYTHON_PATH=${PYTHON_PATH}/lib/python3.8 - export ASCEND_CUSTOM_OPP_PATH=${ADS_PYTHON_PATH}/site-packages/ads/packages/vendors/customize + export Mx_Driving_PYTHON_PATH=${PYTHON_PATH}/lib/python3.8 + export ASCEND_CUSTOM_OPP_PATH=${Mx_Driving_PYTHON_PATH}/site-packages/mx_driving/packages/vendors/customize export LD_LIBRARY_PATH=${ASCEND_CUSTOM_OPP_PATH}/op_api/lib/:$LD_LIBRARY_PATH python groundingdino_npu/image_demo_npu.py \ demo/demo.jpg \ diff --git a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/adapter_groundingdino.py b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/adapter_groundingdino.py index 41e9a7c3c1..d47910cc74 100644 --- a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/adapter_groundingdino.py +++ b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/adapter_groundingdino.py @@ -24,7 +24,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch from mmdet.models.detectors.deformable_detr import DeformableDETR from mmdet.models.layers.transformer.deformable_detr_layers import DeformableDetrTransformerEncoder -import ads.common +import mx_driving.common @no_type_check @@ -128,7 +128,7 @@ def msda_forward(self, f'Last dim of reference_points must be' f' 2 or 4, but get {reference_points.shape[-1]} instead.') - output = ads.common.npu_multi_scale_deformable_attn_function( + output = mx_driving.common.npu_multi_scale_deformable_attn_function( value, spatial_shapes, level_start_index, diff --git a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/finetune_refcoco.sh b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/finetune_refcoco.sh index 91f2fbce48..b69f707025 100644 --- a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/finetune_refcoco.sh +++ b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/finetune_refcoco.sh @@ -2,8 +2,8 @@ source groundingdino_npu/env_npu.sh PYTHON_PATH="Python Env Path" -export ADS_PYTHON_PATH=${PYTHON_PATH}/lib/python3.8 -export ASCEND_CUSTOM_OPP_PATH=${ADS_PYTHON_PATH}/site-packages/ads/packages/vendors/customize +export Mx_Driving_PYTHON_PATH=${PYTHON_PATH}/lib/python3.8 +export ASCEND_CUSTOM_OPP_PATH=${Mx_Driving_PYTHON_PATH}/site-packages/mx_driving/packages/vendors/customize export LD_LIBRARY_PATH=${ASCEND_CUSTOM_OPP_PATH}/op_api/lib/:$LD_LIBRARY_PATH export HCCL_CONNECT_TIMEOUT=1200 export COMBINED_ENABLE=1 diff --git a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/inference.sh 
b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/inference.sh index 6ce6022b90..51992f4480 100644 --- a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/inference.sh +++ b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/inference.sh @@ -1,8 +1,8 @@ #!/bin/bash source groundingdino_npu/env_npu.sh PYTHON_PATH="Python Env Path" -export ADS_PYTHON_PATH=${PYTHON_PATH}/lib/python3.8 -export ASCEND_CUSTOM_OPP_PATH=${ADS_PYTHON_PATH}/site-packages/ads/packages/vendors/customize +export Mx_Driving_PYTHON_PATH=${PYTHON_PATH}/lib/python3.8 +export ASCEND_CUSTOM_OPP_PATH=${Mx_Driving_PYTHON_PATH}/site-packages/mx_driving/packages/vendors/customize export LD_LIBRARY_PATH=${ASCEND_CUSTOM_OPP_PATH}/op_api/lib/:$LD_LIBRARY_PATH python groundingdino_npu/image_demo_npu.py \ diff --git a/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/README.md b/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/README.md index 6551d21b74..6f81423ed3 100644 --- a/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/README.md +++ b/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/README.md @@ -61,14 +61,14 @@ | 三方库 | 支持版本 | |:--------------:|:------:| | PyTorch | 2.1 | - | ADS-Accelerator | latest | + | Mx_Driving-Accelerator | latest | | mmcv | 1.x | | mmdet | 2.28.2 | | mmsegmentation | 0.30.0 | -- 安装ADS-Accelerator +- 安装Mx_Driving-Accelerator - 请参考昇腾[ads](https://gitee.com/ascend/ads)代码仓说明编译安装ADS-Accelerator + 请参考昇腾[mxDriving](https://gitee.com/ascend/mxDriving)代码仓说明编译安装Mx_Driving-Accelerator - 安装基础依赖 diff --git a/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py b/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py index b8905f38f8..175ef1b1f0 100644 --- a/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py +++ b/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py @@ -21,7 +21,7 @@ from mmcv.runner import BaseModule, force_fp32 from torch.cuda.amp.autocast_mode import autocast from torch.utils.checkpoint import checkpoint -from ads.perception.fused import bev_pool_v2 +from mx_driving.perception.fused import bev_pool_v2 from mmdet.models.backbones.resnet import BasicBlock from ..builder import NECKS diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/README.md b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/README.md index 02273ef0d6..6b817fc68e 100644 --- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/README.md +++ b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/README.md @@ -61,14 +61,14 @@ | 三方库 | 支持版本 | |:--------------:|:------:| | PyTorch | 2.1 | - | ADS-Accelerator | latest | + | Mx_Driving-Accelerator | latest | | mmcv | 1.x | | mmdet | 2.28.2 | | mmsegmentation | 0.30.0 | -- 安装ADS-Accelerator +- 安装Mx_Driving-Accelerator - 请参考昇腾[ads](https://gitee.com/ascend/ads)代码仓说明编译安装ADS-Accelerator + 请参考昇腾[mxDriving](https://gitee.com/ascend/mxDriving)代码仓说明编译安装Mx_Driving-Accelerator - 安装基础依赖 diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/cross_view_hybrid_attention.py b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/cross_view_hybrid_attention.py index b68dd22ff0..49f8d5c72f 100644 --- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/cross_view_hybrid_attention.py +++ 
b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/cross_view_hybrid_attention.py @@ -17,7 +17,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch import warnings import torch import torch_npu -import ads.common +import mx_driving.common import torch.nn as nn from mmcv.cnn import xavier_init, constant_init from mmcv.cnn.bricks.registry import ATTENTION @@ -229,7 +229,7 @@ class TPVCrossViewHybridAttention(BaseModule): f'Last dim of reference_points must be' f' 2 or 4, but get {reference_points.shape[-1]} instead.') - output = ads.common.npu_multi_scale_deformable_attn_function( + output = mx_driving.common.npu_multi_scale_deformable_attn_function( value, spatial_shapes, level_start_index, sampling_locations, attention_weights) # output shape (bs*num_tpv_queue, num_query, embed_dims) diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/image_cross_attention.py b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/image_cross_attention.py index fc4e3da27f..9f2902bc94 100644 --- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/image_cross_attention.py +++ b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/image_cross_attention.py @@ -16,7 +16,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch import warnings import torch import torch_npu -import ads.common +import mx_driving.common import torch.nn as nn from mmcv.cnn import xavier_init, constant_init from mmcv.cnn.bricks.registry import ATTENTION @@ -442,7 +442,7 @@ class TPVMSDeformableAttention3D(BaseModule): # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2 # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points - output = ads.common.npu_multi_scale_deformable_attn_function( + output = mx_driving.common.npu_multi_scale_deformable_attn_function( value, spatial_shapes, level_start_index, sampling_locations, attention_weights) output = self.reshape_output(output, query_lens) diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/cross_view_hybrid_attention.py b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/cross_view_hybrid_attention.py index 9652e077f5..3cb2a51510 100644 --- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/cross_view_hybrid_attention.py +++ b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/cross_view_hybrid_attention.py @@ -17,7 +17,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch import math import torch import torch_npu -import ads.common +import mx_driving.common import torch.nn as nn from mmcv.cnn import xavier_init, constant_init from mmcv.cnn.bricks.registry import ATTENTION @@ -202,7 +202,7 @@ class TPVCrossViewHybridAttention(BaseModule): f' 2, but get {reference_points.shape[-1]} instead.') - output = ads.common.npu_multi_scale_deformable_attn_function( + output = mx_driving.common.npu_multi_scale_deformable_attn_function( value, spatial_shapes, level_start_index, sampling_locations, attention_weights) outputs = self.reshape_output(output, query_lens) diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/image_cross_attention.py b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/image_cross_attention.py index 
e497d70c60..a78d14568b 100644 --- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/image_cross_attention.py +++ b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/image_cross_attention.py @@ -16,7 +16,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch import warnings import torch import torch_npu -import ads.common +import mx_driving.common import torch.nn as nn from mmcv.cnn import xavier_init, constant_init from mmcv.cnn.bricks.registry import ATTENTION @@ -424,7 +424,7 @@ class TPVMSDeformableAttention3D(BaseModule): # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2 # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points - output = ads.common.npu_multi_scale_deformable_attn_function( + output = mx_driving.common.npu_multi_scale_deformable_attn_function( value, spatial_shapes, level_start_index, sampling_locations, attention_weights) output = self.reshape_output(output, query_lens) -- Gitee From c3152eef8802d3e56083e8cffd117ad991be5adb Mon Sep 17 00:00:00 2001 From: zhangchenrui Date: Thu, 15 Aug 2024 10:06:14 +0800 Subject: [PATCH 2/3] add npu adaptation updates for CenterPoint2D --- .../map_to_bev/pointpillar_scatter.py | 16 +++++++++---- .../models/backbones_3d/vfe/pillar_vfe.py | 23 +++++++++++-------- .../tools/test/train_centerpoint_full_8p.sh | 3 +++ .../test/train_centerpoint_performance_8p.sh | 3 +++ 4 files changed, 31 insertions(+), 14 deletions(-) diff --git a/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py b/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py index c57cda8677..ceb5779085 100644 --- a/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py +++ b/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py @@ -9,22 +9,28 @@ class PointPillarScatter(nn.Module): self.model_cfg = model_cfg self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES self.nx, self.ny, self.nz = grid_size + self.nums = self.nz * self.nx * self.ny assert self.nz == 1 def forward(self, batch_dict, **kwargs): pillar_features, coords = batch_dict['pillar_features'], batch_dict['voxel_coords'] + coords_dim1_length = coords.shape[1] + coords_0, coords_1, coords_2, coords_3, _ = torch.split(coords, [1, 1, 1, 1, coords_dim1_length-4], dim=1) + coords_0 = coords_0.squeeze(1) + coords_1 = coords_1.squeeze(1) + coords_2 = coords_2.squeeze(1) + coords_3 = coords_3.squeeze(1) batch_spatial_features = [] - batch_size = coords[:, 0].max().int().item() + 1 + batch_size = coords_0.max().int().item() + 1 for batch_idx in range(batch_size): spatial_feature = torch.zeros( self.num_bev_features, - self.nz * self.nx * self.ny, + self.nums, dtype=pillar_features.dtype, device=pillar_features.device) - batch_mask = coords[:, 0] == batch_idx - this_coords = coords[batch_mask, :] - indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3] + batch_mask = coords_0 == batch_idx + indices = coords_1[batch_mask] + coords_2[batch_mask] * self.nx + coords_3[batch_mask] indices = indices.type(torch.long) pillars = pillar_features[batch_mask, :] pillars = pillars.t() diff --git a/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/models/backbones_3d/vfe/pillar_vfe.py 
b/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/models/backbones_3d/vfe/pillar_vfe.py index a162a83e8e..c84215f293 100644 --- a/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/models/backbones_3d/vfe/pillar_vfe.py +++ b/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/models/backbones_3d/vfe/pillar_vfe.py @@ -39,7 +39,7 @@ class PFNLayer(nn.Module): x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1) if self.use_norm else x torch.backends.cudnn.enabled = True x = F.relu(x) - x_max = torch.max(x, dim=1, keepdim=True)[0] + x_max = torch.max(x.transpose(0,1), dim=0)[0].unsqueeze(0).transpose(0,1) if self.last_vfe: return x_max @@ -94,13 +94,18 @@ class PillarVFE(VFETemplate): def forward(self, batch_dict, **kwargs): voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords'] - points_mean = voxel_features[:, :, :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1) - f_cluster = voxel_features[:, :, :3] - points_mean - - f_center = torch.zeros_like(voxel_features[:, :, :3]) - f_center[:, :, 0] = voxel_features[:, :, 0] - (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset) - f_center[:, :, 1] = voxel_features[:, :, 1] - (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset) - f_center[:, :, 2] = voxel_features[:, :, 2] - (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset) + voxel_features_3 = voxel_features[:, :, :3] + voxel_features_dim2_length = voxel_features.shape[2] + voxel_features_0, voxel_features_1, voxel_features_2, _ = torch.split(voxel_features, [1, 1, 1, voxel_features_dim2_length-3], dim=2) + coords_dim1_length = coords.shape[1] + coords_0, coords_1, coords_2, coords_3, _ = torch.split(coords, [1, 1, 1, 1, coords_dim1_length-4], dim=1) + points_mean = voxel_features_3.sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1) + f_cluster = voxel_features_3 - points_mean + + f_center = torch.zeros_like(voxel_features_3) + f_center[:, :, 0] = voxel_features_0.squeeze(2) - (coords_3.to(voxel_features.dtype) * self.voxel_x + self.x_offset) + f_center[:, :, 1] = voxel_features_1.squeeze(2) - (coords_2.to(voxel_features.dtype) * self.voxel_y + self.y_offset) + f_center[:, :, 2] = voxel_features_2.squeeze(2) - (coords_1.to(voxel_features.dtype) * self.voxel_z + self.z_offset) if self.use_absolute_xyz: features = [voxel_features, f_cluster, f_center] @@ -108,7 +113,7 @@ class PillarVFE(VFETemplate): features = [voxel_features[..., 3:], f_cluster, f_center] if self.with_distance: - points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True) + points_dist = torch.norm(voxel_features_3, 2, 2, keepdim=True) features.append(points_dist) features = torch.cat(features, dim=-1) diff --git a/PyTorch/built-in/autonoumous_driving/OpenPCDet/tools/test/train_centerpoint_full_8p.sh b/PyTorch/built-in/autonoumous_driving/OpenPCDet/tools/test/train_centerpoint_full_8p.sh index 39f897f571..7e1d3c8c3d 100644 --- a/PyTorch/built-in/autonoumous_driving/OpenPCDet/tools/test/train_centerpoint_full_8p.sh +++ b/PyTorch/built-in/autonoumous_driving/OpenPCDet/tools/test/train_centerpoint_full_8p.sh @@ -17,6 +17,9 @@ fi export RANK_SIZE=8 RANK_ID_START=0 +#绑核 +export CPU_AFFINITY_CONF=1 + #基础参数,需要模型审视修改 #网络名称,同目录名称 Network="CenterPoint" diff --git a/PyTorch/built-in/autonoumous_driving/OpenPCDet/tools/test/train_centerpoint_performance_8p.sh 
b/PyTorch/built-in/autonoumous_driving/OpenPCDet/tools/test/train_centerpoint_performance_8p.sh index ddab756d69..5ce973a68c 100644 --- a/PyTorch/built-in/autonoumous_driving/OpenPCDet/tools/test/train_centerpoint_performance_8p.sh +++ b/PyTorch/built-in/autonoumous_driving/OpenPCDet/tools/test/train_centerpoint_performance_8p.sh @@ -17,6 +17,9 @@ fi export RANK_SIZE=8 RANK_ID_START=0 +#绑核 +export CPU_AFFINITY_CONF=1 + #基础参数,需要模型审视修改 #网络名称,同目录名称 Network="CenterPoint" -- Gitee From 92409e8072f5026425b131aa7e3f8adca257bb97 Mon Sep 17 00:00:00 2001 From: zhangchenrui Date: Tue, 27 Aug 2024 15:40:48 +0800 Subject: [PATCH 3/3] update CenterPoint2D README --- PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md b/PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md index 3456cf43b0..3cbeca26c8 100644 --- a/PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md +++ b/PyTorch/built-in/autonoumous_driving/OpenPCDet/README.md @@ -209,15 +209,15 @@ python setup.py develop 训练精度结果展示表 | Exp | mATE | mASE | mAOE | mAVE | mAAE | mAP | NDS | | - | - | - | - | - | - | - | - | -| 8p-竞品A | 32.59 | 26.35 | 44.26 | 24.50 | 19.30 | 49.46 | 60.03 | -| 8p-Atlas 800T A2 | 32.50 | 26.34 | 45.05 | 24.23 | 19.39 | 49.57 | 60.03 | +| 8p-竞品A | 32.59 | 26.35 | 44.26 | 24.50 | 19.30 | 49.20 | 59.78 | +| 8p-Atlas 800T A2 | 32.50 | 26.34 | 45.05 | 24.23 | 19.39 | 50.06 | 60.45 | #### 性能 训练性能结果展示表 | Exp | FPS | | - | - | -| 8p-竞品A | 7.251 | -| 8p-Atlas 800T A2| 7.533 | +| 8p-竞品A | 10.714 | +| 8p-Atlas 800T A2| 8.270 | ## PointPillar -- Gitee
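The bulk of patch 1 is a mechanical rename of the accelerated multi-scale deformable attention entry point from `ads.common` to `mx_driving.common`, with mmcv's pure-PyTorch implementation kept as the fallback branch wherever one existed before. The sketch below condenses that dispatch pattern into one place. It is illustrative only: the wrapper name `msda_dispatch` and the simplified device check are assumptions, while the `npu_multi_scale_deformable_attn_function` call and its five positional arguments are taken verbatim from the hunks above.

```python
# Minimal sketch of the fast-path dispatch this patch installs in each module.
# Assumptions: mx_driving 6.0.RC2 is importable and tensors follow the usual
# MSDA layout; the device check is simplified relative to the patched modules,
# which variously test value.is_cuda or IS_NPU_AVAILABLE.
from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch

try:
    import mx_driving.common  # NPU-accelerated operators
    HAS_MX_DRIVING = True
except ImportError:
    HAS_MX_DRIVING = False


def msda_dispatch(value, spatial_shapes, level_start_index,
                  sampling_locations, attention_weights):
    # value:              (bs, num_keys, num_heads, embed_dims // num_heads)
    # sampling_locations: (bs, num_queries, num_heads, num_levels, num_points, 2)
    # attention_weights:  (bs, num_queries, num_heads, num_levels, num_points)
    if HAS_MX_DRIVING and value.device.type == 'npu':
        return mx_driving.common.npu_multi_scale_deformable_attn_function(
            value, spatial_shapes, level_start_index,
            sampling_locations, attention_weights)
    # mmcv's reference implementation takes no level_start_index argument.
    return multi_scale_deformable_attn_pytorch(
        value, spatial_shapes, sampling_locations, attention_weights)
```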
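Patch 1 also reroutes OpenPCDet's `points_in_boxes_gpu` helper to the renamed kernel. Below is a self-contained sketch of the patched call, assuming the `(B, M, 7)` box and `(B, N, 3)` point layout that the helper's own asserts enforce; the `-1` return value for points outside every box follows OpenPCDet's documented contract for this helper rather than anything restated in the hunk.

```python
import mx_driving.common


def points_in_boxes_npu(points, boxes):
    """Sketch of the patched OpenPCDet helper.

    points: (B, N, 3) xyz in LiDAR coordinates.
    boxes:  (B, M, 7) [x, y, z, dx, dy, dz, heading] in LiDAR coordinates.
    Returns a (B, N) tensor with the index of the box containing each point
    (-1 for points inside no box), computed by the mx_driving NPU kernel.
    """
    assert boxes.shape[0] == points.shape[0]
    assert boxes.shape[2] == 7 and points.shape[2] == 3
    return mx_driving.common.npu_points_in_box(
        boxes.contiguous().npu(), points.contiguous().npu())
```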
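Patch 2's rewrites of `PointPillarScatter.forward` and `PillarVFE.forward` share one idea: replace repeated column slicing of `coords` (`coords[:, 0]`, `coords[:, 3]`, ...) with a single `torch.split`, so each column is materialized once as a contiguous tensor, which generally suits NPU execution better than repeated advanced indexing (this motivation is inferred from the commit subject; the hunks do not state it). A condensed, runnable sketch of the pattern with hypothetical sizes:

```python
import torch

# Voxel coordinates as produced by OpenPCDet: one row per pillar, columns
# [batch_idx, z_idx, y_idx, x_idx]. Sizes here are hypothetical.
coords = torch.randint(0, 4, (1000, 4))
nx = 432  # BEV grid width; nz == 1 for PointPillars, so z_idx is always 0

# Before: four separate advanced-indexing reads such as coords[:, 0].
# After: one split into (P, 1) columns, each squeezed back to (P,).
tail = coords.shape[1] - 4  # keeps the split robust to extra trailing columns
coords_0, coords_1, coords_2, coords_3, _ = torch.split(
    coords, [1, 1, 1, 1, tail], dim=1)
coords_0, coords_1, coords_2, coords_3 = (
    c.squeeze(1) for c in (coords_0, coords_1, coords_2, coords_3))

# Downstream logic is unchanged: flatten (z, y, x) to a 1-D BEV index
# exactly as the patched PointPillarScatter does for each sample.
batch_mask = coords_0 == 0  # pillars belonging to sample 0
indices = (coords_1[batch_mask]
           + coords_2[batch_mask] * nx
           + coords_3[batch_mask]).long()
```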
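Patch 2 also rewrites the pillar-feature max reduction in `PFNLayer.forward`. The two forms are numerically identical; the transposed variant just moves the reduction onto dim 0, presumably the friendlier axis for the NPU kernel (the commit does not spell out the reason). A quick equivalence check:

```python
import torch

# (pillars, points per pillar, channels); sizes are hypothetical.
x = torch.randn(8, 32, 64)

old = torch.max(x, dim=1, keepdim=True)[0]                              # original
new = torch.max(x.transpose(0, 1), dim=0)[0].unsqueeze(0).transpose(0, 1)  # patched

assert old.shape == new.shape == (8, 1, 64)
assert torch.equal(old, new)  # bit-identical results
```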