From edfee1a55cc31c3369c226075baf7259e60f806e Mon Sep 17 00:00:00 2001
From: lilinsiman
Date: Fri, 6 Sep 2024 11:30:41 +0800
Subject: [PATCH 1/2] Update mx_driving operator invocation method
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../autonoumous_driving/BEVFormer/mmdet_need/resnet.py       | 3 ++-
 .../projects/mmdet3d_plugin/bevformer/modules/decoder.py     | 4 ++--
 .../bevformer/modules/spatial_cross_attention.py             | 4 ++--
 .../bevformer/modules/temporal_self_attention.py             | 4 ++--
 .../pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py       | 4 ++--
 .../surroundocc/modules/spatial_cross_attention.py           | 2 +-
 .../UniAD/mmcv_need/multi_scale_deform_attn.py               | 4 ++--
 .../dense_heads/motion_head_plugin/motion_deformable_attn.py | 4 ++--
 .../UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py   | 4 ++--
 .../mmdet3d_plugin/uniad/modules/spatial_cross_attention.py  | 4 ++--
 .../mmdet3d_plugin/uniad/modules/temporal_self_attention.py  | 4 ++--
 .../groundingdino_npu/adapter_groundingdino.py               | 4 ++--
 .../tpvformer04/modules/cross_view_hybrid_attention.py       | 4 ++--
 .../tpvformer04/modules/image_cross_attention.py             | 4 ++--
 .../tpvformer10/modules/cross_view_hybrid_attention.py       | 4 ++--
 .../tpvformer10/modules/image_cross_attention.py             | 4 ++--
 16 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/PyTorch/built-in/autonoumous_driving/BEVFormer/mmdet_need/resnet.py b/PyTorch/built-in/autonoumous_driving/BEVFormer/mmdet_need/resnet.py
index e324c0048d..ac4b2bd1a4 100644
--- a/PyTorch/built-in/autonoumous_driving/BEVFormer/mmdet_need/resnet.py
+++ b/PyTorch/built-in/autonoumous_driving/BEVFormer/mmdet_need/resnet.py
@@ -8,6 +8,7 @@ from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer
 from mmcv.runner import BaseModule
 from torch.nn.modules.batchnorm import _BatchNorm
 import mx_driving.common
+import mx_driving.fused
 import torch
 import torch_npu
 
@@ -637,7 +638,7 @@ class ResNet(BaseModule):
         x = self.conv1(x)
         x = self.norm1(x)
         x = self.relu(x)
-        x = mx_driving.common.npu_max_pool2d(x, 3, 2, 1)
+        x = mx_driving.fused.npu_max_pool2d(x, 3, 2, 1)
         outs = []
         for i, layer_name in enumerate(self.res_layers):
             res_layer = getattr(self, layer_name)
diff --git a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/decoder.py b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/decoder.py
index edd4a12c93..c76b7b79c3 100644
--- a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/decoder.py
+++ b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/decoder.py
@@ -23,7 +23,7 @@ import math
 from mmcv.runner.base_module import BaseModule, ModuleList, Sequential
 from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning,
                         to_2tuple)
-import mx_driving.common
+import mx_driving.fused
 
 
 def inverse_sigmoid(x, eps=1e-5):
@@ -319,7 +319,7 @@ class CustomMSDeformableAttention(BaseModule):
                 f' 2 or 4, but get {reference_points.shape[-1]} instead.')
 
         if torch.cuda.is_available() and value.is_cuda:
-            output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
+            output = mx_driving.fused.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
                                                                                 sampling_locations, attention_weights)
         else:
             output = multi_scale_deformable_attn_pytorch(
diff --git a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py
index c64425191b..e7d06cb956 100644
--- a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py
+++ b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py
@@ -20,7 +20,7 @@ import math
 from mmcv.runner import force_fp32
 from mmcv.runner.base_module import BaseModule, ModuleList, Sequential
 from projects.mmdet3d_plugin.models.utils.bricks import run_time
-import mx_driving.common
+import mx_driving.fused
 
 bev_mask_global = torch.tensor([]).npu()
 indexes_global = None
@@ -390,7 +390,7 @@ class MSDeformableAttention3D(BaseModule):
 
 
         if torch.cuda.is_available() and value.is_cuda:
-            output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
+            output = mx_driving.fused.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
                                                                                 sampling_locations, attention_weights)
         else:
             output = multi_scale_deformable_attn_pytorch(
diff --git a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py
index 731702f61c..51e31847dd 100644
--- a/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py
+++ b/PyTorch/built-in/autonoumous_driving/BEVFormer/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py
@@ -17,7 +17,7 @@ import math
 from mmcv.runner.base_module import BaseModule, ModuleList, Sequential
 from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning,
                         to_2tuple)
-import mx_driving.common
+import mx_driving.fused
 
 
 @ATTENTION.register_module()
@@ -236,7 +236,7 @@ class TemporalSelfAttention(BaseModule):
                 f' 2 or 4, but get {reference_points.shape[-1]} instead.')
 
         if torch.cuda.is_available() and value.is_cuda:
-            output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
+            output = mx_driving.fused.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
                                                                                 sampling_locations, attention_weights)
 
         else:
diff --git a/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py b/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py
index eef47a691f..b552cf1a59 100644
--- a/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py
+++ b/PyTorch/built-in/autonoumous_driving/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py
@@ -37,8 +37,8 @@ def points_in_boxes_gpu(points, boxes):
     assert boxes.shape[2] == 7 and points.shape[2] == 3
     batch_size, num_points, _ = points.shape
 
-    import mx_driving.common
-    box_idxs_of_pts = mx_driving.common.npu_points_in_box(boxes.contiguous().npu(), points.contiguous().npu())
+    import mx_driving.data
+    box_idxs_of_pts = mx_driving.data.npu_points_in_box(boxes.contiguous().npu(), points.contiguous().npu())
 
     return box_idxs_of_pts
 
diff --git a/PyTorch/built-in/autonoumous_driving/SurroundOcc/projects/mmdet3d_plugin/surroundocc/modules/spatial_cross_attention.py b/PyTorch/built-in/autonoumous_driving/SurroundOcc/projects/mmdet3d_plugin/surroundocc/modules/spatial_cross_attention.py
index 8bd0ee3539..78396b6841 100644
--- a/PyTorch/built-in/autonoumous_driving/SurroundOcc/projects/mmdet3d_plugin/surroundocc/modules/spatial_cross_attention.py
+++ b/PyTorch/built-in/autonoumous_driving/SurroundOcc/projects/mmdet3d_plugin/surroundocc/modules/spatial_cross_attention.py
@@ -40,7 +40,7 @@ from projects.mmdet3d_plugin.models.utils.bricks import run_time
 ext_module = ext_loader.load_ext(
     '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
 import pdb
-from mx_driving.common import npu_multi_scale_deformable_attn_function
+from mx_driving.fused import npu_multi_scale_deformable_attn_function
 
 @ATTENTION.register_module()
 class SpatialCrossAttention(BaseModule):
diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/mmcv_need/multi_scale_deform_attn.py b/PyTorch/built-in/autonoumous_driving/UniAD/mmcv_need/multi_scale_deform_attn.py
index dfa111e74b..243d0efb38 100644
--- a/PyTorch/built-in/autonoumous_driving/UniAD/mmcv_need/multi_scale_deform_attn.py
+++ b/PyTorch/built-in/autonoumous_driving/UniAD/mmcv_need/multi_scale_deform_attn.py
@@ -16,7 +16,7 @@ from mmcv.cnn.bricks.registry import ATTENTION
 from mmcv.runner import BaseModule
 from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE, IS_NPU_AVAILABLE
 from ..utils import ext_loader
-import mx_driving.common
+import mx_driving.fused
 
 ext_module = ext_loader.load_ext(
     '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
@@ -365,7 +365,7 @@ class MultiScaleDeformableAttention(BaseModule):
         if ((IS_CUDA_AVAILABLE and value.is_cuda)
                 or (IS_MLU_AVAILABLE and value.is_mlu)
                 or (IS_NPU_AVAILABLE and value.device.type == 'npu')):
-            output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
+            output = mx_driving.fused.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
                                                                                 sampling_locations, attention_weights)
         else:
             output = multi_scale_deformable_attn_pytorch(
diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/motion_deformable_attn.py b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/motion_deformable_attn.py
index b07883f4a0..5efadfc5bc 100644
--- a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/motion_deformable_attn.py
+++ b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/dense_heads/motion_head_plugin/motion_deformable_attn.py
@@ -19,7 +19,7 @@ from mmcv.cnn.bricks.drop import build_dropout
 from mmcv.runner.base_module import BaseModule, ModuleList, Sequential
 from mmcv.utils import ConfigDict, deprecated_api_warning
 from projects.mmdet3d_plugin.uniad.modules.multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32
-import mx_driving.common
+import mx_driving.fused
 
 
 @TRANSFORMER_LAYER.register_module()
@@ -454,7 +454,7 @@ class MotionDeformableAttention(BaseModule):
                 f' 2 or 4, but get {reference_trajs.shape[-1]} instead.')
 
         if torch.cuda.is_available() and value.is_cuda:
-            output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
+            output = mx_driving.fused.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
                                                                                 sampling_locations, attention_weights)
         else:
             output = multi_scale_deformable_attn_pytorch(
diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py
index b95eb0acbb..8af5c01f8f 100644
--- a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py
+++ b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/decoder.py
@@ -26,7 +26,7 @@ from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning,
 from mmcv.utils import ext_loader
 from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32, \
     MultiScaleDeformableAttnFunction_fp16
-import mx_driving.common
+import mx_driving.fused
 
 ext_module = ext_loader.load_ext(
     '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
@@ -325,7 +325,7 @@ class CustomMSDeformableAttention(BaseModule):
                 f' 2 or 4, but get {reference_points.shape[-1]} instead.')
 
         if torch.cuda.is_available() and value.is_cuda:
-            output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
+            output = mx_driving.fused.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
                                                                                 sampling_locations, attention_weights)
         else:
             output = multi_scale_deformable_attn_pytorch(
diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/spatial_cross_attention.py b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/spatial_cross_attention.py
index 5714fa2db8..4eb3d1ec5f 100644
--- a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/spatial_cross_attention.py
+++ b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/spatial_cross_attention.py
@@ -23,7 +23,7 @@ from mmcv.runner.base_module import BaseModule, ModuleList, Sequential
 from mmcv.utils import ext_loader
 from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32, \
     MultiScaleDeformableAttnFunction_fp16
-import mx_driving.common
+import mx_driving.fused
 
 ext_module = ext_loader.load_ext(
     '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
@@ -384,7 +384,7 @@ class MSDeformableAttention3D(BaseModule):
 
         #
         if torch.cuda.is_available() and value.is_cuda:
-            output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
+            output = mx_driving.fused.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
                                                                                 sampling_locations, attention_weights)
         else:
             output = multi_scale_deformable_attn_pytorch(
diff --git a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/temporal_self_attention.py b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/temporal_self_attention.py
index 5a6b997947..44b63fec1b 100644
--- a/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/temporal_self_attention.py
+++ b/PyTorch/built-in/autonoumous_driving/UniAD/projects/mmdet3d_plugin/uniad/modules/temporal_self_attention.py
@@ -17,7 +17,7 @@ from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning,
                         to_2tuple)
 from mmcv.utils import ext_loader
 
-import mx_driving.common
+import mx_driving.fused
 
 ext_module = ext_loader.load_ext(
     '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
@@ -238,7 +238,7 @@ class TemporalSelfAttention(BaseModule):
                 f' 2 or 4, but get {reference_points.shape[-1]} instead.')
 
         if torch.cuda.is_available() and value.is_cuda:
-            output = mx_driving.common.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
+            output = mx_driving.fused.npu_multi_scale_deformable_attn_function(value, spatial_shapes, level_start_index,
                                                                                 sampling_locations, attention_weights)
 
         else:
diff --git a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/adapter_groundingdino.py b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/adapter_groundingdino.py
index d47910cc74..9ea849cc71 100644
--- a/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/adapter_groundingdino.py
+++ b/PyTorch/built-in/cv/detection/GroundingDINO_for_Pytorch/groundingdino_npu/adapter_groundingdino.py
@@ -24,7 +24,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
 from mmdet.models.detectors.deformable_detr import DeformableDETR
 from mmdet.models.layers.transformer.deformable_detr_layers import DeformableDetrTransformerEncoder
 
-import mx_driving.common
+import mx_driving.fused
 
 
 @no_type_check
@@ -128,7 +128,7 @@ def msda_forward(self,
             f'Last dim of reference_points must be'
             f' 2 or 4, but get {reference_points.shape[-1]} instead.')
 
-    output = mx_driving.common.npu_multi_scale_deformable_attn_function(
+    output = mx_driving.fused.npu_multi_scale_deformable_attn_function(
         value,
         spatial_shapes,
         level_start_index,
diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/cross_view_hybrid_attention.py b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/cross_view_hybrid_attention.py
index 49f8d5c72f..b8d0820ede 100644
--- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/cross_view_hybrid_attention.py
+++ b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/cross_view_hybrid_attention.py
@@ -17,7 +17,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
 import warnings
 import torch
 import torch_npu
-import mx_driving.common
+import mx_driving.fused
 import torch.nn as nn
 from mmcv.cnn import xavier_init, constant_init
 from mmcv.cnn.bricks.registry import ATTENTION
@@ -229,7 +229,7 @@ class TPVCrossViewHybridAttention(BaseModule):
             f'Last dim of reference_points must be'
             f' 2 or 4, but get {reference_points.shape[-1]} instead.')
 
-        output = mx_driving.common.npu_multi_scale_deformable_attn_function(
+        output = mx_driving.fused.npu_multi_scale_deformable_attn_function(
             value, spatial_shapes, level_start_index, sampling_locations, attention_weights)
         # output shape (bs*num_tpv_queue, num_query, embed_dims)
diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/image_cross_attention.py b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/image_cross_attention.py
index 9f2902bc94..099f32e577 100644
--- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/image_cross_attention.py
+++ b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer04/modules/image_cross_attention.py
@@ -16,7 +16,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
 import warnings
 import torch
 import torch_npu
-import mx_driving.common
+import mx_driving.fused
 import torch.nn as nn
 from mmcv.cnn import xavier_init, constant_init
 from mmcv.cnn.bricks.registry import ATTENTION
@@ -442,7 +442,7 @@ class TPVMSDeformableAttention3D(BaseModule):
         # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2
         # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points
 
-        output = mx_driving.common.npu_multi_scale_deformable_attn_function(
+        output = mx_driving.fused.npu_multi_scale_deformable_attn_function(
             value, spatial_shapes, level_start_index, sampling_locations, attention_weights)
         output = self.reshape_output(output, query_lens)
diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/cross_view_hybrid_attention.py b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/cross_view_hybrid_attention.py
index 3cb2a51510..75c4b162b1 100644
--- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/cross_view_hybrid_attention.py
+++ b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/cross_view_hybrid_attention.py
@@ -17,7 +17,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
 import math
 import torch
 import torch_npu
-import mx_driving.common
+import mx_driving.fused
 import torch.nn as nn
 from mmcv.cnn import xavier_init, constant_init
 from mmcv.cnn.bricks.registry import ATTENTION
@@ -202,7 +202,7 @@ class TPVCrossViewHybridAttention(BaseModule):
             f' 2, but get {reference_points.shape[-1]} instead.')
 
 
-        output = mx_driving.common.npu_multi_scale_deformable_attn_function(
+        output = mx_driving.fused.npu_multi_scale_deformable_attn_function(
             value, spatial_shapes, level_start_index, sampling_locations, attention_weights)
         outputs = self.reshape_output(output, query_lens)
diff --git a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/image_cross_attention.py b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/image_cross_attention.py
index a78d14568b..d621530c50 100644
--- a/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/image_cross_attention.py
+++ b/PyTorch/contrib/autonoumous_driving/TPVFormer_for_PyTorch/tpvformer10/modules/image_cross_attention.py
@@ -16,7 +16,7 @@ from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
 import warnings
 import torch
 import torch_npu
-import mx_driving.common
+import mx_driving.fused
 import torch.nn as nn
 from mmcv.cnn import xavier_init, constant_init
 from mmcv.cnn.bricks.registry import ATTENTION
@@ -424,7 +424,7 @@ class TPVMSDeformableAttention3D(BaseModule):
         # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2
         # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points
 
-        output = mx_driving.common.npu_multi_scale_deformable_attn_function(
+        output = mx_driving.fused.npu_multi_scale_deformable_attn_function(
             value, spatial_shapes, level_start_index, sampling_locations, attention_weights)
         output = self.reshape_output(output, query_lens)
--
Gitee

From 67007630d07ce787c8be0439adb93a5cfe70f5a3 Mon Sep 17 00:00:00 2001
From: lilinsiman
Date: Fri, 6 Sep 2024 14:50:53 +0800
Subject: [PATCH 2/2] Update mx_driving operator invocation method
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py b/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py
index 175ef1b1f0..8a7fd76eac 100644
--- a/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py
+++ b/PyTorch/contrib/autonoumous_driving/BEVDet_for_PyTorch/mmdet3d/models/necks/view_transformer.py
@@ -21,7 +21,7 @@ from mmcv.runner import BaseModule, force_fp32
 from torch.cuda.amp.autocast_mode import autocast
 from torch.utils.checkpoint import checkpoint
 
-from mx_driving.perception.fused import bev_pool_v2
+from mx_driving.point import bev_pool_v2
 from mmdet.models.backbones.resnet import BasicBlock
 from ..builder import NECKS
--
Gitee
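
Note (not part of the patches above): every renamed entry point keeps the call
signature already used in the diffs. The sketch below gathers the migrated
calls in one place as a minimal illustration. Only the argument order, the
shape comments from the TPVFormer diffs, and the boxes/points asserts from
points_in_boxes_gpu come from the patch; the concrete tensor shapes, random
inputs, and feature-map sizes are assumptions, and bev_pool_v2 is only
imported because its arguments never appear in this patch.

    import torch
    import torch_npu
    import mx_driving.fused
    import mx_driving.data
    from mx_driving.point import bev_pool_v2  # new module path; usage not shown in the patch

    bs, num_query, num_heads, num_levels, num_points, embed_dims = 2, 900, 8, 4, 8, 256

    # Per-level feature-map sizes (illustrative) and the flattened value tensor,
    # laid out in the standard multi-scale deformable attention convention.
    spatial_shapes = torch.tensor([[92, 160], [46, 80], [23, 40], [12, 20]]).npu()
    num_value = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())
    level_start_index = torch.cat((
        spatial_shapes.new_zeros((1,)),
        (spatial_shapes[:, 0] * spatial_shapes[:, 1]).cumsum(0)[:-1]))
    value = torch.rand(bs, num_value, num_heads, embed_dims // num_heads).npu()

    # Shapes of the last two arguments follow the comments in the TPVFormer diffs:
    # sampling_locations: (bs, num_query, num_heads, num_levels, num_all_points, 2)
    # attention_weights:  (bs, num_query, num_heads, num_levels, num_all_points)
    sampling_locations = torch.rand(
        bs, num_query, num_heads, num_levels, num_points, 2).npu()
    attention_weights = torch.rand(
        bs, num_query, num_heads, num_levels, num_points).npu()
    output = mx_driving.fused.npu_multi_scale_deformable_attn_function(
        value, spatial_shapes, level_start_index, sampling_locations, attention_weights)

    # Same positional arguments as the ResNet stem call it replaces
    # (presumably kernel_size=3, stride=2, padding=1).
    feat = mx_driving.fused.npu_max_pool2d(torch.rand(2, 64, 128, 128).npu(), 3, 2, 1)

    # boxes (batch, num_boxes, 7) and points (batch, num_points, 3), matching
    # the asserts in points_in_boxes_gpu above.
    boxes = torch.rand(1, 16, 7).npu()
    points = torch.rand(1, 2048, 3).npu()
    box_idxs_of_pts = mx_driving.data.npu_points_in_box(
        boxes.contiguous(), points.contiguous())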