From 5e120e5f2c16f26247b6bcbf537efa086f519d64 Mon Sep 17 00:00:00 2001
From: zhttjd
Date: Tue, 17 Jun 2025 16:38:52 +0800
Subject: [PATCH] Add DiffusionDrive Model

---
 README.md                                     |   1 +
 model_examples/DiffusionDrive/README.md       | 216 +++++++
 .../DiffusionDrive/migrate_to_ascend/patch.py | 596 ++++++++++++++++++
 .../migrate_to_ascend/preprocess.sh           |  18 +
 .../migrate_to_ascend/requirements.txt        |  19 +
 .../DiffusionDrive/migrate_to_ascend/test.py  | 336 ++++++++++
 .../DiffusionDrive/migrate_to_ascend/train.py | 329 ++++++++++
 .../migrate_to_ascend/train_8p_full.sh        |  71 +++
 .../migrate_to_ascend/train_8p_performance.sh |  65 ++
 9 files changed, 1651 insertions(+)
 create mode 100644 model_examples/DiffusionDrive/README.md
 create mode 100644 model_examples/DiffusionDrive/migrate_to_ascend/patch.py
 create mode 100644 model_examples/DiffusionDrive/migrate_to_ascend/preprocess.sh
 create mode 100644 model_examples/DiffusionDrive/migrate_to_ascend/requirements.txt
 create mode 100644 model_examples/DiffusionDrive/migrate_to_ascend/test.py
 create mode 100644 model_examples/DiffusionDrive/migrate_to_ascend/train.py
 create mode 100644 model_examples/DiffusionDrive/migrate_to_ascend/train_8p_full.sh
 create mode 100644 model_examples/DiffusionDrive/migrate_to_ascend/train_8p_performance.sh

diff --git a/README.md b/README.md
index 3808eb5e..1aa62468 100644
--- a/README.md
+++ b/README.md
@@ -163,6 +163,7 @@ out, argmax = scatter_max(updates, indices, out)
 | MagicDriveDiT | https://gitee.com/ascend/DrivingSDK/tree/master/model_examples/MagicDriveDiT |N|
 | SparseDrive | https://gitee.com/ascend/DrivingSDK/tree/master/model_examples/SparseDrive |N|
 | Diffusion-Planner | https://gitee.com/ascend/DrivingSDK/tree/master/model_examples/Diffusion-Planner |N|
+| DiffusionDrive | https://gitee.com/ascend/DrivingSDK/tree/master/model_examples/DiffusionDrive |N|
 
 # 支持的产品型号
 - Atlas A2 训练系列产品
diff --git a/model_examples/DiffusionDrive/README.md b/model_examples/DiffusionDrive/README.md
new file mode 100644
index 00000000..5794f713
--- /dev/null
+++ b/model_examples/DiffusionDrive/README.md
@@ -0,0 +1,216 @@
+# DiffusionDrive
+
+# Table of Contents
+
+# Overview
+
+## Model Description
+
+**DiffusionDrive** is an end-to-end autonomous driving model built on a **truncated diffusion policy**. Its core idea is to rebuild the multi-modal trajectory generation paradigm around an **anchored Gaussian distribution**, which significantly improves inference efficiency and decision diversity. A **cascaded diffusion decoder** deeply fuses scene perception features, so only 2 denoising steps are needed to generate physically plausible driving trajectories; this preserves real-time performance while resolving the computational bottleneck and mode-overlap problems of conventional diffusion models.
+
+## Supported Tasks
+
+| Model          | Task     | Supported          |
+| -------------- | -------- | ------------------ |
+| DiffusionDrive | Training | :heavy_check_mark: |
+
+## Code Implementation
+
+* Reference official implementation:
+
+  ```
+  url=https://github.com/hustvl/DiffusionDrive/tree/nusc
+  commit_id=ae54fd87b32b3762f20e63ffd0af91d343cade85
+  ```
+
+* Implementation adapted for Ascend AI processors:
+
+  ```
+  url=https://gitee.com/ascend/DrivingSDK.git
+  code_path=model_examples/DiffusionDrive
+  ```
+
+
+
+# DiffusionDrive (in development)
+
+# Preparing the Training Environment
+
+## Installing the Ascend Environment
+
+Set up the Ascend environment by following the Ascend community document "[Preparing a PyTorch Framework Training Environment](https://gitee.com/link?target=https%3A%2F%2Fwww.hiascend.com%2Fdocument%2Fdetail%2Fzh%2FModelZoo%2Fpytorchframework%2Fptes)". This repository supports the software versions listed in Table 1.
+
+**Table 1** Supported Ascend software versions
+
+| Software           | Supported Version |
+| ------------------ | ----------------- |
+| FrameworkPTAdapter | 7.0.0             |
+| CANN               | 8.1.RC1           |
+
+## Installing the Model Environment
+
+The PyTorch version and known third-party dependencies supported by this model are listed in the table below.
+
+**Table 2** Supported versions
+
+| Third-party Library | Supported Version |
+| ------------------- | ----------------- |
+| PyTorch             | 2.1.0             |
+| Driving SDK         | 7.0.RC1           |
+| mmcv                | 1.x               |
+| mmdet               | 2.28.2            |
+
+- Install the Driving SDK: build and install it as described in the Ascend [Driving SDK](https://gitee.com/ascend/DrivingSDK) repository. After completing that README's installation steps, the following should be installed:
+
+  - the CANN package
+  - the torch_npu package
+  - the dependencies listed in requirements.txt at the repository root
+  - the Driving SDK package, built and installed from source
+
+- Build and install geos from source:
+
+  ```bash
+  git clone https://github.com/libgeos/geos.git
+  cd geos
+  mkdir build
+  cd build
+  cmake ../
+  make
+  DRIVING_ENV_PATH=`pip3 show mx_driving | grep "Location" | awk -F "Location: " '{print $2}' | awk -F "python" '{print $1}'`
+  cp lib/libgeos* ${DRIVING_ENV_PATH}
+  cd ../..
+  ```
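+
+  As an optional sanity check (illustrative only, not part of the official steps), verify that the copied libraries are in place; shapely, pulled in later via nuscenes-devkit, is the usual consumer of libgeos:
+
+  ```bash
+  ls ${DRIVING_ENV_PATH}/libgeos*
+  # once the requirements below are installed, shapely should load libgeos;
+  # the buffer area of a unit circle is ~3.14 if everything links correctly
+  python3 -c "from shapely.geometry import Point; print(Point(0, 0).buffer(1).area)"
+  ```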
+
+- Build and install mmcv from source:
+
+  ```bash
+  git clone -b 1.x https://github.com/open-mmlab/mmcv.git
+  cd mmcv
+  MMCV_WITH_OPS=1 FORCE_NPU=1 python setup.py install
+  ```
+
+- Clone the official model repository:
+
+  ```bash
+  # cloning into the current directory is only an example; the actual location does not matter
+  git clone https://github.com/hustvl/DiffusionDrive
+  cd DiffusionDrive
+  git checkout ae54fd87b32b3762f20e63ffd0af91d343cade85
+  ```
+
+- Copy the Ascend migration patch into the cloned official repository:
+
+  ```bash
+  cp -r ../migrate_to_ascend ./
+  ```
+
+- Install the model and migration dependencies; both are bundled into `requirements.txt` under the `migrate_to_ascend` directory:
+
+  ```bash
+  cd migrate_to_ascend
+  pip install -r requirements.txt
+  cd ..
+  ```
+
+# Preparing the Dataset and Weights
+
+## nuScenes Dataset
+
+Obtain the *nuscenes* dataset yourself, then create a symlink `data/nuscenes` in the source directory pointing at the extracted nuscenes data. The expected layout:
+
+```shell
+DiffusionDrive/
+├── data/
+│   └── nuscenes/                  # main dataset directory
+│       ├── can_bus/               # vehicle CAN bus signals
+│       ├── lidarseg/              # lidar point cloud semantic segmentation
+│       ├── maps/                  # HD maps
+│       ├── nuscenes_gt_database/  # ground-truth database
+│       ├── samples/               # keyframe sensor data
+│       ├── sweeps/                # non-keyframe continuous data
+│       ├── v1.0-test/             # test split metadata
+│       └── v1.0-trainval/         # trainval split metadata
+```
+
+Either link the whole dataset directory:
+
+```bash
+mkdir -p ./data
+ln -s [path/to/nuscenes] ./data/nuscenes
+```
+
+or create the directory and link each subdirectory individually, for example:
+
+```bash
+mkdir -p ./data/nuscenes
+export DATA_PATH=[path/to/nuscenes]
+ln -s $DATA_PATH/can_bus/ ./data/nuscenes/can_bus
+ln -s $DATA_PATH/lidarseg/ ./data/nuscenes/lidarseg
+ln -s $DATA_PATH/maps/ ./data/nuscenes/maps
+ln -s $DATA_PATH/nuscenes_gt_database/ ./data/nuscenes/nuscenes_gt_database
+ln -s $DATA_PATH/samples/ ./data/nuscenes/samples
+ln -s $DATA_PATH/sweeps/ ./data/nuscenes/sweeps
+ln -s $DATA_PATH/v1.0-test ./data/nuscenes/v1.0-test
+ln -s $DATA_PATH/v1.0-trainval ./data/nuscenes/v1.0-trainval
+```
+
+## Dataset Preprocessing
+
+Run the preprocessing script to generate the pkl info files and initial anchors required for training DiffusionDrive:
+
+```bash
+sh ./migrate_to_ascend/preprocess.sh
+```
+
+## Downloading Weights
+
+```bash
+mkdir ckpts
+cd ./ckpts/
+wget https://download.pytorch.org/models/resnet50-19c8e357.pth
+wget https://github.com/swc-17/SparseDrive/releases/download/v1.0/sparsedrive_stage1.pth
+cd ..
+```
+
+
+
+# Quick Start
+
+## Training the Model
+
+```bash
+bash migrate_to_ascend/train_8p_full.sh
+```
+
+## Measuring Performance
+
+```bash
+bash migrate_to_ascend/train_8p_performance.sh
+```
+
+
+
+## Training Results
+
+**Table 3** Training results
+
+| Device        | Cards | global batch size | FPS   | Avg. step time (s) | L2     |
+| ------------- | ----- | ----------------- | ----- | ------------------ | ------ |
+| Competitor A  | 8p    | 48                | 37.06 | 1.295              | 0.5934 |
+| Atlas 800T A2 | 8p    | 48                | 29.46 | 1.629              | 0.5856 |
+
+
+
+# Release Notes
+
+## Changes
+
+2025.06.16: Initial release.
+
+## FAQ
+
+None yet.
\ No newline at end of file
diff --git a/model_examples/DiffusionDrive/migrate_to_ascend/patch.py b/model_examples/DiffusionDrive/migrate_to_ascend/patch.py
new file mode 100644
index 00000000..ebdd1cfc
--- /dev/null
+++ b/model_examples/DiffusionDrive/migrate_to_ascend/patch.py
@@ -0,0 +1,596 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+# Copyright (c) OpenMMLab. All rights reserved.
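+#
+# This module monkey-patches the official DiffusionDrive code for Ascend NPU:
+# FlashAttention is rerouted to torch_npu.npu_fusion_attention, the custom
+# deformable aggregation op to mx_driving, and distributed init to the HCCL
+# backend. Importing the module applies the import-time fixes via _init();
+# the remaining patches are activated through the mx_driving PatcherBuilder,
+# as train.py and test.py do:
+#
+#     from migrate_to_ascend.patch import generate_patcher_builder
+#     with generate_patcher_builder().build():
+#         main()  # patched modules are in effect inside this context
+#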
+import importlib +import os +import sys +import types +from types import ModuleType +from typing import Dict +import importlib + +import mmcv +import mmcv.runner +import torch +import torch_npu + +import mx_driving +from mx_driving import deformable_aggregation +from mx_driving.patcher import PatcherBuilder, Patch +from mx_driving.patcher import index, batch_matmul, numpy_type, ddp, stream, ddp_forward +from mx_driving.patcher import resnet_add_relu, resnet_maxpool + + +def flash_attn(attention: ModuleType, options: Dict): + _in_projection_packed = attention._in_projection_packed + auto_fp16 = attention.auto_fp16 + rearrange = attention.rearrange + + # pylint: disable=too-many-arguments,huawei-too-many-arguments + @auto_fp16(apply_to=('q', 'k', 'v'), out_fp32=True) + def FlashAttention_forward(self, q, k, v, causal=False, key_padding_mask=None): + """Implements the multihead softmax attention. + Arguments + --------- + q: The tensor containing the query. (B, T, H, D) + kv: The tensor containing the key, and value. (B, S, 2, H, D) + key_padding_mask: a bool tensor of shape (B, S) + """ + + if key_padding_mask is None: + if self.softmax_scale: + scale = self.softmax_scale + else: + scale = (q.shape[-1]) ** (-0.5) + + dropout_p = self.dropout_p if self.training else 0.0 + h = q.shape[-2] + output = torch_npu.npu_fusion_attention(q, k, v, h, + input_layout="BSND", + pre_tockens=65536, + next_tockens=65536, + atten_mask=None, + scale=scale, + keep_prob=1. - dropout_p, + sync=False, + inner_precise=0)[0] + else: + pass + return output, None + + def FlashMHA_forward(self, q, k, v, key_padding_mask=None): + """x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) + key_padding_mask: bool tensor of shape (batch, seqlen) + """ + q, k, v = _in_projection_packed(q, k, v, self.in_proj_weight, self.in_proj_bias) + q = rearrange(q, 'b s (h d) -> b s h d', h=self.num_heads) + k = rearrange(k, 'b s (h d) -> b s h d', h=self.num_heads) + v = rearrange(v, 'b s (h d) -> b s h d', h=self.num_heads) + + context, attn_weights = self.inner_attn(q, k, v, key_padding_mask=key_padding_mask, causal=self.causal) + return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), attn_weights + + if hasattr(attention, "FlashAttention"): + attention.FlashAttention.forward = FlashAttention_forward + + if hasattr(attention, "FlashMHA"): + attention.FlashMHA.forward = FlashMHA_forward + + +def cpu2npu(models: ModuleType, options: Dict): + + def DFA_get_weights(self, instance_feature, anchor_embed, metas=None): + bs, num_anchor = instance_feature.shape[:2] + feature = instance_feature + anchor_embed + if self.camera_encoder is not None: + camera_embed = self.camera_encoder( + metas["projection_mat"][:, :, :3].reshape( + bs, self.num_cams, -1 + ) + ) + feature = feature[:, :, None] + camera_embed[:, None] + + weights = ( + self.weights_fc(feature) + .reshape(bs, num_anchor, -1, self.num_groups) + .softmax(dim=-2) + .reshape( + bs, + num_anchor, + self.num_cams, + self.num_levels, + self.num_pts, + self.num_groups, + ) + ) + if self.training and self.attn_drop > 0: + mask = torch.rand((bs, num_anchor, self.num_cams, 1, self.num_pts, 1), + device=weights.device, dtype=weights.dtype) + weights = ((mask > self.attn_drop) * weights) / ( + 1 - self.attn_drop + ) + return weights + + if hasattr(models, "DeformableFeatureAggregation"): + models.DeformableFeatureAggregation._get_weights = DFA_get_weights + + +def detection_losses(losses: ModuleType, options: Dict): + SIN_YAW, COS_YAW = losses.SIN_YAW, 
losses.COS_YAW + CNS, YNS = losses.CNS, losses.YNS + X, Y, Z = losses.X, losses.Y, losses.Z + + # pylint: disable=too-many-arguments,huawei-too-many-arguments + def losses_forward( + self, + box, + box_target, + weight=None, + avg_factor=None, + prefix="", + suffix="", + quality=None, + cls_target=None, + **kwargs, + ): + # Some categories do not distinguish between positive and negative + # directions. For example, barrier in nuScenes dataset. + if self.cls_allow_reverse is not None and cls_target is not None: + if_reverse = ( + torch.nn.functional.cosine_similarity( + box_target[..., [SIN_YAW, COS_YAW]], + box[..., [SIN_YAW, COS_YAW]], + dim=-1, + ) + < 0 + ) + if_reverse = ( + torch.isin( + cls_target, cls_target.new_tensor(self.cls_allow_reverse) + ) + & if_reverse + ) + box_target[..., [SIN_YAW, COS_YAW]] = torch.where( + if_reverse[..., None], + -box_target[..., [SIN_YAW, COS_YAW]], + box_target[..., [SIN_YAW, COS_YAW]], + ) + + output = {} + box_loss = self.loss_box( + box, box_target, weight=weight, avg_factor=avg_factor + ) + output[f"{prefix}loss_box{suffix}"] = box_loss + + if quality is not None: + cns = quality[..., CNS] + yns = quality[..., YNS].sigmoid() + cns_target = torch.norm( + box_target[..., [X, Y, Z]] - box[..., [X, Y, Z]], p=2, dim=-1 + ) + cns_target = torch.exp(-cns_target) + cns_loss = self.loss_cns(cns, cns_target.detach(), avg_factor=avg_factor) + output[f"{prefix}loss_cns{suffix}"] = cns_loss + + yns_target = ( + torch.nn.functional.cosine_similarity( + box_target[..., [SIN_YAW, COS_YAW]], + box[..., [SIN_YAW, COS_YAW]], + dim=-1, + ) + > 0 + ) + yns_target = yns_target.float() + yns_loss = self.loss_yns(yns, yns_target, avg_factor=avg_factor) + output[f"{prefix}loss_yns{suffix}"] = yns_loss + return output + + if hasattr(losses, "SparseBox3DLoss"): + losses.SparseBox3DLoss.forward = losses_forward + + +def detection_target(target: ModuleType, options: Dict): + X, Y, Z = target.X, target.Y, target.Z + W, L, H = target.W, target.L, target.H + YAW = target.YAW + + def encode_reg_target(self, box_target, device=None): + sizes = [box.shape[0] for box in box_target] + boxes = torch.cat(box_target, dim=0) + output = torch.cat( + [ + boxes[..., [X, Y, Z]], + boxes[..., [W, L, H]].log(), + torch.sin(boxes[..., YAW]).unsqueeze(-1), + torch.cos(boxes[..., YAW]).unsqueeze(-1), + boxes[..., YAW + 1:], + ], + dim=-1, + ) + if device is not None: + output = output.to(device=device) + outputs = torch.split(output, sizes, dim=0) + return outputs + + def _cls_cost(self, cls_pred, cls_target): + bs = cls_pred.shape[0] + cls_pred = cls_pred.sigmoid() + neg_cost = ( + -(1 - cls_pred + self.eps).log() + * (1 - self.alpha) + * cls_pred.pow(self.gamma) + ) + pos_cost = ( + -(cls_pred + self.eps).log() + * self.alpha + * (1 - cls_pred).pow(self.gamma) + ) + cost = (pos_cost - neg_cost) * self.cls_weight + costs = [] + for i in range(bs): + if len(cls_target[i]) > 0: + costs.append( + cost[i, :, cls_target[i]] + ) + else: + costs.append(None) + return costs + + def _box_cost(self, box_pred, box_target, instance_reg_weights): + bs = box_pred.shape[0] + cost = [] + weights = box_pred.new_tensor(self.reg_weights) + for i in range(bs): + if len(box_target[i]) > 0: + cost.append( + torch.sum( + torch.abs(box_pred[i, :, None] - box_target[i][None]) + * instance_reg_weights[i][None] + * weights, + dim=-1, + ) + * self.box_weight + ) + else: + cost.append(None) + return cost + + if hasattr(target, "SparseBox3DTarget"): + target.SparseBox3DTarget._cls_cost = _cls_cost + + if hasattr(target, 
"SparseBox3DTarget"): + target.SparseBox3DTarget._box_cost = _box_cost + + if hasattr(target, "SparseBox3DTarget"): + target.SparseBox3DTarget.encode_reg_target = encode_reg_target + + +def map_target(target: ModuleType, options: Dict): + SparsePoint3DTarget = target.SparsePoint3DTarget + build_assigner = target.build_assigner + + def __init__( + self, + assigner=None, + num_dn_groups=0, + dn_noise_scale=0.5, + max_dn_gt=32, + add_neg_dn=True, + num_temp_dn_groups=0, + num_cls=3, + num_sample=20, + roi_size=(30, 60), + ): + super(SparsePoint3DTarget, self).__init__( + num_dn_groups, num_temp_dn_groups + ) + self.assigner = build_assigner(assigner) + self.dn_noise_scale = dn_noise_scale + self.max_dn_gt = max_dn_gt + self.add_neg_dn = add_neg_dn + + self.num_cls = num_cls + self.num_sample = num_sample + self.roi_size = roi_size + self.origin = -torch.tensor([self.roi_size[0] / 2, self.roi_size[1] / 2]).npu() + self.norm = torch.tensor([self.roi_size[0], self.roi_size[1]]).npu() + 1e-5 + + def normalize_line(self, line): + if line.shape[0] == 0: + return line + + line = line.view(line.shape[:-1] + (self.num_sample, -1)) + line = line - self.origin + # transform from range [0, 1] to (0, 1) + line = line / self.norm + line = line.flatten(-2, -1) + + return line + + if hasattr(target, "SparsePoint3DTarget"): + target.SparsePoint3DTarget.__init__ = __init__ + + if hasattr(target, "SparsePoint3DTarget"): + target.SparsePoint3DTarget.normalize_line = normalize_line + + +def motion_planning_target(target: ModuleType, options: Dict): + get_cls_target = target.get_cls_target + + # pylint: disable=too-many-return-values + def motion_sample( + self, + reg_pred, + gt_reg_target, + gt_reg_mask, + motion_loss_cache, + ): + bs, num_anchor, mode, ts, d = reg_pred.shape + reg_target = reg_pred.new_zeros((bs, num_anchor, ts, d)) + reg_weight = reg_pred.new_zeros((bs, num_anchor, ts)) + indices = motion_loss_cache['indices'] + num_pos = reg_pred.new_tensor([0]) + for i, (pred_idx, target_idx) in enumerate(indices): + if len(gt_reg_target[i]) == 0: + continue + reg_target[i, pred_idx] = gt_reg_target[i][target_idx] + reg_weight[i, pred_idx] = gt_reg_mask[i][target_idx] + num_pos += len(pred_idx) + + cls_target = get_cls_target(reg_pred, reg_target, reg_weight) + cls_weight = reg_weight.any(dim=-1) + best_reg = torch.gather(reg_pred, 2, cls_target[..., None, None, None].repeat(1, 1, 1, ts, d)).squeeze(2) + + return cls_target, cls_weight, best_reg, reg_target, reg_weight, num_pos + + # pylint: disable=too-many-arguments,huawei-too-many-arguments,too-many-return-values + def planning_sample( + self, + cls_pred, + reg_pred, + gt_reg_target, + gt_reg_mask, + data, + ): + gt_reg_target = gt_reg_target.unsqueeze(1) + gt_reg_mask = gt_reg_mask.unsqueeze(1) + + bs = reg_pred.shape[0] + bs_indices = torch.arange(bs, device=reg_pred.device) + cmd = data['gt_ego_fut_cmd'].argmax(dim=-1) + + cls_pred = cls_pred.reshape(bs, 3, 1, self.ego_fut_mode) + reg_pred = reg_pred.reshape(bs, 3, 1, self.ego_fut_mode, self.ego_fut_ts, 2) + cls_pred = cls_pred[bs_indices, cmd] + reg_pred = reg_pred[bs_indices, cmd] + cls_target = get_cls_target(reg_pred, gt_reg_target, gt_reg_mask) + cls_weight = gt_reg_mask.any(dim=-1) + best_reg = torch.gather(reg_pred, 2, cls_target[..., None, None, None].repeat(1, 1, 1, self.ego_fut_ts, 2)).squeeze(2) + + return cls_pred, cls_target, cls_weight, best_reg, gt_reg_target, gt_reg_mask + + if hasattr(target, "MotionTarget"): + target.MotionTarget.sample = motion_sample + + if hasattr(target, 
"PlanningTarget"): + target.PlanningTarget.sample = planning_sample + + +def get_hccl_init_dist(runner: ModuleType): + module = importlib.import_module(runner) + + if hasattr(module, "dist_utils"): + mp = module.dist_utils.mp + _init_dist_pytorch = module.dist_utils._init_dist_pytorch + _init_dist_mpi = module.dist_utils._init_dist_mpi + _init_dist_slurm = module.dist_utils._init_dist_slurm + + def hccl_init_dist(launcher: str, backend: str = 'nccl', **kwargs) -> None: + backend = 'hccl' + if mp.get_start_method(allow_none=True) is None: + mp.set_start_method('spawn') + if launcher == 'pytorch': + _init_dist_pytorch(backend, **kwargs) + elif launcher == 'mpi': + _init_dist_mpi(backend, **kwargs) + elif launcher == 'slurm': + _init_dist_slurm(backend, **kwargs) + else: + raise ValueError(f'Invalid launcher type: {launcher}') + + return hccl_init_dist + + return None + + +def run_ddp_forward(parallel: ModuleType, options: Dict): + + def _run_ddp_forward(self, *inputs, **kwargs): + module_to_run = self.module + + if self.device_ids: + inputs, kwargs = self.to_kwargs( # type: ignore + inputs, kwargs, self.device_ids[0]) + return module_to_run(*inputs[0], **kwargs[0]) # type: ignore + else: + return module_to_run(*inputs, **kwargs) + + if hasattr(parallel, "MMDistributedDataParallel"): + parallel.MMDistributedDataParallel._run_ddp_forward = _run_ddp_forward + + +def instance_queue(queue: ModuleType, options: Dict): + + def prepare_motion( + self, + det_output, + mask, + ): + instance_feature = det_output["instance_feature"] + det_anchors = det_output["prediction"][-1] + + if self.period is None: + self.period = instance_feature.new_zeros(instance_feature.shape[:2]).long() + else: + instance_id = det_output['instance_id'] + prev_instance_id = self.prev_instance_id + match = instance_id[..., None] == prev_instance_id[:, None] + if self.tracking_threshold > 0: + temp_mask = self.prev_confidence > self.tracking_threshold + match = match * temp_mask.unsqueeze(1) + + # pylint: disable=consider-using-enumerate + for i in range(len(self.instance_feature_queue)): + temp_feature = self.instance_feature_queue[i] + temp_feature = torch.matmul(match.type_as(temp_feature), temp_feature) + self.instance_feature_queue[i] = temp_feature + + temp_anchor = self.anchor_queue[i] + temp_anchor = torch.matmul(match.type_as(temp_anchor), temp_anchor) + self.anchor_queue[i] = temp_anchor + + self.period = ( + match * self.period[:, None] + ).sum(dim=2) + + self.instance_feature_queue.append(instance_feature.detach()) + self.anchor_queue.append(det_anchors.detach()) + self.period += 1 + + if len(self.instance_feature_queue) > self.queue_length: + self.instance_feature_queue.pop(0) + self.anchor_queue.pop(0) + self.period = torch.clip(self.period, 0, self.queue_length) + + if hasattr(queue, "InstanceQueue"): + queue.InstanceQueue.prepare_motion = prepare_motion + + +def generate_patcher_builder(performance=False): + patcher_builder = ( + PatcherBuilder() + .add_module_patch("torch", Patch(index), Patch(batch_matmul)) + .add_module_patch("numpy", Patch(numpy_type)) + .add_module_patch("mmcv.parallel", Patch(ddp), Patch(stream), Patch(ddp_forward), Patch(run_ddp_forward)) + .add_module_patch("mmdet.models.backbones.resnet", Patch(resnet_add_relu), Patch(resnet_maxpool)) + + .add_module_patch("projects.mmdet3d_plugin.models.attention", Patch(flash_attn)) + .add_module_patch("projects.mmdet3d_plugin.models.detection3d.losses", Patch(detection_losses)) + .add_module_patch("projects.mmdet3d_plugin.models.blocks", 
Patch(cpu2npu))
+        .add_module_patch("projects.mmdet3d_plugin.models.detection3d.target", Patch(detection_target))
+        .add_module_patch("projects.mmdet3d_plugin.models.map.target", Patch(map_target))
+
+        .add_module_patch("projects.mmdet3d_plugin.models.motion.target", Patch(motion_planning_target))
+        .add_module_patch("projects.mmdet3d_plugin.models.motion.instance_queue", Patch(instance_queue))
+
+        #.with_profiling('./profiling/level2', 2)
+    )
+    if performance:
+        patcher_builder.brake_at(1000)
+    return patcher_builder
+
+
+# pylint: disable=huawei-redefined-outer-name, lambda-assign
+def block_gpu_flash_attention_dependency():
+    '''
+    In /projects/mmdet3d_plugin/models/attention.py, the following lines
+    -try:
+    -    from flash_attn.flash_attn_interface import flash_attn_unpadded_kvpacked_func
+    -    print('Use flash_attn_unpadded_kvpacked_func')
+    -except:
+    -    from flash_attn.flash_attn_interface import flash_attn_varlen_kvpacked_func as flash_attn_unpadded_kvpacked_func
+    -    print('Use flash_attn_varlen_kvpacked_func')
+    -from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis
+    attempt to import flash_attn, an external dependency implemented for GPU.
+
+    The migration to Ascend patches in an NPU version of flash attention via
+    -.add_module_patch("projects.mmdet3d_plugin.models.attention", Patch(flash_attn))
+    That patch replaces FlashAttention.forward, where flash_attn_unpadded_kvpacked_func,
+    unpad_input, etc. are used, as a whole.
+
+    Still, the imports inside /projects/mmdet3d_plugin/models/attention.py would raise an
+    ImportError when the GPU flash_attn package is not installed. To avoid that, this
+    function registers stub flash_attn modules in sys.modules before the real import is
+    attempted, so the interpreter treats flash_attn as already imported.
+    '''
+
+    flash_attn = types.ModuleType('flash_attn')
+
+    flash_attn_interface = types.ModuleType('flash_attn.flash_attn_interface')
+    flash_attn_interface.flash_attn_unpadded_kvpacked_func = lambda *args, **kwargs: None
+    flash_attn_interface.flash_attn_varlen_kvpacked_func = lambda *args, **kwargs: None
+
+    bert_padding = types.ModuleType('flash_attn.bert_padding')
+    bert_padding.unpad_input = lambda *args, **kwargs: (None, None)
+    bert_padding.pad_input = lambda *args, **kwargs: None
+    bert_padding.index_first_axis = lambda *args, **kwargs: None
+
+    flash_attn.flash_attn_interface = flash_attn_interface
+    flash_attn.bert_padding = bert_padding
+
+    sys.modules['flash_attn'] = flash_attn
+    sys.modules['flash_attn.flash_attn_interface'] = flash_attn_interface
+    sys.modules['flash_attn.bert_padding'] = bert_padding
+
+
+# The official model repo is missing re-exports for these classes; register them here.
+def fix_missing_include():
+
+    mmdet3d_models_module = importlib.import_module("projects.mmdet3d_plugin.models")
+
+    sparsedrive_v1_module = importlib.import_module("projects.mmdet3d_plugin.models.sparsedrive_v1")
+    V1SparseDrive = getattr(sparsedrive_v1_module, "V1SparseDrive")
+
+    sparsedrive_head_v1_module = importlib.import_module("projects.mmdet3d_plugin.models.sparsedrive_head_v1")
+    V1SparseDriveHead = getattr(sparsedrive_head_v1_module, "V1SparseDriveHead")
+
+    motion_blocks_v11_module = importlib.import_module("projects.mmdet3d_plugin.models.motion.motion_blocks_v11")
+    V11MotionPlanningRefinementModule = getattr(motion_blocks_v11_module, "V11MotionPlanningRefinementModule")
+
+    motion_planning_head_v13_module = importlib.import_module("projects.mmdet3d_plugin.models.motion.motion_planning_head_v13")
V13MotionPlanningHead = getattr(motion_planning_head_v13_module, "V13MotionPlanningHead") + + missing = 'V1SparseDrive' + if missing not in mmdet3d_models_module.__all__: + mmdet3d_models_module.__all__.append(missing) + setattr(mmdet3d_models_module, missing, V1SparseDrive) + + missing = 'V1SparseDriveHead' + if missing not in mmdet3d_models_module.__all__: + mmdet3d_models_module.__all__.append(missing) + setattr(mmdet3d_models_module, missing, V1SparseDriveHead) + + missing = 'V13MotionPlanningHead' + if missing not in mmdet3d_models_module.__all__: + mmdet3d_models_module.__all__.append(missing) + setattr(mmdet3d_models_module, missing, V13MotionPlanningHead) + + missing = 'V11MotionPlanningRefinementModule' + if missing not in mmdet3d_models_module.__all__: + mmdet3d_models_module.__all__.append(missing) + setattr(mmdet3d_models_module, missing, V11MotionPlanningRefinementModule) + + +# Mock deform_aggreg in projects.mmdet3d_plugin.ops.deformable_aggregation, replace by mx_driving's deform_aggreg within the mock class +def patch_deform_aggreg(): + + + class MockDeformableAggregationFunction: + @staticmethod + def apply(*args, **kwargs): + return mx_driving.deformable_aggregation(*args, **kwargs) + + + mock_module = types.ModuleType("projects.mmdet3d_plugin.ops.deformable_aggregation") + sys.modules["projects.mmdet3d_plugin.ops.deformable_aggregation"] = mock_module + mock_module.DeformableAggregationFunction = MockDeformableAggregationFunction + + +def _init(): + # order matters + block_gpu_flash_attention_dependency() + patch_deform_aggreg() + fix_missing_include() + + mmcv.runner.init_dist = get_hccl_init_dist('mmcv.runner') + + +_init() \ No newline at end of file diff --git a/model_examples/DiffusionDrive/migrate_to_ascend/preprocess.sh b/model_examples/DiffusionDrive/migrate_to_ascend/preprocess.sh new file mode 100644 index 00000000..329dae98 --- /dev/null +++ b/model_examples/DiffusionDrive/migrate_to_ascend/preprocess.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +export PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +export OPENBLAS_NUM_THREADS=2 +export GOTO_NUM_THREADS=2 +export OMP_NUM_THREADS=2 + +python tools/data_converter/nuscenes_converter.py nuscenes \ + --root-path ./data/nuscenes \ + --canbus ./data/nuscenes \ + --out-dir ./data/infos/ \ + --extra-tag nuscenes \ + --version v1.0 + +python tools/kmeans/kmeans_det.py +python tools/kmeans/kmeans_map.py +python tools/kmeans/kmeans_motion.py +python tools/kmeans/kmeans_plan.py \ No newline at end of file diff --git a/model_examples/DiffusionDrive/migrate_to_ascend/requirements.txt b/model_examples/DiffusionDrive/migrate_to_ascend/requirements.txt new file mode 100644 index 00000000..1070dbe8 --- /dev/null +++ b/model_examples/DiffusionDrive/migrate_to_ascend/requirements.txt @@ -0,0 +1,19 @@ +numpy==1.23.5 +mmdet==2.28.2 +urllib3==1.26.16 +pyquaternion==0.9.9 +nuscenes-devkit==1.1.10 +yapf==0.33.0 +tensorboard==2.14.0 +motmetrics==1.1.3 +pandas==1.2.5 +opencv-python==4.8.1.78 +prettytable==3.7.0 +scikit-learn==1.3.0 + +IPython==8.12 +einops==0.8.1 +ml-dtypes +opencv-python-headless==4.6.0.66 +diffusers +torchvision==0.16.0 \ No newline at end of file diff --git a/model_examples/DiffusionDrive/migrate_to_ascend/test.py b/model_examples/DiffusionDrive/migrate_to_ascend/test.py new file mode 100644 index 00000000..b9decbb3 --- /dev/null +++ b/model_examples/DiffusionDrive/migrate_to_ascend/test.py @@ -0,0 +1,336 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. +# Copyright (c) OpenMMLab. All rights reserved. 
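+#
+# Evaluation entry point for the Ascend migration: it applies the
+# migrate_to_ascend patcher (see patch.py) and then runs the stock
+# mmdet/SparseDrive test flow, single- or multi-card, unchanged.
+#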
+ +import os +from os import path as osp +import warnings +import argparse + +import mmcv +import torch + +from mmcv import Config, DictAction +from mmcv.cnn import fuse_conv_bn +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import ( + get_dist_info, + init_dist, + load_checkpoint, + wrap_fp16_model, +) + +from mmdet.apis import single_gpu_test, multi_gpu_test, set_random_seed +from mmdet.datasets import replace_ImageToTensor, build_dataset +from mmdet.datasets import build_dataloader as build_dataloader_origin +from mmdet.models import build_detector + +from projects.mmdet3d_plugin.datasets.builder import build_dataloader +from projects.mmdet3d_plugin.apis.test import custom_multi_gpu_test + + +import torch_npu +from torch_npu.contrib import transfer_to_npu +from migrate_to_ascend.patch import generate_patcher_builder +import mx_driving + + + + +def parse_args(): + parser = argparse.ArgumentParser( + description="MMDet test (and eval) a model" + ) + parser.add_argument("config", help="test config file path") + parser.add_argument("checkpoint", help="checkpoint file") + parser.add_argument("--out", help="output result file in pickle format") + parser.add_argument( + "--fuse-conv-bn", + action="store_true", + help="Whether to fuse conv and bn, this will slightly increase" + "the inference speed", + ) + parser.add_argument( + "--format-only", + action="store_true", + help="Format the output results without perform evaluation. It is" + "useful when you want to format the result to a specific format and " + "submit it to the test server", + ) + parser.add_argument( + "--eval", + type=str, + nargs="+", + help='evaluation metrics, which depends on the dataset, e.g., "bbox",' + ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC', + ) + parser.add_argument("--show", action="store_true", help="show results") + parser.add_argument( + "--show-dir", help="directory where results will be saved" + ) + parser.add_argument( + "--gpu-collect", + action="store_true", + help="whether to use gpu to collect results.", + ) + parser.add_argument( + "--tmpdir", + help="tmp directory used for collecting results from multiple " + "workers, available when gpu-collect is not specified", + ) + parser.add_argument("--seed", type=int, default=0, help="random seed") + parser.add_argument( + "--deterministic", + action="store_true", + help="whether to set deterministic options for CUDNN backend.", + ) + parser.add_argument( + "--cfg-options", + nargs="+", + action=DictAction, + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file. If the value to " + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + "Note that the quotation marks are necessary and that no white space " + "is allowed.", + ) + parser.add_argument( + "--options", + nargs="+", + action=DictAction, + help="custom options for evaluation, the key-value pair in xxx=yyy " + "format will be kwargs for dataset.evaluate() function (deprecate), " + "change to --eval-options instead.", + ) + parser.add_argument( + "--eval-options", + nargs="+", + action=DictAction, + help="custom options for evaluation, the key-value pair in xxx=yyy " + "format will be kwargs for dataset.evaluate() function", + ) + parser.add_argument( + "--launcher", + choices=["none", "pytorch", "slurm", "mpi"], + default="none", + help="job launcher", + ) + parser.add_argument( + "--performance", + action="store_true", + help="whether use performance mode", + ) + parser.add_argument("--local-rank", type=int, default=0) + parser.add_argument("--result_file", type=str, default=None) + parser.add_argument("--show_only", action="store_true") + args = parser.parse_args() + if "LOCAL_RANK" not in os.environ: + os.environ["LOCAL_RANK"] = str(args.local_rank) + + if args.options and args.eval_options: + raise ValueError( + "--options and --eval-options cannot be both specified, " + "--options is deprecated in favor of --eval-options" + ) + if args.options: + warnings.warn("--options is deprecated in favor of --eval-options") + args.eval_options = args.options + return args + + +def main(): + args = parse_args() + + if args.eval and args.format_only: + raise ValueError("--eval and --format_only cannot be both specified") + + if args.out is not None and not args.out.endswith((".pkl", ".pickle")): + raise ValueError("The output file must be a pkl file.") + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. + if cfg.get("custom_imports", None): + from mmcv.utils import import_modules_from_strings + + import_modules_from_strings(**cfg["custom_imports"]) + + # import modules from plguin/xx, registry will be updated + if hasattr(cfg, "plugin"): + if cfg.plugin: + import importlib + + if hasattr(cfg, "plugin_dir"): + plugin_dir = cfg.plugin_dir + _module_dir = os.path.dirname(plugin_dir) + _module_dir = _module_dir.split("/") + _module_path = _module_dir[0] + + for m in _module_dir[1:]: + _module_path = _module_path + "." + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + else: + # import dir is the dirpath for the config file + _module_dir = os.path.dirname(args.config) + _module_dir = _module_dir.split("/") + _module_path = _module_dir[0] + for m in _module_dir[1:]: + _module_path = _module_path + "." 
+ m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + + # set cudnn_benchmark + if cfg.get("cudnn_benchmark", False): + torch.backends.cudnn.benchmark = True + + cfg.model.pretrained = None + # in case the test dataset is concatenated + samples_per_gpu = 1 + if isinstance(cfg.data.test, dict): + cfg.data.test.test_mode = True + samples_per_gpu = cfg.data.test.pop("samples_per_gpu", 1) + if samples_per_gpu > 1: + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.test.pipeline = replace_ImageToTensor( + cfg.data.test.pipeline + ) + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + ds_cfg.test_mode = True + samples_per_gpu = max( + [ds_cfg.pop("samples_per_gpu", 1) for ds_cfg in cfg.data.test] + ) + if samples_per_gpu > 1: + for ds_cfg in cfg.data.test: + ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) + + # init distributed env first, since logger depends on the dist info. + if args.launcher == "none": + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # set random seeds + if args.seed is not None: + set_random_seed(args.seed, deterministic=args.deterministic) + + # set work dir + if cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + cfg.data.test.work_dir = cfg.work_dir + print('work_dir: ', cfg.work_dir) + + # build the dataloader + dataset = build_dataset(cfg.data.test) + print("distributed:", distributed) + if distributed: + data_loader = build_dataloader( + dataset, + samples_per_gpu=samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False, + nonshuffler_sampler=dict(type="DistributedSampler"), + ) + else: + data_loader = build_dataloader_origin( + dataset, + samples_per_gpu=samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False, + ) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = build_detector(cfg.model, test_cfg=cfg.get("test_cfg")) + + fp16_cfg = cfg.get("fp16", None) + if fp16_cfg is not None: + wrap_fp16_model(model) + checkpoint = load_checkpoint(model, args.checkpoint, map_location="cpu") + if args.fuse_conv_bn: + model = fuse_conv_bn(model) + # old versions did not save class info in checkpoints, this walkaround is + # for backward compatibility + if "CLASSES" in checkpoint.get("meta", {}): + model.CLASSES = checkpoint["meta"]["CLASSES"] + else: + model.CLASSES = dataset.CLASSES + # palette for visualization in segmentation tasks + if "PALETTE" in checkpoint.get("meta", {}): + model.PALETTE = checkpoint["meta"]["PALETTE"] + elif hasattr(dataset, "PALETTE"): + # segmentation dataset has `PALETTE` attribute + model.PALETTE = dataset.PALETTE + + if args.result_file is not None: + + outputs = mmcv.load(args.result_file) + elif not distributed: + model = MMDataParallel(model, device_ids=[0]) + outputs = single_gpu_test(model, data_loader, args.show, args.show_dir) + else: + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + ) + outputs = custom_multi_gpu_test( + model, data_loader, args.tmpdir, args.gpu_collect + ) + + rank, _ = get_dist_info() + if rank == 0: + if args.out: + print(f"\nwriting results to {args.out}") + mmcv.dump(outputs, args.out) + kwargs = {} if args.eval_options is None else 
args.eval_options + if args.show_only: + eval_kwargs = cfg.get("evaluation", {}).copy() + # hard-code way to remove EvalHook args + for key in [ + "interval", + "tmpdir", + "start", + "gpu_collect", + "save_best", + "rule", + ]: + eval_kwargs.pop(key, None) + eval_kwargs.update(kwargs) + dataset.show(outputs, show=True, **eval_kwargs) + elif args.format_only: + dataset.format_results(outputs, **kwargs) + elif args.eval: + eval_kwargs = cfg.get("evaluation", {}).copy() + # hard-code way to remove EvalHook args + for key in [ + "interval", + "tmpdir", + "start", + "gpu_collect", + "save_best", + "rule", + ]: + eval_kwargs.pop(key, None) + eval_kwargs.update(dict(metric=args.eval, **kwargs)) + print(eval_kwargs) + results_dict = dataset.evaluate(outputs, **eval_kwargs) + print(results_dict) + + + +if __name__ == "__main__": + torch.multiprocessing.set_start_method( + "fork", force=True + ) # use fork workers_per_gpu can be > 1 + patcher_builder = generate_patcher_builder() + with patcher_builder.build(): + main() \ No newline at end of file diff --git a/model_examples/DiffusionDrive/migrate_to_ascend/train.py b/model_examples/DiffusionDrive/migrate_to_ascend/train.py new file mode 100644 index 00000000..c5f62a4a --- /dev/null +++ b/model_examples/DiffusionDrive/migrate_to_ascend/train.py @@ -0,0 +1,329 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. +# Copyright (c) OpenMMLab. All rights reserved. +from __future__ import division +import sys +import os +from os import path as osp +import argparse +import copy +import time +import warnings +from datetime import timedelta + +import torch +import mmcv +from mmcv import Config, DictAction +from mmcv.runner import get_dist_info, init_dist + +from mmdet import __version__ as mmdet_version +from mmdet.apis import train_detector +from mmdet.datasets import build_dataset +from mmdet.models import build_detector +from mmdet.utils import collect_env, get_root_logger +from mmdet.apis import set_random_seed +from torch import distributed as dist + +import cv2 + +import torch_npu +from torch_npu.contrib import transfer_to_npu +from migrate_to_ascend.patch import generate_patcher_builder +import mx_driving + +print(sys.executable, os.path.abspath(__file__)) + +cv2.setNumThreads(8) + + +def parse_args(): + parser = argparse.ArgumentParser(description="Train a detector") + parser.add_argument("config", help="train config file path") + parser.add_argument("--work-dir", help="the dir to save logs and models") + parser.add_argument( + "--resume-from", help="the checkpoint file to resume from" + ) + parser.add_argument( + "--no-validate", + action="store_true", + help="whether not to evaluate the checkpoint during training", + ) + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument( + "--gpus", + type=int, + help="number of gpus to use " + "(only applicable to non-distributed training)", + ) + group_gpus.add_argument( + "--gpu-ids", + type=int, + nargs="+", + help="ids of gpus to use " + "(only applicable to non-distributed training)", + ) + parser.add_argument("--seed", type=int, default=0, help="random seed") + parser.add_argument( + "--deterministic", + action="store_true", + help="whether to set deterministic options for CUDNN backend.", + ) + parser.add_argument( + "--options", + nargs="+", + action=DictAction, + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file (deprecate), " + "change to --cfg-options instead.", + ) + 
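+    # Note: --cfg-options supersedes --options; the deprecation handling below
+    # redirects --options values into args.cfg_options.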
parser.add_argument( + "--cfg-options", + nargs="+", + action=DictAction, + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file. If the value to " + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + "Note that the quotation marks are necessary and that no white space " + "is allowed.", + ) + parser.add_argument( + "--dist-url", + type=str, + default="auto", + help="dist url for init process, such as tcp://localhost:8000", + ) + parser.add_argument("--gpus-per-machine", type=int, default=8) + parser.add_argument( + "--launcher", + choices=["none", "pytorch", "slurm", "mpi", "mpi_nccl"], + default="none", + help="job launcher", + ) + parser.add_argument("--local-rank", type=int, default=0) + parser.add_argument( + "--autoscale-lr", + action="store_true", + help="automatically scale lr with the number of gpus", + ) + parser.add_argument( + "--performance", + action="store_true", + help="whether use performance mode", + ) + args = parser.parse_args() + if "LOCAL_RANK" not in os.environ: + os.environ["LOCAL_RANK"] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + "--options and --cfg-options cannot be both specified, " + "--options is deprecated in favor of --cfg-options" + ) + if args.options: + warnings.warn("--options is deprecated in favor of --cfg-options") + args.cfg_options = args.options + + return args + + +def main(args): + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. + if cfg.get("custom_imports", None): + from mmcv.utils import import_modules_from_strings + + import_modules_from_strings(**cfg["custom_imports"]) + + # import modules from plguin/xx, registry will be updated + if hasattr(cfg, "plugin"): + if cfg.plugin: + import importlib + + if hasattr(cfg, "plugin_dir"): + plugin_dir = cfg.plugin_dir + _module_dir = os.path.dirname(plugin_dir) + _module_dir = _module_dir.split("/") + _module_path = _module_dir[0] + + for m in _module_dir[1:]: + _module_path = _module_path + "." + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + else: + # import dir is the dirpath for the config file + _module_dir = os.path.dirname(args.config) + _module_dir = _module_dir.split("/") + _module_path = _module_dir[0] + for m in _module_dir[1:]: + _module_path = _module_path + "." 
+ m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + from projects.mmdet3d_plugin.apis.train import custom_train_model + + # set cudnn_benchmark + if cfg.get("cudnn_benchmark", False): + torch.backends.cudnn.benchmark = True + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get("work_dir", None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join( + "./work_dirs", osp.splitext(osp.basename(args.config))[0] + ) + if args.resume_from is not None: + cfg.resume_from = args.resume_from + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids + else: + cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) + + if args.autoscale_lr: + cfg.optimizer["lr"] = cfg.optimizer["lr"] * len(cfg.gpu_ids) / 8 + + # init distributed env first, since logger depends on the dist info. + if args.launcher == "none": + distributed = False + elif args.launcher == "mpi_nccl": + distributed = True + + import mpi4py.MPI as MPI + + comm = MPI.COMM_WORLD + mpi_local_rank = comm.Get_rank() + mpi_world_size = comm.Get_size() + print( + "MPI local_rank=%d, world_size=%d" + % (mpi_local_rank, mpi_world_size) + ) + + device_ids_on_machines = list(range(args.gpus_per_machine)) + str_ids = list(map(str, device_ids_on_machines)) + os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str_ids) + torch.cuda.set_device(mpi_local_rank % args.gpus_per_machine) + + dist.init_process_group( + backend="nccl", + init_method=args.dist_url, + world_size=mpi_world_size, + rank=mpi_local_rank, + timeout=timedelta(seconds=3600), + ) + + cfg.gpu_ids = range(mpi_world_size) + print("cfg.gpu_ids:", cfg.gpu_ids) + else: + distributed = True + init_dist( + args.launcher, timeout=timedelta(seconds=3600), **cfg.dist_params + ) + # re-set gpu_ids with distributed training mode + _, world_size = get_dist_info() + cfg.gpu_ids = range(world_size) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init the logger before other steps + timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) + log_file = osp.join(cfg.work_dir, f"{timestamp}.log") + # specify logger name, if we still use 'mmdet', the output info will be + # filtered and won't be saved in the log_file + logger = get_root_logger( + log_file=log_file, log_level=cfg.log_level + ) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()]) + dash_line = "-" * 60 + "\n" + logger.info( + "Environment info:\n" + dash_line + env_info + "\n" + dash_line + ) + meta["env_info"] = env_info + meta["config"] = cfg.pretty_text + + # log some basic info + logger.info(f"Distributed training: {distributed}") + logger.info(f"Config:\n{cfg.pretty_text}") + + # set random seeds + if args.seed is not None: + logger.info( + f"Set random seed to {args.seed}, " + f"deterministic: {args.deterministic}" + ) + set_random_seed(args.seed, deterministic=args.deterministic) + cfg.seed = args.seed + meta["seed"] = args.seed + meta["exp_name"] = osp.basename(args.config) + + model = build_detector( + cfg.model, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg") 
+ ) + model.init_weights() + logger.info(f"Model:\n{model}") + + cfg.data.train.work_dir = cfg.work_dir + cfg.data.val.work_dir = cfg.work_dir + datasets = [build_dataset(cfg.data.train)] + + if len(cfg.workflow) == 2: + val_dataset = copy.deepcopy(cfg.data.val) + # in case we use a dataset wrapper + if "dataset" in cfg.data.train: + val_dataset.pipeline = cfg.data.train.dataset.pipeline + else: + val_dataset.pipeline = cfg.data.train.pipeline + # set test_mode=False here in deep copied config + # which do not affect AP/AR calculation later + val_dataset.test_mode = False + datasets.append(build_dataset(val_dataset)) + if cfg.checkpoint_config is not None: + # save mmdet version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmdet_version=mmdet_version, + config=cfg.pretty_text, + CLASSES=datasets[0].CLASSES, + ) + # add an attribute for visualization convenience + model.CLASSES = datasets[0].CLASSES + if hasattr(cfg, "plugin"): + custom_train_model( + model, + datasets, + cfg, + distributed=distributed, + validate=(not args.no_validate), + timestamp=timestamp, + meta=meta, + ) + else: + train_detector( + model, + datasets, + cfg, + distributed=distributed, + validate=(not args.no_validate), + timestamp=timestamp, + meta=meta, + ) + + +if __name__ == "__main__": + torch.multiprocessing.set_start_method( + "fork", force=True + ) # use fork workers_per_gpu can be > 1 + args = parse_args() + patcher_builder = generate_patcher_builder(args.performance) + with patcher_builder.build(): + main(args) \ No newline at end of file diff --git a/model_examples/DiffusionDrive/migrate_to_ascend/train_8p_full.sh b/model_examples/DiffusionDrive/migrate_to_ascend/train_8p_full.sh new file mode 100644 index 00000000..4d4bfa5a --- /dev/null +++ b/model_examples/DiffusionDrive/migrate_to_ascend/train_8p_full.sh @@ -0,0 +1,71 @@ +#!/bin/sh + +CONFIG=projects/configs/diffusiondrive_configs/diffusiondrive_small_stage2.py +GLOBAL_BATCH_SIZE=48 +RANK_SIZE=8 +#export ASCEND_RT_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 + + +echo "[INFO] Start setting ENV VAR" + +#将Host日志输出到串口,0-关闭/1-开启 +export ASCEND_SLOG_PRINT_TO_STDOUT=0 +#设置默认日志级别,0-debug/1-info/2-warning/3-error +export ASCEND_GLOBAL_LOG_LEVEL=3 +#设置Host侧Event日志开启标志,0-关闭/1-开启 +export ASCEND_GLOBAL_EVENT_ENABLE=0 + +# 配置环境变量 +#设置是否开启taskque,0-关闭/1-开启/2-优化 +export TASK_QUEUE_ENABLE=2 +#设置是否开启均匀绑核,0-关闭/1-开启粗粒度绑核/2-开启细粒度绑核 +export CPU_AFFINITY_CONF=1 +#减少显存占用 +export PYTORCH_NPU_ALLOC_CONF="expandable_segments:True" + +# 训练 +start_time=$(date +%s) +echo "start_time=$(date -d @${start_time} "+%Y-%m-%d %H:%M:%S")" + +while true +do + PORT=$(( ((RANDOM<<15)|RANDOM) % 49152 + 10000 )) + status="$(nc -z 127.0.0.1 $PORT < /dev/null &>/dev/null; echo $?)" + if [ "${status}" != "0" ]; then + break; + fi +done +echo $PORT + + +GPUS=$RANK_SIZE +MPORT=${PORT:-28651} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.run \ + --nproc_per_node=$GPUS \ + --master_port=$MPORT \ + migrate_to_ascend/train.py $CONFIG \ + --launcher pytorch \ + --deterministic + + +end_time=$(date +%s) +echo "end_time=$(date -d @${end_time} "+%Y-%m-%d %H:%M:%S")" +e2e_time=$(( $end_time - $start_time )) + + + + +log_file=`find work_dirs/diffusiondrive_small_stage2 -regex ".*\.log" | sort -r | head -n 1` + +avg_time=`grep "Iter" ${log_file} | tail -n 10 | awk -F "time: " '{print $2}' | awk '{sum+=$1; count++} END {if(count>0) print sum/count}'` +echo "avg_time : ${avg_time}" + +avg_fps=`awk 'BEGIN{printf "%.3f\n", 
'$GLOBAL_BATCH_SIZE'/'$avg_time'}'` + +# 输出结果 +echo "[INFO] Final Result" +echo " - End to End Time : ${e2e_time}s" +echo " - Time avg per batch : ${avg_time}s" +echo " - Final Performance images/sec : ${avg_fps}" \ No newline at end of file diff --git a/model_examples/DiffusionDrive/migrate_to_ascend/train_8p_performance.sh b/model_examples/DiffusionDrive/migrate_to_ascend/train_8p_performance.sh new file mode 100644 index 00000000..27bae8fa --- /dev/null +++ b/model_examples/DiffusionDrive/migrate_to_ascend/train_8p_performance.sh @@ -0,0 +1,65 @@ +#!/bin/sh + +CONFIG=projects/configs/diffusiondrive_configs/diffusiondrive_small_stage2.py +GLOBAL_BATCH_SIZE=48 +RANK_SIZE=8 +#export ASCEND_RT_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 + + +#将Host日志输出到串口,0-关闭/1-开启 +export ASCEND_SLOG_PRINT_TO_STDOUT=0 +#设置默认日志级别,0-debug/1-info/2-warning/3-error +export ASCEND_GLOBAL_LOG_LEVEL=3 +#设置Host侧Event日志开启标志,0-关闭/1-开启 +export ASCEND_GLOBAL_EVENT_ENABLE=0 + +#设置是否开启taskque,0-关闭/1-开启/2-优化 +export TASK_QUEUE_ENABLE=2 +#设置是否开启均匀绑核,0-关闭/1-开启粗粒度绑核/2-开启细粒度绑核 +export CPU_AFFINITY_CONF=1 +#减少显存占用 +export PYTORCH_NPU_ALLOC_CONF="expandable_segments:True" + + + +start_time=$(date +%s) +echo "start_time=$(date -d @${start_time} "+%Y-%m-%d %H:%M:%S")" + +while true +do + PORT=$(( ((RANDOM<<15)|RANDOM) % 49152 + 10000 )) + status="$(nc -z 127.0.0.1 $PORT < /dev/null &>/dev/null; echo $?)" + if [ "${status}" != "0" ]; then + break; + fi +done +echo $PORT + +GPUS=$RANK_SIZE +MPORT=${PORT:-28651} + + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -W ignore -m torch.distributed.run \ + --nproc_per_node=$GPUS \ + --master_port=$MPORT \ + migrate_to_ascend/train.py $CONFIG \ + --launcher pytorch \ + --deterministic --performance + +end_time=$(date +%s) +echo "end_time=$(date -d @${end_time} "+%Y-%m-%d %H:%M:%S")" +e2e_time=$(( $end_time - $start_time )) + +log_file=`find work_dirs/diffusiondrive_small_stage2 -regex ".*\.log" | sort -r | head -n 1` + +avg_time=`grep "Iter" ${log_file} | tail -n 10 | awk -F "time: " '{print $2}' | awk '{sum+=$1; count++} END {if(count>0) print sum/count}'` +echo "avg_time : ${avg_time}" + +avg_fps=`awk 'BEGIN{printf "%.3f\n", '$GLOBAL_BATCH_SIZE'/'$avg_time'}'` + +# 输出结果 +echo "[INFO] Final Result" +echo " - End to End Time : ${e2e_time}s" +echo " - Time avg per batch : ${avg_time}s" +echo " - Final Performance images/sec : ${avg_fps}" \ No newline at end of file -- Gitee