diff --git a/mindspeed_llm/mindspore/mindspore_adaptor.py b/mindspeed_llm/mindspore/mindspore_adaptor.py
index 79b46e559f6f20a50bff914c7443f6b5ba4b92cd..4deaaaaa1e87b1f39fc9de3e6052625d4389951e 100644
--- a/mindspeed_llm/mindspore/mindspore_adaptor.py
+++ b/mindspeed_llm/mindspore/mindspore_adaptor.py
@@ -386,6 +386,15 @@ class MindSporeAdaptation(MegatronAdaptationABC):
         from mindspeed.mindspore.optimizer.adamw import step_func
         MindSporeAdaptation.register('mindspeed.optimizer.adamw.AdamW.step', step_func)
 
+        from mindspeed.mindspore.core.context_parallel.utils import general_out_update
+        from mindspeed.mindspore.ops.fusion_attention_v2 import npu_fusion_attention, npu_fusion_attention_grad
+        from mindspeed.mindspore.ops.npu_ring_attention_update import npu_ring_attention_update
+        MindSporeAdaptation.register('mindspeed.core.context_parallel.utils.general_out_update', general_out_update)
+        MindSporeAdaptation.register('mindspeed.core.context_parallel.ring_context_parallel.general_out_update', general_out_update)
+        MindSporeAdaptation.register('mindspeed.ops.fusion_attention_v2.npu_fusion_attention', npu_fusion_attention)
+        MindSporeAdaptation.register('mindspeed.ops.fusion_attention_v2.npu_fusion_attention_grad', npu_fusion_attention_grad)
+        MindSporeAdaptation.register('mindspeed.ops.npu_ring_attention_update.npu_ring_attention_update', npu_ring_attention_update)
+
     @staticmethod
     def reparse_args():
         """
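
Note on the mechanism (not part of the patch): MindSporeAdaptation.register pairs a dotted path into the upstream MindSpeed package with a MindSpore replacement, so that the original symbol is monkey-patched when the adaptation is applied. The sketch below illustrates that assumed semantics only; apply_patch is a hypothetical helper, not MindSpeed's actual API, which records registrations and applies them later and may add extra wrapping.

import importlib

def apply_patch(dotted_path: str, new_impl) -> None:
    # Hypothetical helper for illustration only. Imports the longest
    # importable prefix of `dotted_path`, walks any remaining attributes
    # (handles class members such as 'AdamW.step'), then overwrites the
    # final attribute with the MindSpore implementation.
    parts = dotted_path.split('.')
    for i in range(len(parts) - 1, 0, -1):
        try:
            obj = importlib.import_module('.'.join(parts[:i]))
            break
        except ImportError:
            continue
    else:
        raise ImportError(f"no importable prefix in {dotted_path!r}")
    for name in parts[i:-1]:
        obj = getattr(obj, name)
    setattr(obj, parts[-1], new_impl)

Under that reading, each register call in this hunk reroutes a MindSpeed context-parallel or fused-attention entry point (for example 'mindspeed.ops.fusion_attention_v2.npu_fusion_attention') to its MindSpore counterpart from mindspeed.mindspore.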