From 4b2114870741364cb63fc4c4c59a68c3aa240f93 Mon Sep 17 00:00:00 2001
From: yuezenglin
Date: Sat, 7 Jun 2025 11:16:16 +0800
Subject: [PATCH] swap

---
 docs/sample_code/distributed_gradient_accumulation/train.py    | 2 +-
 .../source_en/parallel/distributed_gradient_accumulation.md    | 2 +-
 .../source_zh_cn/parallel/distributed_gradient_accumulation.md | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/sample_code/distributed_gradient_accumulation/train.py b/docs/sample_code/distributed_gradient_accumulation/train.py
index 19e26209d5..2d4cf7c9f3 100644
--- a/docs/sample_code/distributed_gradient_accumulation/train.py
+++ b/docs/sample_code/distributed_gradient_accumulation/train.py
@@ -20,7 +20,7 @@ import mindspore as ms
 import mindspore.dataset as ds
 from mindspore import nn, train
 from mindspore.communication import init
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 from mindspore.parallel.auto_parallel import AutoParallel
 from mindspore.nn.utils import no_init_parameters
 
diff --git a/tutorials/source_en/parallel/distributed_gradient_accumulation.md b/tutorials/source_en/parallel/distributed_gradient_accumulation.md
index 01f13cc591..1fe0a0a728 100644
--- a/tutorials/source_en/parallel/distributed_gradient_accumulation.md
+++ b/tutorials/source_en/parallel/distributed_gradient_accumulation.md
@@ -117,7 +117,7 @@ In this step, we need to define the loss function and the training process. Para
 ```python
 import mindspore as ms
 from mindspore import nn, train
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 
 loss_fn = nn.CrossEntropyLoss()
 loss_cb = train.LossMonitor(100)
diff --git a/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md b/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
index 14965bd6a1..e9d3993bdd 100644
--- a/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
+++ b/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
@@ -117,7 +117,7 @@ with no_init_parameters():
 ```python
 import mindspore as ms
 from mindspore import nn, train
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 
 loss_fn = nn.CrossEntropyLoss()
 loss_cb = train.LossMonitor(100)
-- 
Gitee