From 86502f93430681ca779fd7741b9bd43f19d8fcd3 Mon Sep 17 00:00:00 2001
From: yuezenglin
Date: Mon, 9 Jun 2025 10:45:14 +0800
Subject: [PATCH] doc bugfix

---
 docs/sample_code/distributed_gradient_accumulation/train.py    | 2 +-
 .../source_en/parallel/distributed_gradient_accumulation.md    | 2 +-
 .../source_zh_cn/parallel/distributed_gradient_accumulation.md | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/sample_code/distributed_gradient_accumulation/train.py b/docs/sample_code/distributed_gradient_accumulation/train.py
index 19e26209d5..2d4cf7c9f3 100644
--- a/docs/sample_code/distributed_gradient_accumulation/train.py
+++ b/docs/sample_code/distributed_gradient_accumulation/train.py
@@ -20,7 +20,7 @@ import mindspore as ms
 import mindspore.dataset as ds
 from mindspore import nn, train
 from mindspore.communication import init
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 from mindspore.parallel.auto_parallel import AutoParallel
 from mindspore.nn.utils import no_init_parameters
 
diff --git a/tutorials/source_en/parallel/distributed_gradient_accumulation.md b/tutorials/source_en/parallel/distributed_gradient_accumulation.md
index 88b2dfe009..46c59edc28 100644
--- a/tutorials/source_en/parallel/distributed_gradient_accumulation.md
+++ b/tutorials/source_en/parallel/distributed_gradient_accumulation.md
@@ -117,7 +117,7 @@ In this step, we need to define the loss function and the training process. Para
 ```python
 import mindspore as ms
 from mindspore import nn, train
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 
 loss_fn = nn.CrossEntropyLoss()
 loss_cb = train.LossMonitor(100)
diff --git a/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md b/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
index 7fb0327484..6612a29642 100644
--- a/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
+++ b/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
@@ -117,7 +117,7 @@ with no_init_parameters():
 ```python
 import mindspore as ms
 from mindspore import nn, train
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 
 loss_fn = nn.CrossEntropyLoss()
 loss_cb = train.LossMonitor(100)
-- 
Gitee