diff --git a/docs/sample_code/distributed_gradient_accumulation/train.py b/docs/sample_code/distributed_gradient_accumulation/train.py
index 19e26209d51517c0228f477653796fdf6a5fed30..2d4cf7c9f39e06f9246f0f29806dc6ce6c6b85f7 100644
--- a/docs/sample_code/distributed_gradient_accumulation/train.py
+++ b/docs/sample_code/distributed_gradient_accumulation/train.py
@@ -20,7 +20,7 @@ import mindspore as ms
 import mindspore.dataset as ds
 from mindspore import nn, train
 from mindspore.communication import init
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 from mindspore.parallel.auto_parallel import AutoParallel
 from mindspore.nn.utils import no_init_parameters
 
diff --git a/tutorials/source_en/parallel/distributed_gradient_accumulation.md b/tutorials/source_en/parallel/distributed_gradient_accumulation.md
index 2f4ac566a8f7a299be8f286afa83a863c500c5f6..1056196e18b3b75e465ebfdcf6f6475c6ef9c6ea 100644
--- a/tutorials/source_en/parallel/distributed_gradient_accumulation.md
+++ b/tutorials/source_en/parallel/distributed_gradient_accumulation.md
@@ -117,7 +117,7 @@ In this step, we need to define the loss function and the training process. Para
 ```python
 import mindspore as ms
 from mindspore import nn, train
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 
 loss_fn = nn.CrossEntropyLoss()
 loss_cb = train.LossMonitor(100)
diff --git a/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md b/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
index d32629fd4df46e0a479d48c55001a92d5818b443..f67bc6b6b2595eefba8cd518116efd7daca92596 100644
--- a/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
+++ b/tutorials/source_zh_cn/parallel/distributed_gradient_accumulation.md
@@ -117,7 +117,7 @@ with no_init_parameters():
 ```python
 import mindspore as ms
 from mindspore import nn, train
-from mindspore.parallel import GradAccumulation
+from mindspore.parallel.nn import GradAccumulation
 
 loss_fn = nn.CrossEntropyLoss()
 loss_cb = train.LossMonitor(100)
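
For reviewers, a minimal sketch of how the relocated import is exercised, assuming the `GradAccumulation(network, micro_size)` wrapper signature from the tutorial these files belong to; `net`, `data_set`, and the micro-batch count of 4 are illustrative placeholders, not part of this change:

```python
import mindspore as ms
from mindspore import nn, train
from mindspore.parallel.nn import GradAccumulation  # new import path in this change

# Placeholders for illustration only: `net` is any nn.Cell classifier and
# `data_set` is any mindspore.dataset pipeline yielding (data, label) pairs.
loss_fn = nn.CrossEntropyLoss()
loss_cb = train.LossMonitor(100)

# Wrap the network-with-loss cell so gradients accumulate over 4 micro-batches
# before each optimizer step (micro_size=4 is an illustrative value).
net_with_loss = GradAccumulation(nn.WithLossCell(net, loss_fn), 4)
optimizer = nn.SGD(net_with_loss.trainable_params(), 1e-2)

model = ms.Model(net_with_loss, optimizer=optimizer)
model.train(10, data_set, callbacks=[loss_cb])
```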