diff --git a/tutorials/experts/source_en/debug/fixing_randomness.md b/tutorials/experts/source_en/debug/fixing_randomness.md index 1663116094d68aab4151218db19645c2a8999e0f..2045284c77a6d05325595d97acf4d1aff94737df 100644 --- a/tutorials/experts/source_en/debug/fixing_randomness.md +++ b/tutorials/experts/source_en/debug/fixing_randomness.md @@ -1,6 +1,6 @@ # Fixed Randomness to Reproduce Run Results of Script - + The purpose of fixed randomness is to reproduce the run results of the script and assist in locating the problem. After fixing the randomness, the loss curves produced by two trainings under the same conditions should be basically the same, and you can perform debugging multiple times to easily find the cause of a loss curve abnormality, without worrying that the problem observed in the last debugging session will no longer appear in the current run. diff --git a/tutorials/experts/source_en/others/adaptive_summation.md b/tutorials/experts/source_en/others/adaptive_summation.md index 1969b1a91bdf93b4bfe54725b535d793ff33a749..9eda4e2cf7d80d8681a81950d944bfc3fef3c881 100644 --- a/tutorials/experts/source_en/others/adaptive_summation.md +++ b/tutorials/experts/source_en/others/adaptive_summation.md @@ -1,6 +1,6 @@ # Adaptive Gradient Summation Algorithm - + ## Overview diff --git a/tutorials/experts/source_en/others/dimention_reduce_training.md b/tutorials/experts/source_en/others/dimention_reduce_training.md index a4bdd1792c252f44804700b880b24eff473f4e5e..c04dd6a97f3251edb42d151fd0bc1de8296a43d2 100644 --- a/tutorials/experts/source_en/others/dimention_reduce_training.md +++ b/tutorials/experts/source_en/others/dimention_reduce_training.md @@ -1,6 +1,6 @@ # Dimension Reduction Training Algorithm - + ## Overview diff --git a/tutorials/experts/source_en/others/ms_operator.md b/tutorials/experts/source_en/others/ms_operator.md index 9f13406b2fa9326d98827994ca52d0ee50bd0fef..4b9dc542336b9e5e721556b4061e4c2ba7294e60 100644 --- 
a/tutorials/experts/source_en/others/ms_operator.md +++ b/tutorials/experts/source_en/others/ms_operator.md @@ -1,6 +1,6 @@ # Performing Distributed Training on K8S Clusters - + MindSpore Operator is a plugin that follows Kubernetes' Operator pattern (based on the CRD, Custom Resource Definition, feature) and implements distributed training on Kubernetes. MindSpore Operator defines three roles in the CRD: Scheduler, PS, and Worker; users can easily use MindSpore on K8S for distributed training through simple YAML file configuration. The code repository of MindSpore Operator is described in: [ms-operator](https://gitee.com/mindspore/ms-operator/).