From b85ddccdbffdab7f8524c2a9dacd298e2e95f071 Mon Sep 17 00:00:00 2001
From: huan <3174348550@qq.com>
Date: Wed, 18 Jun 2025 15:27:40 +0800
Subject: [PATCH] fix inconsistencies in files

---
 docs/lite/docs/source_en/build/build.md          |  4 ++--
 docs/lite/docs/source_en/mindir/build.md         |  2 +-
 .../source_en/medium-range/graphcast_tp.ipynb    |  2 +-
 .../docs/source_en/feature/evaluation.md         |  4 ++--
 .../feature/training_hyperparameters.md          |  2 --
 docs/mindformers/docs/source_zh_cn/index.rst     |  2 +-
 .../source_en/case_library/shor_algorithm.ipynb  |  4 ++--
 .../features/compile/graph_optimization.md       |  4 ++--
 tutorials/source_en/beginner/autograd.md         | 16 +++++-----------
 .../source_en/orange_pi/environment_setup.md     |  3 +--
 10 files changed, 17 insertions(+), 26 deletions(-)

diff --git a/docs/lite/docs/source_en/build/build.md b/docs/lite/docs/source_en/build/build.md
index d66dc85524..f126f443e5 100644
--- a/docs/lite/docs/source_en/build/build.md
+++ b/docs/lite/docs/source_en/build/build.md
@@ -161,7 +161,7 @@ Then, run the following commands in the root directory of the source code to com
     Compile aarch32 package
 
     ```bash
-    export OHOS_NDK=OHOS NDK path
+    export OHOS_NDK=NDK path
     export TOOLCHAIN_NAME=ohos
     bash build.sh -I arm32 -j32
     ```
@@ -169,7 +169,7 @@ Then, run the following commands in the root directory of the source code to com
     Compile aarch64 package
 
     ```bash
-    export OHOS_NDK=OHOS NDK path
+    export OHOS_NDK=NDK path
     export TOOLCHAIN_NAME=ohos
     bash build.sh -I arm64 -j32
     ```
diff --git a/docs/lite/docs/source_en/mindir/build.md b/docs/lite/docs/source_en/mindir/build.md
index 83995cb60e..3dc8f99ae4 100644
--- a/docs/lite/docs/source_en/mindir/build.md
+++ b/docs/lite/docs/source_en/mindir/build.md
@@ -248,7 +248,7 @@ mindspore-lite-{version}-linux-{arch}
 │   └── securec
 └── tools
     ├── akg
-    |   └── akg-{version}-{python}-{linux}-{arch}.whl # AKG Python whl package
+    |   └── akg-{version}-{python}-linux-{arch}.whl # AKG Python whl package
     ├── benchmark # Benchmarking Tools
     │   └── benchmark # Benchmarking tool executable file
     └── converter # Model converter
diff --git a/docs/mindearth/docs/source_en/medium-range/graphcast_tp.ipynb b/docs/mindearth/docs/source_en/medium-range/graphcast_tp.ipynb
index 625c23f290..8ce9fb7052 100644
--- a/docs/mindearth/docs/source_en/medium-range/graphcast_tp.ipynb
+++ b/docs/mindearth/docs/source_en/medium-range/graphcast_tp.ipynb
@@ -274,7 +274,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def plt_comparison(pred, label, root_dir='./images'):\n",
+    "def plt_comparison(pred, label, root_dir='./'):\n",
     "    plt.subplot(1, 2, 1)\n",
     "    plt.imshow(label, cmap='jet')\n",
     "    plt.title('Truth')\n",
diff --git a/docs/mindformers/docs/source_en/feature/evaluation.md b/docs/mindformers/docs/source_en/feature/evaluation.md
index 3984f1e007..3040735d5a 100644
--- a/docs/mindformers/docs/source_en/feature/evaluation.md
+++ b/docs/mindformers/docs/source_en/feature/evaluation.md
@@ -300,7 +300,7 @@ Install Ubuntu system according to the following steps:
 1. Pull the Decord code, enter the Decord directory, initialize and update Decord dependencies, and execute the following command:
 
     ```bash
-    git clone https://github.com/dmlc/decord.git
+    git clone --recursive -b v0.6.0 https://github.com/dmlc/decord.git
     cd decord
     ```
 
@@ -334,7 +334,7 @@ For OpenEuler systems follow the steps below to install:
 1. Pull the Decord code and enter the `decord` directory.
 
     ```bash
-    git clone --recursive https://github.com/dmlc/decord
+    git clone --recursive -b v0.6.0 https://github.com/dmlc/decord
     cd decord
     ```
diff --git a/docs/mindformers/docs/source_en/feature/training_hyperparameters.md b/docs/mindformers/docs/source_en/feature/training_hyperparameters.md
index 7b8ebe4a9e..8740fdc422 100644
--- a/docs/mindformers/docs/source_en/feature/training_hyperparameters.md
+++ b/docs/mindformers/docs/source_en/feature/training_hyperparameters.md
@@ -2,8 +2,6 @@
 
 [![View Source On Gitee](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source_en.svg)](https://gitee.com/mindspore/docs/blob/master/docs/mindformers/docs/source_en/feature/training_hyperparameters.md)
 
-## Overview
-
 Hyperparameters significantly affect model performance, with different settings potentially leading to vastly different outcomes. Choices regarding these parameters influence aspects such as training speed, convergence, capacity, and generalization ability.
 
 They are not learned directly from the training data but are determined by developers based on experience, experiments, or tuning processes.
diff --git a/docs/mindformers/docs/source_zh_cn/index.rst b/docs/mindformers/docs/source_zh_cn/index.rst
index dd142e8086..d841d97e64 100644
--- a/docs/mindformers/docs/source_zh_cn/index.rst
+++ b/docs/mindformers/docs/source_zh_cn/index.rst
@@ -78,7 +78,7 @@ MindSpore Transformers功能特性说明
 
 - `配置文件 `_
 
-  支持使用`YAML`文件集中管理和调整任务中的可配置项。
+  支持使用 `YAML` 文件集中管理和调整任务中的可配置项。
 
 - `日志 `_
 
diff --git a/docs/mindquantum/docs/source_en/case_library/shor_algorithm.ipynb b/docs/mindquantum/docs/source_en/case_library/shor_algorithm.ipynb
index ac94c3ddf3..35074ea118 100644
--- a/docs/mindquantum/docs/source_en/case_library/shor_algorithm.ipynb
+++ b/docs/mindquantum/docs/source_en/case_library/shor_algorithm.ipynb
@@ -105,7 +105,7 @@
     "\n",
     "Although the complete $|y \oplus f(x)\rangle$ transformation requires more complex quantum circuits (like quantum modular adders and multipliers), we can directly construct this $2^{2q} \times 2^{2q}$ unitary matrix $U_{a,N}$. This matrix is essentially a permutation matrix that uniquely maps each input basis state $|x\rangle|y\rangle$ to the output basis state $|x\rangle|y \oplus (a^x \bmod N)\rangle$.\n",
     "\n",
-    "**Implementation Steps:**\n",
+    "#### Implementation Steps\n",
     "\n",
     "1. **Determine the number of qubits:**\n",
     "    * **Target register (register 2):** Needs $q = \lceil \log_2 N \rceil$ qubits to store $a^x \bmod N$ (range from $0$ to $N-1$).\n",
@@ -121,7 +121,7 @@
     "\n",
     "4. **Create `UnivMathGate`:** Instantiate a `UnivMathGate` with the constructed matrix $U$ and apply it to `register2 + register1` (ensuring $y$ corresponds to the lower bits).\n",
     "\n",
-    "**Example: N=15, a=2:**\n",
+    "#### Example: N=15, a=2\n",
     "\n",
     "We need $q=4$ qubits, because $2^4 = 16 \ge 15$. The total number of qubits $n = 2q = 8$. The Hilbert space dimension is $2^8 = 256$.\n",
     "\n",
diff --git a/docs/mindspore/source_en/features/compile/graph_optimization.md b/docs/mindspore/source_en/features/compile/graph_optimization.md
index d399edd364..3c5b9cfe82 100644
--- a/docs/mindspore/source_en/features/compile/graph_optimization.md
+++ b/docs/mindspore/source_en/features/compile/graph_optimization.md
@@ -269,7 +269,7 @@ In MindSpore's graph mode, the purpose and techniques of redundancy elimination
     %2(d) = PrimFunc_Div(%0, %1) : (, ) -> ()
     Return(%2)
-        : ()
 }
 ```
@@ -311,7 +311,7 @@ In MindSpore's graph mode, the purpose and techniques of redundancy elimination
     %2(d) = PrimFunc_Div(%0, %1) : (, ) -> ()
     Return(%2) cnode_attrs: {checkpoint: Bool(1)}
-        : ()
 }
 ```
diff --git a/tutorials/source_en/beginner/autograd.md b/tutorials/source_en/beginner/autograd.md
index 85c79b61ac..d2dbace125 100644
--- a/tutorials/source_en/beginner/autograd.md
+++ b/tutorials/source_en/beginner/autograd.md
@@ -83,8 +83,7 @@ print(grads)
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
- [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]]),
- Tensor(shape=[3], dtype=Float32, value= [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]))
+ [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]]), Tensor(shape=[3], dtype=Float32, value= [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]))
 ```
 
 ## Stop Gradient
@@ -112,8 +111,7 @@ print(grads)
  [ 1.06568694e+00, 1.05373347e+00, 1.30146706e+00],
  [ 1.06568694e+00, 1.05373347e+00, 1.30146706e+00],
  [ 1.06568694e+00, 1.05373347e+00, 1.30146706e+00],
- [ 1.06568694e+00, 1.05373347e+00, 1.30146706e+00]]),
- Tensor(shape=[3], dtype=Float32, value= [ 1.06568694e+00, 1.05373347e+00, 1.30146706e+00]))
+ [ 1.06568694e+00, 1.05373347e+00, 1.30146706e+00]]), Tensor(shape=[3], dtype=Float32, value= [ 1.06568694e+00, 1.05373347e+00, 1.30146706e+00]))
 ```
 
 You can see that the gradient values corresponding to $w$ and $b$ have changed. At this point, if you want to block out the effect of z on the gradient, i.e., still only find the derivative of the parameter with respect to loss, you can use the `ops.stop_gradient` interface to truncate the gradient here. We add the `function` implementation to `stop_gradient` and execute it.
@@ -137,8 +135,7 @@ print(grads)
  [ 1.32618928e+00, 1.01589143e+00, 1.04216456e+00],
  [ 1.32618928e+00, 1.01589143e+00, 1.04216456e+00],
  [ 1.32618928e+00, 1.01589143e+00, 1.04216456e+00],
- [ 1.32618928e+00, 1.01589143e+00, 1.04216456e+00]]),
- Tensor(shape=[3], dtype=Float32, value= [ 1.32618928e+00, 1.01589143e+00, 1.04216456e+00]))
+ [ 1.32618928e+00, 1.01589143e+00, 1.04216456e+00]]), Tensor(shape=[3], dtype=Float32, value= [ 1.32618928e+00, 1.01589143e+00, 1.04216456e+00]))
 ```
 
 It can be seen that the gradient values corresponding to $w$ and $b$ are the same as the gradient values found by the initial `function`.
@@ -166,9 +163,7 @@ print(grads, z)
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
- [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]]),
- Tensor(shape=[3], dtype=Float32, value= [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]))
-[ 3.8211915 -2.994512 -1.932323 ]
+ [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]]), Tensor(shape=[3], dtype=Float32, value= [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02])) [ 3.8211915 -2.994512 -1.932323 ]
 ```
 
 ## Calculating Neural Network Gradient
@@ -228,8 +223,7 @@ print(grads)
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
  [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02],
- [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]]),
- Tensor(shape=[3], dtype=Float32, value= [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]))
+ [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]]), Tensor(shape=[3], dtype=Float32, value= [ 3.26189250e-01, 1.58914644e-02, 4.21645455e-02]))
 ```
 
 Executing the differentiation function, and we can see that the gradient value is the same as the gradient value obtained from the previous `function`.
diff --git a/tutorials/source_en/orange_pi/environment_setup.md b/tutorials/source_en/orange_pi/environment_setup.md
index b3c023320a..ba3fa9b1d8 100644
--- a/tutorials/source_en/orange_pi/environment_setup.md
+++ b/tutorials/source_en/orange_pi/environment_setup.md
@@ -279,8 +279,7 @@ Step 4 Go to the Kernels package download directory.
 Step 5 Add execution permissions to the kernels package.
 
 ```bash
-(base) root@orangepiaipro: /home/HwHiAiUser/Downloads# chmod +x ./
-Ascend-cann-kernels-310b_8.0.0_linux-aarch64.run
+(base) root@orangepiaipro: /home/HwHiAiUser/Downloads# chmod +x ./Ascend-cann-kernels-310b_8.0.0_linux-aarch64.run
 ```
 
 Step 6 Execute the following command to upgrade the software.
-- 
Gitee