From cab6c6de08e78825944e05b80e50349dd5f17408 Mon Sep 17 00:00:00 2001
From: lilei
Date: Thu, 10 Sep 2020 09:30:00 +0800
Subject: [PATCH] Modify notes

---
 tutorials/notebook/linear_regression.ipynb         | 14 +++++++-------
 tutorials/notebook/mixed_precision.ipynb           | 14 +++++++-------
 .../advanced_use/checkpoint_for_hybrid_parallel.md |  8 ++++----
 .../advanced_use/checkpoint_for_hybrid_parallel.md |  8 ++++----
 .../source_zh_cn/quick_start/linear_regression.md  | 12 ++++++------
 tutorials/tutorial_code/linear_regression.py       |  2 +-
 6 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/tutorials/notebook/linear_regression.ipynb b/tutorials/notebook/linear_regression.ipynb
index 291d0a4c5b..4e3665dcf1 100644
--- a/tutorials/notebook/linear_regression.ipynb
+++ b/tutorials/notebook/linear_regression.ipynb
@@ -209,7 +209,7 @@
     "from mindspore import nn\n",
     "\n",
     "net = nn.Dense(1,1,TruncatedNormal(0.02),TruncatedNormal(0.02))\n",
-    "print(\"weight:\", net.weight.default_input[0][0], \"bias:\", net.bias.default_input[0])"
+    "print(\"weight:\", net.weight.data[0][0], \"bias:\", net.bias.data[0])"
    ]
   },
   {
@@ -248,7 +248,7 @@
    ],
    "source": [
     "x = np.arange(-10, 10, 0.1)\n",
-    "y = x * (net.weight.default_input[0][0].asnumpy()) + (net.bias.default_input[0].asnumpy())\n",
+    "y = x * (net.weight.data[0][0].asnumpy()) + (net.bias.data[0].asnumpy())\n",
     "plt.scatter(x1, y1, color=\"red\", s=5)\n",
     "plt.plot(x, y, \"blue\")\n",
     "plt.title(\"Eval data and net\")\n",
@@ -691,15 +691,15 @@
     "    data_x,data_y = get_data(batch_size)\n",
     "    grads = train_network(data_x,data_y) \n",
     "    optim(grads)\n",
-    "    plot_model_and_datasets(net.weight.default_input, \n",
-    "                            net.bias.default_input, data_x, data_y)\n",
+    "    plot_model_and_datasets(net.weight.data, \n",
+    "                            net.bias.data, data_x, data_y)\n",
     "    display.clear_output(wait=True)\n",
     "\n",
     "output = net(eval_x)\n",
     "loss_output = criterion(output, eval_label)\n",
     "print(\"loss_value:\", loss_output.asnumpy())\n",
-    "plot_model_and_datasets(net.weight.default_input, net.bias.default_input, data_x,data_y)\n",
-    "print(\"weight:\", net.weight.default_input[0][0], \"bias:\", net.bias.default_input[0])"
+    "plot_model_and_datasets(net.weight.data, net.bias.data, data_x,data_y)\n",
+    "print(\"weight:\", net.weight.data[0][0], \"bias:\", net.bias.data[0])"
    ]
   },
   {
@@ -745,4 +745,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 4
-}
+}
\ No newline at end of file
diff --git a/tutorials/notebook/mixed_precision.ipynb b/tutorials/notebook/mixed_precision.ipynb
index 1455e1710d..149d1ebf8a 100644
--- a/tutorials/notebook/mixed_precision.ipynb
+++ b/tutorials/notebook/mixed_precision.ipynb
@@ -836,13 +836,13 @@
     "    else:\n",
     "        for _, cell in net.cells_and_names():\n",
     "            if isinstance(cell, nn.Conv2d):\n",
-    "                cell.weight.default_input = weight_init.initializer(weight_init.XavierUniform(),\n",
-    "                                                                    cell.weight.default_input.shape,\n",
-    "                                                                    cell.weight.default_input.dtype).to_tensor()\n",
+    "                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),\n",
+    "                                                             cell.weight.data.shape,\n",
+    "                                                             cell.weight.data.dtype).to_tensor())\n",
     "            if isinstance(cell, nn.Dense):\n",
-    "                cell.weight.default_input = weight_init.initializer(weight_init.TruncatedNormal(),\n",
-    "                                                                    cell.weight.default_input.shape,\n",
-    "                                                                    cell.weight.default_input.dtype).to_tensor()\n",
+    "                cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(),\n",
+    "                                                             cell.weight.data.shape,\n",
+    "                                                             cell.weight.data.dtype).to_tensor())\n",
     "    # init lr\n",
     "    warmup_epochs = 5\n",
     "    lr_init = 0.01\n",
@@ -991,4 +991,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 4
-}
+}
\ No newline at end of file
diff --git a/tutorials/source_en/advanced_use/checkpoint_for_hybrid_parallel.md b/tutorials/source_en/advanced_use/checkpoint_for_hybrid_parallel.md
index 4843a034aa..29d10dbbbf 100644
--- a/tutorials/source_en/advanced_use/checkpoint_for_hybrid_parallel.md
+++ b/tutorials/source_en/advanced_use/checkpoint_for_hybrid_parallel.md
@@ -231,11 +231,11 @@ The following uses a specific model parameter as an example. The parameter name
 3. Modify values of model parameters.
 
    ```
-   new_param.set_parameter_data(tensor_slice, True)
-   new_param_moments.set_parameter_data(tensor_slice_moments, True)
+   new_param.set_data(tensor_slice, True)
+   new_param_moments.set_data(tensor_slice_moments, True)
    ```
 
-   - `set_parameter_data`: sets the value of a model parameter. The API parameter type is Tensor or number.
+   - `set_data`: sets the value of a model parameter. The API parameter type is Tensor or number.
 
 ### Step 3: Loading the Modified Parameter Data to the Network
 
@@ -486,7 +486,7 @@ User process:
     rank = get_rank()
     tensor_slice = Tensor(slice_list[rank])
     # modify model parameter data values
-    new_param.set_parameter_data(tensor_slice, True)
+    new_param.set_data(tensor_slice, True)
 
     # load the modified parameter data into the network
     weight = np.ones([4, 8]).astype(np.float32)
diff --git a/tutorials/source_zh_cn/advanced_use/checkpoint_for_hybrid_parallel.md b/tutorials/source_zh_cn/advanced_use/checkpoint_for_hybrid_parallel.md
index b19a874640..2b51952eab 100644
--- a/tutorials/source_zh_cn/advanced_use/checkpoint_for_hybrid_parallel.md
+++ b/tutorials/source_zh_cn/advanced_use/checkpoint_for_hybrid_parallel.md
@@ -233,11 +233,11 @@ param_dict = load_checkpoint("./CKP-Integrated_1-4_32.ckpt")
 3. 修改模型参数数据值。
 
    ```
-   new_param.set_parameter_data(tensor_slice, True)
-   new_param_moments.set_parameter_data(tensor_slice_moments, True)
+   new_param.set_data(tensor_slice, True)
+   new_param_moments.set_data(tensor_slice_moments, True)
    ```
 
-   - `set_parameter_data`:设置模型参数的值,接口参数类型为Tensor 或number。
+   - `set_data`:设置模型参数的值,接口参数类型为Tensor 或number。
 
 ### 步骤3:将修改后的参数数据加载到网络中
 
@@ -487,7 +487,7 @@ load_param_into_net(opt, param_dict)
     rank = get_rank()
     tensor_slice = Tensor(slice_list[rank])
     # modify model parameter data values
-    new_param.set_parameter_data(tensor_slice, True)
+    new_param.set_data(tensor_slice, True)
 
     # load the modified parameter data into the network
     weight = np.ones([4, 8]).astype(np.float32)
diff --git a/tutorials/source_zh_cn/quick_start/linear_regression.md b/tutorials/source_zh_cn/quick_start/linear_regression.md
index a89ad01f0c..9a042225ba 100644
--- a/tutorials/source_zh_cn/quick_start/linear_regression.md
+++ b/tutorials/source_zh_cn/quick_start/linear_regression.md
@@ -129,7 +129,7 @@ from mindspore.common.initializer import TruncatedNormal
 from mindspore import nn
 
 net = nn.Dense(1,1,TruncatedNormal(0.02),TruncatedNormal(0.02))
-print("weight:", net.weight.default_input[0][0], "bias:", net.bias.default_input[0])
+print("weight:", net.weight.data[0][0], "bias:", net.bias.data[0])
 ```
 
 输出结果:
@@ -144,7 +144,7 @@
 
 ```python
 x = np.arange(-10, 10, 0.1)
-y = x * (net.weight.default_input[0][0].asnumpy()) + (net.bias.default_input[0].asnumpy())
+y = x * (net.weight.data[0][0].asnumpy()) + (net.bias.data[0].asnumpy())
 plt.scatter(x1, y1, color="red", s=5)
 plt.plot(x, y, "blue")
 plt.title("Eval data and net")
@@ -373,15 +373,15 @@ for i in range(step_size):
     data_x,data_y = get_data(batch_size)
     grads = train_network(data_x,data_y)
     optim(grads)
-    plot_model_and_datasets(net.weight.default_input, 
-                            net.bias.default_input, data_x, data_y)
+    plot_model_and_datasets(net.weight.data, 
+                            net.bias.data, data_x, data_y)
     display.clear_output(wait=True)
 
 output = net(eval_x)
 loss_output = criterion(output, eval_label)
 print("loss_value:", loss_output.asnumpy())
-plot_model_and_datasets(net.weight.default_input, net.bias.default_input, data_x,data_y)
-print("weight:", net.weight.default_input[0][0], "bias:", net.bias.default_input[0])
+plot_model_and_datasets(net.weight.data, net.bias.data, data_x,data_y)
+print("weight:", net.weight.data[0][0], "bias:", net.bias.data[0])
 ```
 
 输出结果:
diff --git a/tutorials/tutorial_code/linear_regression.py b/tutorials/tutorial_code/linear_regression.py
index bfed5b389e..6e53c6ae09 100644
--- a/tutorials/tutorial_code/linear_regression.py
+++ b/tutorials/tutorial_code/linear_regression.py
@@ -71,4 +71,4 @@ for i in range(step_size):
    print(loss_output.asnumpy())
 
 # Print final weight parameters
-print("weight:", net.weight.default_input[0][0], "bias:", net.bias.default_input[0])
\ No newline at end of file
+print("weight:", net.weight.data[0][0], "bias:", net.bias.data[0])
\ No newline at end of file
-- 
Gitee
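
For reference, a minimal standalone sketch (not part of the patch) of the Parameter API these tutorials are migrated to. It assumes a MindSpore release in which `Parameter.data` and `Parameter.set_data` replace the removed `default_input` and `set_parameter_data`: parameter values are read through `.data` or `.asnumpy()` and overwritten through `set_data`.

```python
import numpy as np
from mindspore import Tensor, nn
from mindspore.common.initializer import TruncatedNormal

# Same single-neuron Dense layer as in the linear regression tutorial.
net = nn.Dense(1, 1, TruncatedNormal(0.02), TruncatedNormal(0.02))

# Read parameter values: `.data` gives the parameter's Tensor, `.asnumpy()` a NumPy copy.
print("weight:", net.weight.data[0][0], "bias:", net.bias.data[0])

# Overwrite a parameter in place with a Tensor of matching shape and dtype.
net.weight.set_data(Tensor(np.ones([1, 1]).astype(np.float32)))
print("updated weight:", net.weight.asnumpy()[0][0])
```

The same `set_data` call is what the hybrid-parallel checkpoint guide uses to load a sliced Tensor back into a model parameter.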