diff --git a/tutorials/basic/Makefile b/tutorials/basic/Makefile deleted file mode 100644 index 1eff8952707bdfa503c8d60c1e9a903053170ba2..0000000000000000000000000000000000000000 --- a/tutorials/basic/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source_zh_cn -BUILDDIR = build_zh_cn - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/tutorials/basic/requirements.txt b/tutorials/basic/requirements.txt deleted file mode 100644 index 49a77fdec3a5c745edd40eaa223883c31500e975..0000000000000000000000000000000000000000 --- a/tutorials/basic/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -sphinx >= 2.2.1, <= 2.4.4 -docutils == 0.16 -myst_parser == 0.14.0 -sphinx-markdown-tables -sphinx_rtd_theme == 0.5.2 -numpy -nbsphinx -IPython -ipykernel -jieba diff --git a/tutorials/basic/source_en/advanced/test2.md b/tutorials/basic/source_en/advanced/test2.md deleted file mode 100644 index f9ccad0a136294b61866927d4ea2af47e16db160..0000000000000000000000000000000000000000 --- a/tutorials/basic/source_en/advanced/test2.md +++ /dev/null @@ -1,3 +0,0 @@ -# Test2 - -Coming soon. \ No newline at end of file diff --git a/tutorials/basic/source_en/beginner/test1.md b/tutorials/basic/source_en/beginner/test1.md deleted file mode 100644 index cab9dc1609fc287bfdc2fc7a905e187faca88e5a..0000000000000000000000000000000000000000 --- a/tutorials/basic/source_en/beginner/test1.md +++ /dev/null @@ -1,3 +0,0 @@ -# Test1 - -Coming soon. \ No newline at end of file diff --git a/tutorials/basic/source_en/conf.py b/tutorials/basic/source_en/conf.py deleted file mode 100644 index 2747c54a258e9e5d4bf02818bb634c85a9d53951..0000000000000000000000000000000000000000 --- a/tutorials/basic/source_en/conf.py +++ /dev/null @@ -1,90 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import shutil -import IPython -import re -import sys - -# -- Project information ----------------------------------------------------- - -project = 'MindSpore' -copyright = '2022, MindSpore' -author = 'MindSpore' - -# The full version, including alpha/beta/rc tags -release = 'master' - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. 
-extensions = [ - 'sphinx_markdown_tables', - 'myst_parser', - 'nbsphinx', - 'sphinx.ext.mathjax', - 'IPython.sphinxext.ipython_console_highlighting' -] - -source_suffix = { - '.rst': 'restructuredtext', - '.md': 'markdown', -} - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] - -highlight_language = 'python' - -pygments_style = 'sphinx' - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' -#modify layout.html for sphinx_rtd_theme. -import sphinx_rtd_theme -layout_target = os.path.join(os.path.dirname(sphinx_rtd_theme.__file__), 'layout.html') -layout_src = '../../../resource/_static/layout.html' -if os.path.exists(layout_target): - os.remove(layout_target) -shutil.copy(layout_src, layout_target) - -html_search_language = 'en' - -html_static_path = ['_static'] - -sys.path.append(os.path.abspath('../../../resource/sphinx_ext')) -import anchor_mod -import nbsphinx_mod - - -sys.path.append(os.path.abspath('../../../resource/search')) -import search_code - -sys.path.append(os.path.abspath('../../../resource/custom_directives')) -from custom_directives import IncludeCodeDirective - -def setup(app): - app.add_directive('includecode', IncludeCodeDirective) - diff --git a/tutorials/basic/source_en/index.rst b/tutorials/basic/source_en/index.rst deleted file mode 100644 index 2d1091764c6568aaa498032038ab3912dc9b4036..0000000000000000000000000000000000000000 --- a/tutorials/basic/source_en/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. MindSpore documentation master file, created by - sphinx-quickstart on Thu Mar 24 11:00:00 2022. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -For Beginners -========================= - -.. toctree:: - :glob: - :maxdepth: 1 - :caption: Beginner - - beginner/test1 - -.. toctree:: - :glob: - :maxdepth: 1 - :caption: Advanced - - advanced/test2 diff --git a/tutorials/basic/source_zh_cn/advanced/test2.md b/tutorials/basic/source_zh_cn/advanced/test2.md deleted file mode 100644 index 752e358048fe1f605212660db8f561f755d3d9ea..0000000000000000000000000000000000000000 --- a/tutorials/basic/source_zh_cn/advanced/test2.md +++ /dev/null @@ -1,3 +0,0 @@ -# 测试2 - -即将更新。 \ No newline at end of file diff --git a/tutorials/basic/source_zh_cn/beginner/test1.md b/tutorials/basic/source_zh_cn/beginner/test1.md deleted file mode 100644 index dda766ca63b62d98aa033d375642ab01794b2508..0000000000000000000000000000000000000000 --- a/tutorials/basic/source_zh_cn/beginner/test1.md +++ /dev/null @@ -1,3 +0,0 @@ -# 测试1 - -即将更新。 \ No newline at end of file diff --git a/tutorials/basic/source_zh_cn/conf.py b/tutorials/basic/source_zh_cn/conf.py deleted file mode 100644 index a7529d398aa3068d2229951170b6a946c93d72f4..0000000000000000000000000000000000000000 --- a/tutorials/basic/source_zh_cn/conf.py +++ /dev/null @@ -1,94 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. 
For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import shutil -import IPython -import re -import sys - -# -- Project information ----------------------------------------------------- - -project = 'MindSpore' -copyright = '2022, MindSpore' -author = 'MindSpore' - -# The full version, including alpha/beta/rc tags -release = 'master' - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx_markdown_tables', - 'myst_parser', - 'nbsphinx', - 'sphinx.ext.mathjax', - 'IPython.sphinxext.ipython_console_highlighting' -] - -source_suffix = { - '.rst': 'restructuredtext', - '.md': 'markdown', -} - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] - -highlight_language = 'python' - -pygments_style = 'sphinx' - -myst_update_mathjax = False - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' -#modify layout.html for sphinx_rtd_theme. -import sphinx_rtd_theme -layout_target = os.path.join(os.path.dirname(sphinx_rtd_theme.__file__), 'layout.html') -layout_src = '../../../resource/_static/layout.html' -if os.path.exists(layout_target): - os.remove(layout_target) -shutil.copy(layout_src, layout_target) - -html_search_language = 'zh' - -html_search_options = {'dict': '../../resource/jieba.txt'} - -html_static_path = ['_static'] - -sys.path.append(os.path.abspath('../../../resource/sphinx_ext')) -import anchor_mod -import nbsphinx_mod - - -sys.path.append(os.path.abspath('../../../resource/search')) -import search_code - -sys.path.append(os.path.abspath('../../../resource/custom_directives')) -from custom_directives import IncludeCodeDirective - -def setup(app): - app.add_directive('includecode', IncludeCodeDirective) - diff --git a/tutorials/basic/source_zh_cn/index.rst b/tutorials/basic/source_zh_cn/index.rst deleted file mode 100644 index fcf3e79e49063322f82b26f2ac67aa4a29c78162..0000000000000000000000000000000000000000 --- a/tutorials/basic/source_zh_cn/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. MindSpore documentation master file, created by - sphinx-quickstart on Thu Mar 24 11:00:00 2022. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -面向新手 -===================== - -.. toctree:: - :glob: - :maxdepth: 1 - :caption: 初级 - - beginner/test1 - -.. 
toctree:: - :glob: - :maxdepth: 1 - :caption: 进阶 - - advanced/test2 - - diff --git a/tutorials/source_en/autograd.md b/tutorials/source_en/autograd.md deleted file mode 100644 index cf065256614e9ec767474dc9e18d62f85c53150c..0000000000000000000000000000000000000000 --- a/tutorials/source_en/autograd.md +++ /dev/null @@ -1,195 +0,0 @@ -# Automatic Differentiation - -`Ascend` `GPU` `CPU` `Beginner` `Model Development` - - - -Automatic differentiation is commonly used when implementing machine learning algorithms such as backpropagation for training neural networks. By using automatic differentiation, multi-layer composite functions could be divided into several simple computational steps, thereby helping users avoid implementing complex derivation codes. As a result, automatic differentiation enables ease of use of MindSpore. - -The first-order derivative method of MindSpore is `mindspore.ops.GradOperation (get_all=False, get_by_list=False, sens_param=False)`. When `get_all` is set to `False`, the first input derivative is computed. When `get_all` is set to `True`, all input derivatives are computed. When `get_by_list` is set to `False`, weight derivatives are not computed. When `get_by_list` is set to `True`, the weight derivative is computed. `sens_param` scales the output value of the network to change the final gradient. The following uses the MatMul operator derivative for in-depth analysis. - -Import the required modules and APIs: - -```python -import numpy as np -import mindspore.nn as nn -import mindspore.ops as ops -from mindspore import Tensor -from mindspore import ParameterTuple, Parameter -from mindspore import dtype as mstype -``` - -## First-order Derivative of the Input - -To compute the input derivative, you need to define a network requiring a derivative. The following uses a network $f(x,y)=z *x* y$ formed by the MatMul operator as an example. - -The network structure is as follows: - -```python -class Net(nn.Cell): - def __init__(self): - super(Net, self).__init__() - self.matmul = ops.MatMul() - self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z') - - def construct(self, x, y): - x = x * self.z - out = self.matmul(x, y) - return out -``` - -Define the network requiring the derivative. In the `__init__` function, define the `self.net` and `ops.GradOperation` networks. In the `construct` function, compute the derivative of `self.net`. - -The network structure is as follows: - -```python -class GradNetWrtX(nn.Cell): - def __init__(self, net): - super(GradNetWrtX, self).__init__() - self.net = net - self.grad_op = ops.GradOperation() - - def construct(self, x, y): - gradient_function = self.grad_op(self.net) - return gradient_function(x, y) -``` - -Define the input and display the output: - -```python -x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32) -y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32) -output = GradNetWrtX(Net())(x, y) -print(output) -``` - -```text - [[4.5099998 2.7 3.6000001] - [4.5099998 2.7 3.6000001]] -``` - -If the derivatives of the `x` and `y` inputs are considered, you only need to set `self.grad_op = GradOperation(get_all=True)` in `GradNetWrtX`. - -## First-order Derivative of the Weight - -To compute weight derivatives, you need to set `get_by_list` in `ops.GradOperation` to `True`. 
- -The `GradNetWrtX` structure is as follows: - -```python -class GradNetWrtX(nn.Cell): - def __init__(self, net): - super(GradNetWrtX, self).__init__() - self.net = net - self.params = ParameterTuple(net.trainable_params()) - self.grad_op = ops.GradOperation(get_by_list=True) - - def construct(self, x, y): - gradient_function = self.grad_op(self.net, self.params) - return gradient_function(x, y) -``` - -Run and display the output: - -```python -output = GradNetWrtX(Net())(x, y) -print(output) -``` - -```text -(Tensor(shape=[1], dtype=Float32, value= [ 2.15359993e+01]),) -``` - -If computation of certain weight derivatives is not required, set `requirements_grad` to `False` when defining the network requiring derivatives. - -```Python -self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z', requires_grad=False) -``` - -## Gradient Value Scaling - -You can use the `sens_param` parameter to scale the output value of the network to change the final gradient. Set `sens_param` in `ops.GradOperation` to `True` and determine the scaling index. The dimension must be the same as the output dimension. - -The scaling index `self.grad_wrt_output` may be in the following format: - -```python -self.grad_wrt_output = Tensor([[s1, s2, s3], [s4, s5, s6]]) -``` - -The `GradNetWrtX` structure is as follows: - -```python -class GradNetWrtX(nn.Cell): - def __init__(self, net): - super(GradNetWrtX, self).__init__() - self.net = net - self.grad_op = ops.GradOperation(sens_param=True) - self.grad_wrt_output = Tensor([[0.1, 0.6, 0.2], [0.8, 1.3, 1.1]], dtype=mstype.float32) - - def construct(self, x, y): - gradient_function = self.grad_op(self.net) - return gradient_function(x, y, self.grad_wrt_output) - -output = GradNetWrtX(Net())(x, y) -print(output) -``` - -```text -[[2.211 0.51 1.49 ] - [5.588 2.68 4.07 ]] -``` - -## Stop Gradient - -We can use `stop_gradient` to disable calculation of gradient for certain operators. For example: - -```python -import numpy as np -import mindspore.nn as nn -import mindspore.ops as ops -from mindspore import Tensor -from mindspore import ParameterTuple, Parameter -from mindspore import dtype as mstype -from mindspore.ops import stop_gradient - -class Net(nn.Cell): - def __init__(self): - super(Net, self).__init__() - self.matmul = ops.MatMul() - - def construct(self, x, y): - out1 = self.matmul(x, y) - out2 = self.matmul(x, y) - out2 = stop_gradient(out2) - out = out1 + out2 - return out - -class GradNetWrtX(nn.Cell): - def __init__(self, net): - super(GradNetWrtX, self).__init__() - self.net = net - self.grad_op = ops.GradOperation() - - def construct(self, x, y): - gradient_function = self.grad_op(self.net) - return gradient_function(x, y) - -x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32) -y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32) -output = GradNetWrtX(Net())(x, y) -print(output) -``` - -```text - [[4.5, 2.7, 3.6], - [4.5, 2.7, 3.6]] -``` - -Here, we set `stop_gradient` to `out2`, so this operator does not have any contribution to gradient. If we delete `out2 = stop_gradient(out2)`, the result is: - -```text - [[9.0, 5.4, 7.2], - [9.0, 5.4, 7.2]] -``` - -After we do not set `stop_gradient` to `out2`, it will make the same contribution to gradient as `out1`. So we can see that each result has doubled. 
\ No newline at end of file
diff --git a/tutorials/source_en/beginner/autograd.md b/tutorials/source_en/beginner/autograd.md
new file mode 100644
index 0000000000000000000000000000000000000000..6fead1c1c05ce181726eb7a3791320bcc8879d0c
--- /dev/null
+++ b/tutorials/source_en/beginner/autograd.md
@@ -0,0 +1,220 @@
+# Automatic Differentiation
+
+
+
+Automatic differentiation computes the derivative of a function at a given point and is a generalization of the backpropagation algorithm. It decomposes a complex mathematical computation into a series of simple basic operations, shielding the user from the details and process of differentiation and greatly lowering the barrier to using the framework.
+
+MindSpore uses `ops.GradOperation` to compute first-order derivatives. Its attributes are the following:
+
+- `get_all`: whether to differentiate with respect to the input parameters; the default value is `False`.
+- `get_by_list`: whether to differentiate with respect to the weight parameters; the default value is `False`.
+- `sens_param`: whether to scale the output value of the network to change the final gradient; the default value is `False`.
+
+This chapter uses `ops.GradOperation` in MindSpore to find first-order derivatives of the function $f(x)=wx+b$.
+
+## First-order Derivative of the Input
+
+The formula needs to be defined before the input can be differentiated:
+$$
+f(x)=wx+b \tag {1}
+$$
+The example code below is an expression of Equation (1). Since MindSpore follows a functional programming style, all computational formulas are expressed as functions.
+
+```python
+import numpy as np
+import mindspore.nn as nn
+from mindspore import Parameter, Tensor
+
+class Net(nn.Cell):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.w = Parameter(Tensor(np.array([6.0], np.float32)), name='w')
+        self.b = Parameter(Tensor(np.array([1.0], np.float32)), name='b')
+
+    def construct(self, x):
+        f = self.w * x + self.b
+        return f
+```
+
+Define the derivative class `GradNet`. In the `__init__` function, define the `self.net` and `ops.GradOperation` networks. In the `construct` function, compute the derivative of `self.net`. Internally, MindSpore derives the following formula (2):
+$$
+f^{'}(x)=w\tag {2}
+$$
+
+```python
+from mindspore import dtype as mstype
+import mindspore.ops as ops
+
+class GradNet(nn.Cell):
+    def __init__(self, net):
+        super(GradNet, self).__init__()
+        self.net = net
+        self.grad_op = ops.GradOperation()
+
+    def construct(self, x):
+        gradient_function = self.grad_op(self.net)
+        return gradient_function(x)
+```
+
+Finally, define the weight parameter as w and find the first-order derivative of formula (1) with respect to the input parameter x. From the running result, the weight in formula (1) is 6, that is:
+$$
+f(x)=wx+b=6*x+1 \tag {3}
+$$
+Differentiating the above equation gives:
+$$
+f^{'}(x)=w=6 \tag {4}
+$$
+
+```python
+x = Tensor([100], dtype=mstype.float32)
+output = GradNet(Net())(x)
+
+print(output)
+```
+
+```text
+[6.]
+```
+
+MindSpore computes first-order derivatives with `ops.GradOperation(get_all=False, get_by_list=False, sens_param=False)`. When `get_all` is `False`, the derivative of only the first input is computed; when it is set to `True`, the derivatives of all inputs are computed.
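+
+For instance, a minimal sketch of differentiating with respect to all inputs (the two-input network here is illustrative rather than part of the running example):
+
+```python
+class MulNet(nn.Cell):
+    def construct(self, x, y):
+        return x * y
+
+class GradAllNet(nn.Cell):
+    def __init__(self, net):
+        super(GradAllNet, self).__init__()
+        self.net = net
+        # get_all=True returns the gradients of all inputs as a tuple
+        self.grad_op = ops.GradOperation(get_all=True)
+
+    def construct(self, x, y):
+        return self.grad_op(self.net)(x, y)
+
+x = Tensor([2.0], dtype=mstype.float32)
+y = Tensor([3.0], dtype=mstype.float32)
+# For f(x, y) = x * y: (df/dx, df/dy) = (y, x), i.e. ([3.], [2.])
+print(GradAllNet(MulNet())(x, y))
+```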
+
+## First-order Derivative of the Weight
+
+To compute weight derivatives, you need to set `get_by_list` in `ops.GradOperation` to `True`.
+
+```python
+from mindspore import ParameterTuple
+
+class GradNet(nn.Cell):
+    def __init__(self, net):
+        super(GradNet, self).__init__()
+        self.net = net
+        self.params = ParameterTuple(net.trainable_params())
+        self.grad_op = ops.GradOperation(get_by_list=True)  # Compute first-order derivatives of the weight parameters
+
+    def construct(self, x):
+        gradient_function = self.grad_op(self.net, self.params)
+        return gradient_function(x)
+```
+
+Next, differentiate the function:
+
+```python
+# Perform the derivative calculation on the function
+x = Tensor([100], dtype=mstype.float32)
+fx = GradNet(Net())(x)
+
+# Print the results
+print(fx)
+print(f"wgrad: {fx[0]}\nbgrad: {fx[1]}")
+```
+
+```text
+(Tensor(shape=[1], dtype=Float32, value= [ 6.00000000e+00]), Tensor(shape=[1], dtype=Float32, value= [ 1.00000000e+00]))
+wgrad: [6.]
+bgrad: [1.]
+```
+
+If computation of certain weight derivatives is not required, set `requires_grad` to `False` for those weights when defining the network requiring derivatives.
+
+```python
+class Net(nn.Cell):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.w = Parameter(Tensor(np.array([6], np.float32)), name='w')
+        self.b = Parameter(Tensor(np.array([1.0], np.float32)), name='b', requires_grad=False)
+
+    def construct(self, x):
+        out = x * self.w + self.b
+        return out
+
+class GradNet(nn.Cell):
+    def __init__(self, net):
+        super(GradNet, self).__init__()
+        self.net = net
+        self.params = ParameterTuple(net.trainable_params())
+        self.grad_op = ops.GradOperation(get_by_list=True)
+
+    def construct(self, x):
+        gradient_function = self.grad_op(self.net, self.params)
+        return gradient_function(x)
+
+# Construct the derivative network
+x = Tensor([5], dtype=mstype.float32)
+fw = GradNet(Net())(x)
+
+print(fw)
+```
+
+```text
+(Tensor(shape=[1], dtype=Float32, value= [ 5.00000000e+00]),)
+```
+
+## Gradient Value Scaling
+
+You can use the `sens_param` parameter to scale the output value of the network to change the final gradient. Set `sens_param` in `ops.GradOperation` to `True` and define the scaling value; its shape must match the output shape of the network.
+
+```python
+class GradNet(nn.Cell):
+    def __init__(self, net):
+        super(GradNet, self).__init__()
+        self.net = net
+        # Derivative operation
+        self.grad_op = ops.GradOperation(sens_param=True)
+        # Scaling value
+        self.grad_wrt_output = Tensor([0.1], dtype=mstype.float32)
+
+    def construct(self, x):
+        gradient_function = self.grad_op(self.net)
+        return gradient_function(x, self.grad_wrt_output)
+
+x = Tensor([6], dtype=mstype.float32)
+output = GradNet(Net())(x)
+
+print(output)
+```
+
+```text
+[0.6]
+```
+
+## Stopping Gradient
+
+We can use `stop_gradient` to disable gradient calculation for certain operators.
For example:
+
+```python
+from mindspore.ops import stop_gradient
+
+class Net(nn.Cell):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.w = Parameter(Tensor(np.array([6], np.float32)), name='w')
+        self.b = Parameter(Tensor(np.array([1.0], np.float32)), name='b')
+
+    def construct(self, x):
+        out = x * self.w + self.b
+        # Stop the gradient here, so out makes no contribution to the gradient calculation
+        out = stop_gradient(out)
+        return out
+
+class GradNet(nn.Cell):
+    def __init__(self, net):
+        super(GradNet, self).__init__()
+        self.net = net
+        self.params = ParameterTuple(net.trainable_params())
+        self.grad_op = ops.GradOperation(get_by_list=True)
+
+    def construct(self, x):
+        gradient_function = self.grad_op(self.net, self.params)
+        return gradient_function(x)
+
+x = Tensor([100], dtype=mstype.float32)
+output = GradNet(Net())(x)
+
+print(f"wgrad: {output[0]}\nbgrad: {output[1]}")
+```
+
+```text
+wgrad: [0.]
+bgrad: [0.]
+```
diff --git a/tutorials/source_en/beginner/dataset.md b/tutorials/source_en/beginner/dataset.md
new file mode 100644
index 0000000000000000000000000000000000000000..14ddbba23a352a27c7f229e8845f1b66715ce4d6
--- /dev/null
+++ b/tutorials/source_en/beginner/dataset.md
@@ -0,0 +1,174 @@
+# Data Processing
+
+
+
+Data is the foundation of deep learning, and high-quality input data benefits the entire deep neural network.
+
+[mindspore.dataset](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore.dataset.html) provides loading interfaces for some commonly used datasets and datasets in standard formats, enabling users to quickly perform data processing operations. For image datasets, users can use `mindvision.dataset` to load and process them. This chapter first describes how to load and process a CIFAR-10 dataset by using the `mindvision.dataset.Cifar10` interface, and then describes how to use `mindspore.dataset.GeneratorDataset` to implement custom dataset loading.
+
+> `mindvision.dataset` is a dataset interface developed on the basis of `mindspore.dataset`. In addition to dataset loading, `mindvision.dataset` further provides dataset download, data processing, and data augmentation capabilities.
+
+## Data Processing Pipeline
+
+In network training and inference, raw data is generally stored on disk or in a database. It first needs to be read into memory through the data loading step and converted into the framework-common tensor format, and is then mapped to a space that is easier to learn through the data processing and augmentation steps, which also increase the number of samples and improve generalization. Finally, the data enters the network for calculation.
+
+The overall process is shown in the following figure:
+
+![dataset_pipeline](https://gitee.com/mindspore/docs/raw/master/tutorials/source_zh_cn/beginner/images/dataset_pipeline.png)
+
+### Dataset
+
+A dataset is a collection of samples. A row of a dataset is one sample that contains one or more features, and may further contain a label. Datasets need to meet certain specifications so that the effectiveness of the model is easier to evaluate.
+
+Dataset supports datasets in multiple formats, including MindRecord (MindSpore's self-developed data format), commonly used public image and text datasets, and user-defined datasets.
+
+### Dataset Loading
+
+Dataset loading enables the model to continuously obtain data during training. Dataset provides corresponding classes for a variety of commonly used datasets. For data files in different storage formats, Dataset also has corresponding loading classes.
+
+Dataset provides multiple samplers (Sampler). A sampler generates the index sequence to read, and the dataset reads the corresponding data according to those indices, helping users sample the dataset in different forms to meet training needs and to handle problems such as an overly large dataset or an uneven sample class distribution.
+
+> It should be noted that the sampler filters and reorders samples; it does not perform the batch operation.
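+
+A quick sketch of sampler-based loading (the toy in-memory source here is illustrative, not the CIFAR-10 data used below):
+
+```python
+import numpy as np
+import mindspore.dataset as ds
+
+# A toy source of 5 samples; each sample is a single feature vector
+data = [(np.array([i], dtype=np.float32),) for i in range(5)]
+
+# RandomSampler generates a shuffled index sequence; GeneratorDataset
+# then reads the samples according to those indices
+sampler = ds.RandomSampler(replacement=False, num_samples=5)
+dataset = ds.GeneratorDataset(data, column_names=["data"], sampler=sampler)
+
+for item in dataset.create_tuple_iterator():
+    print(item[0])
+```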
+
+### Data Processing
+
+After the Dataset loads data into memory, the data is organized as tensors. Tensor is also the basic data structure in data augmentation operations.
+
+## Loading the Dataset
+
+In the following example, the CIFAR-10 dataset is loaded through the `mindvision.dataset.Cifar10` interface. The CIFAR-10 dataset has a total of 60,000 32*32 color images divided into 10 classes, with 6,000 images per class; the dataset contains 50,000 training images and 10,000 test images. The `Cifar10` interface provides the ability to download and load the CIFAR-10 dataset.
+
+![cifar10](https://gitee.com/mindspore/docs/raw/master/tutorials/source_zh_cn/beginner/images/cifar10.jpg)
+
+- `path`: the location of the dataset root directory.
+- `split`: the dataset split to use for training, testing, or inference; one of `train`, `test`, or `infer`, `train` by default.
+- `download`: whether to download the dataset. When `True` is set and the dataset does not exist, the dataset is downloaded and extracted; `False` by default.
+
+```python
+from mindvision.dataset import Cifar10
+
+# Dataset root directory
+data_dir = "./datasets"
+
+# Download, extract and load the CIFAR-10 training dataset
+dataset = Cifar10(path=data_dir, split='train', batch_size=6, resize=32, download=True)
+dataset = dataset.run()
+```
+
+The directory structure of the CIFAR-10 dataset files is as follows:
+
+```text
+datasets/
+├── cifar-10-batches-py
+│   ├── batches.meta
+│   ├── data_batch_1
+│   ├── data_batch_2
+│   ├── data_batch_3
+│   ├── data_batch_4
+│   ├── data_batch_5
+│   ├── readme.html
+│   └── test_batch
+└── cifar-10-python.tar.gz
+```
+
+## Iterating the Dataset
+
+You can use the `create_dict_iterator` interface to create a data iterator to iteratively access data. The accessed data type is `Tensor` by default; if `output_numpy=True` is set, the accessed data type is `numpy.ndarray`.
+
+The following shows the accessed data types, image shapes, and labels.
+
+```python
+data = next(dataset.create_dict_iterator())
+print(f"Data type:{type(data['image'])}\nImage shape: {data['image'].shape}, Label: {data['label']}")
+
+data = next(dataset.create_dict_iterator(output_numpy=True))
+print(f"Data type:{type(data['image'])}\nImage shape: {data['image'].shape}, Label: {data['label']}")
+```
+
+```text
+Data type:<class 'mindspore.common.tensor.Tensor'>
+Image shape: (6, 3, 32, 32), Label: [7 1 2 8 7 8]
+Data type:<class 'numpy.ndarray'>
+Image shape: (6, 3, 32, 32), Label: [8 0 0 2 6 1]
+```
+
+## Data Processing and Augmentation
+
+### Data Processing
+
+The `mindvision.dataset.Cifar10` interface provides data processing capabilities. The data can be processed by simply setting the corresponding attributes.
+
+- `shuffle`: whether to shuffle the dataset. When `True` is set, the order of the samples is shuffled; `False` by default.
+- `batch_size`: the number of data records contained in each batch. `batch_size=2` puts 2 records in each batch; the default `batch_size` value is 32.
+- `repeat_num`: the number of times the dataset is repeated. `repeat_num=1` yields a single pass over the dataset; the default `repeat_num` value is 1.
+
+```python
+import numpy as np
+import matplotlib.pyplot as plt
+
+import mindspore.dataset.vision.c_transforms as transforms
+
+trans = [transforms.HWC2CHW()]
+dataset = Cifar10(data_dir, batch_size=6, resize=32, repeat_num=1, shuffle=True, transform=trans)
+data = dataset.run()
+data = next(data.create_dict_iterator())
+
+images = data["image"].asnumpy()
+labels = data["label"].asnumpy()
+print(f"Image shape: {images.shape}, Label: {labels}")
+
+plt.figure()
+for i in range(1, 7):
+    plt.subplot(2, 3, i)
+    image_trans = np.transpose(images[i-1], (1, 2, 0))
+    plt.title(f"{dataset.index2label[labels[i-1]]}")
+    plt.imshow(image_trans, interpolation="None")
+plt.show()
+```
+
+```text
+Image shape: (6, 3, 32, 32), Label: [9 3 8 9 6 8]
+```
+
+### Data Augmentation
+
+Problems such as too small a data volume or monotonous sample scenes affect the training effect of the model. Users can expand the diversity of samples through data augmentation operations to improve the generalization ability of the model. The `mindvision.dataset.Cifar10` interface uses default data augmentation, and allows users to perform their own data augmentation operations by setting the attributes `transform` and `target_transform`.
+
+- `transform`: augments the dataset image data.
+- `target_transform`: processes the dataset label data.
+
+This section describes data augmentation of the CIFAR-10 dataset by using operators in the `mindspore.dataset.vision.c_transforms` module.
+ +```python +import numpy as np +import matplotlib.pyplot as plt + +import mindspore.dataset.vision.c_transforms as transforms + +# Image augmentation +trans = [ + transforms.RandomCrop((32, 32), (4, 4, 4, 4)), # Automatic cropping of images + transforms.RandomHorizontalFlip(prob=0.5), # Flip the image randomly and horizontally + transforms.HWC2CHW(), # Convert (h, w, c) to (c, h, w) +] + +dataset = Cifar10(data_dir, batch_size=6, resize=32, transform=trans) +data = dataset.run() +data = next(data.create_dict_iterator()) +images = data["image"].asnumpy() +labels = data["label"].asnumpy() +print(f"Image shape: {images.shape}, Label: {labels}") + +plt.figure() +for i in range(1, 7): + plt.subplot(2, 3, i) + image_trans = np.transpose(images[i-1], (1, 2, 0)) + plt.title(f"{dataset.index2label[labels[i-1]]}") + plt.imshow(image_trans, interpolation="None") +plt.show() +``` + +```text +Image shape: (6, 3, 32, 32), Label: [7 6 7 4 5 3] +``` + diff --git a/tutorials/source_en/beginner/images/introduction2.png b/tutorials/source_en/beginner/images/introduction2.png new file mode 100644 index 0000000000000000000000000000000000000000..afb59aced75a3505f209794731c97973feebbf13 Binary files /dev/null and b/tutorials/source_en/beginner/images/introduction2.png differ diff --git a/tutorials/source_en/beginner/images/introduction4.png b/tutorials/source_en/beginner/images/introduction4.png new file mode 100644 index 0000000000000000000000000000000000000000..0f897ef607fdd55924be9f8ec8ced24b1a6ea73e Binary files /dev/null and b/tutorials/source_en/beginner/images/introduction4.png differ diff --git a/tutorials/source_en/beginner/introduction.ipynb b/tutorials/source_en/beginner/introduction.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..4e305ef0660353655f1ec6791cde4dfee87afff6 --- /dev/null +++ b/tutorials/source_en/beginner/introduction.ipynb @@ -0,0 +1,142 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Overview\n", + "\n", + "[![View-Source](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_source_en.png)](https://gitee.com/mindspore/docs/blob/master/tutorials/source_en/beginner/introduction.ipynb)\n", + "\n", + "The following describes the Huawei AI full-stack solution and introduces the position of MindSpore in the solution. Developers who are interested in MindSpore can visit the [MindSpore community](https://gitee.com/mindspore/mindspore) and click [Watch, Star, and Fork](https://gitee.com/mindspore/mindspore).\n", + "\n", + "## MindSpore Introduction\n", + "\n", + "MindSpore is a deep learning framework in all scenarios, aiming to achieve easy development, efficient execution, and all-scenario coverage.\n", + "\n", + "Easy development features friendly APIs and easy debugging. Efficient execution is reflected in computing, data preprocessing, and distributed training. All-scenario coverage means that the framework supports cloud, edge, and device scenarios.\n", + "\n", + "The following figure shows the overall MindSpore architecture:\n", + "\n", + "![MindSpore](https://gitee.com/mindspore/docs/raw/master/tutorials/source_en/beginner/images/introduction2.png)\n", + "\n", + "- **ModelZoo**: ModelZoo provides available deep learning algorithm networks, and more developers are welcome to contribute new networks ([ModelZoo](https://gitee.com/mindspore/models)).\n", + "- **Extend**: The expansion package of MindSpore expands the support of new fields, such as GNN/deep probabilistic programming/reinforcement learning, etc. 
We look forward to more developers contributing and building together.\n",
+    "- **Science**: MindScience is a suite of scientific computing kits for various industries based on the converged MindSpore framework. It contains industry-leading datasets, basic network structures, high-precision pre-trained models, and pre- and post-processing tools that accelerate application development for scientific computing ([More Information](https://mindspore.cn/mindscience/docs/en/master/index.html)).\n",
+    "- **Expression**: Python-based frontend expression and programming interfaces. In the future, more frontends based on C/C++ will be provided. Cangjie, Huawei's self-developed programming language frontend, is now in the pre-research phase. In addition, Huawei is working on interconnection with third-party frontends to introduce more third-party ecosystems.\n",
+    "- **Data**: provides functions such as efficient data processing, loading of common datasets, and programming interfaces, and supports users in flexibly defining processing registration and pipeline parallel optimization.\n",
+    "- **Compiler**: the core compiler of the framework, which implements three major functions based on the unified device-cloud MindIR: hardware-independent optimization (type derivation, automatic differentiation, and expression simplification), hardware-related optimization (automatic parallelism, memory optimization, graph kernel fusion, and pipeline execution), and deployment- and inference-related optimization (quantization and pruning).\n",
+    "- **Runtime**: MindSpore's runtime system, which covers the cloud-side host runtime, the device-side runtime, and the lightweight runtime for smaller IoT devices.\n",
+    "- **Insight**: provides MindSpore's visual debugging and tuning tools, and supports users in debugging and tuning the training network ([More Information](https://mindspore.cn/mindinsight/docs/en/master/index.html)).\n",
+    "- **Armour**: provides enhancements related to security and privacy protection for enterprise-level applications, such as adversarial robustness, model security testing, differential privacy training, privacy leakage risk assessment, and data drift detection ([More Information](https://mindspore.cn/mindarmour/docs/en/master/index.html)).\n",
+    "\n",
+    "### Execution Process\n",
+    "\n",
+    "With an understanding of the overall architecture of MindSpore, we can look at the overall coordination relationship between the various modules, as shown in the figure:\n",
+    "\n",
+    "![MindSpore](https://gitee.com/mindspore/docs/raw/master/tutorials/source_en/beginner/images/introduction4.png)\n",
+    "\n",
+    "As a full-scenario AI framework, MindSpore supports different series of hardware for end (mobile phone and IoT device), edge (base station and routing device), and cloud (server) scenarios, including Ascend series products, NVIDIA series products, Arm-series Qualcomm Snapdragon and Huawei Kirin chips, and other products.\n",
+    "\n",
+    "The blue box on the left is the MindSpore main framework, which mainly provides the basic API functions related to the training and verification of neural networks, and also provides automatic differentiation and automatic parallelism by default.\n",
+    "\n",
+    "Below the blue box is the MindSpore Data module, which can be used for data preprocessing, including data sampling, data iteration, data format conversion, and other data operations. Many debugging and tuning problems are encountered during training, so the MindSpore Insight module visualizes debugging- and tuning-related data such as loss curves, operator execution, and weight parameter variables, making it convenient for users to debug and optimize during training.\n",
+    "\n",
+    "The simplest way to view AI security is from the perspective of attack and defense; for example, an attacker may introduce malicious data during the training stage to degrade the inference ability of the AI model. MindSpore therefore launched the MindSpore Armour module to provide AI security mechanisms for MindSpore.\n",
+    "\n",
+    "The content above the blue box is closer to algorithm-development users, including ModelZoo, which stores a large number of AI algorithm model libraries, MindSpore DevKit, a development tool suite for different fields, and MindSpore Extend, a high-level expansion library. Worth mentioning in MindSpore Extend is the scientific computing suite MindScience, with which MindSpore explores for the first time the combination of scientific computing and deep learning and of numerical computing and deep learning, supporting electromagnetic simulation, drug molecule simulation, and more through deep learning.\n",
+    "\n",
+    "After the neural network model is trained, you can export the model or load a model that has already been trained from MindSpore Hub. MindIR then provides a unified IR format for device and cloud, which defines the logical structure of the network and the properties of the operators through a unified IR and decouples the model file in the MindIR format from the hardware platform to achieve one-time training and multiple deployments. Therefore, as shown in the figure, the model is exported through IR to different modules to perform inference.\n",
+    "\n",
+    "### Design Philosophy\n",
+    "\n",
+    "- Support full-scenario collaboration\n",
+    "\n",
+    "  MindSpore is an industry-wide best practice. It provides data scientists and algorithm engineers with a unified interface for model training, inference, and export. It supports flexible deployment in different scenarios such as end, edge, and cloud, and promotes the prosperity and development of deep learning, scientific computing, and other fields.\n",
+    "\n",
+    "- Provide a Python programming paradigm to simplify AI programming\n",
+    "\n",
+    "  MindSpore provides a Python programming paradigm. Users can build complex neural network models using Python's native control logic, making AI programming easy.\n",
+    "\n",
+    "- Provide a unified coding method for dynamic and static graphs\n",
+    "\n",
+    "  At present, mainstream deep learning frameworks have two execution modes: Graph mode and PyNative mode. Graph mode has high training performance but is difficult to debug; PyNative mode is easier to debug but hard to execute efficiently. MindSpore provides a unified coding method for the two modes, which greatly increases their compatibility. Users do not need to develop multiple sets of code and can switch modes by changing only one line of code. For example, users can set `context.set_context(mode=context.PYNATIVE_MODE)` to switch to PyNative mode and `context.set_context(mode=context.GRAPH_MODE)` to switch to Graph mode, getting an easier development, debugging, and performance-tuning experience. A minimal sketch of this switch (the tiny tensor computation below is illustrative):\n",
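+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "from mindspore import context, Tensor\n",
+    "\n",
+    "# PyNative (dynamic graph) mode: operations execute eagerly, which eases debugging\n",
+    "context.set_context(mode=context.PYNATIVE_MODE)\n",
+    "x = Tensor(np.ones([2, 2]).astype(np.float32))\n",
+    "print(x * x)\n",
+    "\n",
+    "# One line switches to static graph compilation for execution performance\n",
+    "context.set_context(mode=context.GRAPH_MODE)\n",
+    "```\n",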
+    "\n",
+    "- Use a functional differentiable programming architecture and allow users to focus on the mathematically native expression of model algorithms\n",
+    "\n",
+    "  Neural network models are usually trained based on gradient descent algorithms, but the manual derivation process is complicated and error-prone. MindSpore's automatic differentiation mechanism, based on Source Code Transformation, adopts a functional differentiable programming architecture and provides a Python programming interface at the interface layer, including the expression of control flow. Users can focus on the mathematically native expression of the model algorithm without manual derivation.\n",
+    "\n",
+    "- Unify the coding method of single-device and distributed training\n",
+    "\n",
+    "  With the increasing scale of neural network models and datasets, distributed parallel training has become a common practice in neural network training. However, strategy selection and coding for distributed parallel training are very complicated, which seriously restricts the training efficiency of deep learning models. MindSpore unifies the coding methods of single-device and distributed training: developers do not need to write complex distributed strategies and can implement distributed training by adding a small amount of code to the single-device code. For example, setting `context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL)` automatically establishes a cost model and selects an optimal parallel mode for users, improving the efficiency of neural network training, greatly lowering the threshold of AI development, and enabling users to quickly implement model ideas.\n",
+    "\n",
+    "### API Level Structure\n",
+    "\n",
+    "To support network building, entire-graph execution, subgraph execution, and single-operator execution, MindSpore provides users with three levels of APIs. In ascending order, these are the Low-Level Python API, Medium-Level Python API, and High-Level Python API.\n",
+    "\n",
+    "![MindSpore API](https://gitee.com/mindspore/docs/raw/master/tutorials/source_zh_cn/beginner/images/introduction3.png)\n",
+    "\n",
+    "- High-Level Python API\n",
+    "\n",
+    "  High-level APIs are at the first layer. Based on the medium-level APIs, they provide training and inference management, mixed-precision training, and debugging and optimization, enabling users to control the execution process of the entire network and implement training, inference, and optimization of the neural network. For example, by utilizing the Model API, users can specify the neural network model to be trained and the related training settings, train the model, and debug its performance through the Profiler API.\n",
+    "\n",
+    "- Medium-Level Python API\n",
+    "\n",
+    "  Medium-level APIs are at the second layer; they encapsulate the low-level APIs and provide modules such as network layers, optimizers, and loss functions. Users can flexibly build neural networks and control execution processes through the medium-level APIs to quickly implement model algorithm logic. For example, users can call the Cell API to build neural network models and computing logic, add the loss function and optimization methods to the neural network model by using the loss module and Optimizer API, and use the dataset module to process data for model training and derivation.\n",
+    "\n",
+    "- Low-Level Python API\n",
+    "\n",
+    "  Low-level APIs are at the third layer, including tensor definition, basic operators, and automatic differentiation modules, enabling users to easily define tensors and perform derivative computation. For example, users can customize tensors by using the Tensor API, and use the GradOperation operator in the ops.composite module to calculate the derivative of a function at a specified position.\n",
+    "\n",
+    "## Introduction to the Huawei Ascend AI Full-Stack Solution\n",
+    "\n",
+    "Ascend computing is a full-stack AI computing infrastructure and application based on the Ascend series processors. It includes the Ascend series chips, Atlas series hardware, CANN chip enablement, the MindSpore AI framework, ModelArts, and MindX application enablement.\n",
+    "\n",
+    "The Huawei Atlas AI computing solution is based on Ascend series AI processors and uses various product forms such as modules, cards, edge stations, servers, and clusters to build an all-scenario AI infrastructure solution oriented to device, edge, and cloud. It covers data center and intelligent edge solutions, as well as the entire inference and training processes in the deep learning field.\n",
+    "\n",
+    "- **Atlas series**: provides AI training and inference cards as well as training servers ([learn more](https://e.huawei.com/en/products/cloud-computing-dc/atlas/)).\n",
+    "- **CANN (heterogeneous computing architecture)**: a driver layer that enables the chips ([learn more](https://www.hiascend.com/en/software/cann)).\n",
+    "- **MindSpore**: an all-scenario AI framework ([learn more](https://www.mindspore.cn/en)).\n",
+    "- **MindX SDK**: an Ascend SDK that provides application solutions ([learn more](https://www.hiascend.com/en/software/mindx-sdk)).\n",
+    "- **ModelArts**: the HUAWEI CLOUD AI development platform ([learn more](https://www.huaweicloud.com/product/modelarts.html)).\n",
+    "- **MindStudio**: an E2E development toolchain that provides a one-stop IDE for AI development ([learn more](https://www.hiascend.com/en/software/mindstudio)).\n",
+    "\n",
+    "For details, visit the [Huawei Ascend official website](https://e.huawei.com/en/products/servers/ascend).\n",
+    "\n",
+    "## Joining the Community\n",
+    "\n",
+    "We welcome every developer to the MindSpore community to contribute to this all-scenario AI framework.\n",
+    "\n",
+    "- **MindSpore official website**: provides comprehensive MindSpore information, including installation, tutorials, documents, community, resources, and news ([learn more](https://www.mindspore.cn/en)).\n",
+    "- **MindSpore code**:\n",
+    "\n",
+    "  - [MindSpore Gitee](https://gitee.com/mindspore/mindspore): the No. 1 Gitee open-source project of 2020, where you can track the latest progress of MindSpore by clicking Watch, Star, and Fork, discuss issues, and commit code.\n",
+    "\n",
+    "  - [MindSpore GitHub](https://github.com/mindspore-ai/mindspore): the GitHub mirror of the Gitee repository. Developers who are accustomed to GitHub can learn about MindSpore and view the latest code implementation here.\n",
+    "\n",
+    "- **MindSpore forum**: We are dedicated to serving every developer. You can find your voice in MindSpore, regardless of whether you are an entry-level developer or a master. Let's learn and grow together ([Learn more](https://bbs.huaweicloud.com/forum/forum-1076-1.html))."
+   ],
+   "metadata": {}
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.0"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
\ No newline at end of file
diff --git a/tutorials/source_en/model.md b/tutorials/source_en/beginner/model.md
similarity index 30%
rename from tutorials/source_en/model.md
rename to tutorials/source_en/beginner/model.md
index de98dd5fa5c58373fe0d39dd655c97987d58ebd2..a91ebc527d289b20f4b27b34cdaf377c44287134 100644
--- a/tutorials/source_en/model.md
+++ b/tutorials/source_en/beginner/model.md
@@ -1,41 +1,51 @@
 # Building a Neural Network
-`Ascend` `GPU` `CPU` `Beginner` `Model Development`
+
-
+A neural network model consists of multiple data operation layers. `mindspore.nn` provides various basic network modules. The following uses LeNet-5 as an example to first describe how to build a neural network model by using `mindspore.nn`, and then describes how to build a LeNet-5 network model by using `mindvision.classification.models`.
-A neural network model consists of multiple data operation layers. `mindspore.nn` provides various basic network modules.
+> `mindvision.classification.models` is a network model interface developed based on `mindspore.nn`, providing some classic and commonly used network models for the convenience of users.
-The following uses LeNet as an example to describe how MindSpore builds a neural network model.
+## LeNet-5 Model
-Import the required modules and APIs:
+[LeNet-5](https://ieeexplore.ieee.org/document/726791) is a typical convolutional neural network proposed by Professor Yann LeCun in 1998. It achieves 99.4% accuracy on the MNIST dataset and is the first classic network in the CNN field. The model structure is shown in the following figure:
-```python
-import numpy as np
-import mindspore
-import mindspore.nn as nn
-from mindspore import Tensor
-```
+![LeNet-5](https://gitee.com/mindspore/docs/raw/master/tutorials/source_zh_cn/beginner/images/lenet.png)
+
+According to the network structure of LeNet, LeNet has 7 layers excluding the input layer, including 3 convolutional layers, 2 sub-sampling layers, and 3 fully-connected layers.
 
 ## Defining a Model Class
 
+In the figure above, C denotes a convolutional layer, S a sampling layer, and F a fully-connected layer.
+
+The input image size is fixed at 32∗32. To get a good convolution effect, the digit needs to be in the center of the image, so a 32∗32 input is actually the result of padding a 28∗28 image. In addition, unlike the three-channel input images of common CNN networks, the input of LeNet is only a normalized binary image. The output of the network is the prediction probability for the ten digits 0~9, which can be understood as the probability that the input image belongs to each of the digits 0~9.
+
 The `Cell` class of MindSpore is the base class for building all networks and the basic unit of a network. When a neural network is required, you need to inherit the `Cell` class and overwrite the `__init__` and `construct` methods.
+
 ```python
+import mindspore.nn as nn
+
 class LeNet5(nn.Cell):
     """
     Lenet network structure
     """
     def __init__(self, num_class=10, num_channel=1):
         super(LeNet5, self).__init__()
-        # Define the required operation.
+        # Convolutional layer: num_channel input channels, 6 output channels, 5*5 convolution kernel
         self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
+        # Convolutional layer: 6 input channels, 16 output channels, 5*5 convolution kernel
         self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
+        # Fully-connected layer: 16*5*5 inputs, 120 outputs
         self.fc1 = nn.Dense(16 * 5 * 5, 120)
+        # Fully-connected layer: 120 inputs, 84 outputs
        self.fc2 = nn.Dense(120, 84)
+        # Fully-connected layer: 84 inputs, num_class output classes
         self.fc3 = nn.Dense(84, num_class)
+        # ReLU activation function
         self.relu = nn.ReLU()
+        # Pooling layer
         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+        # Flatten multidimensional arrays into one-dimensional arrays
         self.flatten = nn.Flatten()
 
     def construct(self, x):
@@ -55,23 +65,50 @@ class LeNet5(nn.Cell):
         return x
 ```
 
+Next, build the neural network model defined above and look at the structure of the network model.
+
+```python
+model = LeNet5()
+
+print(model)
+```
+
+```text
+LeNet5<
+  (conv1): Conv2d
+  (conv2): Conv2d
+  (fc1): Dense
+  (fc2): Dense
+  (fc3): Dense
+  (relu): ReLU<>
+  (max_pool2d): MaxPool2d
+  (flatten): Flatten<>
+  >
+```
+
 ## Model Layers
 
-The following describes the key member functions of the `Cell` class used in LeNet, and then describes how to use the `Cell` class to access model parameters through the instantiation network.
+The following describes the key member functions of the `Cell` class used in LeNet, and then describes how to use the `Cell` class to access model parameters through the instantiated network. For more on the `Cell` class, refer to the [mindspore.nn interface](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore.nn.html).
 
 ### nn.Conv2d
 
 Add the `nn.Conv2d` layer and add a convolution function to the network to help the neural network extract features.
 
 ```python
-conv2d = nn.Conv2d(1, 6, 5, has_bias=False, weight_init='normal', pad_mode='valid')
-input_x = Tensor(np.ones([1, 1, 32, 32]), mindspore.float32)
+import numpy as np
+
+from mindspore import Tensor
+from mindspore import dtype as mstype
+
+# 1 input channel, 6 output channels, 5*5 convolution kernel; weights initialized from a normal distribution, and 'same' padding keeps the output the same size as the input
+conv2d = nn.Conv2d(1, 6, 5, has_bias=False, weight_init='normal', pad_mode='same')
+input_x = Tensor(np.ones([1, 1, 32, 32]), mstype.float32)
 
 print(conv2d(input_x).shape)
 ```
 
 ```text
- (1, 6, 28, 28)
+(1, 6, 32, 32)
 ```
 
 ### nn.ReLU
 
 Add the `nn.ReLU` layer and add a non-linear activation function to the network to help the neural network learn complex features.
 
 ```python
 relu = nn.ReLU()
-input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
-output = relu(input_x)
+input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mstype.float16)
+
+output = relu(input_x)
 print(output)
 ```
 
 ```text
 [0. 2. 0. 2. 0.]
 ```
 
 ### nn.MaxPool2d
 
-Initialize the `nn.MaxPool2d` layer and down-sample the 6 x 28 x 28 array to a 6 x 14 x 14 array.
+Initialize the `nn.MaxPool2d` layer and down-sample the 6 x 28 x 28 array to a 6 x 7 x 7 array.
 
 ```python
-max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
-input_x = Tensor(np.ones([1, 6, 28, 28]), mindspore.float32)
+max_pool2d = nn.MaxPool2d(kernel_size=4, stride=4)
+input_x = Tensor(np.ones([1, 6, 28, 28]), mstype.float32)
 
 print(max_pool2d(input_x).shape)
 ```
 
 ```text
- (1, 6, 14, 14)
+ (1, 6, 7, 7)
 ```
 
 ### nn.Flatten
 
-Initialize the `nn.Flatten` layer and convert the 16 x 5 x 5 array into 400 consecutive arrays.
+Initialize the `nn.Flatten` layer and convert the 1 x 16 x 5 x 5 array into a continuous array of size 400.
 
 ```python
 flatten = nn.Flatten()
@@ -127,7 +165,7 @@ Initialize the `nn.Dense` layer and perform linear transformation on the input m
 
 ```python
 dense = nn.Dense(400, 120, weight_init='normal')
-input_x = Tensor(np.ones([1, 400]), mindspore.float32)
+input_x = Tensor(np.ones([1, 400]), mstype.float32)
 
 output = dense(input_x)
 print(output.shape)
 ```
 
@@ -139,23 +177,46 @@ print(output.shape)
 
 ## Model Parameters
 
-The convolutional layer and fully-connected layer in the network will have weights and offsets after being instantiated, and these weight and offset parameters are optimized in subsequent training. In `nn.Cell`, the `parameters_and_names()` method is used to access all parameters.
+The convolutional layer and fully-connected layer in the network will have weight and bias parameters after being instantiated, and these parameters are optimized in subsequent training. During training, you can use `get_parameters()` to view information such as the name, shape, and data type of each network layer's parameters, and whether their gradients are computed (`requires_grad`).
+
+```python
+for m in model.get_parameters():
+    print(f"layer:{m.name}, shape:{m.shape}, dtype:{m.dtype}, requires_grad:{m.requires_grad}")
+```
+
+```text
+layer:conv1.weight, shape:(6, 1, 5, 5), dtype:Float32, requires_grad:True
+layer:conv2.weight, shape:(16, 6, 5, 5), dtype:Float32, requires_grad:True
+layer:fc1.weight, shape:(120, 400), dtype:Float32, requires_grad:True
+layer:fc1.bias, shape:(120,), dtype:Float32, requires_grad:True
+layer:fc2.weight, shape:(84, 120), dtype:Float32, requires_grad:True
+layer:fc2.bias, shape:(84,), dtype:Float32, requires_grad:True
+layer:fc3.weight, shape:(10, 84), dtype:Float32, requires_grad:True
+layer:fc3.bias, shape:(10,), dtype:Float32, requires_grad:True
+```
+
+## Quickly Build a LeNet-5 Network Model
 
-In the example, we traverse each parameter and display the name and attribute of each layer in the network.
+The above describes how to use `mindspore.nn.Cell` to build a LeNet-5 network model. The network model interfaces are already built in `mindvision.classification.models`, and the LeNet-5 network model can be built directly with the `lenet` interface.
+## Quickly Build a LeNet-5 Network Model

-In the example, we traverse each parameter and display the name and attribute of each layer in the network.
+The above uses `mindspore.nn.Cell` to build a LeNet-5 network model by hand. A prebuilt network model interface is available in `mindvision.classification.models`, and the LeNet-5 network model can be built directly with its `lenet` interface.

 ```python
-model = LeNet5()
-for m in model.parameters_and_names():
-    print(m)
+from mindvision.classification.models import lenet
+
+# num_classes is the number of classification categories; pretrained indicates whether to load pretrained weights
+model = lenet(num_classes=10, pretrained=False)
+
+for m in model.get_parameters():
+    print(f"layer:{m.name}, shape:{m.shape}, dtype:{m.dtype}, requires_grad:{m.requires_grad}")
 ```

 ```text
-('conv1.weight', Parameter (name=conv1.weight, shape=(6, 1, 5, 5), dtype=Float32, requires_grad=True)),
-('conv2.weight', Parameter (name=conv2.weight, shape=(16, 6, 5, 5), dtype=Float32, requires_grad=True)),
-('fc1.weight', Parameter (name=fc1.weight, shape=(120, 400), dtype=Float32, requires_grad=True)),
-('fc1.bias', Parameter (name=fc1.bias, shape=(120,), dtype=Float32, requires_grad=True)),
-('fc2.weight', Parameter (name=fc2.weight, shape=(84, 120), dtype=Float32, requires_grad=True)),
-('fc2.bias', Parameter (name=fc2.bias, shape=(84,), dtype=Float32, requires_grad=True)),
-('fc3.weight', Parameter (name=fc3.weight, shape=(10, 84), dtype=Float32, requires_grad=True)),
-('fc3.bias', Parameter (name=fc3.bias, shape=(10,), dtype=Float32, requires_grad=True))
+layer:backbone.conv1.weight, shape:(6, 1, 5, 5), dtype:Float32, requires_grad:True
+layer:backbone.conv2.weight, shape:(16, 6, 5, 5), dtype:Float32, requires_grad:True
+layer:backbone.fc1.weight, shape:(120, 400), dtype:Float32, requires_grad:True
+layer:backbone.fc1.bias, shape:(120,), dtype:Float32, requires_grad:True
+layer:backbone.fc2.weight, shape:(84, 120), dtype:Float32, requires_grad:True
+layer:backbone.fc2.bias, shape:(84,), dtype:Float32, requires_grad:True
+layer:backbone.fc3.weight, shape:(10, 84), dtype:Float32, requires_grad:True
+layer:backbone.fc3.bias, shape:(10,), dtype:Float32, requires_grad:True
 ```
+
diff --git a/tutorials/source_en/quick_start.md b/tutorials/source_en/beginner/quick_start.md
similarity index 98%
rename from tutorials/source_en/quick_start.md
rename to tutorials/source_en/beginner/quick_start.md
index e9d2af5941f3b8dbadc7bd5c56a68de34ff1bd98..19a5312c14898df32c42b2090a354e8f06ea4776 100644
--- a/tutorials/source_en/quick_start.md
+++ b/tutorials/source_en/beginner/quick_start.md
@@ -2,7 +2,7 @@

 `Ascend` `GPU` `CPU` `Beginner` `Whole Process`

-
+

 The following describes the basic functions of MindSpore to implement common tasks in deep learning. For details, see links in each section.
diff --git a/tutorials/source_en/save_load_model.md b/tutorials/source_en/beginner/save_load.md
similarity index 30%
rename from tutorials/source_en/save_load_model.md
rename to tutorials/source_en/beginner/save_load.md
index bf72874642a3d70dc718c6f5a65633d695814a52..e4d3df1663173496b3d91bd07cf0c8c242e4d182 100644
--- a/tutorials/source_en/save_load_model.md
+++ b/tutorials/source_en/beginner/save_load.md
@@ -1,14 +1,68 @@
 # Saving and Loading the Model

-`Ascend` `GPU` `CPU` `Beginner` `Model Export` `Model Loading`
+

-
+The previous chapter mainly described how to adjust hyperparameters and train the network model. During training, we usually want to save the intermediate and final results so that they can be used for fine-tuning, and for subsequent model deployment and inference. This chapter describes how to save and load the model.

-In the previous tutorial, you learn how to train the network. In this tutorial, you will learn how to save and load a model, and how to export a saved model in a specified format to different platforms for inference.
+## Model Training
+
+The following are the basic steps and sample code for network model training:
+
+```python
+import mindspore.nn as nn
+from mindspore.train import Model
+
+from mindvision.classification.dataset import Mnist
+from mindvision.classification.models import lenet
+from mindvision.engine.callback import LossMonitor
+
+epochs = 10  # Number of training epochs
+batch_size = 32  # Number of samples in each training batch
+
+# 1. Build a dataset
+download_train = Mnist(path="./mnist", split="train", batch_size=batch_size, repeat_num=1, shuffle=True, resize=32, download=True)
+dataset_train = download_train.run()
+
+# 2. Define a neural network
+network = lenet(num_classes=10, pretrained=False)
+# 3.1 Define a loss function
+net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+# 3.2 Define an optimizer function
+net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9)
+# 3.3 Initialize the model with the network, loss function, optimizer, and metrics
+model = Model(network, loss_fn=net_loss, optimizer=net_opt, metrics={'accuracy'})
+
+# 4. Train the neural network
+model.train(epochs, dataset_train, callbacks=[LossMonitor(0.01, 1875)])
+```
+
+```text
+Epoch:[ 0/ 10], step:[ 1875/ 1875], loss:[0.148/1.210], time:2.021 ms, lr:0.01000
+Epoch time: 4251.808 ms, per step time: 2.268 ms, avg loss: 1.210
+Epoch:[ 1/ 10], step:[ 1875/ 1875], loss:[0.049/0.081], time:2.048 ms, lr:0.01000
+Epoch time: 4301.405 ms, per step time: 2.294 ms, avg loss: 0.081
+Epoch:[ 2/ 10], step:[ 1875/ 1875], loss:[0.014/0.050], time:1.992 ms, lr:0.01000
+Epoch time: 4278.799 ms, per step time: 2.282 ms, avg loss: 0.050
+Epoch:[ 3/ 10], step:[ 1875/ 1875], loss:[0.035/0.038], time:2.254 ms, lr:0.01000
+Epoch time: 4380.553 ms, per step time: 2.336 ms, avg loss: 0.038
+Epoch:[ 4/ 10], step:[ 1875/ 1875], loss:[0.130/0.031], time:1.932 ms, lr:0.01000
+Epoch time: 4287.547 ms, per step time: 2.287 ms, avg loss: 0.031
+Epoch:[ 5/ 10], step:[ 1875/ 1875], loss:[0.003/0.027], time:1.981 ms, lr:0.01000
+Epoch time: 4377.000 ms, per step time: 2.334 ms, avg loss: 0.027
+Epoch:[ 6/ 10], step:[ 1875/ 1875], loss:[0.004/0.023], time:2.167 ms, lr:0.01000
+Epoch time: 4687.250 ms, per step time: 2.500 ms, avg loss: 0.023
+Epoch:[ 7/ 10], step:[ 1875/ 1875], loss:[0.004/0.020], time:2.226 ms, lr:0.01000
+Epoch time: 4685.529 ms, per step time: 2.499 ms, avg loss: 0.020
+Epoch:[ 8/ 10], step:[ 1875/ 1875], loss:[0.000/0.016], time:2.275 ms, lr:0.01000
+Epoch time: 4651.129 ms, per step time: 2.481 ms, avg loss: 0.016
+Epoch:[ 9/ 10], step:[ 1875/ 1875], loss:[0.022/0.015], time:2.177 ms, lr:0.01000
+Epoch time: 4623.760 ms, per step time: 2.466 ms, avg loss: 0.015
+```
+
+As you can see from the printed results above, the loss values tend to converge as the number of training epochs increases.
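The `1875` that appears in every log line above is simply the number of batches per epoch. A quick check (an editorial sketch, assuming the standard 60,000-image MNIST training split):

```python
num_train_images = 60000  # size of the standard MNIST training split
batch_size = 32

steps_per_epoch = num_train_images // batch_size
print(steps_per_epoch)  # 1875
```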
 ## Saving the Model

-There are two main ways to save the interface of the model:
+After the network is trained, the following describes how to save and load the model. There are two main interfaces for saving the model:

 1. One is to simply save the network model, which can be saved before and after training. The advantage is that the interface is simple and easy to use, but only the state of the network model at the moment the command is executed is retained;
@@ -16,7 +70,7 @@ There are two main ways to save the interface of the model:

 ### Saving the Model Directly

-Use the save_checkpoint provided by MindSpore to save the model, pass it to the network and save the path:
+Use the `save_checkpoint` interface provided by MindSpore to save the model, passing in the network and the save path:

 ```python
 import mindspore as ms
@@ -25,29 +79,26 @@ import mindspore as ms
 ms.save_checkpoint(net, "./MyNet.ckpt")
 ```

-Here, `net` is the training network, and the definition method can be referred to [Building a Neural Network](https://www.mindspore.cn/tutorials/en/master/model.html).
+Here, `net` is the trained network, and `"./MyNet.ckpt"` is the path where the network model is saved.

 ### Saving the Model During Training

-In the process of model training, use the `callbacks` parameter in `model.train` to pass in the object `ModelCheckpoint` that saves the model, which can save the model parameters and generate CheckPoint (abbreviated as ckpt) files.
-
-```python
-from mindspore.train.callback import ModelCheckpoint
-
-ckpt_cb = ModelCheckpoint()
-model.train(epoch_num, dataset, callbacks=ckpt_cb)
-```
-
-Here, `epoch_num` is the number of times that the dataset is traversed during training. The definition method can be referred to [Training the Model](https://www.mindspore.cn/tutorials/en/master/optimization.html). `dataset` is the dataset to be loaded. The definition method can be referred to [Loading and Processing Data](https://www.mindspore.cn/tutorials/en/master/dataset.html).
+In the process of model training, use the `callbacks` parameter in `model.train` to pass in a [ModelCheckpoint](https://mindspore.cn/docs/api/en/master/api_python/mindspore.train.html#mindspore.train.callback.ModelCheckpoint) object that saves the model (generally used together with [CheckpointConfig](https://mindspore.cn/docs/api/en/master/api_python/mindspore.train.html#mindspore.train.callback.CheckpointConfig)), which saves the model parameters and generates CheckPoint (abbreviated as ckpt) files. You can configure the checkpoint policies as required. The usage is as follows:

 ```python
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig

-config_ck = CheckpointConfig(save_checkpoint_steps=32, keep_checkpoint_max=10)
-ckpt_cb = ModelCheckpoint(prefix='resnet50', directory=None, config=config_ck)
-model.train(epoch_num, dataset, callbacks=ckpt_cb)
+# Set the number of training epochs
+epoch_num = 5
+
+# Set the model saving parameters
+config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
+
+# Apply the model saving parameters
+ckpoint = ModelCheckpoint(prefix="lenet", directory="./lenet", config=config_ck)
+model.train(epoch_num, dataset_train, callbacks=ckpoint)
 ```

 In the preceding code, you need to initialize a `CheckpointConfig` class object to set the saving policy. Because one epoch of the training pipeline above contains 1875 steps, `save_checkpoint_steps=1875` saves a checkpoint once per epoch, and `keep_checkpoint_max=10` retains at most the 10 most recent checkpoint files.
@@ -62,16 +113,16 @@ Create a `ModelCheckpoint` object and pass it to the `model.train` method. Then

 The generated checkpoint file is as follows:

 ```text
-resnet50-graph.meta # Computational graph after build.
-resnet50-1_32.ckpt # The extension of the checkpoint file is .ckpt.
-resnet50-2_32.ckpt # The file name format contains the epoch and step correspond to the saved parameters.
-resnet50-3_32.ckpt # The file name indicates that the model parameters generated during the 32nd step of the third epoch are saved.
+lenet-graph.meta # Computational graph after compilation.
+lenet-1_1875.ckpt # The extension of a checkpoint file is .ckpt.
+lenet-2_1875.ckpt # The file name contains the epoch and step corresponding to the saved parameters; this file holds the model parameters of the 1875th step of the 2nd epoch.
+lenet-3_1875.ckpt # The model parameters saved at the 1875th step of the 3rd epoch.
 ...
 ```

-If you use the same prefix and run the training script for multiple times, checkpoint files with the same name may be generated. To help users distinguish files generated each time, MindSpore adds underscores (_) and digits to the end of the user-defined prefix. If you want to delete the `.ckpt` file, delete the `.meta` file at the same time.
+If you use the same prefix and run the training script multiple times, checkpoint files with the same name may be generated. To help users distinguish the files generated each time, MindSpore appends underscores (_) and digits to the end of the user-defined prefix. If you want to delete a `.ckpt` file, delete the matching `.meta` file at the same time.

-For example, `resnet50_3-2_32.ckpt` indicates the checkpoint file generated during the 32nd step of the second epoch after the script is executed for the third time.
+For example, `lenet_3-2_1875.ckpt` indicates the checkpoint file of the 1875th step of the 2nd epoch, generated by the third run of the training script.

 ## Loading the Model

@@ -82,12 +133,18 @@ The sample code is as follows:

 ```python
 from mindspore import load_checkpoint, load_param_into_net

-resnet = ResNet50()
-# Store model parameters in the parameter dictionary.
-param_dict = load_checkpoint("resnet50-2_32.ckpt")
-# Load parameters to the network.
-load_param_into_net(resnet, param_dict)
-model = Model(resnet, loss, metrics={"accuracy"})
+from mindvision.classification.dataset import Mnist
+from mindvision.classification.models import lenet
+
+# Store the model parameters in the parameter dictionary; here the parameters saved during the training above are loaded
+param_dict = load_checkpoint("./lenet/lenet-1_1875.ckpt")
+
+# Redefine a LeNet-5 neural network
+network = lenet(num_classes=10, pretrained=False)
+
+# Load the parameters into the network
+load_param_into_net(network, param_dict)
+model = Model(network, loss_fn=net_loss, optimizer=net_opt, metrics={"accuracy"})
 ```

- The `load_checkpoint` method loads the network parameters in the parameter file to the `param_dict` dictionary.
@@ -95,80 +152,45 @@ model = Model(resnet, loss, metrics={"accuracy"})

 ### Validating the Model

-In the inference-only scenario, parameters are directly loaded to the network for subsequent inference and validation. The sample code is as follows:
+After the parameters are loaded into the network as above, you can call the `eval` function for inference validation. The sample code is as follows:

 ```python
-# Define a validation dataset.
-dataset_eval = create_dataset(os.path.join(mnist_path, "test"), 32, 1)
-
 # Call eval() for inference.
+download_eval = Mnist(path="./mnist", split="test", batch_size=32, resize=32, download=True)
+dataset_eval = download_eval.run()
 acc = model.eval(dataset_eval)
+
+print("{}".format(acc))
+```
+
+```text
+{'accuracy': 0.9857772435897436}
+```
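Beyond the aggregate accuracy, the restored model can be used for prediction directly. A minimal sketch (an editorial addition; it assumes the `model` and `dataset_eval` objects defined above, and that the pipeline's columns are named `image` and `label`):

```python
# Take one batch from the validation dataset
data = next(dataset_eval.create_dict_iterator())

# model.predict runs a forward pass and returns the network outputs (logits)
output = model.predict(data["image"])
predicted = output.asnumpy().argmax(axis=1)

print("Predicted:", predicted[:10])
print("Labels:   ", data["label"].asnumpy()[:10])
```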
 ### For Transfer Learning

-You can load network parameters and optimizer parameters to the model in the case of task interruption, retraining, and fine-tuning. The sample code is as follows:
+For scenarios such as resuming interrupted training and fine-tuning, you can call the `train` function for transfer learning. The sample code is as follows:

 ```python
-# Set the number of training epochs.
-epoch = 1
 # Define a training dataset.
-dataset = create_dataset(os.path.join(mnist_path, "train"), 32, 1)
-# Call train() for training.
-model.train(epoch, dataset)
-```
-
-## Exporting the Model
-
-During model training, you can add checkpoints to save model parameters for inference and retraining. If you want to perform inference on different hardware platforms, you can generate MindIR, AIR, or ONNX files based on the network and checkpoint files.
-
-The following describes how to save a checkpoint file and export a MindIR, AIR, or ONNX file.
+download_train = Mnist(path="./mnist", split="train", batch_size=32, repeat_num=1, shuffle=True, resize=32, download=True)
+dataset_train = download_train.run()

-> MindSpore is an all-scenario AI framework that uses MindSpore IR to unify intermediate representation of network models. Therefore, you are advised to export files in MindIR format.
-
-### Exporting a MindIR File
-
-If you want to perform inference across platforms or hardware (such as the Ascend AI Processors, MindSpore devices, or GPUs) after obtaining a checkpoint file, you can define the network and checkpoint to generate a model file in MINDIR format. Currently, the inference network export based on static graphs is supported and does not contain control flow semantics. An example of the code for exporting the file is as follows:
-
-```python
-from mindspore import export, load_checkpoint, load_param_into_net
-from mindspore import Tensor
-import numpy as np
-
-resnet = ResNet50()
-# Store model parameters in the parameter dictionary.
-param_dict = load_checkpoint("resnet50-2_32.ckpt")
-
-# Load parameters to the network.
-load_param_into_net(resnet, param_dict)
-input = np.random.uniform(0.0, 1.0, size=[32, 3, 224, 224]).astype(np.float32)
-export(resnet, Tensor(input), file_name='resnet50-2_32', file_format='MINDIR')
+# Network model calls train() for training.
+model.train(epoch_num, dataset_train, callbacks=[LossMonitor(0.01, 1875)])
 ```

-> - `input` specifies the input shape and data type of the exported model. If the network has multiple inputs, you need to pass them to the `export` method. Example: `export(network, Tensor(input1), Tensor(input2), file_name='network', file_format='MINDIR')`
-> - If `file_name` does not contain the ".mindir" suffix, the system will automatically add the ".mindir" suffix to it.
-
-### Exporting in Other Formats
-
-#### Exporting an AIR File
-
-If you want to perform inference on the Ascend AI Processor after obtaining a checkpoint file, use the network and checkpoint to generate a model file in AIR format.
-An example of the code for exporting the file is as follows:
-
-```python
-export(resnet, Tensor(input), file_name='resnet50-2_32', file_format='AIR')
+```text
+Epoch:[ 0/ 5], step:[ 1875/ 1875], loss:[0.000/0.010], time:2.193 ms, lr:0.01000
+Epoch time: 4106.620 ms, per step time: 2.190 ms, avg loss: 0.010
+Epoch:[ 1/ 5], step:[ 1875/ 1875], loss:[0.000/0.009], time:2.036 ms, lr:0.01000
+Epoch time: 4233.697 ms, per step time: 2.258 ms, avg loss: 0.009
+Epoch:[ 2/ 5], step:[ 1875/ 1875], loss:[0.000/0.010], time:2.045 ms, lr:0.01000
+Epoch time: 4246.248 ms, per step time: 2.265 ms, avg loss: 0.010
+Epoch:[ 3/ 5], step:[ 1875/ 1875], loss:[0.000/0.008], time:2.001 ms, lr:0.01000
+Epoch time: 4235.036 ms, per step time: 2.259 ms, avg loss: 0.008
+Epoch:[ 4/ 5], step:[ 1875/ 1875], loss:[0.002/0.008], time:2.039 ms, lr:0.01000
+Epoch time: 4354.482 ms, per step time: 2.322 ms, avg loss: 0.008
 ```

-> - `input` specifies the input shape and data type of the exported model. If the network has multiple inputs, you need to pass them to the `export` method. Example: `export(network, Tensor(input1), Tensor(input2), file_name='network', file_format='AIR')`
-> - If `file_name` does not contain the ".air" suffix, the system will automatically add the ".air" suffix to it.
-
-#### Exporting an ONNX File
-
-If you want to perform inference on other third-party hardware after obtaining a checkpoint file, use the network and checkpoint to generate a model file in ONNX format. An example of the code for exporting the file is as follows:
-
-```python
-export(resnet, Tensor(input), file_name='resnet50-2_32', file_format='ONNX')
-```
-> - `input` specifies the input shape and data type of the exported model. If the network has multiple inputs, you need to pass them to the `export` method. Example: `export(network, Tensor(input1), Tensor(input2), file_name='network', file_format='ONNX')`
-> - If `file_name` does not contain the ".onnx" suffix, the system will automatically add the ".onnx" suffix to it.
-> - Currently, only the ONNX format export of ResNet50 and BERT[CLS] networks are supported.
diff --git a/tutorials/source_en/beginner/tensor.ipynb b/tutorials/source_en/beginner/tensor.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..db27ae693903f9a1f7ac9dbfcf46e3a67db318f2
--- /dev/null
+++ b/tutorials/source_en/beginner/tensor.ipynb
@@ -0,0 +1,695 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "source": [
+    "# Tensor\n",
+    "\n",
+    "[![View-Source](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_source_en.png)](https://gitee.com/mindspore/docs/blob/master/tutorials/source_en/beginner/tensor.ipynb)\n",
+    "\n",
+    "Tensor is a multilinear function that can be used to represent linear relationships between vectors, scalars, and other tensors. The basic examples of these linear relations are the inner product, the outer product, the linear map, and the Cartesian product. In an $n$-dimensional space, a tensor of rank $r$ has $n^{r}$ coordinate components, where each component is a function of the coordinates, and these components are also transformed linearly according to certain rules when the coordinates are transformed. $r$ is called the rank or order of this tensor (not related to the rank or order of a matrix).\n",
+    "\n",
+    "Tensor is a special data structure that is very similar to arrays and matrices.
[Tensor](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore/mindspore.Tensor.html) is the basic data structure in MindSpore network operations, and this chapter mainly introduces the attributes and usage of the tensor and the sparse tensor." + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "## Building the Tensor\n", + "\n", + "There are multiple methods for initializing the tensor. When building the tensor, you can pass the Tensor with `float`, `int`, `bool`, `tuple`, `list`, and `NumPy.array` types.\n", + "\n", + "- **Generating a tensor based on data.**\n", + "\n", + "You can create a tensor based on data. The data type can be set or automatically inferred." + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 2, + "source": [ + "from mindspore import Tensor\n", + "\n", + "x = Tensor(0.1)" + ], + "outputs": [], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "- **Generating a tensor from the NumPy array.**\n", + "\n", + "You can create a tensor from the NumPy array." + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 3, + "source": [ + "import numpy as np\n", + "\n", + "arr = np.array([1, 0, 1, 0])\n", + "tensor_arr = Tensor(arr)\n", + "\n", + "print(type(arr))\n", + "print(type(tensor_arr))" + ], + "outputs": [], + "metadata": {} + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "source": [ + "If the initial value is `NumPy.array`, the generated `Tensor` data type corresponds to `NumPy.array`." + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "- **Generating a tensor by using the init**\n", + "\n", + "When using the `init` to initialize a tensor, the parameters that support passing are `init`, `shape` and `dtype`.\n", + "\n", + "- `init`: Support passing the subclass of [initializer](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore.common.initializer.html).\n", + "- `shape`: Support passing `list`, `tuple`, `int`.\n", + "- `dtype`: Support passing [mindspore.dtype](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore.html#mindspore.dtype)." + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 4, + "source": [ + "from mindspore import Tensor\n", + "from mindspore import set_seed\n", + "from mindspore import dtype as mstype\n", + "from mindspore.common.initializer import One, Normal\n", + "\n", + "set_seed(1)\n", + "\n", + "tensor1 = Tensor(shape=(2, 2), dtype=mstype.float32, init=One())\n", + "tensor2 = Tensor(shape=(2, 2), dtype=mstype.float32, init=Normal())\n", + "\n", + "print(\"tensor1:\\n\", tensor1)\n", + "print(\"tensor2:\\n\", tensor2)" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "tensor1:\n", + " [[1. 1.]\n", + " [1. 1.]]\n", + "tensor2:\n", + " [[-0.00128023 -0.01392901]\n", + " [ 0.0130886 -0.00107818]]\n" + ] + } + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "The `init` is used for delayed initialization in parallel mode. 
Usually, it is not recommended to use the `init` interface to initialize parameters.\n",
+    "\n",
+    "- **Inheriting attributes of another tensor to form a new tensor.**"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "source": [
+    "from mindspore import ops\n",
+    "\n",
+    "oneslike = ops.OnesLike()\n",
+    "x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))\n",
+    "output = oneslike(x)\n",
+    "\n",
+    "print(output)\n",
+    "print(\"input shape:\", x.shape)\n",
+    "print(\"output shape:\", output.shape)"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "[[1 1]\n",
+      " [1 1]]\n",
+      "input shape: (2, 2)\n",
+      "output shape: (2, 2)\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "- **Outputting a constant tensor of a specified size.**\n",
+    "\n",
+    "`shape` is the size tuple of a tensor, which determines the dimension of the output tensor."
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "source": [
+    "shape = (2, 2)\n",
+    "ones = ops.Ones()\n",
+    "output_ones = ones(shape, mstype.float32)\n",
+    "print(output_ones)\n",
+    "\n",
+    "zeros = ops.Zeros()\n",
+    "output_zeros = zeros(shape, mstype.float32)\n",
+    "print(output_zeros)"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "[[1. 1.]\n",
+      " [1. 1.]]\n",
+      "[[0. 0.]\n",
+      " [0. 0.]]\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "During `Tensor` initialization, dtype can be specified, for example, `mstype.int32`, `mstype.float32` or `mstype.bool_`.\n",
+    "\n",
+    "## Tensor Attributes\n",
+    "\n",
+    "Tensor attributes include shape, data type, transposed tensor, item size, number of bytes occupied, dimension, size of elements, and stride per dimension.\n",
+    "\n",
+    "- shape: the shape of `Tensor`, a tuple.\n",
+    "\n",
+    "- dtype: the dtype of `Tensor`, a data type of MindSpore.\n",
+    "\n",
+    "- T: the transpose of `Tensor`, also a `Tensor`.\n",
+    "\n",
+    "- itemsize: the number of bytes occupied by each element in `Tensor`, an integer.\n",
+    "\n",
+    "- nbytes: the total number of bytes occupied by `Tensor`, an integer.\n",
+    "\n",
+    "- ndim: the rank of a `Tensor`, which is len(tensor.shape), an integer.\n",
+    "\n",
+    "- size: the number of all elements in `Tensor`, an integer.\n",
+    "\n",
+    "- strides: the number of bytes to traverse in each dimension of `Tensor`."
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "source": [
+    "x = Tensor(np.array([[1, 2], [3, 4]]), mstype.int32)\n",
+    "\n",
+    "print(\"x_shape:\", x.shape)\n",
+    "print(\"x_dtype:\", x.dtype)\n",
+    "print(\"x_transposed:\\n\", x.T)\n",
+    "print(\"x_itemsize:\", x.itemsize)\n",
+    "print(\"x_nbytes:\", x.nbytes)\n",
+    "print(\"x_ndim:\", x.ndim)\n",
+    "print(\"x_size:\", x.size)\n",
+    "print(\"x_strides:\", x.strides)"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "x_shape: (2, 2)\n",
+      "x_dtype: Int32\n",
+      "x_transposed:\n",
+      " [[1 3]\n",
+      " [2 4]]\n",
+      "x_itemsize: 4\n",
+      "x_nbytes: 16\n",
+      "x_ndim: 2\n",
+      "x_size: 4\n",
+      "x_strides: (8, 4)\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Tensor Indexing\n",
+    "\n",
+    "Tensor indexing is similar to NumPy indexing: indexing starts from 0, negative indexing means indexing in reverse order, and colons `:` and ellipses `...` are used for slicing."
+ ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 8, + "source": [ + "tensor = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))\n", + "\n", + "print(\"First row: {}\".format(tensor[0]))\n", + "print(\"value of top right corner: {}\".format(tensor[1, 1]))\n", + "print(\"Last column: {}\".format(tensor[:, -1]))\n", + "print(\"First column: {}\".format(tensor[..., 0]))" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "First row: [0. 1.]\n", + "value of top right corner: 3.0\n", + "Last column: [1. 3.]\n", + "First column: [0. 2.]\n" + ] + } + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "## Tensor Operation\n", + "\n", + "There are many operations between tensors, including arithmetic, linear algebra, matrix processing (transposing, indexing, and slicing), and sampling. The following describes several operations. The usage of tensor computation is similar to that of NumPy.\n", + "\n", + ">Common arithmetic operations include: addition (+), subtraction (-), multiplication (\\*), division (/), modulo (%), power (\\*\\*), and exact division (//)." + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 9, + "source": [ + "x = Tensor(np.array([1, 2, 3]), mstype.int32)\n", + "y = Tensor(np.array([4, 5, 6]), mstype.int32)\n", + "\n", + "output_add = x + y\n", + "output_sub = x - y\n", + "output_mul = x * y\n", + "output_div = y / x\n", + "output_mod = y % x\n", + "output_floordiv = y // x\n", + "\n", + "print(\"add:\", output_add)\n", + "print(\"sub:\", output_sub)\n", + "print(\"mul:\", output_mul)\n", + "print(\"div:\", output_div)\n", + "print(\"mod:\", output_mod)\n", + "print(\"floordiv:\", output_floordiv)" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "add: [5 7 9]\n", + "sub: [-3 -3 -3]\n", + "mul: [ 4 10 18]\n", + "div: [4 2 2]\n", + "mod: [0 1 0]\n", + "floordiv: [4 2 2]\n" + ] + } + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "`Concat` connects a series of tensors in a given dimension." + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 10, + "source": [ + "data1 = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))\n", + "data2 = Tensor(np.array([[4, 5], [6, 7]]).astype(np.float32))\n", + "op = ops.Concat()\n", + "output = op((data1, data2))\n", + "\n", + "print(output)\n", + "print(\"shape:\\n\", output.shape)" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "[[0. 1.]\n", + " [2. 3.]\n", + " [4. 5.]\n", + " [6. 7.]]\n", + "shape:\n", + " (4, 2)\n" + ] + } + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "`Stack` combines two tensors from another dimension." + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 11, + "source": [ + "data1 = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))\n", + "data2 = Tensor(np.array([[4, 5], [6, 7]]).astype(np.float32))\n", + "op = ops.Stack()\n", + "output = op([data1, data2])\n", + "\n", + "print(output)\n", + "print(\"shape:\\n\", output.shape)" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "[[[0. 1.]\n", + " [2. 3.]]\n", + "\n", + " [[4. 5.]\n", + " [6. 
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Conversion Between Tensor and NumPy\n",
+    "\n",
+    "Tensor and NumPy can be converted to each other.\n",
+    "\n",
+    "### Tensor to NumPy\n",
+    "\n",
+    "Use `asnumpy()` to convert Tensor to NumPy."
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "source": [
+    "zeros = ops.Zeros()\n",
+    "\n",
+    "output = zeros((2, 2), mstype.float32)\n",
+    "print(\"output: {}\".format(type(output)))\n",
+    "\n",
+    "n_output = output.asnumpy()\n",
+    "print(\"n_output: {}\".format(type(n_output)))"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "output: <class 'mindspore.common.tensor.Tensor'>\n",
+      "n_output: <class 'numpy.ndarray'>\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### NumPy to Tensor\n",
+    "\n",
+    "Use `Tensor()` to convert NumPy to Tensor."
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "source": [
+    "output = np.array([1, 0, 1, 0])\n",
+    "print(\"output: {}\".format(type(output)))\n",
+    "\n",
+    "t_output = Tensor(output)\n",
+    "print(\"t_output: {}\".format(type(t_output)))"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "output: <class 'numpy.ndarray'>\n",
+      "t_output: <class 'mindspore.common.tensor.Tensor'>\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Sparse Tensor\n",
+    "\n",
+    "The sparse tensor is a special kind of tensor in which most of the elements are zero.\n",
+    "\n",
+    "In some scenarios (e.g., recommendation systems, molecular dynamics, graph neural networks), the data is sparse. If we use common dense tensors to represent the data, we may introduce many unnecessary calculation, storage, and communication costs. In this situation, it is better to use a sparse tensor to represent the data.\n",
+    "\n",
+    "MindSpore now supports the two most commonly used `CSR` and `COO` sparse data formats.\n",
+    "\n",
+    "The common structure of the sparse tensor is `<indices, values, shape>`. `indices` means the indices of non-zero elements, `values` means the values of these non-zero elements, and `shape` means the dense shape of the sparse tensor. Using this structure, we define the data structures `CSRTensor`, `COOTensor`, and `RowTensor`."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### CSRTensor\n",
+    "\n",
+    "`CSR` (Compressed Sparse Row) is efficient in both storage and computation. All the non-zero values are stored in `values`, and their positions are stored in `indptr` (row position) and `indices` (column position).\n",
+    "\n",
+    "- `indptr`: 1-D integer tensor, indicating the start and end points of the non-zero elements of each row of the sparse data in `values`. The index data type only supports int32 for now.\n",
+    "\n",
+    "- `indices`: 1-D integer tensor, indicating the positions of the sparse tensor's non-zero elements in the columns, with the same length as `values`. The index data type only supports int32 for now.\n",
+    "\n",
+    "- `values`: 1-D tensor, indicating the values of the non-zero elements of the `CSRTensor`, with the same length as `indices`.\n",
+    "\n",
+    "- `shape`: indicating the shape of the compressed sparse tensor.
The data type is `Tuple`, and currently only 2-D `CSRTensor` is supported.\n", + "\n", + ">For more details of the `CSRTensor`, please see [mindspore.CSRTensor](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore/mindspore.CSRTensor.html).\n", + "\n", + "Here are some examples of how CSRTensor can be used:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Float64\n" + ] + } + ], + "source": [ + "import mindspore as ms\n", + "from mindspore import Tensor, CSRTensor\n", + "\n", + "indptr = Tensor([0, 1, 2])\n", + "indices = Tensor([0, 1])\n", + "values = Tensor([1, 2], dtype=ms.float32)\n", + "shape = (2, 4)\n", + "\n", + "# constructs CSRTensor\n", + "csr_tensor = CSRTensor(indptr, indices, values, shape)\n", + "\n", + "print(csr_tensor.astype(ms.float64).dtype)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### COOTensor\n", + "\n", + "`COOTensor` is used to compress Tensors with irregular distribution of non-zero elements. If the number of non-zero elements\n", + "is `N` and the dense shape of the sparse tensor is `ndims`:\n", + "\n", + "- `indices`: 2-D integer Tensor and each row indicates a non-zero element subscript. Shape: `[N, ndims]`. Index data type only supports int32 for now.\n", + "- `values`: 1-D tensor of any type, indicating the value of non-zero elements. Shape: `[N]`.\n", + "- `shape`: indicating a dense shape of the sparse tensor, currently only 2-D `COOTensor` is supported.\n", + "\n", + ">For more details for `COOTensor`, please see [mindspore.COOTensor](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore/mindspore.COOTensor.html).\n", + "\n", + "Here are some examples of how COOTensor can be used:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[1. 2.]\n", + "[[0 1]\n", + " [1 2]]\n", + "(3, 4)\n", + "Float64\n" + ] + } + ], + "source": [ + "import mindspore as ms\n", + "import mindspore.nn as nn\n", + "from mindspore import Tensor, COOTensor\n", + "\n", + "indices = Tensor([[0, 1], [1, 2]])\n", + "values = Tensor([1, 2], dtype=ms.float32)\n", + "shape = (3, 4)\n", + "\n", + "# constructs COOTensor\n", + "coo_tensor = COOTensor(indices, values, shape)\n", + "\n", + "print(coo_tensor.values)\n", + "print(coo_tensor.indices)\n", + "print(coo_tensor.shape)\n", + "print(coo_tensor.astype(ms.float64).dtype) # COOTensor cast to another data type" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The codes above produce a `COOTensor` as following:\n", + "\n", + "$$\n", + " \\left[\n", + " \\begin{matrix}\n", + " 0 & 1 & 0 & 0 \\\\\n", + " 0 & 0 & 2 & 0 \\\\\n", + " 0 & 0 & 0 & 0\n", + " \\end{matrix}\n", + " \\right]\n", + "$$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### RowTensor\n", + "\n", + "`RowTensor` is used to compress tensors that are sparse in the zeroth dimension. If the dimension of `RowTensor` is `[L0, D1, D2, ..., DN ]`. 
The number of non-zero elements in the zeroth dimension is `D0`, where `L0 >> D0`.\n",
+    "\n",
+    "- `indices`: 1-D integer tensor, indicating the positions of non-zero elements in the zeroth dimension of the sparse tensor, shape: `[D0]`.\n",
+    "\n",
+    "- `values`: indicating the values of the corresponding non-zero elements, shape: `[D0, D1, D2, ..., DN]`.\n",
+    "\n",
+    "- `dense_shape`: indicating the shape of the compressed sparse tensor.\n",
+    "\n",
+    "> `RowTensor` can only be used in the constructor of `Cell`. For details, refer to the code example in [mindspore.RowTensor](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore/mindspore.RowTensor.html)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "non-zero values: [[1. 2.]]\n",
+      "non-zero indices: [0]\n",
+      "shape: (3, 2)\n"
+     ]
+    }
+   ],
+   "source": [
+    "from mindspore import RowTensor\n",
+    "import mindspore.nn as nn\n",
+    "\n",
+    "class Net(nn.Cell):\n",
+    "    def __init__(self, dense_shape):\n",
+    "        super(Net, self).__init__()\n",
+    "        self.dense_shape = dense_shape\n",
+    "\n",
+    "    def construct(self, indices, values):\n",
+    "        x = RowTensor(indices, values, self.dense_shape)\n",
+    "        return x.values, x.indices, x.dense_shape\n",
+    "\n",
+    "indices = Tensor([0])\n",
+    "values = Tensor([[1, 2]], dtype=mstype.float32)\n",
+    "out = Net((3, 2))(indices, values)\n",
+    "\n",
+    "print(\"non-zero values:\", out[0])\n",
+    "print(\"non-zero indices:\", out[1])\n",
+    "print(\"shape:\", out[2])"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python [conda env:mindspore_py39] *",
+   "language": "python",
+   "name": "conda-env-mindspore_py39-py"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.0"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
\ No newline at end of file
diff --git a/tutorials/source_en/beginner/train.md b/tutorials/source_en/beginner/train.md
new file mode 100644
index 0000000000000000000000000000000000000000..8868486c9f56ef29a175d28d350add6651949960
--- /dev/null
+++ b/tutorials/source_en/beginner/train.md
@@ -0,0 +1,132 @@
+# Training the Model
+
+
+
+After learning how to create a model and build a dataset in the preceding tutorials, you can start to learn how to set hyperparameters and optimize model parameters.
+
+## Hyperparameters
+
+Hyperparameters can be adjusted to control the model training and optimization process. Different hyperparameter values may affect the model training and convergence speed. At present, deep learning models are mostly optimized by a mini-batch stochastic gradient descent algorithm, whose principle is as follows:
+
+$$
+w_{t+1}=w_{t}-\eta \frac{1}{n} \sum_{x \in \mathcal{B}} \nabla l\left(x, w_{t}\right)
+$$
+
+where $n$ is the batch size and $\eta$ is the learning rate; $w_{t}$ is the weight parameter at training step $t$, and $\nabla l$ is the gradient of the loss function. Besides the gradient itself, these two factors directly determine the weight update of the model, and from the optimization point of view they are the most important hyperparameters affecting the convergence of the model.
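Read as plain array arithmetic, one update step of the formula above can be sketched in a few lines of NumPy. This is an editorial illustration using a made-up squared-error loss, not MindSpore training code:

```python
import numpy as np

eta = 0.01                     # learning rate
w = np.array([0.5, -0.3])      # current weights w_t
x = np.array([[1.0, 2.0],      # mini-batch of n = 3 samples
              [0.5, 1.0],
              [2.0, 0.0]])
y = np.array([1.0, 0.0, 1.0])  # targets

# For the squared-error loss l(x, w) = (x @ w - y)^2,
# the per-sample gradient with respect to w is 2 * (x @ w - y) * x
residual = x @ w - y
grad = 2 * (x * residual[:, None]).mean(axis=0)  # average gradient over the batch

w_next = w - eta * grad  # w_{t+1} = w_t - eta * (1/n) * sum of gradients
print(w_next)
```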
+Generally, the following hyperparameters are defined for training:
+
+- Epoch: the number of times the dataset is traversed during training.
+- Batch size: the number of samples read per training step. If the batch size is too small, training takes longer and the gradient oscillates severely, which is not conducive to convergence; if it is too large, the gradient directions of different batches barely change and training easily falls into a local minimum. An appropriate batch size therefore has to be chosen to effectively improve model accuracy and global convergence.
+- Learning rate: if the learning rate is too low, convergence slows down; if it is too high, unpredictable results such as failure to converge may occur. Gradient descent is a parameter optimization algorithm widely used to minimize model error; it estimates the model parameters by iteratively minimizing the loss function at each step, and the learning rate controls how much the model learns during each iteration.
+
+![learning-rate](https://gitee.com/mindspore/docs/raw/master/tutorials/source_zh_cn/beginner/images/learning_rate.png)
+
+```python
+epochs = 10
+batch_size = 32
+momentum = 0.9
+learning_rate = 1e-2
+```
+
+## Loss Functions
+
+The **loss function** is used to evaluate the difference between the **predicted value** and the **target value** of a model. Here, the mean absolute error loss function `L1Loss` (with its default `reduction='mean'`) is used:
+
+$$
+\text{L1 Loss}=\frac{1}{n}\sum_{i=1}^{n}\left|y_{true}-y_{predicted}\right|
+$$
+
+`mindspore.nn.loss` provides many common loss functions, such as `SoftmaxCrossEntropyWithLogits`, `MSELoss`, and `SmoothL1Loss`.
+
+Given the predicted value and the target value, we calculate the error (loss value) between them by means of a loss function, which is used as follows:
+
+```python
+import numpy as np
+import mindspore.nn as nn
+from mindspore import Tensor
+
+loss = nn.L1Loss()
+output_data = Tensor(np.array([[1, 2, 3], [2, 3, 4]]).astype(np.float32))
+target_data = Tensor(np.array([[0, 2, 5], [3, 1, 1]]).astype(np.float32))
+print(loss(output_data, target_data))
+```
+
+```text
+1.5
+```
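The printed value can be verified by hand. With the default `reduction='mean'`, the six element-wise absolute errors are averaged:

$$
\frac{|1-0|+|2-2|+|3-5|+|2-3|+|3-1|+|4-1|}{6}=\frac{1+0+2+1+2+3}{6}=\frac{9}{6}=1.5
$$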
+## Optimizer Functions
+
+An optimizer is used to compute and update the gradients. The choice of optimization algorithm directly affects the performance of the final model: a poor result is sometimes caused by the optimization algorithm rather than by the features or the model design.
+
+All optimization logic of MindSpore is encapsulated in the `Optimizer` object. Here, the Momentum optimizer is used. `mindspore.nn` provides many common optimizers, such as `Adam`, `SGD`, and `RMSProp`.
+
+You need to build an `Optimizer` object that holds the current parameter state and updates the parameters based on the computed gradients. To build an `Optimizer`, pass in an iterable of the parameters (which must be variable objects) to be optimized; for example, pass `net.trainable_params()` for all trainable `Parameter`s in the network.
+
+Then, you can set the `Optimizer` options, such as the learning rate and weight decay.
+
+A code example is as follows:
+
+```python
+from mindspore import nn
+from mindvision.classification.models import lenet
+
+net = lenet(num_classes=10, pretrained=False)
+optim = nn.Momentum(net.trainable_params(), learning_rate, momentum)
+```
+
+## Model Training
+
+A model training process is generally divided into four steps.
+
+1. Define a neural network.
+2. Build a dataset.
+3. Define hyperparameters, a loss function, and an optimizer.
+4. Pass in the epoch number and dataset for training.
+
+The model training sample code is as follows:
+
+```python
+import mindspore.nn as nn
+from mindspore.train import Model
+
+from mindvision.classification.dataset import Mnist
+from mindvision.classification.models import lenet
+from mindvision.engine.callback import LossMonitor
+
+# 1. Build a dataset
+download_train = Mnist(path="./mnist", split="train", batch_size=batch_size, repeat_num=1, shuffle=True, resize=32, download=True)
+dataset_train = download_train.run()
+
+# 2. Define a neural network
+network = lenet(num_classes=10, pretrained=False)
+# 3.1 Define a loss function
+net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+# 3.2 Define an optimizer function
+net_opt = nn.Momentum(network.trainable_params(), learning_rate=learning_rate, momentum=momentum)
+# 3.3 Initialize the model with the network, loss function, optimizer, and metrics
+model = Model(network, loss_fn=net_loss, optimizer=net_opt, metrics={'acc'})
+
+# 4. Train the neural network
+model.train(epochs, dataset_train, callbacks=[LossMonitor(learning_rate, 1875)])
+```
+
+```text
+Epoch:[ 0/ 10], step:[ 1875/ 1875], loss:[0.189/1.176], time:2.254 ms, lr:0.01000
+Epoch time: 4286.163 ms, per step time: 2.286 ms, avg loss: 1.176
+Epoch:[ 1/ 10], step:[ 1875/ 1875], loss:[0.085/0.080], time:1.895 ms, lr:0.01000
+Epoch time: 4064.532 ms, per step time: 2.168 ms, avg loss: 0.080
+Epoch:[ 2/ 10], step:[ 1875/ 1875], loss:[0.021/0.054], time:1.901 ms, lr:0.01000
+Epoch time: 4194.333 ms, per step time: 2.237 ms, avg loss: 0.054
+Epoch:[ 3/ 10], step:[ 1875/ 1875], loss:[0.284/0.041], time:2.130 ms, lr:0.01000
+Epoch time: 4252.222 ms, per step time: 2.268 ms, avg loss: 0.041
+Epoch:[ 4/ 10], step:[ 1875/ 1875], loss:[0.003/0.032], time:2.176 ms, lr:0.01000
+Epoch time: 4216.039 ms, per step time: 2.249 ms, avg loss: 0.032
+Epoch:[ 5/ 10], step:[ 1875/ 1875], loss:[0.003/0.027], time:2.205 ms, lr:0.01000
+Epoch time: 4400.771 ms, per step time: 2.347 ms, avg loss: 0.027
+Epoch:[ 6/ 10], step:[ 1875/ 1875], loss:[0.000/0.024], time:1.973 ms, lr:0.01000
+Epoch time: 4554.252 ms, per step time: 2.429 ms, avg loss: 0.024
+Epoch:[ 7/ 10], step:[ 1875/ 1875], loss:[0.008/0.022], time:2.048 ms, lr:0.01000
+Epoch time: 4361.135 ms, per step time: 2.326 ms, avg loss: 0.022
+Epoch:[ 8/ 10], step:[ 1875/ 1875], loss:[0.000/0.018], time:2.130 ms, lr:0.01000
+Epoch time: 4547.597 ms, per step time: 2.425 ms, avg loss: 0.018
+Epoch:[ 9/ 10], step:[ 1875/ 1875], loss:[0.008/0.017], time:2.135 ms, lr:0.01000
+Epoch time: 4601.861 ms, per step time: 2.454 ms, avg loss: 0.017
+```
+
+The loss value is printed during training. It fluctuates, but in general it decreases gradually while the accuracy increases. Because of inherent randomness, the loss values observed in different runs will not be exactly the same.
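Since the `Model` above was initialized with `metrics={'acc'}`, the trained model can also be scored on the held-out test split. A short sketch (an editorial addition that reuses the mindvision `Mnist` loader from the training code; the exact accuracy varies between runs):

```python
# Build the MNIST test dataset with the same preprocessing as training
download_eval = Mnist(path="./mnist", split="test", batch_size=32, resize=32, download=True)
dataset_eval = download_eval.run()

# model.eval computes the metrics declared when the Model was created
acc = model.eval(dataset_eval)
print(acc)  # e.g. {'acc': 0.98...}
```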
\ No newline at end of file diff --git a/tutorials/source_en/dataset.md b/tutorials/source_en/dataset.md deleted file mode 100644 index 7e83aeb86652cd5556bc386efbdca6f3003527cc..0000000000000000000000000000000000000000 --- a/tutorials/source_en/dataset.md +++ /dev/null @@ -1,252 +0,0 @@ -# Loading and Processing Data - -`Ascend` `GPU` `CPU` `Beginner` `Data Preparation` - - - -MindSpore provides APIs for loading common datasets and datasets in standard formats. You can directly use the corresponding dataset loading class in mindspore.dataset to load data. The dataset class provides common data processing APIs for users to quickly process data. - -## Data Preparation - -Execute the following command to download and decompress the CIFAR-10 and MNIST dataset to the specified location. - -```python -import os -import requests -import tarfile -import zipfile - -def download_dataset(url, target_path): - """download and decompress dataset""" - if not os.path.exists(target_path): - os.makedirs(target_path) - download_file = url.split("/")[-1] - if not os.path.exists(download_file): - res = requests.get(url, stream=True, verify=False) - if download_file.split(".")[-1] not in ["tgz","zip","tar","gz"]: - download_file = os.path.join(target_path, download_file) - with open(download_file, "wb") as f: - for chunk in res.iter_content(chunk_size=512): - if chunk: - f.write(chunk) - if download_file.endswith("zip"): - z = zipfile.ZipFile(download_file, "r") - z.extractall(path=target_path) - z.close() - if download_file.endswith(".tar.gz") or download_file.endswith(".tar") or download_file.endswith(".tgz"): - t = tarfile.open(download_file) - names = t.getnames() - for name in names: - t.extract(name, target_path) - t.close() - -download_dataset("https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/cifar-10-binary.tar.gz", "./datasets") -download_dataset("https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip", "./datasets") -``` - -The directory structure of the CIFAR-10 dataset file is as follows: - -```text -./datasets/cifar-10-batches-bin -├── batches.meta.txt -├── data_batch_1.bin -├── data_batch_2.bin -├── data_batch_3.bin -├── data_batch_4.bin -├── data_batch_5.bin -├── readme.html -└── test_batch.bin -``` - -Refer to [Quick Start](https://www.mindspore.cn/tutorials/en/master/quick_start.html#downloading-the-dataset) for the directory structure of MINIST dataset files. - -## Loading the Dataset - -In the following example, the CIFAR-10 dataset is loaded through the `Cifar10Dataset` API, and the first five samples are obtained using the sequential sampler. - -```python -import mindspore.dataset as ds - -DATA_DIR = "./datasets/cifar-10-batches-bin" -sampler = ds.SequentialSampler(num_samples=5) -dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -``` - -## Iterating Dataset - -You can use `create_dict_iterator` to create a data iterator to iteratively access data. The following shows the image shapes and labels. 
- -```python -for data in dataset.create_dict_iterator(): - print("Image shape: {}".format(data['image'].shape), ", Label: {}".format(data['label'])) -``` - -```text - Image shape: (32, 32, 3) , Label: 6 - Image shape: (32, 32, 3) , Label: 9 - Image shape: (32, 32, 3) , Label: 9 - Image shape: (32, 32, 3) , Label: 4 - Image shape: (32, 32, 3) , Label: 1 -``` - -## Customizing Datasets - -For datasets that cannot be directly loaded by MindSpore, you can build a custom dataset class and use the `GeneratorDataset` API to customize data loading. - -```python -import numpy as np - -np.random.seed(58) - -class DatasetGenerator: - def __init__(self): - self.data = np.random.sample((5, 2)) - self.label = np.random.sample((5, 1)) - - def __getitem__(self, index): - return self.data[index], self.label[index] - - def __len__(self): - return len(self.data) -``` - -You need to customize the following class functions: - -- **\_\_init\_\_** - - When a dataset object is instantiated, the `__init__` function is called. You can perform operations such as data initialization. - - ```python - def __init__(self): - self.data = np.random.sample((5, 2)) - self.label = np.random.sample((5, 1)) - ``` - -- **\_\_getitem\_\_** - - Define the `__getitem__` function of the dataset class to support random access and obtain and return data in the dataset based on the specified `index` value. - - The return value of the `__getitem__` function needs to be a tuple of numpy arrays. When returning a single numpy array, it can be written as `return (np_array_1,)`. - - ```python - def __getitem__(self, index): - return self.data[index], self.label[index] - ``` - -- **\_\_len\_\_** - - Define the `__len__` function of the dataset class and return the number of samples in the dataset. - - ```python - def __len__(self): - return len(self.data) - ``` - -After the dataset class is defined, the `GeneratorDataset` API can be used to load and access dataset samples in the user-defined mode. - -```python -dataset_generator = DatasetGenerator() -dataset = ds.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=False) - -for data in dataset.create_dict_iterator(): - print('{}'.format(data["data"]), '{}'.format(data["label"])) -``` - -```text - [0.36510558 0.45120592] [0.78888122] - [0.49606035 0.07562207] [0.38068183] - [0.57176158 0.28963401] [0.16271622] - [0.30880446 0.37487617] [0.54738768] - [0.81585667 0.96883469] [0.77994068] -``` - -## Data Processing and Augmentation - -### Processing Data - -The dataset APIs provided by MindSpore support common data processing methods. You only need to call the corresponding function APIs to quickly process data. - -In the following example, the datasets are shuffled, and then two samples form a batch. - -```python -ds.config.set_seed(58) - -# Shuffle the data sequence. -dataset = dataset.shuffle(buffer_size=10) -# Perform batch operations on datasets. -dataset = dataset.batch(batch_size=2) - -for data in dataset.create_dict_iterator(): - print("data: {}".format(data["data"])) - print("label: {}".format(data["label"])) -``` - -```text - data: [[0.36510558 0.45120592] - [0.57176158 0.28963401]] - label: [[0.78888122] - [0.16271622]] - data: [[0.30880446 0.37487617] - [0.49606035 0.07562207]] - label: [[0.54738768] - [0.38068183]] - data: [[0.81585667 0.96883469]] - label: [[0.77994068]] -``` - -Where, - -`buffer_size`: size of the buffer for shuffle operations in the dataset. - -`batch_size`: number of data records in each group. Currently, each group contains 2 data records. 
- -### Data Augmentation - -If the data volume is too small or the sample scenario is simple, the model training effect is affected. You can perform the data augmentation operation to expand the sample diversity and improve the generalization capability of the model. - -The following example uses the operators in the `mindspore.dataset.vision.c_transforms` module to perform data argumentation on the MNIST dataset. - -Import the `c_transforms` module and load the MNIST dataset. - -```python -import matplotlib.pyplot as plt - -from mindspore.dataset.vision import Inter -import mindspore.dataset.vision.c_transforms as c_vision - -DATA_DIR = './datasets/MNIST_Data/train' - -mnist_dataset = ds.MnistDataset(DATA_DIR, num_samples=6, shuffle=False) - -# View the original image data. -mnist_it = mnist_dataset.create_dict_iterator() -data = next(mnist_it) -plt.imshow(data['image'].asnumpy().squeeze(), cmap=plt.cm.gray) -plt.title(data['label'].asnumpy(), fontsize=20) -plt.show() -``` - -![png](./images/output_13_0.PNG) - -Define the data augmentation operator, perform the `Resize` and `RandomCrop` operations on the dataset, and insert the dataset into the data processing pipeline through `map` mapping. - -```python -resize_op = c_vision.Resize(size=(200,200), interpolation=Inter.LINEAR) -crop_op = c_vision.RandomCrop(150) -transforms_list = [resize_op, crop_op] -mnist_dataset = mnist_dataset.map(operations=transforms_list, input_columns=["image"]) -``` - -View the data augmentation effect. - -```python -mnist_dataset = mnist_dataset.create_dict_iterator() -data = next(mnist_dataset) -plt.imshow(data['image'].asnumpy().squeeze(), cmap=plt.cm.gray) -plt.title(data['label'].asnumpy(), fontsize=20) -plt.show() -``` - -![png](./images/output_17_0.PNG) - -For more information, see [Data augmentation](https://www.mindspore.cn/docs/programming_guide/en/master/augmentation.html). 
diff --git a/tutorials/source_en/images/introduction2.png b/tutorials/source_en/images/introduction2.png deleted file mode 100644 index 61ff0d91e4ee5a54f1a2a05e4e3f000b1af30c58..0000000000000000000000000000000000000000 Binary files a/tutorials/source_en/images/introduction2.png and /dev/null differ diff --git a/tutorials/source_en/images/introduction3.png b/tutorials/source_en/images/introduction3.png deleted file mode 100644 index a66da13137d7ed238c6ead4ffae8228dc72f2543..0000000000000000000000000000000000000000 Binary files a/tutorials/source_en/images/introduction3.png and /dev/null differ diff --git a/tutorials/source_en/images/output_13_0.PNG b/tutorials/source_en/images/output_13_0.PNG deleted file mode 100644 index c8cdc908c7dadb762ebce708145994b5b753060b..0000000000000000000000000000000000000000 Binary files a/tutorials/source_en/images/output_13_0.PNG and /dev/null differ diff --git a/tutorials/source_en/images/output_17_0.PNG b/tutorials/source_en/images/output_17_0.PNG deleted file mode 100644 index 48bcec38475b66b7d7e869b1a1386f3b5955714b..0000000000000000000000000000000000000000 Binary files a/tutorials/source_en/images/output_17_0.PNG and /dev/null differ diff --git a/tutorials/source_en/index.rst b/tutorials/source_en/index.rst index cb68315cbe8f149a8d99b1cdfa6bb47d06218309..4d6a8e712ede0597ef1613e2811755362e9e6d8a 100644 --- a/tutorials/source_en/index.rst +++ b/tutorials/source_en/index.rst @@ -10,218 +10,13 @@ MindSpore Tutorial :glob: :maxdepth: 1 :caption: Quick Start - :hidden: - introduction - quick_start - tensor - dataset - model - autograd - optimization - save_load_model - inference - linear_regression - -.. raw:: html - -
-   .. (deleted raw HTML block, markup not recoverable from this diff: an interactive tutorial filter widget with "Filter", "Hardware", "Classification", and "Experience" sections and tutorial card links)
+ beginner/introduction + beginner/quick_start + beginner/tensor + beginner/dataset + beginner/model + beginner/autograd + beginner/train + beginner/save_load + beginner/infer diff --git a/tutorials/source_en/inference.md b/tutorials/source_en/inference.md deleted file mode 100644 index 088d03ff4708efdb06f03a521d42c744ca6b6b82..0000000000000000000000000000000000000000 --- a/tutorials/source_en/inference.md +++ /dev/null @@ -1,433 +0,0 @@ -# Inference - -`Ascend` `Device` `Beginner` `Inference Application` - - - -This is the last tutorial. To better adapt to different inference devices, inference is classified into Ascend AI Processor inference and mobile device inference. - -## Ascend AI Processor Inference - -An Ascend AI Processor is an energy-efficient and highly integrated AI processor oriented to edge scenarios. It can implement multiple data analysis and inference computing, such as image and video analysis, and can be widely used in scenarios such as intelligent surveillance, robots, drones, and video servers. The following describes how to use MindSpore to perform inference on the Ascend AI Processors. - -### Inference Code - -Create a directory to store the inference code project, for example, `/home/HwHiAiUser/mindspore_sample/ascend910_resnet50_preprocess_sample`. You can download the [sample code](https://gitee.com/mindspore/docs/tree/master/docs/sample_code/ascend910_resnet50_preprocess_sample) from the official website. The `model` directory is used to store the exported [MindIR model file](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/sample_resources/ascend310_resnet50_preprocess_sample/resnet50_imagenet.mindir), and the `test_data` directory is used to store the images to be classified, the images can be selected in [ImageNet2012](http://image-net.org/download-images) validation dataset. The directory structure of the inference code project is as follows: - -```text -└─ascend910_resnet50_preprocess_sample - ├── CMakeLists.txt // Build script - ├── README.md // Usage description - ├── main.cc // Main function - ├── model - │ └── resnet50_imagenet.mindir // MindIR model file - └── test_data - ├── ILSVRC2012_val_00002138.JPEG // Input sample image 1 - ├── ILSVRC2012_val_00003014.JPEG // Input sample image 2 - ├── ... // Input sample image n. -``` - -Namespaces that reference `mindspore` and `mindspore::dataset`. - -```c++ -namespace ms = mindspore; -namespace ds = mindspore::dataset; -``` - -Initialize the environment, specify the hardware platform used for inference, and set DeviceID. - -Set the hardware to Ascend 910 and set DeviceID to 0. The code example is as follows: - -```c++ -auto context = std::make_shared(); -auto ascend910_info = std::make_shared(); -ascend910_info->SetDeviceID(0); -context->MutableDeviceInfo().push_back(ascend910_info); -``` - -Load the model file. - -```c++ -// Load the MindIR model. -ms::Graph graph; -ms::Status ret = ms::Serialization::Load(resnet_file, ms::ModelType::kMindIR, &graph); -// Build a model using a graph. -ms::Model resnet50; -ret = resnet50.Build(ms::GraphCell(graph), context); -``` - -Obtain the input information required by the model. - -```c++ -std::vector model_inputs = resnet50.GetInputs(); -``` - -Load the image file. - -```c++ -// ReadFile is a function used to read images. -ms::MSTensor ReadFile(const std::string &file); -auto image = ReadFile(image_file); -``` - -Preprocess images. - -```c++ -// Use the CPU operator provided by MindData to preprocess images. 
-
-// Create an operator to decode the input into the RGB format.
-std::shared_ptr<ds::TensorTransform> decode(new ds::vision::Decode());
-// Create an operator to resize the image to the specified size.
-std::shared_ptr<ds::TensorTransform> resize(new ds::vision::Resize({256}));
-// Create an operator to normalize the input.
-std::shared_ptr<ds::TensorTransform> normalize(new ds::vision::Normalize(
-    {0.485 * 255, 0.456 * 255, 0.406 * 255}, {0.229 * 255, 0.224 * 255, 0.225 * 255}));
-// Create an operator to perform central cropping.
-std::shared_ptr<ds::TensorTransform> center_crop(new ds::vision::CenterCrop({224, 224}));
-// Create an operator to transform shape (H, W, C) into shape (C, H, W).
-std::shared_ptr<ds::TensorTransform> hwc2chw(new ds::vision::HWC2CHW());
-
-// Define a MindData preprocessing function that applies the preceding operators in sequence.
-ds::Execute preprocessor({decode, resize, normalize, center_crop, hwc2chw});
-
-// Call the data preprocessing function to obtain the processed image.
-ret = preprocessor(image, &image);
-```
-
-Start inference.
-
-```c++
-// Create an output vector.
-std::vector<ms::MSTensor> outputs;
-// Create an input vector.
-std::vector<ms::MSTensor> inputs;
-inputs.emplace_back(model_inputs[0].Name(), model_inputs[0].DataType(), model_inputs[0].Shape(),
-                    image.Data().get(), image.DataSize());
-// Call the Predict function of the model for inference.
-ret = resnet50.Predict(inputs, &outputs);
-```
-
-Obtain the inference result.
-
-```c++
-// Maximum value of the output probability.
-std::cout << "Image: " << image_file << " infer result: " << GetMax(outputs[0]) << std::endl;
-```
-
-### Build Script
-
-Add the header file search path for the compiler:
-
-```cmake
-option(MINDSPORE_PATH "mindspore install path" "")
-include_directories(${MINDSPORE_PATH})
-include_directories(${MINDSPORE_PATH}/include)
-```
-
-Search for the required dynamic libraries in MindSpore.
-
-```cmake
-find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib)
-file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*)
-```
-
-Use the specified source file to generate the target executable file and link the executable to the MindSpore libraries.
-
-```cmake
-add_executable(resnet50_sample main.cc)
-target_link_libraries(resnet50_sample ${MS_LIB} ${MD_LIB})
-```
-
-> For details, see
-> 
-
-### Building Inference Code
-
-Go to the project directory `ascend910_resnet50_preprocess_sample` and set the following environment variables:
-
-> If the device is Ascend 310, go to the project directory `ascend310_resnet50_preprocess_sample` instead. The following code uses Ascend 910 as an example. In addition, MindSpore supports one-step data preprocessing plus model inference on the Ascend 310 platform; if you are interested, refer to [more details](https://www.mindspore.cn/docs/programming_guide/en/master/multi_platform_inference_ascend_310_mindir.html).
-
-```bash
-# Control the log print level. 0 indicates DEBUG, 1 indicates INFO, 2 indicates WARNING (default value), 3 indicates ERROR, and 4 indicates CRITICAL.
-export GLOG_v=2
-
-# Select the Conda environment.
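-# For example, assuming a Conda environment named "mindspore" (adjust the name to your own setup):
-# conda activate mindspore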
-LOCAL_ASCEND=/usr/local/Ascend # Root directory of the running package
-
-# Library on which the running package depends
-export LD_LIBRARY_PATH=${LOCAL_ASCEND}/ascend-toolkit/latest/fwkacllib/lib64:${LOCAL_ASCEND}/driver/lib64/common:${LOCAL_ASCEND}/driver/lib64/driver:${LOCAL_ASCEND}/opp/op_impl/built-in/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
-
-# Libraries on which MindSpore depends
-export LD_LIBRARY_PATH=`pip3 show mindspore-ascend | grep Location | awk '{print $2"/mindspore/lib"}' | xargs realpath`:${LD_LIBRARY_PATH}
-
-# Configure necessary environment variables.
-export TBE_IMPL_PATH=${LOCAL_ASCEND}/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe  # Path of the TBE operator
-export ASCEND_OPP_PATH=${LOCAL_ASCEND}/ascend-toolkit/latest/opp                             # OPP path
-export PATH=${LOCAL_ASCEND}/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin/:${PATH}       # Path of the TBE operator build tool
-export PYTHONPATH=${TBE_IMPL_PATH}:${PYTHONPATH}                                             # Python library that TBE depends on
-```
-
-Run the `cmake` command. In the command, adjust `pip3` based on your actual environment:
-
-```bash
-cmake . -DMINDSPORE_PATH=`pip3 show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`
-```
-
-Run the `make` command for building.
-
-```bash
-make
-```
-
-After building, the executable file is generated in `ascend910_resnet50_preprocess_sample`.
-
-### Performing Inference and Viewing the Result
-
-After the preceding operations are complete, you can perform inference as follows.
-
-Log in to the Ascend 910 environment, and create the `model` directory to store the `resnet50_imagenet.mindir` file, for example, `/home/HwHiAiUser/mindspore_sample/ascend910_resnet50_preprocess_sample/model`.
-Create the `test_data` directory to store images, for example, `/home/HwHiAiUser/mindspore_sample/ascend910_resnet50_preprocess_sample/test_data`.
-Then, perform the inference.
-
-```bash
-./resnet50_sample
-```
-
-Inference is performed on all images stored in the `test_data` directory. For example, if there are 2 images whose label is 0 in the [ImageNet2012](http://image-net.org/download-images) validation set, the inference result is as follows:
-
-```text
-Image: ./test_data/ILSVRC2012_val_00002138.JPEG infer result: 0
-Image: ./test_data/ILSVRC2012_val_00003014.JPEG infer result: 0
-```
-
-## Mobile Device Inference
-
-MindSpore Lite is the device-side part of the device-edge-cloud AI framework MindSpore and can implement intelligent applications on mobile devices such as phones. MindSpore Lite provides a high-performance inference engine and an ultra-lightweight solution. It supports mobile operating systems such as iOS and Android, the LiteOS embedded operating system, various intelligent devices such as mobile phones, large screens, tablets, and IoT devices, and models from MindSpore, TensorFlow Lite, Caffe, and ONNX.
-
-The following provides a demo that runs on the Windows and Linux operating systems and is built based on the C++ API to help users get familiar with the on-device inference process. The demo uses randomly generated data as input, performs inference on the MobileNetV2 model, and directly displays the output data on the computer.
-
-> For details about the complete instance running on a mobile phone, see [Android Application Development Based on JNI](https://www.mindspore.cn/lite/docs/en/master/quick_start/quick_start.html).
-
-### Model Conversion
-
-The format of a model needs to be converted before the model is used for inference on the device. Currently, MindSpore Lite supports four types of AI frameworks: MindSpore, TensorFlow Lite, Caffe, and ONNX.
-
-The following uses the [mobilenetv2.mindir](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/1.5/mobilenetv2.mindir) model trained by MindSpore as an example to describe how to generate the `mobilenetv2.ms` model used in the demo.
-
-> The following describes the conversion process. Skip it if you only need to run the demo.
->
-> The following describes only the model used by the demo. For details about how to use the conversion tool, see [Converting Models for Inference](https://www.mindspore.cn/lite/docs/en/master/use/converter_tool.html#).
-
-- Download the conversion tool.
-
-  Download the [conversion tool package](https://www.mindspore.cn/lite/docs/en/master/use/downloads.html) based on the OS in use, decompress the package to a local directory, obtain the `converter` tool, and configure environment variables.
-
-- Use the conversion tool.
-
-    - For Linux
-
-      Go to the directory where the `converter_lite` executable file is located, place the downloaded `mobilenetv2.mindir` model in the same path, and run the following command on the PC to convert the model:
-
-      ```bash
-      ./converter_lite --fmk=MINDIR --modelFile=mobilenetv2.mindir --outputFile=mobilenetv2
-      ```
-
-    - For Windows
-
-      Go to the directory where the `converter_lite` executable file is located, place the downloaded `mobilenetv2.mindir` model in the same path, and run the following command on the PC to convert the model:
-
-      ```bash
-      call converter_lite --fmk=MINDIR --modelFile=mobilenetv2.mindir --outputFile=mobilenetv2
-      ```
-
-    - Parameter description
-
-      During the command execution, three parameters are set. `--fmk` indicates the original format of the input model; in this example it is set to `MINDIR`, the export format of models trained with the MindSpore framework. `--modelFile` indicates the path of the input model. `--outputFile` indicates the output path of the model; the suffix `.ms` is automatically added to the converted model.
-
-### Environment Building and Running
-
-#### Building and Running on Linux
-
-- Build. Refer to [Building MindSpore Lite](https://mindspore.cn/lite/docs/en/master/use/build.html#environment-requirements) for the environment requirements.
-
-  Run the build script in the `mindspore/lite/examples/quick_start_cpp` directory to automatically download related files and build the demo.
-
-  ```bash
-  bash build.sh
-  ```
-
-- Inference
-
-  After the build is complete, go to the `mindspore/lite/examples/quick_start_cpp/build` directory and run the following command to perform MindSpore Lite inference on the MobileNetV2 model.
-
-  ```bash
-  ./mindspore_quick_start_cpp ../model/mobilenetv2.ms
-  ```
-
-  After the execution is complete, the following information is displayed, including the tensor name, tensor size, number of tensor elements, and the first 50 values:
-
-  ```text
-  tensor name is: Softmax-65 tensor size is: 4004 tensor elements num is: 1001
-  output data is: 1.74225e-05 1.15919e-05 2.02728e-05 0.000106485 0.000124295 0.00140576 0.000185107 0.000762011 1.50996e-05 5.91942e-06 6.61469e-06 3.72883e-06 4.30761e-06 2.38897e-06 1.5163e-05 0.000192663 1.03767e-05 1.31953e-05 6.69638e-06 3.17411e-05 4.00895e-06 9.9641e-06 3.85127e-06 6.25101e-06 9.08853e-06 1.25043e-05 1.71761e-05 4.92751e-06 2.87637e-05 7.46446e-06 1.39375e-05 2.18824e-05 1.08861e-05 2.5007e-06 3.49876e-05 0.000384547 5.70778e-06 1.28909e-05 1.11038e-05 3.53906e-06 5.478e-06 9.76608e-06 5.32172e-06 1.10386e-05 5.35474e-06 1.35796e-05 7.12652e-06 3.10017e-05 4.34154e-06 7.89482e-05 1.79441e-05
-  ```
-
-#### Building and Running on Windows
-
-- Build. Refer to [Building MindSpore Lite](https://mindspore.cn/lite/docs/en/master/use/build.html#environment-requirements) for the environment requirements.
-
-    - Download the library: Manually download the MindSpore Lite model inference framework [mindspore-lite-{version}-win-x64.zip](https://www.mindspore.cn/lite/docs/en/master/use/downloads.html) for the CPU hardware platform and the Windows-x64 operating system. Copy the `libmindspore-lite.a` file in the decompressed `inference/lib` directory to the `mindspore/lite/examples/quick_start_cpp/lib` directory. Copy the `inference/include` directory to the `mindspore/lite/examples/quick_start_cpp/include` directory.
-
-    - Download the model: Manually download the model file [mobilenetv2.ms](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/1.5/mobilenetv2.ms) and copy it to the `mindspore/lite/examples/quick_start_cpp/model` directory.
-
-      > You can use the mobilenetv2.ms model file obtained in "Model Conversion".
-
-    - Build: Run the build script in the `mindspore/lite/examples/quick_start_cpp` directory to automatically download related files and build the demo.
-
-      ```bash
-      call build.bat
-      ```
-
-- Inference
-
-  After the build is complete, go to the `mindspore/lite/examples/quick_start_cpp/build` directory and run the following command to perform MindSpore Lite inference on the MobileNetV2 model.
-
-  ```bash
-  call ./mindspore_quick_start_cpp.exe ../model/mobilenetv2.ms
-  ```
-
-  After the execution is complete, the following information is displayed, including the tensor name, tensor size, number of tensor elements, and the first 50 values:
-
-  ```text
-  tensor name is: Softmax-65 tensor size is: 4004 tensor elements num is: 1001
-  output data is: 1.74225e-05 1.15919e-05 2.02728e-05 0.000106485 0.000124295 0.00140576 0.000185107 0.000762011 1.50996e-05 5.91942e-06 6.61469e-06 3.72883e-06 4.30761e-06 2.38897e-06 1.5163e-05 0.000192663 1.03767e-05 1.31953e-05 6.69638e-06 3.17411e-05 4.00895e-06 9.9641e-06 3.85127e-06 6.25101e-06 9.08853e-06 1.25043e-05 1.71761e-05 4.92751e-06 2.87637e-05 7.46446e-06 1.39375e-05 2.18824e-05 1.08861e-05 2.5007e-06 3.49876e-05 0.000384547 5.70778e-06 1.28909e-05 1.11038e-05 3.53906e-06 5.478e-06 9.76608e-06 5.32172e-06 1.10386e-05 5.35474e-06 1.35796e-05 7.12652e-06 3.10017e-05 4.34154e-06 7.89482e-05 1.79441e-05
-  ```
-
-### Inference Code Parsing
-
-The following analyzes the inference process in the demo source code and shows how to use the C++ API.
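-
-As a roadmap for the sections below, the demo reduces to five steps: read the model file, configure a context, build the model, run prediction, and release the model. The following condensed sketch is not the demo's literal code; it reuses the demo's own helpers `ReadFile` and `GenerateInputDataWithRandom` (shown later) and omits the error handling that the full parsing below walks through:
-
-```c++
-// Condensed flow of the quick_start_cpp demo (sketch only, no error handling).
-size_t size = 0;
-char *model_buf = ReadFile(model_path, &size);               // 1. read the .ms file
-auto context = std::make_shared<mindspore::Context>();       // 2. configure a CPU context
-context->MutableDeviceInfo().push_back(std::make_shared<mindspore::CPUDeviceInfo>());
-auto model = new (std::nothrow) mindspore::Model();
-model->Build(model_buf, size, mindspore::kMindIR, context);  // 3. build from the buffer
-delete[](model_buf);
-auto inputs = model->GetInputs();
-GenerateInputDataWithRandom(inputs);                         // 4. inject random inputs
-auto outputs = model->GetOutputs();
-model->Predict(inputs, &outputs);                            //    run inference
-delete model;                                                // 5. release the model
-```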
-
-#### Model Reading
-
-Read the MindSpore Lite model from the file system and store it in the memory buffer.
-
-```c++
-// Read model file.
-size_t size = 0;
-char *model_buf = ReadFile(model_path, &size);
-if (model_buf == nullptr) {
-  std::cerr << "Read model file failed." << std::endl;
-  return -1;
-}
-```
-
-#### Creating and Configuring Context
-
-```c++
-// Create and init context, add CPU device info
-auto context = std::make_shared<mindspore::Context>();
-if (context == nullptr) {
-  std::cerr << "New context failed." << std::endl;
-  return -1;
-}
-auto &device_list = context->MutableDeviceInfo();
-auto device_info = std::make_shared<mindspore::CPUDeviceInfo>();
-if (device_info == nullptr) {
-  std::cerr << "New CPUDeviceInfo failed." << std::endl;
-  return -1;
-}
-device_list.push_back(device_info);
-```
-
-#### Model Creation, Loading, and Building
-
-Use `Build` of [Model](https://www.mindspore.cn/lite/api/en/master/generate/classmindspore_Model.html#class-model) to load the model directly from the memory buffer and build it.
-
-```c++
-// Create model
-auto model = new (std::nothrow) mindspore::Model();
-if (model == nullptr) {
-  std::cerr << "New Model failed." << std::endl;
-  return -1;
-}
-// Build model
-auto build_ret = model->Build(model_buf, size, mindspore::kMindIR, context);
-delete[](model_buf);
-if (build_ret != mindspore::kSuccess) {
-  std::cerr << "Build model error " << build_ret << std::endl;
-  return -1;
-}
-```
-
-Alternatively, use `Load` of [Serialization](https://www.mindspore.cn/lite/api/en/master/generate/classmindspore_Serialization.html#class-serialization) to load a [Graph](https://www.mindspore.cn/lite/api/en/master/generate/classmindspore_Graph.html#class-graph), and then use `Build` of [Model](https://www.mindspore.cn/lite/api/en/master/generate/classmindspore_Model.html#class-model) to build the model.
-
-```c++
-// Load graph.
-mindspore::Graph graph;
-auto load_ret = mindspore::Serialization::Load(model_buf, size, mindspore::kMindIR, &graph);
-delete[](model_buf);
-if (load_ret != mindspore::kSuccess) {
-  std::cerr << "Load graph file failed." << std::endl;
-  return -1;
-}
-
-// Create model
-auto model = new (std::nothrow) mindspore::Model();
-if (model == nullptr) {
-  std::cerr << "New Model failed." << std::endl;
-  return -1;
-}
-// Build model
-mindspore::GraphCell graph_cell(graph);
-auto build_ret = model->Build(graph_cell, context);
-if (build_ret != mindspore::kSuccess) {
-  delete model;
-  std::cerr << "Build model error " << build_ret << std::endl;
-  return -1;
-}
-```
-
-#### Model Inference
-
-Model inference includes input data injection, inference execution, and output retrieval. In this example, the input data is randomly generated, and the output result is printed after inference.
-
-```c++
-auto inputs = model->GetInputs();
-// Generate random data as input data.
-auto ret = GenerateInputDataWithRandom(inputs);
-if (ret != mindspore::kSuccess) {
-  delete model;
-  std::cerr << "Generate Random Input Data failed." << std::endl;
-  return -1;
-}
-// Get Output
-auto outputs = model->GetOutputs();
-
-// Model Predict
-auto predict_ret = model->Predict(inputs, &outputs);
-if (predict_ret != mindspore::kSuccess) {
-  delete model;
-  std::cerr << "Predict model error " << predict_ret << std::endl;
-  return -1;
-}
-
-// Print Output Tensor Data.
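-// Each output MSTensor exposes its name, byte size, and element count;
-// tensor.Data() returns a smart pointer to the raw buffer, which is cast
-// to the element type (float for this model) before printing.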
-for (auto tensor : outputs) {
-  std::cout << "tensor name is:" << tensor.Name() << " tensor size is:" << tensor.DataSize()
-            << " tensor elements num is:" << tensor.ElementNum() << std::endl;
-  auto out_data = reinterpret_cast<const float *>(tensor.Data().get());
-  std::cout << "output data is:";
-  for (int i = 0; i < tensor.ElementNum() && i <= 50; i++) {
-    std::cout << out_data[i] << " ";
-  }
-  std::cout << std::endl;
-}
-```
-
-#### Memory Release
-
-After the MindSpore Lite inference process is complete, release the created `Model`.
-
-```c++
-// Delete model.
-delete model;
-```
diff --git a/tutorials/source_en/introduction.md b/tutorials/source_en/introduction.md
deleted file mode 100644
index 043c72d75e16316d53a0f0f12111227e410717d5..0000000000000000000000000000000000000000
--- a/tutorials/source_en/introduction.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Overview
-
-`Ascend` `GPU` `CPU` `Device` `Beginner`
-
-The following describes the Huawei AI full-stack solution and introduces the position of MindSpore in the solution. Developers who are interested in MindSpore can visit the [MindSpore community](https://gitee.com/mindspore/mindspore) and click [Watch, Star, and Fork](https://gitee.com/mindspore/mindspore).
-
-## Introduction to the Huawei Ascend AI Full-Stack Solution
-
-Ascend computing is a full-stack AI computing infrastructure and application based on the Ascend series processors. It includes the Ascend series chips, Atlas series hardware, CANN chip enablement, the MindSpore AI framework, ModelArts, and MindX application enablement.
-
-The Huawei Atlas AI computing solution is based on Ascend series AI processors and uses various product forms such as modules, cards, edge stations, servers, and clusters to build an all-scenario AI infrastructure solution oriented to device, edge, and cloud. It covers data center and intelligent edge solutions, as well as the entire inference and training processes in the deep learning field.
-
-- **Atlas series**: provides AI training and inference cards as well as training servers ([learn more](https://e.huawei.com/en/products/cloud-computing-dc/atlas/)).
-- **CANN heterogeneous computing architecture**: a driver layer that enables the chips ([learn more](https://www.hiascend.com/en/software/cann)).
-- **MindSpore**: all-scenario AI framework ([learn more](https://www.mindspore.cn/en)).
-- **MindX SDK**: Ascend SDK that provides application solutions ([learn more](https://www.hiascend.com/en/software/mindx-sdk)).
-- **ModelArts**: HUAWEI CLOUD AI development platform ([learn more](https://www.huaweicloud.com/product/modelarts.html)).
-- **MindStudio**: E2E development toolchain that provides a one-stop IDE for AI development ([learn more](https://www.hiascend.com/en/software/mindstudio)).
-
-For details, click [Huawei Ascend official website](https://e.huawei.com/en/products/servers/ascend).
-
-## MindSpore Introduction
-
-MindSpore is an all-scenario deep learning framework, aiming to achieve easy development, efficient execution, and all-scenario coverage. Easy development features friendly APIs and easy debugging. Efficient execution is reflected in computing, data preprocessing, and distributed training. All-scenario coverage means that the framework supports cloud, edge, and device scenarios.
-
-The following figure shows the overall MindSpore architecture:
-
-- **ModelZoo**: provides ready-to-use deep learning algorithm networks; more developers are welcome to contribute new networks.
-- **MindSpore Extend**: the expansion package of MindSpore, which adds support for new fields such as GNN, deep probabilistic programming, and reinforcement learning. More developers are welcome to contribute and build it together.
-- **MindScience**: a scientific computing kit for various industries based on the converged MindSpore framework. It contains industry-leading datasets, basic network structures, high-precision pre-trained models, and pre- and post-processing tools that accelerate the development of scientific computing applications.
-- **MindExpression**: Python-based frontend expression and programming interfaces. In the future, more frontends based on C/C++ will be provided. Cangjie, Huawei's self-developed programming language frontend, is now in the pre-research phase. In addition, Huawei is working on interconnection with third-party frontends to introduce more third-party ecosystems.
-- **MindData**: provides efficient data processing and loading of commonly used datasets through functions and programming interfaces, and supports flexible user-defined processing registration and pipeline parallel optimization.
-- **MindCompiler**: the core compiler layer, which implements three major functions based on the unified device-cloud MindIR: hardware-independent optimization (type derivation, automatic differentiation, and expression simplification), hardware-related optimization (automatic parallelism, memory optimization, graph kernel fusion, and pipeline execution), and optimization related to deployment and inference (quantization and pruning).
-- **MindRT**: the MindSpore runtime system, which covers the host-side runtime on the cloud side, the device-side runtime, and a lightweight runtime for smaller IoT devices.
-- **MindInsight**: provides MindSpore's visual debugging and tuning tools, which help users debug and tune the training network.
-- **MindArmour**: provides security and privacy protection enhancements for enterprise-level applications, such as adversarial robustness, model security testing, differential privacy training, privacy leakage risk assessment, and data drift detection.
-
-![MindSpore](images/introduction2.png)
-
-### API Level Structure
-
-To support network building, entire-graph execution, subgraph execution, and single-operator execution, MindSpore provides users with three levels of APIs. In ascending order, these are Low-Level Python API, Medium-Level Python API, and High-Level Python API.
-
-![MindSpore API](images/introduction3.png)
-
-- High-Level Python API
-
-  High-level APIs are at the first layer. Based on the medium-level APIs, these advanced APIs include training and inference management, mixed precision training, and debugging and optimization, enabling users to control the execution process of the entire network and implement training, inference, and optimization of the neural network. For example, by utilizing the Model API, users can specify the neural network model to be trained as well as related training settings, train the neural network model, and debug the neural network performance through the Profiler API.
-
-- Medium-Level Python API
-
-  Medium-level APIs are at the second layer, which encapsulate low-level APIs and provide such modules as the network layer, optimizer, and loss function. Users can flexibly build neural networks and control execution processes through the medium-level APIs to quickly implement model algorithm logic. For example, users can call the Cell API to build neural network models and computing logic, add the loss function and optimization methods to the neural network model by using the loss module and Optimizer API, and use the dataset module to process data for model training and inference.
-
-- Low-Level Python API
-
-  Low-level APIs are at the third layer, including tensor definition, basic operators, and automatic differentiation modules, enabling users to easily define tensors and perform derivative computation. For example, users can customize tensors by using the Tensor API, and use the GradOperation operator in the ops.composite module to calculate the derivative of a function at a specified position.
-
-## Joining the Community
-
-Every developer is welcome to join the MindSpore community and contribute to this all-scenario AI framework.
-
-- **MindSpore official website**: provides comprehensive MindSpore information, including installation, tutorials, documents, community, resources, and news ([learn more](https://www.mindspore.cn/en)).
-- **MindSpore code**:
-
-    - [MindSpore Gitee](https://gitee.com/mindspore/mindspore): the top Gitee open-source project of 2020, where you can track the latest progress of MindSpore by clicking Watch, Star, and Fork, discuss issues, and commit code.
-
-    - [MindSpore GitHub](https://github.com/mindspore-ai/mindspore): a mirror of the MindSpore code on Gitee. Developers who are accustomed to GitHub can learn MindSpore and view the latest code implementation here.
-
-- **MindSpore forum**: We are dedicated to serving every developer. You can find your voice in MindSpore, regardless of whether you are an entry-level developer or a master. Let's learn and grow together. ([Learn more](https://bbs.huaweicloud.com/forum/forum-1076-1.html))
diff --git a/tutorials/source_en/linear_regression.ipynb b/tutorials/source_en/linear_regression.ipynb
deleted file mode 100644
index 62e7d31f89b03944970ac179b6b2bf81538e76bc..0000000000000000000000000000000000000000
--- a/tutorials/source_en/linear_regression.ipynb
+++ /dev/null
@@ -1,528 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "source": [
-    "# Simple Linear Function Fitting\n",
-    "\n",
-    "Author: [Yi Yang](https://github.com/helloyesterday)    Editor: [Mingfu Lv](https://gitee.com/lvmingfu)\n",
-    "\n",
-    "[![Download Notebook](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_notebook_en.png)](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/master/tutorials/en/mindspore_linear_regression.ipynb)  [![View Source On Gitee](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_source_en.png)](https://gitee.com/mindspore/docs/blob/master/tutorials/source_en/linear_regression.ipynb)"
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "## Overview\n",
-    "\n",
-    "Regression algorithms usually use a series of properties to predict a value, and the predicted values are continuous. For example, the price of a house is predicted based on some given feature data of the house, such as area and the number of bedrooms; or future temperature conditions are predicted by using the temperature change data and satellite cloud images in the last week. If the actual price of the house is CNY5 million, and the value predicted through regression analysis is CNY4.99 million, the regression analysis is considered accurate. For machine learning problems, common regression analysis includes linear regression, polynomial regression, and logistic regression. This example describes the linear regression algorithm and how to use MindSpore to perform linear regression training.\n",
-    "\n",
-    "The whole process is as follows:\n",
-    "\n",
-    "1. Generate datasets.\n",
-    "2. Define a training network.\n",
-    "3. Define and associate the forward and backward propagation networks.\n",
-    "4. Prepare for fitting process visualization.\n",
-    "5. Perform training.\n",
-    "\n",
-    "> This document is applicable to CPU, GPU, and Ascend environments. The source code address of this example: .\n",
-    "\n",
-    "## Environment Preparation\n",
-    "\n",
-    "Complete the MindSpore running configuration."
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "source": [
-    "from mindspore import context\n",
-    "\n",
-    "context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")"
-   ],
-   "outputs": [],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "`GRAPH_MODE`: graph mode.\n",
-    "\n",
-    "`device_target`: sets the MindSpore training hardware to CPU.\n",
-    "\n",
-    "> Third-party support packages: `matplotlib` and `IPython`. If these packages are not installed, run the `pip install matplotlib IPython` command first.\n",
-    "\n",
-    "## Generating Datasets\n",
-    "\n",
-    "### Defining the Dataset Generation Function\n",
-    "\n",
-    "`get_data` is used to generate training and test datasets. Since linear data is fitted, the required training datasets should be randomly distributed around the objective function. Assume that the objective function to be fitted is $f(x)=2x+3$. $f(x)=2x+3+noise$ is used to generate training datasets, where `noise` is a random value that follows the standard normal distribution."
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "source": [
-    "import numpy as np\n",
-    "\n",
-    "def get_data(num, w=2.0, b=3.0):\n",
-    "    for _ in range(num):\n",
-    "        x = np.random.uniform(-10.0, 10.0)\n",
-    "        noise = np.random.normal(0, 1)\n",
-    "        y = x * w + b + noise\n",
-    "        yield np.array([x]).astype(np.float32), np.array([y]).astype(np.float32)"
-   ],
-   "outputs": [],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "Use `get_data` to generate 50 groups of test data and visualize them."
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "\n",
-    "eval_data = list(get_data(50))\n",
-    "x_target_label = np.array([-10, 10, 0.1])\n",
-    "y_target_label = x_target_label * 2 + 3\n",
-    "x_eval_label, y_eval_label = zip(*eval_data)\n",
-    "\n",
-    "plt.scatter(x_eval_label, y_eval_label, color=\"red\", s=5)\n",
-    "plt.plot(x_target_label, y_target_label, color=\"green\")\n",
-    "plt.title(\"Eval data\")\n",
-    "plt.show()"
-   ],
-   "outputs": [
-    {
-     "output_type": "display_data",
-     "data": {
-      "text/plain": [
-       "
" - ], - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXkAAAEICAYAAAC6fYRZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAAq0ElEQVR4nO3dd3xUVfrH8c+TUJZeQ682FNwVJRZUFhALooJd7EoAccUVscvPBQFXUbGjiGWVXbAigqDSDCIgJSAdkSJKKCF0ECnJnN8fM5kMYUISZiaTTL7v12temXvvmXuf3EyeOXPuueeYcw4REYlNcdEOQEREIkdJXkQkhinJi4jEMCV5EZEYpiQvIhLDlORFRGKYkrxIDmY23cy657NsOzNLjXRMIsdLSV6KLTNbb2Z/mtm+gMcb0Y4rN2Z2l5nNjHYcUrKUinYAIiG6yjk3NdpBiBRVqslLzDGzsma2y8xOD1iX4Kv11zKzamY2wczSzWyn73mDfO67nJl94HvdCuDsHNsfN7O1ZrbXzFaY2TW+9acBw4HWvm8cu3zrrzCzn8xsj5ltMLMBYToNIoCSvMQg59xB4Avg5oDVNwLfO+e24n3f/wdoDDQC/gTy28zTHzjR97gMuDPH9rVAG6AK8DTwPzOr65xbCfQCfnTOVXTOVfWV/wO4A6gKXAHca2ZX5/d3FcmLkrwUd1/6au1Zjx6+9aOBrgHlbvGtwzm33Tk3xjm33zm3F3gGaJvP490IPOOc2+Gc2wC8FrjROfeZc26Tc87jnPsEWA2ck9vOnHPTnXNLfeWXAB8VIBaRPKlNXoq7q3Npk08GypvZuUAa0BIYC2Bm5YGXgY5ANV/5SmYW75zLzON49YANAcu/BW40szuAvkAT36qKQM3cduaL7zngdKAMUBb4LI8YRPJNNXmJSb5k/SneJpubgQm+WjvAQ0Az4FznXGXg7771lo9dbwYaBiw3ynpiZo2Bd4DeQA1fk8yygP0GG/J1NDAeaOicq4K33T4/cYjki5K8xLLRwE3Arb7nWSrhbYffZWbV8baz59enwBO+i7cNgPsDtlXAm8jTAczsbrw19CxpQAMzK5Mjlh3OuQNmdg7eZiWRsFGSl+Luqxz95MdmbXDOzcV7YbMe8E3Aa14BygHbgDnAtwU43tN4m2h+BSYD/w043gpgKPAj3oT+V2BWwGu/A5YDW8xsm2/dP4CBZrYX+BfeDxGRsDFNGiIiErtUkxcRiWEhJ3kza2hmyb4bP5ab2QO+9QPMbKOZLfI9OoUeroiIFETIzTVmVheo65xbaGaVgAXA1Xj7E+9zzr0YcpQiInJcQu4n75zbjLdbGc65vWa2Eqgf6n5FRCR0Yb3wamZNgBl4u431Be4C9gApwEPOuZ1BXtMT6AlQoUKFVqeeemrY4hERKQkWLFiwzTmXEGxb2JK8mVUEvsd7y/cXZlYbbxc1BwzC26TT7Vj7SExMdCkpKWGJR0SkpDCzBc65xGDbwtK7xsxKA2OAUc65LwCcc2nOuUznnAfvXYC5jt8hIiKREY7eNQa8B6x0zr0UsL5uQLFr8N7eLSIihSgcA5RdANwOLDWzRb51TwI3m1lLvM0164F7wnAsEREpgHD0rplJ8AGVvg513yIiEhrd8SoiEsOU5EVEYpiSvIhIDFOSFxGJIo/zcOeXd/L5is8jsn8leRGRKJm6ZjLxA+MZuXgk3cYd817R46Y5XkVEjsXjgfR0qFULLDwzMx7KPMRJr53Ehj3e6YJb7anI3CHbw7LvnFSTFxHJjccD7dtDgwbQrp13+Vhl09Igj6FiPl3+KWUHl/Un+DnvQMprB4jfviOMgWdTkhcRyU16OsyeDRkZ3p/p6cHL5fVh4PGwL3UdpQaW4qbPbwKg8ymd8XzXhnPTSsH553u/KUSAmmtERHJTq5Y3Ac+efexEHOzDoHZt7zaPhzdva8Z9zdb4i6/4xwpOSzgNbgp/U1BOSvIiIrkxg+TkvBNx4IfB2WdDgnfU3+37t1PzhZrQzFus50Lj7RGbIcH3ARAXl/1hECFqrhEROZasRHysmrYZTJsGiYkwbx60b8+A5P7eBO/z+2vxvL2rTcSaZXKjmryISDhs3w4pKWyokEmji2bAjBkA9G/bnwF//xf0imyzTG6U5EVEwqFWLXrdncDb9Tf7V217ZBs1ytfwLkS4WSY3aq4REQnRivQV2MA4f4IfdvkbuP4uO8FHkWryIiLHyTlH5487M+GXCQCUiivFrsd2UaFMhShHlk1JXkTkOMxJnUPr91r7lz+5/hNubHFjFCMKLuQkb2YNgZFAbbyzQI1wzr1qZtWBT4AmeGeGutE5tzPU44mIRFOmJ5Nz3j2HhZsXAtCoSiNW37+aMvFlohxZcOFok88AHnLONQfOA+4zs+bA48A059zJwDTfsohIsfXtmm8pNaiUP8FPuX0Kv/X5rcgmeAjP9H+bgc2+53vNbCVQH+gCtPMV+xCYDjwW6vFERArbwYyDNH6lMWl/pAHQukFrZnabSZwV/b4rYW2TN7MmwJnAXKC27wMAYAve5pxgr+kJ9ARo1KhROMMREQnZ6KWjufWLW/3L83vMJ7FeYhQjKpiwJXkzqwiMAfo45/ZYQId/55wzs6BDsznnRgAjABITE489fJuISCHZe3AvlZ+r7F++7rTr+OyGz7BCvpkpVGH5rmFmpfEm+FHOuS98q9PMrK5ve11gaziOJSISdjmGCX51zqtHJPhVvVfx+Y2fF7sED+HpXWPAe8BK59xLAZvGA3cCz/l+jgv1WCIiYZc1TPDs2aS3TaRWmzn+Tb1P78br175b6EMRhFM4avIXALcDF5nZIt+jE97kfomZrQYu9i2LiBS+Y03o4RsmuN/fM45I8Kkzz+X1riPzniykiAtH75qZQG4fcx1C3b+ISEgCauqcf7536OC47Prt+jL7afp/Gf7lwe0H0e/kJHi6vvdD4YcfvB8QdetGI/qQ6Y5XEYltx5jQo9u4bvxn0X/8RXc8sp1q5avDli3ZtX7njt1cE4E5YMOp6HfyFBEJRdaEHqWyp9lbmrYUe9r8CX7ElSNw/Z03wYP3Q6BNG4iP9/7MbQTJgswBGyWqyYtIbAuY3cklJHD5qMuZtHYSAOVKlWPbo9soX7r80a+ZPj3vGvqxpv0rIlSTF5HYFxfHrINriBsU70/wY24cw/5++49O8AGvyXNGqCDfEooa1eRFJKZlejI58+0zWbp1KQAnVjuRlfetpHR86dB3nt85YKNISV5EYtaEXyZw1UdX+Ze/u+M72jdtH96DFMJk3KFQkheRous4e64cyDhA/Zfqs+PPHQC0bdyW7+78rlgMKBZuJe83FpHi4Th7roxcPJJyz5TzJ/iFPRcy/a7puSf4Y90oFQNUkxeRoqmAPVd2H9hN1SFV/cs3n34zo68bfexj5HGjVCyIrd9GRGJHAXquDJ099IgEv/r+1XkneAj+QRJjVJMXkaIpHz1X0valUWdoHf/yg+c9yEuXvXRUuVxlfZBk1eSLYBfIUCnJi0jRdYyeK49O
eZQXZr/gX97UdxN1KxVwfJli0AUyVEryIlKsrNu5jhNfO9G//FyH53js/Ee8ibpiHuPMBFPEu0CGSkleRIqN2764jVFLR/mXdz62k6plKsf8xdNQKMmLSJG3eMtiWr7d0r/8Xuf36HZmN+9CWlqRHz8mmpTkRaTIcs7RYWQHktcnA1C5TCW2PLSFcmUCxpspARdPQxGuOV7fN7OtZrYsYN0AM9uYY7YoEZF8mfHbDOIGxvkT/JdLmrP7X39S7pLLj7wxKuviaWqqd+TIGLx4Gopw1eQ/AN4ARuZY/7Jz7sUwHUNESoAMTwYt3mzBL9t/AeC0mqex5NoplGrUJPcmmRi/eBqKsNTknXMzgB3h2JeIlFxf/vwlpQeV9if4GXfNYMV9KyhVp16RH9K3qIp0m3xvM7sDSAEecs7tzFnAzHoCPQEaNWoU4XBEpCj68/Cf1HqxFvsO7QOgQ9MOTLl9CpbV9FIC+rNHSiT7Gb0FnAi0BDYDQ4MVcs6NcM4lOucSExISIhiOiBRF7//0PuX/Xd6f4Bf3WszUO6ZmJ/gs+ZnEQ44SsZq8cy4t67mZvQNMiNSxRKT42XVgF9WGVPMv3/a32/jvNf+NYkSxKWI1eTMLvL/4GmBZbmVFpGR5buZzRyT4tf9cqwQfIWGpyZvZR0A7oKaZpQL9gXZm1hJwwHrgnnAcS0SKr817N1PvpXr+5UfPf5QhlwyJYkSxLyxJ3jl3c5DV74Vj3yISGx789kFemfuKf3nLD+dR+6lnoxdQCaE7XkUkolZvX80pb5ziXx46JY6+szxQKkVDEBQCjeIjIhHhnKPr512PSPC7H9tF3/gL1d+9EKkmLyJht3DzQlqNaOVfHnn1SG4/43bvgvq7FyoleREJG4/z0PaDtsz8fSYANcvXZMODG/hLqb9kF9IQBIVKzTUiEhbJvyYTPzDen+AnfBxH+oTm/CWuTJQjK9lUkxeR/PN4jmpqOZx5mGZvNOPXXb8CcEaNFizo+zPxhzNhjcZ3jzbV5EUkfzwe7wxMDRpAu3bg8TBmxRjKDC7jT/Czus1i0X1LiW99gS6uFhGqyYtI/qSn+2dg2j9vFtX/XZ6DmQcBuPyky5l4y8Ts8WaCXVwN8i1AIk81eRHJH98MTG+fE0eFxzP9CX7pvUv5+tavsxN8sGQe5FuAFA4leRHJlx0HdmIXzaBXJ2+C7tayG66/4/Rap2cXyi2ZB3wL8E/6IYVCSV5E8jR4xmBqPF/Dv7z+gfW81yXIyCW5JfOseVjVTl/o1CYvIrnauGcjDV5u4F/u16Yfgy8anPsLcptUW5N+RI2SvIgE1fvr3gybP8y/vPXhrSRUyGNin2Mlc90EFRVK8iJyhFXbVnHqsFP9y692fJV/nvvP/O9AybxIUZIXEcA7oNh1n17H2J/H+tfteXwPlcpWimJUEqqwXHg1s/fNbKuZLQtYV93MppjZat/Pasfah4hEz/yN84kbGOdP8KOvHY3r75TgY0C4etd8AHTMse5xYJpz7mRgmm9ZRIoQj/Nw7rvncs675wBQr1I9Dv7fQW7+a7B5gKQ4CkuSd87NAHbkWN0F+ND3/EPg6nAcS0TCY8raKcQPjGfexnkAfHvrt2zsu5Ey8RpQLJZEsk2+tnNus+/5FiDolRgz6wn0BGjUqFEEwxEpoXLcgXoo8xAnvnYiqXtSAUisl8icpDnEx8Xn+hopvgrlZijnnMM7oXewbSOcc4nOucSEhDy6Z4lIweS4A/WTpR9RdnBZf4KfkzSH+T3mH53gNQRBzIhkTT7NzOo65zabWV1gawSPJSLBat++O1D3xWVQuf0M3BczAOjcrDNf3vRl9ngzgYLdtaoukcVWJGvy44E7fc/vBMZF8FgiJVtute9atRh2QxMqPQnOl89X/GMF47qOC57gfa/REASxIyw1eTP7CGgH1DSzVKA/8BzwqZklAb8BN4bjWCISRJDa97ZK8SS8kADNvEV6tbqHt64cnve+NARBTAlLknfO5dbfqkM49i8iecgxZkz/FcMYOGOQf/PvfX6nYZWG+d+f7lqNGbrjVSQW+Grfv6/7icajEmGGt+19QNv+9G83ILqxSVRpqGGRGNFzYi9vgvfZNjSe/v2T1TumhFOSFynmVqSvwJ423ln4DgDDLnwWN7gUNfZmaoIOUXONSHHlnOOqj65i4uqJAJSOK83Ox3ZSoXR5OP+bo8d0lxJJSV6kGJqTOofW77X2L396/afc0OKG7ALqHSM+SvIixUimJ5Oz3zmbn7b8BEDjKo355f5fjh5vRr1jxEdJXqSY+Gb1N3Qa3cm/PPX2qXQ4Qb2U5diU5EWKuIMZB2n0SiO2/uEdGaR1g9bM7DaTOFO/CcmbkrxIETZqyShuG3ubf3l+j/kk1ks8xitEjqQkL1IE7T24l8rPVfYvX9/8ej69/tPcx5sRyYWSvEgR8+qcV+kzqY9/eVXvVZxS45ToBSTFmpK8SBGx9Y+t1H4xu0fM/efcz2uXvxbFiCQWKMmLFAFPTnuSZ2c+619OfTCV+pXrRzEiiRVK8iJRtH7Xepq+2tS/PLj9YPr9vV8UI5JYoyQvEm75nB/17nF388GiD/zLOx7dQbVy1QohQClJ1NFWJJzyMT/q0rSl2NPmT/AjrhyB6++oVrYKpKWBCzodsshxiXhN3szWA3uBTCDDOadOvhK7jjE/qnOOjqM6MnntZAAqlK7A1ke2Ur50+ewPh6xBxZKTvUMTiISosN5F7Z1zLZXgJeblMj/qrN9nETcwzp/gx9w4hn1P7vMmeAj+4SASBmqTFwmnHPOjZrhMWr7VkuXpywE4ufrJLO+1lNI7dnmbZbLa7HNM36fhgSVcCqMm74DJZrbAzHrm3GhmPc0sxcxS0lV7keLE4wnehu4bAXLC6omUHlTan+CT70zml/t+pvTFlx7dZp/14ZCaCtOna3hgCRtzEb7IY2b1nXMbzawWMAW43zk3I1jZxMREl5KSEtF4RMIisA397LPhhx8gPh6AAxkHqDu0LrsO7AKgbeO2fHfnd94BxdLSvAk+I8PbpJOaqiGBJWRmtiC35vCI1+Sdcxt9P7cCY4FzIn1MkYgLbEP/8Ue48ELwePhw0YeUe6acP8Ev7LmQ6XdNzx4xMpc2e5FIiWibvJlVAOKcc3t9zy8FBkbymCKFolYtbw3+xx8B2L1kHlUHxfs333z6zYy+bvTRr8vRZq9mGYm0SF94rQ2M9Y2cVwoY7Zz7NsLHFIk8M28TzYUX8mL8XB65JLs//Or7V3NS9ZNyf61mbZJCFNEk75xbB5wRyWOIRMuWP9Op23GOf/nB8x7kpcteimJEIkdTF0qR4/Dw5IcZ+uNQ//KmvpuoW6luFCMSCU5JXqQA1u5Yy0mvZzfFDLl4CI9e8GgUIxI5NiV5kXy668u7+HDxh/7lnY/tpOpfqkYvIJF80OAYInnYsm8L1396vT/B/6fLf3D9nRK8FAuqyYvkwjnHh4s/pO+kvuw/vJ9/X/Rv+pzXh3Kly0U7NJF8U5IXCWL9rvXcM+EeJq+dzIWNLuTdq96lWc1m0Q5LpMCU5EU
CeJyHYfOG8cS0JzAz3rj8De49+97sO1ZFihkleRGflekr6f5Vd2ZvmE3Hkzoy/IrhNK7aONphiYRESV5KvMOZh3lh9gs8/f3TVCxTkZFXj+S2v92GacgBiQFK8lKiLdy8kKTxSSzasogbmt/A65e/Tu2KGnJAYoeSvJRIfx7+k4HfD+SF2S+QUCGBL278gmtOuybaYYmEnZK8lDg//PYD3b/qzi/bf6Fby268eOmLVCtXLdphiUSEkryUDB4Pezf+yuNLhvJmyls0qdqEKbdO4uJKZ4BuapIYpiQvsc/j4ZvrzuCepstIrQx9znuAwe0GUeGyK7PnVE1O9g4BLBJj9K6WmLZ9/3bu+OQmOrVcRsVDMOuDeF4+8wkq7N6fPbPT7NneSTxEYpCSvMQk5xyfLfuE5m+cykdrvuSpXxvx07vxtG58gXdGJk3DJyVExJtrzKwj8CoQD7zrnHsu0seUkm3T3k3cN/EffLlqHK02weTUszhj3FwYsv3IKfc0DZ+UAJGe4zUeGAZcAqQC881svHNuRSSPKyWTc473f3qfhyY/xMGMgzw/NY4HZ3soFbcEtm8/eso9TcMnJUCkm2vOAdY459Y55w4BHwNdInxMKYHW7VzHJf+9hO5fdeeMOmewpNdiHom7kFJxao6Rki3SzTX1gQ0By6nAuYEFzKwn0BOgUaNGEQ5HYk2mJ5PX571Ov+/6EW/xvHXFW/Rs1dM7oJiaY0Si34XSOTcCGAGQmJjoohyOFCMr0leQND6JOalz6HRyJ4ZfMZyGVRpmF1BzjEjEk/xGIOC/jga+dSLH7VDmIYbMHMLgHwZTqUwl/nfN/7jlr7doQDGRICKd5OcDJ5tZU7zJvStwS4SPKTEsZVMKSeOTWJK2hK6nd+XVjq9Sq4La20VyE9Ek75zLMLPewCS8XSjfd84tj+QxJTbtP7yfAdMHMPTHodSpWIdxXcfRuVnnaIclUuRFvE3eOfc18HWkjyOx6/v139P9q+6s2bGGHmf14PlLntck2iL5FPULryK52XNwD49NeYzhC4ZzQrUTmHbHNC5qelG0wxIpVpTkpUia+MtEek3sxaa9m+h7Xl8GXTSI8qXLRzsskWJHSV6KlG37t9Hn2z6MWjqKFgkt+PyGzzm3wbl5v1BEglKSl+jweI64Uck5xyfLP+H+b+5n94Hd9G/bnyfbPEmZ+DLRjlSkWNMolFL4PB5o3x4aNIB27di4awNdPu7CzWNupmnVpizouYAB7QYowYuEgWryUvjS02H2bFxGBu/+OZOH32rBYU8GQy8dygPnPkB8XHy0IxSJGUryUvhq1WJthzPpUTeF5CYe2tdL5J2r3uHE6idGOzKRmKMkL4Uq05PJq3Nf5f/aLKN0XEVGXPoi3c/qoSEJRCJESV4KzbKty0gan8S8jfO4qvGlvHXNe9Sv0iDaYYnENF14lYg7dPgAAyY8zFlvn8W6nev4aNmpjOsxjfqdb/VehBWRiFGSl4iat2EOZz1Zg6cXDOWGTVVZeX0yXb9cg2VkagJtkUKgJC8Rsf/wfh6a9BCt/3MBuzz7+Wo0jHp3JzXL1dQE2iKFSG3y4pXj5qRQJP+aTPevurNu5zp6tbqHIS8tpfK6ed6kXru2ZmwSKUSqyctRNycdbzv57gO76flVTy4aeRFxFsf0O6fz1pXDqTz1B0hNhenTvUk9a8YmJXiRiFNNXvw3J5GRkd1OXsBp88avGs+9E+9ly74tPHL+IwxoNyB7QDFNwycSNRGryZvZADPbaGaLfI9OkTqWhKhWreNuJ9/6x1a6ft6VLh93oUa5GsztPpfnL3n+yBEjPR5ISwOnKXxFCluka/IvO+dejPAxJFRmBW4nd84xeuloHvj2AfYc3MPAdgN57MLHjh5vJqspaPZs7wdIcrK3Zi8ihULNNeJVgCaVDbs3cO/Ee5m4eiLnNTiPd696lxa1WgQvHIamIBE5fpGuUvU2syVm9r6ZVYvwsSTCPM7D8JThtHizBcnrk3nlsleYeffM3BM8hNQUJCKhMxdCO6mZTQXqBNnUD5gDbAMcMAio65zrFmQfPYGeAI0aNWr122+/HXc8Ejmrt6+mx1c9+P637+nQtAMjrhrBCdVOyN+Lw9g9U0SOZmYLnHOJQbeFkuQLEEATYIJz7vRjlUtMTHQpKSkRj0fyL8OTwcs/vsy/pv+LsvFleemyl7i75d0aUEykCDlWko9Ym7yZ1XXObfYtXgMsi9SxJDIWb1lM0vgkFmxeQJdmXXjzijepV6lewXekmrxI1ESyTf55M1tqZkuA9sCDETyWhNHBjIM89d1TJL6TyIZdv/Hp9Z8w9qaxx5/gw3CjlYgcn4jV5J1zt0dq3xI5P274kaTxSazctpLbN9fi5dHbqTFxGCRff3y1cPWuEYkqdVgWAP449Ad9vu3DBe9fwL5D+/i60yhGvreDGntDHC1SvWtEokr95IWp66bS46serN+1nvvOvo9nOzxLpTIV4fy3s29iOt7kfBw3WolI+CjJx6p8XOzc+edOHp78MO8vep+Tq5/MjLtm0KZxm+wC4UrOGrtGJGrUXBOL8nGxc+zKsTR/szkfLv6Qxy94nMW9Fh+Z4EGjRYrEACX5WBR4sXPWLFixwj84WNq+NG787Eau/fRaapetwdykOTx78bOUK10u+/UaUEwkZijJx6Ksi53x8VCxIpx5Jq5dW0b+9AGnDTuNcavG8czaJsx/6Gda3fLQkTV9dXkUiSlK8rEo62LnokXwxx/8XiGDTo1mcuf4uzkt4TQWXz+NJz9KpfShID1ngnV5FJFiS0k+VsXF4Wl+GsNuaEKLf8APTYzXOr7KD3f/wKnNLsju1ti6tbdZJqtpRl0eRWKKknyMWrV1JW3fuYDezdbQ+sS2LOu7hvvP/SdxFpdd0//9d+/zhg2zm2aytgVO1ycixZaSfIw5nHmY5354ljPeaMGyX+fynxWnMOmuaTSp1vTIgnFx3kewphn1qhGJGeonH0N+2vwTSeOT+GnLT1z7izFsItQ5sA62bQveTz2raSbUG55EpMhSTT4GHMg4QL9p/Tj7nbPZtHcTn9/wGWM2t6HOgTza1dU0IxLzVJMv5mb9Pouk8Ums2r6Ku1rexdBLh1K9XHVIvjZ/d6vqblSRmKYkX0ztO7SPJ6c9yRvz3qBRlUZMum0Sl554aXYBJW8RQUm+WJq0ZhI9J/Rkw+4N9D6nN//u8G8qlqkY7bBEpAhSki9Gdvy5g76T+vLh4g9pVqMZP9z9Axc0uiDaYYlIERbShVczu8HMlpuZx8wSc2x7wszWmNkqM7sstDBLqIAxZMasGEPzYc3535L/0a9NPxb1WqQELyJ5CrUmvwy4Fng7cKWZNQe6Ai2AesBUMzvFOZcZ4vFKDt8YMpuXzKL3LdX4otY2zqxzJt/e9i0t67SMdnQiUkyEVJN3zq10zq0KsqkL8LFz7qBz7ldgDXBOKMcqadzWrXywbybN78lkYvVtPHduP+b1mKcELyIFEql+8vWBDQHLqb51RzGznmaWYmYp6RoMC4D1u9Zz2ZQ7uLuzh9PTYXFKIo
9dNohScbqEIiIFk2fWMLOpQJ0gm/o558aFGoBzbgQwAiAxMbFED2Ce6clk2PxhPDntScyMYZe/Qa9G1xJXu45uVBKR45JnknfOXXwc+90INAxYbuBbJ7lYmb6S7l91Z/aG2XQ8qSPDrxhO46qNox2WiBRzkWquGQ90NbOyZtYUOBmYF6FjFWuHMw/zzIxnaPl2S37e9jMjrx7J17d8rQQvImERUiOvmV0DvA4kABPNbJFz7jLn3HIz+xRYAWQA96lnzdEWbl5It3HdWJy2mBtb3MhrHV+jdkXdpSoi4RNSknfOjQXG5rLtGeCZUPYfq/48/CdPf/80L85+kYQKCYy9aSxXn3p1tMMSkRik7hqFbMZvM+g+vjurd6wm6cwkXrjkBaqVqxbtsEQkRinJF5I9B/fwxNQneDPlTZpUbcKU26dw8QnHc01bRCT/lOQLwTerv+GeCfeQuieVPuf2YfBFg6lQpkK0wxKREkBJPoK279/Og5Me5L9L/kvzhObM6jaL1g1bRzssESlBlOTDyeOB9HRcQgKfrfyc3l/3ZueBnTz196fo16YfZUuVjXaEIlLCKMmHi29AsU1LZvGPW6syLmE7req2YuodU/lb7b9FOzoRKaGU5MPEbd3K+/tn8lAvDwfjt/P8eU/x4CX/0ngzIhJVmsg7DNbtXMfFk26l+5UezkiDJSln88ilTyvBi0jUKQuFINOTyevzXqffd/2It3je6jSMng2v0YBiIlJkKMkfp+Vbl5M0Pom5G+dyxclX8NYVb9GwSsO8XygiUoiU5AvoUOYhhswcwqAZg6hctjKjrh3FzaffjKnmLiJFkJJ8AczfOJ+k8Uks3bqUrqd35bWOr5FQISHaYYmI5EpJPh/2H95P/+T+vDTnJepUrMO4ruPo3KxztMMSEclT7Cd53w1K1Kp1XBdDp6+fTo+verBmxxp6nNWDFy55gSp/qRKBQEVEwi+2u1D6blCiQQNo1867nE+7D+ym14RetP+wPR7nYdod0xhx1QgleBEpVmK7Jp+eDrNnQ0aG92d6OtTOe1KOib9M5J4J97B532Yeav0QA9sPpHzp8oUQsIhIeIVUkzezG8xsuZl5zCwxYH0TM/vTzBb5HsNDD/U41KoF558PpUp5f9aqdczi6X+kc+sXt3LlR1dSrVw1fkz6kRcvfVEJXkSKrVBr8suAa4G3g2xb65xrGeL+Q2MGyclHtskHaaN3zvHxklH88+t/sjtjHwPaDuCJNk9QJr5MVMMXEQlVSDV559xK59yqcAUTEXFx3iaarASfo40+dU8qnUdfxS1f3s4J63ay8IOy9L+wnxK8iMSESF54bWpmP5nZ92bWJrdCZtbTzFLMLCU9PT2C4XBEG71n9ixGfP8SLd5swbRfpzF0Esx+D05ftw9+/jmycYiIFJI8k7yZTTWzZUEeXY7xss1AI+fcmUBfYLSZVQ5W0Dk3wjmX6JxLTEiI8I1Fvjb6NQnxdLivIvfMeIRWdVuxtNcS+q6oQrwDqlSB5s0jG4eISCHJs03eOVfgiUidcweBg77nC8xsLXAKkFLgCMMo03l45d9X8dT0+ZSOd4y4ZATdz+ruHZJg2zZvDb55c28Tj4hIDIhIF0ozSwB2OOcyzewE4GRgXSSOlV9L05aSND6J+Zvmc9UpV/HWFW9Rv3L97AKlSsHpp0cvQBGRCAi1C+U1ZpYKtAYmmtkk36a/A0vMbBHwOdDLObcjpEiP08GMg/RP7s9ZI85i/a71fHzdx4zrOu7IBC8iEqNCqsk758YCY4OsHwOMCWXf4TA3dS5J45NYnr6cW/96K690fIWa5WtGOywRkUITk3e8/nHoD55KfopX5rxC/cr1mXDzBK445YpohyUiUuhiLsl/9+t39PiqB+t2rqNXq14MuWQIlcsG7dgjIhLzYibJ7zqwi0cmP8K7P73LSdVPYvqd02nbpG20wxIRiaqYSPIpm1Lo8nEXtuzbwqPnP8qAdgMoV7pctMMSEYm6mEjyJ1Q7gRYJLRjXdRyJ9RLzfoGISAkRE0m+ernqTL59crTDEBEpcnRrp4hIDFOSFxGJYUryIiIxTEleRCSGKcmLiMQwJXkRkRimJC8iEsOU5EVEYpg556Idg5+ZpQO/hbCLmsC2MIUTToqrYBRXwSiugonFuBo754LOn1qkknyozCzFOVfkxjVQXAWjuApGcRVMSYtLzTUiIjFMSV5EJIbFWpIfEe0AcqG4CkZxFYziKpgSFVdMtcmLiMiRYq0mLyIiAZTkRURiWLFK8mZ2g5ktNzOPmSXm2PaEma0xs1Vmdlkur29qZnN95T4xszIRivMTM1vke6w3s0W5lFtvZkt95VIiEUuO4w0ws40BsXXKpVxH33lcY2aPF0JcL5jZz2a2xMzGmlnVXMpF/Hzl9bubWVnf33eN773UJBJxBDluQzNLNrMVvv+BB4KUaWdmuwP+vv8qpNiO+Xcxr9d852yJmZ1VCDE1CzgPi8xsj5n1yVGmUM6Xmb1vZlvNbFnAuupmNsXMVvt+VsvltXf6yqw2szuPKwDnXLF5AKcBzYDpQGLA+ubAYqAs0BRYC8QHef2nQFff8+HAvYUQ81DgX7lsWw/ULMTzNwB4OI8y8b7zdwJQxndem0c4rkuBUr7nQ4Ah0Thf+fndgX8Aw33PuwKfFNLfri5wlu95JeCXILG1AyYU1vspv38XoBPwDWDAecDcQo4vHtiC94ahQj9fwN+Bs4BlAeueBx73PX882HseqA6s8/2s5nteraDHL1Y1eefcSufcqiCbugAfO+cOOud+BdYA5wQWMDMDLgI+9636ELg6guFmHfNG4KNIHifMzgHWOOfWOecOAR/jPb8R45yb7JzL8C3OARpE8njHkJ/fvQve9w5430sdfH/niHLObXbOLfQ93wusBOpH+rhh0gUY6bzmAFXNrG4hHr8DsNY5F8rd9MfNOTcD2JFjdeD7KLdcdBkwxTm3wzm3E5gCdCzo8YtVkj+G+sCGgOVUjv4HqAHsCkgmwcqEWxsgzTm3OpftDphsZgvMrGeEY8nS2/eV+f1cviLm51xGUje8tb5gIn2+8vO7+8v43ku78b63Co2viehMYG6Qza3NbLGZfWNmLQoppLz+LtF+T3Ul94pWNM4XQG3n3Gbf8y1A7SBlwnLeitxE3mY2FagTZFM/59y4wo4nN/mM82aOXYu/0Dm30cxqAVPM7Gffp35E4gLeAgbh/acchLcpqVsoxwtHXFnny8z6ARnAqFx2E/bzVdyYWUVgDNDHObcnx+aFeJsk9vmut3wJnFwIYRXZv4vvultn4Ikgm6N1vo7gnHNmFrG+7EUuyTvnLj6Ol20EGgYsN/CtC7Qd79fEUr4aWLAy+ZZXnGZWCrgWaHWMfWz0/dxqZmPxNheE9M+R3/NnZu8AE4Jsys+5DHtcZnYXcCXQwfkaJIPsI+znK4f8/O5ZZVJ9f+MqeN9bEWdmpfEm+FHOuS9ybg9M+s65r83sTTOr6ZyL6GBc+fi7ROQ9lU+XAwudc2k5N0TrfPmkmVld59xmX9PV1iBlNuK9bpClAd7rk
QUSK80144Guvp4PTfF+Gs8LLOBLHMnA9b5VdwKR/GZwMfCzcy412EYzq2BmlbKe4734uCxY2XDJ0Q56TS7Hmw+cbN6eSGXwftUdH+G4OgKPAp2dc/tzKVMY5ys/v/t4vO8d8L6XvsvtQymcfO3+7wErnXMv5VKmTtb1ATM7B+//d0Q/gPL5dxkP3OHrZXMesDugqSLScv02HY3zFSDwfZRbLpoEXGpm1XxNq5f61hVMpK8sh/OBNzGlAgeBNGBSwLZ+eHtGrAIuD1j/NVDP9/wEvMl/DfAZUDaCsX4A9Mqxrh7wdUAsi32P5XibLSJ9/v4LLAWW+N5kdXPG5VvuhLf3xtpCimsN3rbHRb7H8JxxFdb5Cva7AwPxfgAB/MX33lnjey+dEOnz4zvuhXib2ZYEnKdOQK+s9xnQ23duFuO9gH1+IcQV9O+SIy4DhvnO6VICesZFOLYKeJN2lYB1hX6+8H7IbAYO+/JXEt7rONOA1cBUoLqvbCLwbsBru/nea2uAu4/n+BrWQEQkhsVKc42IiAShJC8iEsOU5EVEYpiSvIhIDFOSFxGJYUryIiIxTEleRCSG/T+LlwbmXPtkJwAAAABJRU5ErkJggg==" - }, - "metadata": { - "needs_background": "light" - } - } - ], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "In the preceding figure, the green line indicates the objective function, and the red points indicate the verification data `eval_data`.\n", - "\n", - "### Defining the Data Argumentation Function\n", - "\n", - "Use the MindSpore data conversion function `GeneratorDataset` to convert the data type to that suitable for MindSpore training, and then use `batch` and `repeat` to perform data argumentation. The operation is described as follows:\n", - "\n", - "- `ds.GeneratorDataset`: converts the generated data into a MindSpore dataset and saves the x and y values of the generated data to arrays of `data` and `label`.\n", - "- `batch`: combines `batch_size` pieces of data into a batch.\n", - "- `repeat`: multiplies the number of datasets." - ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 4, - "source": [ - "from mindspore import dataset as ds\n", - "\n", - "def create_dataset(num_data, batch_size=16, repeat_size=1):\n", - " input_data = ds.GeneratorDataset(list(get_data(num_data)), column_names=['data', 'label'])\n", - " input_data = input_data.batch(batch_size)\n", - " input_data = input_data.repeat(repeat_size)\n", - " return input_data" - ], - "outputs": [], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "Use the dataset argumentation function to generate training data and view the training data format." - ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 5, - "source": [ - "data_number = 1600\n", - "batch_number = 16\n", - "repeat_number = 1\n", - "\n", - "ds_train = create_dataset(data_number, batch_size=batch_number, repeat_size=repeat_number)\n", - "print(\"The dataset size of ds_train:\", ds_train.get_dataset_size())\n", - "dict_datasets = next(ds_train.create_dict_iterator())\n", - "\n", - "print(dict_datasets.keys())\n", - "print(\"The x label value shape:\", dict_datasets[\"data\"].shape)\n", - "print(\"The y label value shape:\", dict_datasets[\"label\"].shape)" - ], - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "The dataset size of ds_train: 100\n", - "dict_keys(['data', 'label'])\n", - "The x label value shape: (16, 1)\n", - "The y label value shape: (16, 1)\n" - ] - } - ], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "Use the defined `create_dataset` to perform argumentation on the generated 1600 data records and set them into 100 datasets with the shape of 16 x 1.\n", - "\n", - "## Defining the Training Network\n", - "\n", - "In MindSpore, use `nn.Dense` to generate a linear function model of single data input and single data output.\n", - "\n", - "$$f(x)=wx+b\\tag{1}$$\n", - "\n", - "Use the Normal operator to randomly initialize the weights $w$ and $b$." 
- ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 6, - "source": [ - "from mindspore.common.initializer import Normal\n", - "from mindspore import nn\n", - "\n", - "class LinearNet(nn.Cell):\n", - " def __init__(self):\n", - " super(LinearNet, self).__init__()\n", - " self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))\n", - "\n", - " def construct(self, x):\n", - " x = self.fc(x)\n", - " return x" - ], - "outputs": [], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "Call the network to view the initialized model parameters." - ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 7, - "source": [ - "net = LinearNet()\n", - "model_params = net.trainable_params()\n", - "for param in model_params:\n", - " print(param, param.asnumpy())" - ], - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Parameter (name=fc.weight, shape=(1, 1), dtype=Float32, requires_grad=True) [[0.00422094]]\n", - "Parameter (name=fc.bias, shape=(1,), dtype=Float32, requires_grad=True) [-0.01474356]\n" - ] - } - ], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "After initializing the network model, visualize the initialized network function and training dataset to understand the model function before fitting." - ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 8, - "source": [ - "from mindspore import Tensor\n", - "\n", - "x_model_label = np.array([-10, 10, 0.1])\n", - "y_model_label = (x_model_label * Tensor(model_params[0]).asnumpy()[0][0] +\n", - " Tensor(model_params[1]).asnumpy()[0])\n", - "plt.axis([-10, 10, -20, 25])\n", - "plt.scatter(x_eval_label, y_eval_label, color=\"red\", s=5)\n", - "plt.plot(x_model_label, y_model_label, color=\"blue\")\n", - "plt.plot(x_target_label, y_target_label, color=\"green\")\n", - "plt.show()" - ], - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "
" - ], - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYQAAAD8CAYAAAB3u9PLAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAAqH0lEQVR4nO3dd3xUVf7/8dcnoSgBFSEgUsSCCKgUIyqCFAvK2l0V/eradgFXXRC7uEYBC6jYRUFd1NXVFUX8IaJUC1gA6aBUF0FKpCNSkvn8/pjJEEIaTE3yfj4eeeSWM/d+uBnmM/ecc88xd0dERCQl0QGIiEhyUEIQERFACUFEREKUEEREBFBCEBGRECUEEREBopAQzKy+mU00s/lmNs/Meoa2P2RmK81sZuinS+ThiohIrFikzyGYWR2gjrv/YGbVgOnAxcAVwFZ3fzLiKEVEJOYqRHoAd18FrAotbzGzBUDdSI8rIiLxFfEdwh4HM2sIfAkcD/QGrgc2A9OAO9x9QwGv6QZ0A0hLSzvpuOOOi1o8IiLlwfTp039z9/RIjxO1hGBmVYEvgEfc/UMzqw38BjjQj2C10o1FHSMjI8OnTZsWlXhERMoLM5vu7hmRHicqvYzMrCLwAfC2u38I4O5r3D3H3QPAUKB1NM4lIiKxEY1eRga8Bixw90F5ttfJU+wSYG6k5xIRkdiJuFEZOB24FphjZjND2+4HrjKzFgSrjH4GukfhXCIiEiPR6GX0NWAF7Bod6bFFRCR+9KSyiIgASggiIhKihCAiIoASgohIqbXhjw00fKZh1I4XjV5GIiISR+7OVcO78t78/0b1uLpDEBGJtkAA1qyBKA4NlGvYzGGk9E0JJ4PMZQ2idmzdIYiIRFMgAB07wpQp0KYNTJwIKUV89w4EICsLatUCK6gHf9D8rPk0e6lZeL3lKvj2Vahkv/JolD7LdYcgIhJNWVnBZJCdHfydlVV42dzkUa8edOgQXM+3//cVS2nwdIM9ksGyfyzlhwVnUMkqQJs27ILsaISuOwQRkWiqVSt4Z5B7h1CrVuFlC0oetWsH9wUC3Nq9Pi/W+zVcfMSVI7j4uIuDKxMn7r6zKOoOZB8oIYiIRJPZnh/WRVQD7ZE8Tj4Z0oMjWI/8cSQXv3cx1AsWu3Wq8fxrq3YnCwgmgbzrUaCEICISbSX9sDaD8eOhXTv4/nt+Pu80jmzzfXh3/e2VWfBsNmknn170nUaUKCGIiCTSunXs/GEqp/41hxl1dieDuTfPpVnNJtCrBHcaUaJGZRGRBHp4wWAq35/DjNCEAf+68HU802lWq9nuO404JAPQHYKISEJMXDaRTm92Cq9fefRF/OfqD7EoNRDvDyUEEZE4WrN1DYc9dVh4vXJqZVbdsYrqB1ZPYFRB0Zgxrb6ZTTSz+WY2z8x6hrYfamZjzWxR6Hfi/7UiIgmSE8jh3H+fu0cy+Pamb9n+wPakSAYQnTaEbOAOd28KnArcYmZNgXuB8e7eCBgfWhcRKXde+P4FKvSrwGdLPgNg0DmD8EznlHqnJDiyPUVjxrRVwKrQ8hYzWwDUBS4COoSKvQFMAu6J9HwiIqXF9F+nkzE0I7zesWFHPr/2cyqkJGdtfVSjMrOGQEvgO6B2KFkArAai+wSFiEiS2rR9E/Wfrs+WnVvC237t/St1qtVJYFTFi1pztplVBT4Aern75rz73N2BAof9M7NuZjbNzKZlFTXmh4hIknN3rvnwGg4ZcEg4GYy9diye6UmfDCBKCcHMKhJMBm+7+4ehzWvMrE5ofx1gbUGvdfch7p7h7hnpoce2RUSSWgHDW789+21S+qbw9py3Abi/7f14pnPWUWclKsp9FnGVkZkZ8BqwwN0H5dn1MXAd8Hjo98hIzyUiknD5hrf+8f3BNBm8eyTSE2qdwNS/TaVyhcoJDHL/ROMO4XTgWqCTmc0M/XQhmAjONrNFwFmhdRGR5Fbc5DahEUq3WTZHtfxyj2Sw+MVUZr9bncopFeMUbHRFo5fR10Bhz1WfGenxRUTipiST29Sqxe1/qc0zDVaGNw0/51UuO6NHcBjrDfmGsS5FNJaRiEiuYia3GbVwFNY3JZwMup/UjcCDAS5rfT2kpQULpaVBzZrFnyuG02zur+TsDCsikgiFTG6zfNNyjnjmiHCx2mm1WfyPxVStVDW44bffYOvW4PLWrcH1ou4Q9nWazThRQhARyZVvcptdgWza/qst36/cPSz1rB6zOLH2iXu+rlYtOP30ks2SBkXPlJZAiU9JIiLJJDTk9CNfPUql/pXCyWDoBUPxTN87GcDuRLJiBUyaVPxw1bl3IhUqlCyBxInuEERE8vjqf19xxrAzwuuXHHcJw68YTooV8/15X6a03JdpNuNICUFEBMj6PYtaT+7+pp5iKay5cw01q5SggXh/xGBO5EipykhESrcIe+sEPMD575y/RzKYfONkch7MiV0ySFJKCCJSeuX21qlXDzp0CK7vg5envUxq31Q+WfQJAI+f+Tie6bSp36bocyZZd9FoUZWRiJRe+9lbZ8aqGbQa0iq83q5BOyZcN6H4YamTtLtotCghiEjpVchzA4XZvGMzDZ9pyIbtG8Lbfrn9F+odVK9k50vS7qLRUnZSm4iUPyXs7unu3DDyBg5+/OBwMvj0/z7FM73kyQCStrtotOgOQURKt2J667w7912u+uCq8Ppdbe5i4NkD9+9cSdpdNFqUEESkTFq0bhHHvnBseL1xjcbM7DGTA1IqBRuF9/cDPQm7i0aLEoKIlCl/7PqD5i83Z9H6ReFtC29dSKMajcp8o3CkdCVEpMy4e+zdVHm0SjgZvHvZu3imB5MBFDuaaXmnOwQRKfXGLB7DeW+fF16/ocX1vHbh61j+KqF97JVU3kQlIZjZ68D5wFp3Pz607SHgb0BuCr7f3UdH43wiIgArN6+k3tO7ewkduqsCy552DspYChf43m0EZbxROFLRqjIaBpxbwPan3b1F6EfJQESiIjuQTdvX2+6RDH7481jWDYCDtuUUXR2U2yisZLCXqCQEd/8SWB+NY4mIFGXg5IFU7FeRyb9MBmDwnwbjmU7LpmeW6WcE4iHWbQi3mtlfgGnAHe6+IX8BM+sGdANo0KBBjMMRkdJq8vLJtP1X2/D6+ceez8iuI3cPS63qoIiZR2mAJjNrCIzK04ZQG/gNcKAfUMfdbyzqGBkZGT5t2rSoxCMiZcO6betIfyIdZ/dn1do715Kelp7AqJKLmU1394xIjxOzbqfuvsbdc9w9AAwFWsfqXCJS9gQ8wCXvXULNJ2qGk8EX13+BZ7qSQYzErMrIzOq4+6rQ6iXA3FidS0TKlqHTh9JtVLfw+iOdHuH+dvcnMKLyIVrdTv8DdABqmtkKIBPoYGYtCFYZ/Qx0j8a5RKTsmr1mNs1fbh5eP6XuKXx1w1dUTK2YwKjKj6gkBHe/qoDNr0Xj2CJS9m3ZsYVjnj+Gtb+vDW/73+TWNBgzRUNLxJGeVBaRhHF3uo/qztAfhoa3/b93Uzj/xwBU+KHMzTeQ7JR6RSQh
hs8fTkrflHAy6HVKL/zBAOfXaqtnCRJEdwgiEldL1i/hmOePCa8fXf1o5tw8hwMrHhjcoGcJEkYJQUTiYkf2Dk4achLzsuaFty34+3yOS2+yZ8EyPN9AslOVkYhERyAQnHimgIdd+4zvwwGPHBBOBm/Na4z3r8Bxf+4RfJ0kBSUEEYlc7sQz9epBhw7hD/mxS8ZiDxuPfv0oANeceA2B7qu4ZsQSzUmQhFRlJCKRyzfxzK8/z6HuWy3Cu6tVqsby25dzyAGHBO8gNCdBUtIdgohELjTxTHbFVDrekrZHMpj6t6lsvm9zMBnA7kHoVqyASZN2NxwXUeUk8aGEICKRM2PQYxdSsU8Ok6pvAuC5c5/DM52Mw/OMuZb7oW+255wEhVQ5SXypykhEIvLdiu849bVTw+udj+7MJ1d/QmpK6p4Fi5rgvqC5jtXTKO6UEERkv6z/Yz11nqrDzpyd4W2r71hN7aqFfJAX9aGvuY6TgqqMRGSfuDtXvH8FNQbWCCeDCX+ZgGd64ckAdn/oF/QUcmHtChJXukMQkRIbNnMYN4y8Ibz+UPuHyOyQWbIXFzejmR5ISzglBBEp1ry18zh+8PHh9ZPqnMSUm6ZQKbXSvh1IH/pJTQlBRAr1+87fafxCY1ZuWRnetqznMhoe0jBxQUnMRKUNwcxeN7O1ZjY3z7ZDzWysmS0K/a4ejXOJSHzcOvpWqj5WNZwMRlw5As90JYMyLFqNysOAc/NtuxcY7+6NgPGhdRFJch/9+BH2sPHi1BcBuPXkW/FM5+LjLk5sYBJz0Zox7Usza5hv80UEp9UEeAOYBNwTjfOJSAQCgQIbdpdtWMZRzx0VXq9/UH0W3LKAtEppxb5WyoZYdjut7e6rQsurgQJbksysm5lNM7NpWRrkSiS2CngieGfOTlq90mqPZDD35rksv3353slATxOXaXF5DsHdHShwgBJ3H+LuGe6ekZ6eHo9wRMqvfA+HZY6+i8r9KzNj9QwAhl00DM90mtVqVuxrNUpp2RPLhLDGzOoAhH6vLaa8iERTQYPFhR4Om3h0CvZANn2nDwLgymZXEngwwHUtriv8eEU9WCZlQiy7nX4MXAc8Hvo9MobnEpG8Chk3aPXva6jT6ctwsQMqHMCvvX+l+oEl6ARY3INlUupFJSGY2X8INiDXNLMVQCbBRPBfM7sJ+B9wRTTOJSIlkK96J2fNas4bdz1jl44NF/n2pm85pd4p+3ZcPVhWpkWrl9FVhew6MxrHF5F9lGewuOevOIJ/DKkb3jXonEHcftrtCQxOkpWeVBYpi8yY9vYTnPzaKcASADrVbcvnN0wkNVX/7aVgemeIlDEbt2+k3qB6/L7r9/C2X78+hTqTvoW3ztxzHgKRPPSuECkj3J2rP7ia6gOqh5PB2GvH4j1WU2fSdHUXlWLpDkGkDHhr1lv85aO/hNf7tOtD/079gyua1F5KSAlBpBRbkLWApi81Da+fUOsEpv5tKpUrVN5dSN1FpYSUEERKoW27tnH8S8ezbOOy8LbFty3m6EOPLvgF6i4qJaA2BJFSpteYXqQ9mhZOBsMvH45neuHJQKSEdIcgUkqMWjiKC/5zQXi9+0ndGfynwZiqgCRKlBBEktzyTcs54pkjwuu102qz+B+LqVqpagKjkrJICUEkSe3K2cXpr5/O1F+nhrfN7jGbE2qfkMCopCxTG4JIEur/ZX8q9a8UTgZDLxiKZ7qSgcSU7hBEksiX//uS9sPah9cvbXIp71/+Pimm724Se0oIIklg7e9rqf3k7m6hqZbKmjvXUKNKjQRGJeWNvnaIxEpBE9TkL+IBurzdZY9kMPnGyWQ/mK1kIHGnhCASCyWYf3jw1MGk9k3l08WfAjDgrAF4ptOm7qnFJhKRWIh5lZGZ/QxsAXKAbHfPiPU5RRKuoPmHQ08Kz1g1g1ZDWoWLtmvQjgnXTaBCSoVCZzoTiYd4tSF0dPff4nQukcTLM0FN7oBym7Zv4ohnjmDTjk3hYiuumUHdo5rvHl+oiEQiEmv66iGyv4pqI8gdUG7FCnziRK4feQOHDDgknAzGXD0an3AGdY87ec8qJU1kLwkUj4TgwOdmNt3MuuXfaWbdzGyamU3L0jjtUlqUoI2AlBTezZpISr9U3pj1BgB3t7kbz3Q6H9Rq7zsB2CORMGmSRiaVuDKPccOVmdV195VmVgsYC9zm7l8WVDYjI8OnTZsW03hEomLNmmAyyM4OfpufMQOaNQt/gC9ct5DGLzQOF29cozEze8zkgAoHBDe4BxNJbpWSPvwlAmY2PRrtszFvQ3D3laHfa81sBNAaKDAhiJQaedsI0tKgRQs4/XT++Hw0zYe0ZNH6ReGiC29dSKMajfZ8veYokCQU0yojM0szs2q5y8A5wNxYnlMkLnI/0GfMgK1bISeHuw78iiqPVw0ng3cvexfP9L2TQa7cOQqUDCRJxPoOoTYwIjQ8bwXgHXcfE+NzisRHSgo0a8anFzShS4u5BJvL4MYWN/Lqha9qWGopdWKaENx9KdA8lucQSZQVm1dQ/+n60CK4XuPAGiztuZSDKh+U0LhE9pfGMhLZR7tydtF+WHu+WfFNeNuM7jNocViLxAUlEgV6DkFkHwz4egCV+lcKJ4PBfxqMZ7qSgZQJukMQKYH8s5ZdcOwFfNT1Iw1LLWWKEoJIEbbt2sbAyQMZOHkgACmWwuo7VpOelp7gyESiTwlBpADuznvz3uPusXfzy+ZfuLLZlQw4awBHHHJE8S8WKaWUEETymf7rdHqO6cnkXybT8rCWvH3p27Q7ol2iwxKJOSUEkZDVW1fTZ3wf/jXzX6SnpfPqBa9yfYvrSU1JTXRoInGhhCDl3o7sHTz33XP0+7If27O3c8dpd/DAGQ9w8AEHJzo0kbhSQpByy90ZtXAUvT/vzeL1izn/2PN56pynOLbGsYkOTSQhlBCkXJq3dh63f3Y7Y5eOpUnNJoz5vzF0PqZzosMSSSglBClX1v+xnocmPcRLU1+iWuVqPHvus9yccTMVUysmOjSRhFNCkLIvECB7zSpe+WUED07KZOP2jXQ/qTt9O/alZpWaiY5OJGkoIUjZFggw/tIW9Kw7h3m1oGPDjjx77rOckN4sOBfBga7hp0VC9Ny9lFlL1i/hkrfO56yWc9hWET58P4Xxnd8JJoPipr8UKYeUEKTM2bJjC/eOu5emLzVl7MoveWxJQ+a/nMolNdpitWsH7wwKms9YpJyLeUIws3PN7CczW2xm98b6fFJ+BTzAsJnDOPaFYxkweQBXHX8VC29byL3DlnDA/1bunrc4d/rLChWCv2vVSnToIkkhpm0IZpYKvAicDawApprZx+4+P5bnlfJnyi9T6DmmJ9N+ncapm6ox8oNUWjdcBhcetnuqylyaz1ikQLFuVG4NLA7NnIaZvQtcBBSYEDZuhBEjgsvuu7fv63J5f30yxRLr12/yFYz1e5jNO1TjcC7b/irNnl3OpwH4dEUKfvc2qFq1gNen4F670OMWtDxrFnzyCSJlVqwTQl3glzzrK4BT8hYws25At+DaSVx6aYwjkrKhwh/Q5kl
[… remainder of base64-encoded PNG data omitted: the plot shows the initial model in blue, the objective function in green, and the sample points …]"
-     },
-     "metadata": {
-      "needs_background": "light"
-     }
-    }
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "As shown in the preceding figure, the initialized model function in blue differs greatly from the objective function in green.\n",
-    "\n",
-    "## Defining and Associating the Forward and Backward Propagation Networks\n",
-    "\n",
-    "Define the loss function of the model. The mean squared error (MSE) is used to measure the fitting effect: the smaller the MSE value, the better the fit. The loss function formula is as follows:\n",
-    "\n",
-    "$$J(w)=\\frac{1}{2m}\\sum_{i=1}^m(h(x_i)-y^{(i)})^2\\tag{2}$$\n",
-    "\n",
-    "Assuming that the $i$th record in the training data is $(x_i,y^{(i)})$, the parameters in formula 2 are described as follows:\n",
-    "\n",
-    "- $J(w)$ specifies the loss value.\n",
-    "\n",
-    "- $m$ specifies the amount of sample data. In this example, the value of $m$ is `batch_number`.\n",
-    "\n",
-    "- $h(x_i)$ is the predicted value obtained after the $x_i$ value of the $i$th record is substituted into the model network (formula 1).\n",
-    "\n",
-    "- $y^{(i)}$ is the label value of the $i$th record.\n",
-    "\n",
-    "### Defining the Forward Propagation Network\n",
-    "\n",
-    "A forward propagation network consists of two parts:\n",
-    "\n",
-    "1. Substitute the parameters into the model network to obtain the predicted value.\n",
-    "2. Use the predicted value and the training data to compute the loss value.\n",
-    "\n",
-    "The following method is used in MindSpore:"
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "source": [
-    "net = LinearNet()\n",
-    "net_loss = nn.loss.MSELoss()"
-   ],
-   "outputs": [],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "### Defining the Backward Propagation Network\n",
-    "\n",
-    "The objective of the backward propagation network is to keep updating the weight values so as to minimize the loss value. For this linear network, the standard weight update formula is used:\n",
-    "\n",
-    "$$w_{t}=w_{t-1}-\\alpha\\frac{\\partial{J(w_{t-1})}}{\\partial{w}}\\tag{3}$$\n",
-    "\n",
-    "The parameters in formula 3 are described as follows:\n",
-    "\n",
-    "- $w_{t}$ indicates the weight after the training step.\n",
-    "- $w_{t-1}$ indicates the weight before the training step.\n",
-    "- $\\alpha$ indicates the learning rate.\n",
-    "- $\\frac{\\partial{J(w_{t-1})}}{\\partial{w}}$ is the derivative of the loss function with respect to the weight $w_{t-1}$.\n",
-    "\n",
-    "After all weight values in the function are updated, transfer the values to the model function. This process is the backward propagation. To implement this process, the optimizer function in MindSpore is required."
- ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 10, - "source": [ - "opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)" - ], - "outputs": [], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "### Associating the Forward and Backward Propagation Networks\n", - "\n", - "After forward propagation and backward propagation are defined, call the `Model` function in MindSpore to associate the previously defined networks, loss functions, and optimizer function to form a complete computing network." - ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 11, - "source": [ - "from mindspore import Model\n", - "\n", - "model = Model(net, net_loss, opt)" - ], - "outputs": [], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "## Preparation for Fitting Process Visualization\n", - "\n", - "### Defining the Visualization Function\n", - "\n", - "To make the entire training process easier to understand, the test data, objective function, and model network of the training process need to be visualized. The following defines a visualization function which is called after each training step to display a fitting process of the model network." - ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 12, - "source": [ - "import matplotlib.pyplot as plt\n", - "import time\n", - "\n", - "def plot_model_and_datasets(net, eval_data):\n", - " weight = net.trainable_params()[0]\n", - " bias = net.trainable_params()[1]\n", - " x = np.arange(-10, 10, 0.1)\n", - " y = x * Tensor(weight).asnumpy()[0][0] + Tensor(bias).asnumpy()[0]\n", - " x1, y1 = zip(*eval_data)\n", - " x_target = x\n", - " y_target = x_target * 2 + 3\n", - "\n", - " plt.axis([-11, 11, -20, 25])\n", - " plt.scatter(x1, y1, color=\"red\", s=5)\n", - " plt.plot(x, y, color=\"blue\")\n", - " plt.plot(x_target, y_target, color=\"green\")\n", - " plt.show()\n", - " time.sleep(0.2)" - ], - "outputs": [], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "### Defining the Callback Function\n", - "\n", - "MindSpore provides tools to customize the model training process. The following calls the visualization function in `step_end` to display the fitting process. For more information, see [Customized Debugging Information](https://www.mindspore.cn/docs/programming_guide/en/master/custom_debugging_info.html#callback).\n", - "\n", - "- `display.clear_output`:Clear the printed content to achieve dynamic fitting effect." - ], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 13, - "source": [ - "from IPython import display\n", - "from mindspore.train.callback import Callback\n", - "\n", - "class ImageShowCallback(Callback):\n", - " def __init__(self, net, eval_data):\n", - " self.net = net\n", - " self.eval_data = eval_data\n", - "\n", - " def step_end(self, run_context):\n", - " plot_model_and_datasets(self.net, self.eval_data)\n", - " display.clear_output(wait=True)" - ], - "outputs": [], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "## Performing Training\n", - "\n", - "After the preceding process is complete, use the training parameter `ds_train` to train the model. In this example, `model.train` is called. 
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "source": [
-    "opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)"
-   ],
-   "outputs": [],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "### Associating the Forward and Backward Propagation Networks\n",
-    "\n",
-    "After forward propagation and backward propagation are defined, call the `Model` API in MindSpore to associate the previously defined network, loss function, and optimizer into a complete computing network."
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "source": [
-    "from mindspore import Model\n",
-    "\n",
-    "model = Model(net, net_loss, opt)"
-   ],
-   "outputs": [],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "## Preparation for Fitting Process Visualization\n",
-    "\n",
-    "### Defining the Visualization Function\n",
-    "\n",
-    "To make the training process easier to understand, the test data, the objective function, and the model network need to be visualized during training. The following defines a visualization function that is called after each training step to display the fitting process of the model network."
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "import time\n",
-    "\n",
-    "def plot_model_and_datasets(net, eval_data):\n",
-    "    weight = net.trainable_params()[0]\n",
-    "    bias = net.trainable_params()[1]\n",
-    "    x = np.arange(-10, 10, 0.1)\n",
-    "    y = x * Tensor(weight).asnumpy()[0][0] + Tensor(bias).asnumpy()[0]\n",
-    "    x1, y1 = zip(*eval_data)\n",
-    "    x_target = x\n",
-    "    y_target = x_target * 2 + 3\n",
-    "\n",
-    "    plt.axis([-11, 11, -20, 25])\n",
-    "    plt.scatter(x1, y1, color=\"red\", s=5)\n",
-    "    plt.plot(x, y, color=\"blue\")\n",
-    "    plt.plot(x_target, y_target, color=\"green\")\n",
-    "    plt.show()\n",
-    "    time.sleep(0.2)"
-   ],
-   "outputs": [],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "### Defining the Callback Function\n",
-    "\n",
-    "MindSpore provides callback mechanisms to customize the model training process. The following calls the visualization function in `step_end` to display the fitting process. For more information, see [Customized Debugging Information](https://www.mindspore.cn/docs/programming_guide/en/master/custom_debugging_info.html#callback).\n",
-    "\n",
-    "- `display.clear_output`: clears the printed output so that successive figures are refreshed in place, producing a dynamic fitting effect."
-   ],
-   "metadata": {}
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "source": [
-    "from IPython import display\n",
-    "from mindspore.train.callback import Callback\n",
-    "\n",
-    "class ImageShowCallback(Callback):\n",
-    "    def __init__(self, net, eval_data):\n",
-    "        self.net = net\n",
-    "        self.eval_data = eval_data\n",
-    "\n",
-    "    def step_end(self, run_context):\n",
-    "        plot_model_and_datasets(self.net, self.eval_data)\n",
-    "        display.clear_output(wait=True)"
-   ],
-   "outputs": [],
-   "metadata": {}
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "## Performing Training\n",
-    "\n",
-    "After the preceding steps are complete, use the training dataset `ds_train` to train the model. In this example, `model.train` is called. The parameters are described as follows:\n",
-    "\n",
-    "- `epoch`: the number of times the entire dataset is traversed during training.\n",
-    "- `ds_train`: the training dataset.\n",
-    "- `callbacks`: the callback functions required during training.\n",
-    "- `dataset_sink_mode`: the dataset offload mode. It supports the Ascend and GPU computing platforms; in this example it is set to `False` because the CPU platform is used."
-   ],
-   "metadata": {}
-  },
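-  {
-   "cell_type": "markdown",
-   "source": [
-    "Two practical notes before running the training (an editor's sketch; both calls mirror usage that appears elsewhere in these tutorials): the compute backend is selected through the context, and built-in callbacks such as `LossMonitor` can be passed in the `callbacks` list alongside custom ones to print the loss value during training.\n",
-    "\n",
-    "```python\n",
-    "from mindspore import context\n",
-    "from mindspore.train.callback import LossMonitor\n",
-    "\n",
-    "context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n",
-    "monitor_cb = LossMonitor()  # prints the loss value as training progresses\n",
-    "```"
-   ],
-   "metadata": {}
-  },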
" - ], - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXkAAAD8CAYAAACSCdTiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAArxklEQVR4nO3dd3xUVfrH8c8TQhMbCAEUEOwCFiQioqwodl354S4ssisqSkCFte2iomsBsSKg9FBFOhI6SEfpEHpXOqGE0HsgmfP7YwaNMYGEmckkw/f9euWVO/feuefJnckzZ8499xxzziEiIuEpItQBiIhI8CjJi4iEMSV5EZEwpiQvIhLGlORFRMKYkryISBjzO8mbWVkzm2Fma8xstZm95lv/kZntMLNlvp/H/Q9XRESyw/ztJ29mpYHSzrklZnYJsBj4P6A+cNQ5187vKEVE5LxE+nsA59wuYJdv+YiZrQWu8ve4IiLiP79r8n84mFl54GegMvAm8DxwGIgH3nLOHcjgOTFADECRIkWq3nTTTQGLR0TkQrB48eK9zrkSGW0LWJI3s4uBn4C2zrk4MysJ7AUc0AZvk07jsx0jOjraxcfHByQeEZELhZktds5FZ7QtIL1rzCw/MAIY6JyLA3DOJTrnUp1zHqAnUC0QZYmISNYFoneNAb2Btc659mnWl06zW11glb9liYhI9vh94RW4B3gWWGlmy3zrWgHPmNnteJtrtgBNA1CWiIhkQyB618wGLINNE/w9toiI+Ed3vIqIhDEleRGRMKYkLyISxpTkRURCKNhTsCrJi4iEgHOONiMGU/TtKuw+8KfBAAJGSV5E5Gw8HkhMhADWuNfs2EaF9/7KB6sacvyQh2V1GnvLCQIleRGRzHg8cP/9UKYM1Kp19kSchQ+DVE8qMb2/oXK3imy1GdScVJfEXpt4dN44SEoKfPwoyYuIZC4pCebOhZQU7+/MEvG5Pgw8HmbMmUlUqxr0THidIvtqEld7FT/n30fRiGSoUQOiooLyJwTijlcRkfAUFeVNwHPnnj0RZ/RhULIkAMdPHqNeoyeZcNNs8BSlng3g+44NKVjQoNYM775RUWAZ3VPqPyV5EZHMmMGMLCTitB8Gd94JJbyj/vafPY1mY2M4UWkTJZc+xuipm7hr44NQ0HeciIjfPgyCRUleRORsspKIzWDaNKhZExYuZPeD9/HE/dewxNOfiOTreGNuU9rN60PEPXcHrVkmM0ryIiKBsG8fLn4RbSuW4+Po1aScns+Ne1sx/p33ubZMQUj6OKjNMplRkhcRCYA1Kcd54rnKbCm7nPw77qD9fX14o/Vtv+8Q5GaZzCjJi4j4IcWTwivffUOvjR/goiK4Z287xnz+GsWK5Y70mjuiEBHJg6avXUq9/k3YX2gxRfY8Sb96Xfn7Q2VDHdYfBGJmqLJmNsPM1pjZajN7zbe+mJlNMbNffb+L+h+uiEjoHTt1nCc6/pfaQ+5kf0oCf/cMY++3Y3JdgofA3AyVArzlnKsIVAdeNbOKwDvANOfc9cA032MRkTyt/5zJlPiwMhMOtSNqR2PmNFzL8I/rUahQzl5QzapAzAy1C9jlWz5iZmuBq4A6QC3fbt8BM4G3/S1PRCQUdh1K4q+d32RxygDs+A38O2om7XvcR758oY7s7ALaJm9m5YEqwAKgpO8DAGA3EJpLyyIifnDO0Xbc93w8/01S8h3m+sT/Mf6dVlxfoVCoQ8uSgCV5M7sYGAG87pw7bGn6gjrnnJllOGqPmcUAMQDlypULVDgiIn5bvXMjT3ZvxpZ8U8m/rwbt7onlzY8r5XRXd78EZIAyM8uPN8EPdM7F+VYnmllp3/bSwJ6Mnuuci3XORTvnokv4bgUWEclR6UaQPJ16mpjvvuSWbrewJWUBd+/vys62s3jr2byV4CEwvWsM6A2sdc61T7NpDPCcb/k5YLS/ZYmIBFy6ESSnrVlIqQ/upOeWt7lox8MMuXcNc795meJX5M1BewMR9T3As8ADZrbM9/M48DnwkJn9CjzoeywikvPONta7bwTJIxEpPHlRIg8OvZv9yXuoO/U1kvpP4x9f/DNoE3rkhED0rpkNZPYFpra/xxcR8cuZmvqZ4YJnzPAOOnZGVBT9H7mLZtf/yonL11NiSzNGNGhJzW9ugNQ/Dx2c1+TN7x8iIll1lok/dh5KpOrnDXnuzjmcTLmCFoVnsbNnN2o+WA6KFPHuVKQIFC+e+fGDMD1gICnJi0h4OzPWe2TkbxN/OOdoM64P5b68mSXH47h2+8eseW0p37a8l8hIYO9eOHrU+/yjR72PM5Kd6QFDRGPXiEh4Szfxx+rdG3iiRwxbbSaRSTX5onos/2190x97zURFwT33+DUjVG6hJC8i4S8iglPFi9J84Gf0+qU17nQh7jocy+hPXqRkVAYNGuczI1QQ52n1h5K8iIS9aesWUH/AS+zPv4qLtv+dnk9/S8O/lj77k7I6I1RWPgxCSEleRMLWkeQjNOj5HhP2dobjV1En/2gGdn7qt2uqAZED87T6Q0leRHIvj+e8a8n95o3llXGvcCL/Dq7Y9Co/vNyWWndfGqRAcy/1rhGR3Ok8e67sOLSL6C/q88Lkpzh58HJeKTiXXb07ZZ7gc3kXSH8pyYtI7nSW/u0Z8TgPrcfHcvVXN7P46BgqbG7Lqn8vpsu71cmfP7Mn5f4ukP5SkheR3CmD/u2ZWbFrHde0rsWH8U2x3VX4tOwKNvZtRcUbC5y9jGx+kORFapMXkdwpCz1XTqWe4tXBn9P7l7a4U0W480BvxnzyAqVKZbH9Pg90gfSXkryI5F5n6bkyZd0cGgyMYX/kGgpvaUCPuh15tm42e7nkgS6Q/lKSF5E85dDJQzTs8y4TkrrB0XI8aeMZ1OlRLjmZ5L14mt1Encu7QPpLbfIikmf0nT+S0p9UZEJiD4r98jpTn17N2HaPcslT4X3x1B+qyYtIrpdwaAd1erRgyYmR2P7baFpqFN/2vZMCBfB2f8zl48eEkpK8iORaHufhkx970HruO6S6U5Tf1JYx7/2HWyql6TVzAVw89UdAkryZ9QGeBPY45yr71n0ENAHO9Elq5ZybEIjyRCT8Ld+1mjq9YtjqmUu+HQ/SZmlVWq38iIgdk/448ccFcPHUH4GqyfcDOgP9063v4JxrF6AyROQCcDLlJC2GfUrv9Z/jTl5K1b3fMarFo5SpdlXmMzWF+cVTfwQkyTvnfjaz8oE4lohcuCat+5lnBsVwIN96Cm/4F13qtOeF+iW8vWbUJHNegt0m39zMGgHxwFvOuQPpdzCzGCAGoFy5ckEOR0Ryo4MnD9Kwb0sm7ukJh8vzuOdHBnZ5hMsv9+2gJpnzFswulN2Aa4HbgV3A1xnt5JyLdc5FO+eiS5QoEcRwRCS3cc7Re/5wrmx7MxN396bo2v/wY51VjP82TYI/40yTjBJ8tgStJu+cSzyzbGY9gXHBKktE8p5tB7fzfz1fZenxsZB0By+WGE/nfndQqFCoIwsvQUvyZlbaObfL97AusCpYZYlI3pHq
SaXNpK58Mq8Vqakeym1sx6h3X6PKberRHQyB6kI5GKgFFDezBOBDoJaZ3Q44YAvQNBBliUjetWzXSur0asI2zwLybXuED2/vxv9aVyBfvlBHFr4C1bvmmQxW9w7EsUUk7ztx+gTNf2hD33Vf4U4U5fYf/8Oowpu5us/VGlwlyPT9SESC6sd1M2g4OIYDERsotP45Oo07wovH2mGRkRqCIAfoM1REgmL/if081r0xjw19gAMHHY8kTmVHl768VHWvN8Grv3uOUE1eRALKOUfvhUNpMf41Tto+Ll/zDgObfMDjDxf27qD+7jlKSV5EAmbLga3U7f0yy45NhN138kKxSXT+7nYuuijNThqCIEepuUZEss7j8Q7t69wfVqd6UvlwYgeu61CRZQd+psz0d1nU10Ofea9xUSGN7x5KSvIikjUeD9z/58k5luxcxjWfVaf1wjdxW2rRKv8CNs9pT3TK4rCdHDsvUXONiGRNUtIfJuc4vnMrLWZ1p8+6r+F4cW5JGMrIT+px7TXAjLs0mFguoSQvIlmTZnKO8Y9X5tleD3LANlFw7Ut0fPxLmn5Z9PfrqBldXPV4dME1BNRcIyJZY8be8cN59OunefKOZRzYF8mDCTPY3qUnzZ5Pk+AzSuaZNPVI8CnJi8g5OeeInT+Asl9VYtLeOC5b9j6jH1vOlJ61+MPgsZkl83RNPWqnzzlqrhGRs9p0YBN/6/Myy45Ohp3VefaynnT9vjIXX5zBzhkl85IlNQ9rCCnJi0iGUjwptJ7ckU/nfUDq6UiuXNuZEe82o3q1s4wmllky16QfIaMkLyJ/smjHYv7WrwnbU5YSseEp3qnchdbDypA//zmeeLZkrpugQkJJXkR+c+zUMZrHfUC/dR3haBSVtv5AXNunueGGbNS8lcxzFSV5EQFg7NofaTSsGQfZSoGVTWn36Oc0/+pytazkcYGaNKQP8CSwxzlX2beuGDAUKI930pD6GU3kLSKhtefYHp4d+AaTdw2CpJu4/+gsBnW5l1KlQh2ZBEKgulD2Ax5Nt+4dYJpz7npgmu+xiOQSzjm6z+/H1V/ezOSE4Vy6+CNGPLSM6f2U4MNJoGaG+tnMyqdbXQfvlIAA3wEzgbcDUZ6IZEMGNydt2L+Bv/Vryooj0yHhHp65uCfdB9zMpZdm/hzJm4J5M1TJNBN57wYyvBJjZjFmFm9m8Um6QUIksNLdnHT6dDLv//g5N31zCyuS4ikd351ZjX9m0DfpErzuTg0bOXLh1TnnzMxlsi0WiAWIjo7OcB8ROU9pbk5asHkO9T6vynbPauyXp3nrpk60/eFKChbM/Dl/uKFJ8qRgJvlEMyvtnNtlZqWBPUEsS0QyamKJiuJIzWr8u9Aa+lU7BAcPctOmkcR9+n/cfHMmx9HdqWElmM01Y4DnfMvPAaODWJbIhS2TJpbR68ZT9r7t9Kt2iPzLXqb9tWtYPeIsCR5+v6EpIQFmzlSbfB4XqC6Ug/FeZC1uZgnAh8DnwDAzexHYCtQPRFkikoF0TSy7t66m0dRPmLJzGOypRM0Dcxjc5W6uuiqLx9MNTWEjUL1rnslkU+1AHF9EzsHXxOLmzqHb09fz5nd/ITn1OBcvaUPP51vyj78XUIX8AqU7XkXCgRnrh3en/ndNWHF8Dmy5j3rWkdgBt3F5UWX3C5mSvEgedyr1FB9N+ZIv5n2CJ7kwJVfEMnjHCu5fcydsrOFtX4/Q1BEXKiV5kTxs7rZ5/GNgExJOrcbW1ue1G77h825GoeteURdIAZTkRfKkw8mHaTGqFf3XdoXDZbj+17GM+PRJbrkFcE5dIOU3SvIieUzcmlE0/qE5hzw7iVzSgra1P+Gtry4h35m5PDRBh6ShJC+SR+w8spNGQ1owbWcc7L6VGvviGNSpGldfncHO6gIpPkryIrmcx3noPC+WlpPfJjnlFEXiP6Nbo7f41zP5VUmXc1KSF8nF1iat5R8DYlh5eDZsfoCnC/QgduB1XHFFqCOTvEJJXiQXSk5J5oMpn9Fuwad4TlxCiaV9Gfjf53joIVXdJXuU5EVymVlbZ/PMoCbsOLUOW9WQV6/twJejorjoolBHJnmRkrxILnHw5EFajH6HAet6wMGruXbdBIZ/+hhVqoQ6MsnLlORFQsw5xw9r4mgS14JDKYlExr/Jx/e3puVXRYjUf6j4SW8hkRBKOJxAo6GvMmPnGNhVhbsSxzKoU1WuuSbUkUm40IAWIoHm8UBiovfO00ykelLpOLcz17avyIytUyg86yv63L2QeXFK8BJYqsmLBNKZyTvODCmQweBgq/asosGgJqw+NB82PcRTEd3pNfgaSlzhgT2JuktVAiroNXkz22JmK81smZnFB7s8kZDKaH5Un5MpJ/nvxPe5rWsVVu/cwBU/fc/4BpMY3c+X4DV5tgRBTtXk73fO7c2hskRCJ5P5UX/a8hMNh8SwM/kXWNGIpiVb0250OS6+xFdj1+TZEiRqkxc5Xxm1vaebH/XAyYM0HPIStb6rxc5dKZSfNYkFmwvRvdd1XPxkrd9r7Gc+HCIjNXKkBFROJHkHTDazxWYWk36jmcWYWbyZxSel+WorkqtlMnE2ABERuKgohqweRvl2NzN4bT/yzWvJRyVX8ku/26i2qs+fm3M0ebYESU4019zrnNthZlHAFDNb55z7+cxG51wsEAsQHR2deXcEkdwkffPKnj1QqhQA2w5t47lhrzBz53jYWZWqOycy6Nsq3HAD4ApnPta7Ro6UIAh6Td45t8P3ew8wEqgW7DJFgu5M84qZN9HXr09qymnaz/mG6ztWZOaWmRT6qQM97pzPojG+BA+qsUuOC2pN3syKABHOuSO+5YeB1sEsUyRHmMGQIVC2LKSmsnzjXBp2vIs1x5bCr4/xuOtG70FXn6nc/5Fq7JKDgt1cUxIYad7aSiQwyDn3Y5DLFMkZpUpxomZ13s8fT4e7T+MSd1B0wWD6vvUP6tRRDV1yh6AmeefcJuC2YJYhEirTNk/n2Ud2sys5GZa+QOMy7egwrhiXXhrqyER+pzteRbJp3/F9vDrmLYau/w72XUe5FdMZ/On91KgR6shE/kxJXiSLnHMMXDmIV0a/zpHTB4mY34p373mf/00qTMGCoY5OJGNK8iJZsPnAZp774WVm7ZwECdW4bVtPBnW8lYoVQx2ZyNnpjleRs0jxpPDV7K+58dvKzNo8h4LTv6VzlbksmagEL3mDavIimViyawn/HNqEdYeWwPq/8vDpLvQeWJYyZUIdmUjWKcmLpHPs1DHem/oR3y7sgDtagsvnDafnG3/jb38z3bskeY6SvEgakzdO5rnhzdidvBmWNKFR6S/oOK4oRYuGOjKR86MkLwIkHUvi1bFvMnz9ANh7I2WW/sSAtn/hvvtCHZmIf5Tk5YLmnKP/8u9pPvZNjp4+jM35gP9Wf5ePpxaiUKFQRyfiPyV5uWBt3L+R539oxuxdU2FbDSpviWVgh0rcemuoIxMJHHWhlAvO6dTTfDbrC27uVJnZWxZQYEpX2t8yi2WTleAl/KgmLxcGjweSklh0eivP/hDD+kPLYW1
d7j/Rib4Dr+Lqq0MdoEhwKMlL+PN4OPrgX3i34AK6VEvFHS3NpbPj6P7vOjSonYSVdID6Rkp4UnONhL0J8YO57rZ4OldPwS1uxjM7F7NpXB2eib0fK5vB9H0iYURJXsJW4tFE/jb4GZ6Y+C8ST11L6d5DmbytMIMGleQKT7rp+zS/sISpoCd5M3vUzNab2QYzeyfY5Yk45+i9pA/XdriZuLVx2MyPeOOixWz46T4eWt7OO6vTmen7IiP/PNeqSBgJ9vR/+YAuwENAArDIzMY459YEs1y5cP2y7xeeH9GUebtmwtaa3Dz2dQaUncAdXxSAiDRT7p2ZazUpyZvgNV6BhKlgX3itBmzwzRCFmQ0B6gBK8hJQp1JP8eXsr/h4ZhtSThYi/8wetFm4gbc89Yk8aJDU9s/zqmquVbkABDvJXwVsT/M4Abgr7Q5mFgPEAJQrVy7I4Ug4mp8wn0bDm/Dr4VWwuh41j35D3+9Lce2LtWCuqTlGLmgh70LpnIsFYgGio6NdiMORPORI8hHentyKbku6wOGruPinMXRu8VcaNfK1vqg5RiToSX4HUDbN4zK+dSJ+GbN+DE1GvsqekztgYXP+XrQtXSZc8scKu5pjRIKe5BcB15tZBbzJvQHQMMhlShjbdWQXL4/9N6N//QESK1Ny4XD6tK7O44+HOjKR3CmoSd45l2JmzYFJQD6gj3NudTDLlPDkcR56LenFmxNbciz5JPzUluZ3/JfPZubn4otDHZ1I7hX0Nnnn3ARgQrDLkfC1bu86nh8Rw4Lds2Dz/dzwSw/6d7ieu+4693NFLnS641VyreSUZD6e2ZpbutzGwi2riBzfm0+un8aqn5TgRbIq5L1rRDIyZ9scnhvRhI2H18LKBtx9qCP9BpTkhhtCHZlI3qKavOQqh04eoumYl7m3771s3H6Mi0aOJ/aRwcyepAQvcj5Uk5fQ8I3vnrYP+8i1I2k6ujlJJ3fD/Deoc2lruk28mNKlQxyrSB6mmrzkPI8H7r8fyniH+d1xcDt/HViXp4c9TdLWEhQfNZ9RL7dn1DAleBF/qSYvOS/JO8yvJzWF7smz+U+nipw8lQIzviDm1jf48qf8XHZZqIMUCQ9K8pLzoqJY/dBtvHDlChaVPQ2/Vueatd35rsO13HtvqIMTCS9qrpEcdTLlJP+b8SG33bWC+OKXkm9MPz64ZjJrZivBiwSDavKSY37e+jMvxMWw6fB6WP4vovd9Tb/+UVSqFOrIRMKXavISdAeO7ePFIf/ivn73sXnbKQoPHE2X0ZezwNWn0s2aW1UkmFSTl6BxzvHD6mG8PPAF9hU4BfP+y6MRbxO7qSplPFthXqT3IqxGihQJGtXkJSi2H9rOEwOfov6IBuzbfxPFYicybNo2xvc9TZl7rtbcqiI5RDV58crg5qTzkepJpeuirrw9pRUnT3pg+te8sOVavk5qSNF7K0KpkprMQyQHKcnL7zcnzZ3rrV3PmOGdcCObViau5IWRTVicuAA2PEL5Vd3o26ECtf7igaRVvyd1MzXRiOQQNdfIbzcnkZLi/Z2UlK2nnzh9gnentqJK9ztYsnkTESMH8k7ZiayZW4Fatfh9hibV2kVyXNBq8mb2EdAEOJMxWvnGlpfcJirKW4M/U5PPRjv59M3TeXFkU7Yc2QBLn6dKUjv6fncFt92WZqcANQWJSPYFuybfwTl3u+9HCT63MvM20SQkwMyZWUrE+47v44VRjandvzZbtzkKDplKh1p9WfRzBgk+zTg1eNRlUiQnqU1evLI46bVzjiGrhvDq+Nc4cGI/zHmHBwt8QOzEwpQvn8ETMmoKUnu8SI4Jdk2+uZmtMLM+ZlY0ox3MLMbM4s0sPimbbcGSs7Yc3MJjA56gYVxDDmwqz+VDlzCw8WdMGp9Jgoffm4LUZVIkJMw5d/5PNpsKlMpg03vAfGAv4IA2QGnnXOOzHS86OtrFx8efdzwSHCmeFDot6ESrqe+TnGy4qZ/yrxtfpcPX+ShePAsHUJu8SFCZ2WLnXHRG2/xqrnHOPZjFAHoC4/wpS0Jj2e5lvDDyJZbtWQy/PEHZFV3p9XU5Hn441JGJSFYErbnGzNJO91AXWBWssiTwjp8+zttT3qZqj2hWbEnAfhjKm6XHsnZ+NhO8LryKhFQwL7x+aWa3422u2QI0DWJZEkBTNk6hyehmbD2yCRa/ROXdX9CvXzGqVj2Pg+nCq0hIBa0m75x71jl3i3PuVufcU865XcEqSwJj7/G9PBvXiIcHPMz2rZHk/24SX4y/giUF/07VKudZA9eFV5GQUhdKwTnHgBUD+PeENzh08hDMep+aqf+h17a7uM6zHub7MVrkmT74uvAqEhIa1iBceTyQmAjn6D216cAmHur/CI1GNeLQphu4ZNBS+jZqw4wZl3LdPSUDUwPXsAYiIaOafDjKwoBjKZ4UOszrwP+mf8jp5EiY3Jn6177MN7MjfBV21cBFwoGSfDhKe7FzzhxYswYqVfotUS/euZgXRr3EyqRlsK4OpZd2pufXZXjiCd/z0/Zr10VSkTxNzTXh6MzFznz54OKLoUoVqFWLYyeP8Nakt6jWsxqrtyTC0B9oUSKO9YvSJXh1eRQJG6rJh6MzFzvXrPEm+JQUftw9myadKpFwfDvEN+XmOc3oc7g51Ut8C0Vm8Nvnvbo8ioQV1eTDVUQEVKrEnvui+effjMcaetiZUITI/rNoU6kdyw7XoHrqnD+PH68ujyJhRTX5MOVSU+k3uxNv1F7P4ZORMOM9avAOvSYU5MYbHMy705vg777b2wPHud9nbdIFV5GwoZp8GNqw9xdqv1mcxjPf4PCGmyny/TJ6NPyQn6YX5MYb+T2Rb9vmXS5b9o/t7+ryKBI2VJMPI6dTT9Nubjs+ntGa0xdFwtju/N+SK+i8tARX3pZu54gI74/a30XCmmryYWJBwgKqdK9Kq+mtSF79BMVj5xG3dApxNTtx5a2ZjAes9neRsKeafB53JPkI709/n04LOxFx7EoYM4pm99fh8w0eLjvV5ezt6mp/Fwl7SvJ52LhfxtFs7CvsOJIAi17hmm2f0rvnpdSsCd4vaVloesnitH8ikjcpyedBu4/u5rWJrzFszTDy7atEvrFzeK/R3bSKg4IFQx2diOQmSvJ5iMd56L2kN/+Z3JIjJ4/DzDZUPd2SPuMKUKlSqKMTkdzIrwuvZlbPzFabmcfMotNte9fMNpjZejN7xL8wL1BpRpJcv3c9tfrdT8y4GI5uuI3CfVfQqd77zJ2lBC8imfO3Jr8KeBrokXalmVUEGgCVgCuBqWZ2g3Mu1c/yLhy+MWROzZ/DFw3K0Oa6XXhOXgQTe/HYVY3pNscoWzbUQYpIbufvRN5rAezPvTLqAEOcc8nAZjPbAFQD5vlT3gUlKYl5W+fw4kse1kZtxVb+g2ILO9L1y1LUq6eOMCKSNcHqJ38VsD3N4wTfuj8xsxgzizez+KS0Y6hcwA4nH+bV+I+55/lUfi1YCgaO44WLB/PLklLUr68ELyJZd86avJlNBUplsOk959xofwNwzsUCsQDR0dFnn8boAj
Bq3SheGdecXUd3woLXKLvhI3p1v4wHaiuzi0j2nTPJO+cePI/j7gDSthiX8a2TTOw8spMWE1oQty6OyL23EjE6jpb/rMYHcVC4cKijE5G8KlhdKMcAg8ysPd4Lr9cDC4NUVp7mcR5iF8fScvLbHEs+BdM/49bkt+g9Jj+33x7q6EQkr/MryZtZXaATUAIYb2bLnHOPOOdWm9kwYA2QAryqnjV/tiZpDU3GxDA3YQ75tj5AgUk9+PQ/19GihXc4GRERf/nbu2YkMDKTbW2Btv4cP1wlpyTz2ezPaPvzp5B8CYzvR+1Sjeg+26hQIdTRiUg4UX0xh83aOosmY2JYv38dtvKfFF3Ynm8+jeKf/1SvGREJPCX5HHLw5EHenvI2sUtiyX+0PIyayL/ufpT2S6B4JiMBi4j4S0k+yJxzxK2No/mEFiQeTYR5b1F608f07FKEhx8OdXQiEu6U5IMo4XACr054lTHrxxCZVAVGjeWtBlX5OA6KFAl1dCJyIVCSDySPB5KSSC1+Bd0Wd+fdqa04kZwCU7+i0onX6TUqkujocx9GRCRQlOQDxTeg2Mpf5xDT4CLmX3aEfFseIt/E7rR94xrefBPy5w91kCJyoVGSD5CTu7bzSYFZfPGSg5MRMGIAfynRkNjZxnXXhTo6EblQKckHwMwtM2kyJoYN9zoilv2TS6a1oUPH8jz/gqlbpIiElJK8H/af2E/LKS3pvbQ3+Y9eA3FTqHfLvXzzS0FKllJ2F5HQU5I/D845hq0eRouJ/2bvsX0w521K/PoBPTpfxJNPhjo6EZHfKcln07ZD23hl/CuM/3U8+ZOicSMm0aLe7bSNg0suCXV0IiJ/pCSfRameVDov7Eyrae9x6hQwuQPXH2lB75H5qF491NGJiGRMST4Llu9eTpOxTVi0cxGRWx7DxnWjzWtX07IlFCgQ6uhERDIX/kned4MSUVHZHgHsxOkTfPzTx7Sb246I5CtgzGCqF/0HPWcZN90UpHhFRAIoWHO85g6+G5QoUwZq1fI+zqKpm6ZyS7db+GLOF7DsOQr1Wkv35g34aaYSvIjkHX4leTOrZ2arzcxjZtFp1pc3sxNmtsz3093/UM9DUhLMnQspKd7fWZgofN/xfTw/6nke+v4htm+PgH7TqWO9Wbe0GE2bQkR4fyyKSJjxt7lmFfA00CODbRudc7f7eXz/REVBjRreBF+jhvdxJpxzDFo5iNd+fJ0Dxw/C7FYUW/8+Xb8tTN26OReyiEgg+Tsz1FoAy623dZrBjBl/bJPPoI1+84HNvDyuGZM2TaZA0l14hvekad1b+PwHuPzy0P4JIiL+CGbjQwUzW2pmP5lZzSCWc3YREVCy5O8JPk0bfUrKKb6e+zWVu1Zm2tqfYUInynfvy89Db6Z7dyV4Ecn7zlmTN7OpQKkMNr3nnBudydN2AeWcc/vMrCowyswqOecOZ3D8GCAGoFy5clmP/HykaaNfsmkOTXrcyZK9K8i/8UkY3ZH/HR5IK6pQqHg8UDm4sYiI5IBzJnnn3IPZPahzLhlI9i0vNrONwA1AfAb7xgKxANHR0S67ZWVLVBTH7r2LDwvOpUN1D5E798Co4dxx6dP08txDZebDZZdBxYpBDUNEJKcEpbnGzEqYWT7f8jXA9cCmYJSVHZM2TqZynR18fbcjYvlL5O+xhk4v/505syOofGAWrFwJ+/erC42IhA2/LryaWV2gE1ACGG9my5xzjwB/AVqb2WnAAzRzzu33O9rzlHQsiTcmvcHAlQMpdPRGGP4Tj97yF7ouhbJlz+wVCZXVRCMi4cXf3jUjgZEZrB8BjPDn2IHgnKP/8v68OelNDp44gs36gEvWtKJfx4LUr5/tG2BFRPKcsB3WYOP+jTQd15Rpm6dRaE8NPMN70vivFflqOBQrFuroRERyRtgl+dOpp2k/rz0fzfyI1NMFYEJXrjzQlJ5DInjggVBHJyKSs8IqyS/asYgmY5uwPHE5BTfVJXVMJ95uehUffgiFC4c6OhGRnBcWSf7oqaP8b/r/+Hbht+RPLgVxcVQuUpdeM+D220MdnYhI6IRFX8GlO1fwzYJviVjalIiua/i6SV3mz1eCFxEJi5r8qY01cB038EC1CnRfAhUqhDoiEZHcISySfO3aMD2uArVqqVukiEhaYZHkwTvumIiI/FFYtMmLiEjGlORFRMKYkryISBhTkhcRCWNK8iIiYUxJXkQkjCnJi4iEMSV5EZEw5leSN7OvzGydma0ws5Fmdnmabe+a2QYzW29mj/gdqYiIZJu/NfkpQGXn3K3AL8C7AGZWEWgAVAIeBbqemfNVRERyjl9J3jk32TmX4ns4HyjjW64DDHHOJTvnNgMbgGr+lCUiItkXyLFrGgNDfctX4U36ZyT41v2JmcUAMb6HR81svR8xFAf2+vH8YFFc2aO4skdxZU84xnV1ZhvOmeTNbCpQKoNN7znnRvv2eQ9IAQZmNzLnXCwQm93nZcTM4p1z0YE4ViApruxRXNmjuLLnQovrnEneOffg2bab2fPAk0Bt55zzrd4BlE2zWxnfOhERyUH+9q55FGgJPOWcO55m0xiggZkVNLMKwPXAQn/KEhGR7PO3Tb4zUBCYYt7ZOuY755o551ab2TBgDd5mnFedc6l+lpUVAWn2CQLFlT2KK3sUV/ZcUHHZ7y0sIiISbnTHq4hIGFOSFxEJY3kqyZtZPTNbbWYeM4tOt+2cwyiYWQUzW+Dbb6iZFQhSnEPNbJnvZ4uZLctkvy1mttK3X3wwYklX3kdmtiNNbI9nst+jvvO4wczeyYG4Mh0eI91+QT9f5/rbfZ0Jhvq2LzCz8sGII4Nyy5rZDDNb4/sfeC2DfWqZ2aE0r+8HORTbWV8X8/rWd85WmNkdORDTjWnOwzIzO2xmr6fbJ0fOl5n1MbM9ZrYqzbpiZjbFzH71/S6ayXOf8+3zq5k9d14BOOfyzA9wM3AjMBOITrO+IrAc70XgCsBGIF8Gzx8GNPAtdwdezoGYvwY+yGTbFqB4Dp6/j4D/nGOffL7zdw1QwHdeKwY5roeBSN/yF8AXoThfWfnbgVeA7r7lBsDQHHrtSgN3+JYvwTuMSPrYagHjcur9lNXXBXgcmAgYUB1YkMPx5QN2A1eH4nwBfwHuAFalWfcl8I5v+Z2M3vNAMWCT73dR33LR7Jafp2ryzrm1zrmM7og95zAK5u3+8wDwg2/Vd8D/BTHcM2XWBwYHs5wAqwZscM5tcs6dAobgPb9B4zIfHiOnZeVvr4P3vQPe91Jt3+scVM65Xc65Jb7lI8BaMrmLPBeqA/R3XvOBy82sdA6WXxvY6JzbmoNl/sY59zOwP93qtO+jzHLRI8AU59x+59wBvGOFPZrd8vNUkj+Lq4DtaR5nNIzCFcDBNMkk06EWAqgmkOic+zWT7Q6YbGaLfcM75ITmvq/MfTL5ipiVcxlMjfHW+jIS7POVlb/9t31876VDeN9bOcbXRFQFWJDB5rvNbLmZTTSzSjkU0rlel1C/pxqQeUUrFOcLoKRzbpdveTdQMoN9AnLeAjl2TUBYFoZRyA2yGOczn
L0Wf69zboeZReG912Cd71M/KHEB3YA2eP8p2+BtSmrsT3mBiMtlfXiMgJ+vvMbMLgZGAK875w6n27wEb5PEUd/1llF4b0QMtlz7uviuuz2Fb4TcdEJ1vv7AOefMLGh92XNdknfnGEYhE1kZRmEf3q+Jkb4amF9DLZwrTjOLBJ4Gqp7lGDt8v/eY2Ui8zQV+/XNk9fyZWU9gXAabgjIkRRbO1/P8eXiM9McI+PlKJyt/+5l9Enyv8WV431tBZ2b58Sb4gc65uPTb0yZ959wEM+tqZsWdc0EdjCsLr0sohzl5DFjinEtMvyFU58sn0cxKO+d2+Zqu9mSwzw681w3OKIP3emS2hEtzzTmHUfAljhnA332rngOC+c3gQWCdcy4ho41mVsTMLjmzjPfi46qM9g2UdO2gdTMpbxFwvXl7IhXA+1V3TJDjymx4jLT75MT5ysrfPgbvewe876XpmX0oBZKv3b83sNY51z6TfUqduT5gZtXw/n8H9QMoi6/LGKCRr5dNdeBQmqaKYMv023Qozlcaad9HmeWiScDDZlbU17T6sG9d9gT7ynIgf/AmpgQgGUgEJqXZ9h7enhHrgcfSrJ8AXOlbvgZv8t8ADAcKBjHWfkCzdOuuBCakiWW572c13maLYJ+/74GVwArfm6x0+rh8jx/H23tjYw7FtQFv2+My30/39HHl1PnK6G8HWuP9AAIo5HvvbPC9l64J9vnxlXsv3ma2FWnO0+NAszPvM6C579wsx3sBu0YOxJXh65IuLgO6+M7pStL0jAtybEXwJu3L0qzL8fOF90NmF3Dal79exHsdZxrwKzAVKObbNxrolea5jX3vtQ3AC+dTvoY1EBEJY+HSXCMiIhlQkhcRCWNK8iIiYUxJXkQkjCnJi4iEMSV5EZEwpiQvIhLG/h8e03257HPmHwAAAABJRU5ErkJggg==" - }, - "metadata": { - "needs_background": "light" - } - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Parameter (name=fc.weight, shape=(1, 1), dtype=Float32, requires_grad=True) [[2.010547]]\n", - "Parameter (name=fc.bias, shape=(1,), dtype=Float32, requires_grad=True) [2.8695402]\n" - ] - } - ], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "After the training is complete, the weight parameters of the final model are printed. The value of weight is close to 2.0 and the value of bias is close to 3.0. As a result, the model training meets the expectation.\n", - "\n", - "## Summary\n", - "\n", - "We have learned the principles of the linear fitting algorithm, defined the corresponding algorithms in the MindSpore framework, understood the training process of such linear fitting models in MindSpore, and finally fitted a model function close to the objective function. In addition, you can adjust the dataset generation interval from (-10,10) to (-100,100) to check whether the weight values are closer to those of the objective function; adjust the learning rate to check whether the fitting efficiency changes; or explore how to use MindSpore to fit quadratic functions, such as $f(x)=ax^2+bx+c$, or higher-order functions." - ], - "metadata": {} - } - ], - "metadata": { - "kernelspec": { - "display_name": "MindSpore", - "language": "python", - "name": "mindspore" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/tutorials/source_en/optimization.md b/tutorials/source_en/optimization.md deleted file mode 100644 index 21a86bd27f996ead9db8b6cc4344433641969f6f..0000000000000000000000000000000000000000 --- a/tutorials/source_en/optimization.md +++ /dev/null @@ -1,186 +0,0 @@ -# Training the Model - -`Ascend` `GPU` `CPU` `Beginner` `Model Development` - - - -After learning how to create a model and build a dataset in the preceding tutorials, you can start to learn how to set hyperparameters and optimize model parameters. - -## Hyperparameters - -Hyperparameters can be adjusted to control the model training and optimization process. Different hyperparameter values may affect the model training and convergence speed. 
-
-Generally, the following hyperparameters are defined for training:
-
-- Epoch: specifies the number of times the entire dataset is traversed during training.
-- Batch size: specifies the number of samples in each batch of data to be read.
-- Learning rate: controls the step size of each parameter update. If the learning rate is too low, convergence slows down; if it is too high, training may become unstable or fail to converge.
-
-```python
-epochs = 5
-batch_size = 64
-learning_rate = 1e-3
-```
-
-## Loss Functions
-
-The **loss function** is used to evaluate the difference between the **predicted value** and the **actual value** of a model. Here, the mean absolute error loss function `L1Loss` is used. `mindspore.nn.loss` provides many common loss functions, such as `SoftmaxCrossEntropyWithLogits`, `MSELoss`, and `SmoothL1Loss`.
-
-Provide the output value and the target value to compute the loss value as follows:
-
-```python
-import numpy as np
-import mindspore.nn as nn
-from mindspore import Tensor
-
-loss = nn.L1Loss()
-output_data = Tensor(np.array([[1, 2, 3], [2, 3, 4]]).astype(np.float32))
-target_data = Tensor(np.array([[0, 2, 5], [3, 1, 1]]).astype(np.float32))
-print(loss(output_data, target_data))
-```
-
-```text
-1.5
-```
-
-Here the element-wise absolute errors are 1, 0, 2, 1, 2, and 3, so their mean is (1 + 0 + 2 + 1 + 2 + 3) / 6 = 1.5.
-
-## Optimizer
-
-An optimizer is used to compute and apply the gradient updates. The choice of the optimization algorithm directly affects the performance of the final model: a poor result may be caused by the optimization algorithm rather than by the feature engineering or the model design. MindSpore encapsulates all optimization logic in the `Optimizer` object. Here, the Momentum optimizer is used. `mindspore.nn` provides many common optimizers, such as `Adam` and `Momentum`.
-
-You need to build an `Optimizer` object that retains the current parameter state and updates the parameters based on the computed gradients.
-
-To build an `Optimizer`, provide an iterable of the parameters (which must be variable objects) to be optimized. For example, set `params` to `net.trainable_params()` to optimize all trainable `Parameter` objects in the network. Then you can set `Optimizer` options such as the learning rate and the weight decay.
-
-A code example is as follows:
-
-```python
-from mindspore import nn
-
-optim = nn.Momentum(net.trainable_params(), 0.1, 0.9)
-```
-
-## Training
-
-A model training process is generally divided into four steps:
-
-1. Define a neural network.
-2. Build a dataset.
-3. Define hyperparameters, a loss function, and an optimizer (a short optional sketch follows this list).
-4. Pass in the epoch count and the dataset for training.
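-
-As an optional aside for step 3 (an editor's sketch, not part of the original tutorial; `weight_decay` is one of the optimizer options mentioned above, and the tiny `nn.Dense` network exists only so that `trainable_params()` returns something):
-
-```python
-from mindspore import nn
-
-sketch_net = nn.Dense(10, 2)
-sketch_optim = nn.Momentum(sketch_net.trainable_params(), learning_rate=1e-3,
-                           momentum=0.9, weight_decay=1e-5)
-```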
-
-Execute the following command to download and decompress the dataset to the specified location.
-
-```python
-import os
-import requests
-import tarfile
-import zipfile
-
-def download_dataset(url, target_path):
-    """Download a dataset archive from `url` and extract it into `target_path`."""
-    if not os.path.exists(target_path):
-        os.makedirs(target_path)
-    download_file = url.split("/")[-1]
-    if not os.path.exists(download_file):
-        res = requests.get(url, stream=True, verify=False)
-        # Plain files are saved directly into target_path; archives are saved
-        # to the current directory first and extracted below.
-        if download_file.split(".")[-1] not in ["tgz", "zip", "tar", "gz"]:
-            download_file = os.path.join(target_path, download_file)
-        with open(download_file, "wb") as f:
-            for chunk in res.iter_content(chunk_size=512):
-                if chunk:
-                    f.write(chunk)
-    if download_file.endswith("zip"):
-        z = zipfile.ZipFile(download_file, "r")
-        z.extractall(path=target_path)
-        z.close()
-    if download_file.endswith(".tar.gz") or download_file.endswith(".tar") or download_file.endswith(".tgz"):
-        t = tarfile.open(download_file)
-        names = t.getnames()
-        for name in names:
-            t.extract(name, target_path)
-        t.close()
-
-download_dataset("https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/cifar-10-binary.tar.gz", "./datasets")
-```
-
-The code example for model training is as follows:
-
-```python
-import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
-from mindspore import nn, Tensor, Model
-from mindspore import dtype as mstype
-from mindspore.train.callback import LossMonitor
-
-DATA_DIR = "./datasets/cifar-10-batches-bin"
-
-# Define a neural network.
-class Net(nn.Cell):
-    def __init__(self, num_class=10, num_channel=3):
-        super(Net, self).__init__()
-        self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
-        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
-        self.fc1 = nn.Dense(16 * 5 * 5, 120)
-        self.fc2 = nn.Dense(120, 84)
-        self.fc3 = nn.Dense(84, num_class)
-        self.relu = nn.ReLU()
-        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
-        self.flatten = nn.Flatten()
-
-    def construct(self, x):
-        x = self.conv1(x)
-        x = self.relu(x)
-        x = self.max_pool2d(x)
-        x = self.conv2(x)
-        x = self.relu(x)
-        x = self.max_pool2d(x)
-        x = self.flatten(x)
-        x = self.fc1(x)
-        x = self.relu(x)
-        x = self.fc2(x)
-        x = self.relu(x)
-        x = self.fc3(x)
-        return x
-
-net = Net()
-epochs = 5
-batch_size = 64
-learning_rate = 1e-3
-
-# Build a dataset (only the first 128 samples are used here).
-sampler = ds.SequentialSampler(num_samples=128)
-dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler)
-
-# Convert the data types and image layout.
-type_cast_op_image = C.TypeCast(mstype.float32)
-type_cast_op_label = C.TypeCast(mstype.int32)
-HWC2CHW = CV.HWC2CHW()
-dataset = dataset.map(operations=[type_cast_op_image, HWC2CHW], input_columns="image")
-dataset = dataset.map(operations=type_cast_op_label, input_columns="label")
-dataset = dataset.batch(batch_size)
-
-# Define hyperparameters, a loss function, and an optimizer.
-optim = nn.Momentum(net.trainable_params(), learning_rate, 0.9)
-loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-cb = LossMonitor()
-
-# Enter the epoch and dataset for training.
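-# (Model ties the network, loss function, and optimizer together; LossMonitor is a
-# built-in callback that prints the loss value as training progresses.)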
-model = Model(net, loss_fn=loss, optimizer=optim)
-model.train(epoch=epochs, train_dataset=dataset, callbacks=cb)
-```
-
-The output is as follows (with 128 samples and a batch size of 64, each epoch consists of 2 steps):
-
-```text
-epoch: 1 step: 1, loss is 2.3025818
-epoch: 1 step: 2, loss is 2.3025775
-epoch: 2 step: 1, loss is 2.3025408
-epoch: 2 step: 2, loss is 2.3025331
-epoch: 3 step: 1, loss is 2.3024616
-epoch: 3 step: 2, loss is 2.302457
-epoch: 4 step: 1, loss is 2.3023522
-epoch: 4 step: 2, loss is 2.3023558
-epoch: 5 step: 1, loss is 2.3022182
-epoch: 5 step: 2, loss is 2.3022337
-```
diff --git a/tutorials/source_en/tensor.md b/tutorials/source_en/tensor.md
deleted file mode 100644
index 8fbcdca76e4b7594d02411cd0b866ee25501c341..0000000000000000000000000000000000000000
--- a/tutorials/source_en/tensor.md
+++ /dev/null
@@ -1,230 +0,0 @@
-# Tensor
-
-`Ascend` `GPU` `CPU` `Beginner`
-
-The tensor is a basic data structure in MindSpore network computing.
-
-Import the required modules and APIs:
-
-```python
-import numpy as np
-from mindspore import Tensor, context
-from mindspore import dtype as mstype
-context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
-```
-
-## Initializing a Tensor
-
-There are multiple methods for initializing tensors. When building a tensor, you can pass a [Tensor](https://www.mindspore.cn/docs/api/en/master/api_python/mindspore/mindspore.Tensor.html), `float`, `int`, `bool`, `tuple`, `list`, or `NumPy.array` object as the initial value.
-
-- **Generate a tensor based on data.**
-
-You can create a tensor directly from data. The data type can be set explicitly or inferred automatically.
-
-```python
-x = Tensor(0.1)
-```
-
-- **Generate a tensor from a NumPy array.**
-
-You can create a tensor from a NumPy array.
-
-```python
-arr = np.array([1, 0, 1, 0])
-x_np = Tensor(arr)
-```
-
-If the initial value is a `NumPy.array`, the data type of the generated `Tensor` corresponds to that of the `NumPy.array`.
-
-- **Generate a tensor from `init`.**
-
-You can create a tensor by specifying `init`, `shape`, and `dtype`:
-
-- `init`: the initial value. Must be a subclass of [initializer](https://www.mindspore.cn/docs/api/zh-CN/master/api_python/mindspore.common.initializer.html).
-- `shape`: the shape of the tensor. Can be a `list`, a `tuple`, or an `int`.
-- `dtype`: the data type. Must be a [mindspore.dtype](https://www.mindspore.cn/docs/api/zh-CN/master/api_python/mindspore.html#mindspore.dtype).
-
-```python
-from mindspore import Tensor
-from mindspore import set_seed
-from mindspore import dtype as mstype
-from mindspore.common.initializer import One, Normal
-
-set_seed(1)
-
-tensor1 = Tensor(shape=(2, 2), dtype=mstype.float32, init=One())
-tensor2 = Tensor(shape=(2, 2), dtype=mstype.float32, init=Normal())
-print(tensor1)
-print(tensor2)
-```
-
-```text
-[[1. 1.]
- [1. 1.]]
-[[-0.00128023 -0.01392901]
- [ 0.0130886  -0.00107818]]
-```
-
-`init` is mainly used for delayed initialization in parallel mode; in other scenarios, initializing parameters through the `init` interface is generally not recommended.
-
-- **Inherit attributes of another tensor to form a new tensor.**
-
-```python
-from mindspore import ops
-
-oneslike = ops.OnesLike()
-x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
-output = oneslike(x)
-print(output)
-```
-
-```text
-[[1 1]
- [1 1]]
-```
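-
-As a quick aside (an editor's sketch; the exact default dtypes may vary across MindSpore versions), the inferred data types from the examples above can be checked directly:
-
-```python
-import numpy as np
-from mindspore import Tensor
-
-print(Tensor(0.1).dtype)                     # dtype inferred from a Python float
-print(Tensor(np.array([1, 0, 1, 0])).dtype)  # dtype inherited from the NumPy array
-```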
-
-- **Output a constant tensor of a specified size.**
-
-`shape` is the size tuple of the tensor and determines the dimensions of the output tensor.
-
-```python
-import mindspore.ops as ops
-
-shape = (2, 2)
-ones = ops.Ones()
-output = ones(shape, mstype.float32)
-print(output)
-
-zeros = ops.Zeros()
-output = zeros(shape, mstype.float32)
-print(output)
-```
-
-```text
-[[1. 1.]
- [1. 1.]]
-[[0. 0.]
- [0. 0.]]
-```
-
-During `Tensor` initialization, the data type can be specified, for example, as `mstype.int32`, `mstype.float32`, or `mstype.bool_`.
-
-## Tensor Attributes
-
-Tensor attributes include the shape and the data type (dtype).
-
-- shape: a tuple
-- dtype: a MindSpore data type
-
-```python
-t1 = Tensor(np.zeros([1, 2, 3]), mstype.float32)
-print("Datatype of tensor: {}".format(t1.dtype))
-print("Shape of tensor: {}".format(t1.shape))
-```
-
-```text
-Datatype of tensor: Float32
-Shape of tensor: (1, 2, 3)
-```
-
-## Tensor Operations
-
-There are many operations on tensors, including arithmetic, linear algebra, matrix processing (transposing, indexing, and slicing), and sampling. The following describes several of them; the usage of tensor computation is similar to that of NumPy.
-
-Indexing and slicing operations are similar to NumPy:
-
-```python
-tensor = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))
-print("First row: {}".format(tensor[0]))
-print("First column: {}".format(tensor[:, 0]))
-print("Last column: {}".format(tensor[..., -1]))
-```
-
-```text
-First row: [0. 1.]
-First column: [0. 2.]
-Last column: [1. 3.]
-```
-
-`Concat` concatenates a series of tensors along a given dimension.
-
-```python
-data1 = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))
-data2 = Tensor(np.array([[4, 5], [6, 7]]).astype(np.float32))
-op = ops.Concat()
-output = op((data1, data2))
-print(output)
-```
-
-```text
-[[0. 1.]
- [2. 3.]
- [4. 5.]
- [6. 7.]]
-```
-
-`Stack` stacks two tensors along a new dimension.
-
-```python
-data1 = Tensor(np.array([[0, 1], [2, 3]]).astype(np.float32))
-data2 = Tensor(np.array([[4, 5], [6, 7]]).astype(np.float32))
-op = ops.Stack()
-output = op([data1, data2])
-print(output)
-```
-
-```text
-[[[0. 1.]
-  [2. 3.]]
-
- [[4. 5.]
-  [6. 7.]]]
-```
-
-Common computation:
-
-```python
-input_x = Tensor(np.array([1.0, 2.0, 3.0]), mstype.float32)
-input_y = Tensor(np.array([4.0, 5.0, 6.0]), mstype.float32)
-mul = ops.Mul()
-output = mul(input_x, input_y)
-print(output)
-```
-
-```text
-[ 4. 10. 18.]
-```
-
-## Conversion Between Tensor and NumPy
-
-Tensors and NumPy arrays can be converted to each other.
-
-### Tensor to NumPy
-
-```python
-zeros = ops.Zeros()
-output = zeros((2, 2), mstype.float32)
-print("output: {}".format(type(output)))
-n_output = output.asnumpy()
-print("n_output: {}".format(type(n_output)))
-```
-
-```text
-output: <class 'mindspore.common.tensor.Tensor'>
-n_output: <class 'numpy.ndarray'>
-```
-
-### NumPy to Tensor
-
-```python
-output = np.array([1, 0, 1, 0])
-print("output: {}".format(type(output)))
-t_output = Tensor(output)
-print("t_output: {}".format(type(t_output)))
-```
-
-```text
-output: <class 'numpy.ndarray'>
-t_output: <class 'mindspore.common.tensor.Tensor'>
-```
diff --git a/tutorials/source_zh_cn/index.rst b/tutorials/source_zh_cn/index.rst
index fdcb4181c4c9ced00049ab1da747a2c040c85fa0..bcba1642a357024792f865d5903006578f27f6a6 100644
--- a/tutorials/source_zh_cn/index.rst
+++ b/tutorials/source_zh_cn/index.rst
@@ -10,7 +10,6 @@ MindSpore教程
    :glob:
    :maxdepth: 1
    :caption: 初学教程
-   :hidden:
 
    beginner/introduction
    beginner/quick_start