diff --git a/api/source_en/_static/logo_source.png b/api/source_en/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/api/source_en/_static/logo_source.png and b/api/source_en/_static/logo_source.png differ diff --git a/api/source_en/api/python/mindspore/mindspore.dataset.config.rst b/api/source_en/api/python/mindspore/mindspore.dataset.config.rst index 55cf3631f462bd579c8b5764b0885a0a10b4b480..459497b6a090401cdc3bb53233a4d41671d2aeca 100644 --- a/api/source_en/api/python/mindspore/mindspore.dataset.config.rst +++ b/api/source_en/api/python/mindspore/mindspore.dataset.config.rst @@ -3,3 +3,4 @@ mindspore.dataset.config .. automodule:: mindspore.dataset.config :members: + \ No newline at end of file diff --git a/api/source_en/api/python/mindspore/mindspore.dtype.rst b/api/source_en/api/python/mindspore/mindspore.dtype.rst deleted file mode 100644 index ecedea971844071ff47fa5505b9c852b5e77ff1f..0000000000000000000000000000000000000000 --- a/api/source_en/api/python/mindspore/mindspore.dtype.rst +++ /dev/null @@ -1,111 +0,0 @@ -mindspore.dtype -=============== - -Data Type ----------- - -.. class:: mindspore.dtype - -Create a data type object of MindSpore. - -The actual path of ``dtype`` is ``/mindspore/common/dtype.py``. -Run the following command to import the package: - -.. code-block:: - - import mindspore.common.dtype as mstype - -or - -.. code-block:: - - from mindspore import dtype as mstype - -Numeric Type -~~~~~~~~~~~~ - -Currently, MindSpore supports ``Int`` type, ``Uint`` type and ``Float`` type. -The following table lists the details. - -============================================== ============================= -Definition Description -============================================== ============================= -``mindspore.int8`` , ``mindspore.byte`` 8-bit integer -``mindspore.int16`` , ``mindspore.short`` 16-bit integer -``mindspore.int32`` , ``mindspore.intc`` 32-bit integer -``mindspore.int64`` , ``mindspore.intp`` 64-bit integer -``mindspore.uint8`` , ``mindspore.ubyte`` unsigned 8-bit integer -``mindspore.uint16`` , ``mindspore.ushort`` unsigned 16-bit integer -``mindspore.uint32`` , ``mindspore.uintc`` unsigned 32-bit integer -``mindspore.uint64`` , ``mindspore.uintp`` unsigned 64-bit integer -``mindspore.float16`` , ``mindspore.half`` 16-bit floating-point number -``mindspore.float32`` , ``mindspore.single`` 32-bit floating-point number -``mindspore.float64`` , ``mindspore.double`` 64-bit floating-point number -============================================== ============================= - -Other Type -~~~~~~~~~~ - -For other defined types, see the following table. - -============================ ================= -Type Description -============================ ================= -``tensor`` MindSpore's ``tensor`` type. Data format uses NCHW. For details, see [tensor](https://www.gitee.com/mindspore/mindspore/blob/master/mindspore/common/tensor.py). -``MetaTensor`` A tensor only has data type and shape. For details, see [MetaTensor](https://www.gitee.com/mindspore/mindspore/blob/master/mindspore/common/parameter.py). -``bool_`` Boolean ``True`` or ``False``. -``int_`` Integer scalar. -``uint`` Unsigned integer scalar. -``float_`` Floating-point scalar. -``number`` Number, including ``int_`` , ``uint`` , ``float_`` and ``bool_`` . -``list_`` List constructed by ``tensor`` , such as ``List[T0,T1,...,Tn]`` , where the element ``Ti`` can be of different types. 
-``tuple_`` Tuple constructed by ``tensor`` , such as ``Tuple[T0,T1,...,Tn]`` , where the element ``Ti`` can be of different types. -``function`` Function. Return in two ways, when function is not None, returns Func directly, the other returns Func(args: List[T0,T1,...,Tn], retval: T) when function is None. -``type_type`` Type definition of type. -``type_none`` No matching return type, corresponding to the ``type(None)`` in Python. -``symbolic_key`` The value of a variable is used as a key of the variable in ``env_type`` . -``env_type`` Used to store the gradient of the free variable of a function, where the key is the ``symbolic_key`` of the free variable's node and the value is the gradient. -============================ ================= - -Tree Topology -~~~~~~~~~~~~~~ - -The relationships of the above types are as follows: - -.. code-block:: - - - └─────── number - │ ├─── bool_ - │ ├─── int_ - │ │ ├─── int8, byte - │ │ ├─── int16, short - │ │ ├─── int32, intc - │ │ └─── int64, intp - │ ├─── uint - │ │ ├─── uint8, ubyte - │ │ ├─── uint16, ushort - │ │ ├─── uint32, uintc - │ │ └─── uint64, uintp - │ └─── float_ - │ ├─── float16 - │ ├─── float32 - │ └─── float64 - ├─── tensor - │ ├─── Array[Float32] - │ └─── ... - ├─── list_ - │ ├─── List[Int32,Float32] - │ └─── ... - ├─── tuple_ - │ ├─── Tuple[Int32,Float32] - │ └─── ... - ├─── function - │ ├─── Func - │ ├─── Func[(Int32, Float32), Int32] - │ └─── ... - ├─── MetaTensor - ├─── type_type - ├─── type_none - ├─── symbolic_key - └─── env_type \ No newline at end of file diff --git a/api/source_en/api/python/mindspore/mindspore.hub.rst b/api/source_en/api/python/mindspore/mindspore.hub.rst deleted file mode 100644 index 458c704fc392ff12901a1324b719303c5098eeee..0000000000000000000000000000000000000000 --- a/api/source_en/api/python/mindspore/mindspore.hub.rst +++ /dev/null @@ -1,4 +0,0 @@ -mindspore.hub -============= - -.. autofunction:: mindspore.hub.load_weights diff --git a/api/source_en/api/python/mindspore/mindspore.nn.learning_rate_schedule.rst b/api/source_en/api/python/mindspore/mindspore.nn.learning_rate_schedule.rst deleted file mode 100644 index 48f5b81586873ef714dbb7f583fa1c5fc22307d3..0000000000000000000000000000000000000000 --- a/api/source_en/api/python/mindspore/mindspore.nn.learning_rate_schedule.rst +++ /dev/null @@ -1,5 +0,0 @@ -mindspore.nn.learning_rate_schedule -=================================== - -.. automodule:: mindspore.nn.learning_rate_schedule - :members: \ No newline at end of file diff --git a/api/source_en/api/python/mindspore/mindspore.ops.composite.rst b/api/source_en/api/python/mindspore/mindspore.ops.composite.rst deleted file mode 100644 index 4dc22f1dcf4fc899a211b5d1ec7114bea7680aa5..0000000000000000000000000000000000000000 --- a/api/source_en/api/python/mindspore/mindspore.ops.composite.rst +++ /dev/null @@ -1,5 +0,0 @@ -mindspore.ops.composite -======================= - -.. automodule:: mindspore.ops.composite - :members: diff --git a/api/source_en/api/python/mindspore/mindspore.ops.operations.rst b/api/source_en/api/python/mindspore/mindspore.ops.operations.rst deleted file mode 100644 index 29bf49176bf455593d3398d8e2f1af17ebfe21a4..0000000000000000000000000000000000000000 --- a/api/source_en/api/python/mindspore/mindspore.ops.operations.rst +++ /dev/null @@ -1,5 +0,0 @@ -mindspore.ops.operations -======================== - -.. 
automodule:: mindspore.ops.operations - :members: diff --git a/api/source_en/api/python/mindspore/mindspore.rst b/api/source_en/api/python/mindspore/mindspore.rst index 44c49e3df3e08d66f6f8d54c23891de30b85a922..716e23d0611e24802cf9c2c71fe5967ee58ef7d6 100644 --- a/api/source_en/api/python/mindspore/mindspore.rst +++ b/api/source_en/api/python/mindspore/mindspore.rst @@ -1,5 +1,109 @@ mindspore ========= +.. class:: mindspore.dtype + + Create a data type object of MindSpore. + + The actual path of ``dtype`` is ``/mindspore/common/dtype.py``. + Run the following command to import the package: + + .. code-block:: + + import mindspore.common.dtype as mstype + + or + + .. code-block:: + + from mindspore import dtype as mstype + + * **Numeric Type** + + Currently, MindSpore supports ``Int`` type, ``Uint`` type and ``Float`` type. + The following table lists the details. + + ============================================== ============================= + Definition Description + ============================================== ============================= + ``mindspore.int8`` , ``mindspore.byte`` 8-bit integer + ``mindspore.int16`` , ``mindspore.short`` 16-bit integer + ``mindspore.int32`` , ``mindspore.intc`` 32-bit integer + ``mindspore.int64`` , ``mindspore.intp`` 64-bit integer + ``mindspore.uint8`` , ``mindspore.ubyte`` unsigned 8-bit integer + ``mindspore.uint16`` , ``mindspore.ushort`` unsigned 16-bit integer + ``mindspore.uint32`` , ``mindspore.uintc`` unsigned 32-bit integer + ``mindspore.uint64`` , ``mindspore.uintp`` unsigned 64-bit integer + ``mindspore.float16`` , ``mindspore.half`` 16-bit floating-point number + ``mindspore.float32`` , ``mindspore.single`` 32-bit floating-point number + ``mindspore.float64`` , ``mindspore.double`` 64-bit floating-point number + ============================================== ============================= + + * **Other Type** + + For other defined types, see the following table. + + ============================ ================= + Type Description + ============================ ================= + ``tensor`` MindSpore's ``tensor`` type. Data format uses NCHW. For details, see [tensor](https://www.gitee.com/mindspore/mindspore/blob/master/mindspore/common/tensor.py). + ``MetaTensor`` A tensor only has data type and shape. For details, see [MetaTensor](https://www.gitee.com/mindspore/mindspore/blob/master/mindspore/common/parameter.py). + ``bool_`` Boolean ``True`` or ``False``. + ``int_`` Integer scalar. + ``uint`` Unsigned integer scalar. + ``float_`` Floating-point scalar. + ``number`` Number, including ``int_`` , ``uint`` , ``float_`` and ``bool_`` . + ``list_`` List constructed by ``tensor`` , such as ``List[T0,T1,...,Tn]`` , where the element ``Ti`` can be of different types. + ``tuple_`` Tuple constructed by ``tensor`` , such as ``Tuple[T0,T1,...,Tn]`` , where the element ``Ti`` can be of different types. + ``function`` Function. Return in two ways, when function is not None, returns Func directly, the other returns Func(args: List[T0,T1,...,Tn], retval: T) when function is None. + ``type_type`` Type definition of type. + ``type_none`` No matching return type, corresponding to the ``type(None)`` in Python. + ``symbolic_key`` The value of a variable is used as a key of the variable in ``env_type`` . + ``env_type`` Used to store the gradient of the free variable of a function, where the key is the ``symbolic_key`` of the free variable's node and the value is the gradient. 
+ ============================ ================= + + * **Tree Topology** + + The relationships of the above types are as follows: + + .. code-block:: + + + └─────── number + │ ├─── bool_ + │ ├─── int_ + │ │ ├─── int8, byte + │ │ ├─── int16, short + │ │ ├─── int32, intc + │ │ └─── int64, intp + │ ├─── uint + │ │ ├─── uint8, ubyte + │ │ ├─── uint16, ushort + │ │ ├─── uint32, uintc + │ │ └─── uint64, uintp + │ └─── float_ + │ ├─── float16 + │ ├─── float32 + │ └─── float64 + ├─── tensor + │ ├─── Array[Float32] + │ └─── ... + ├─── list_ + │ ├─── List[Int32,Float32] + │ └─── ... + ├─── tuple_ + │ ├─── Tuple[Int32,Float32] + │ └─── ... + ├─── function + │ ├─── Func + │ ├─── Func[(Int32, Float32), Int32] + │ └─── ... + ├─── MetaTensor + ├─── type_type + ├─── type_none + ├─── symbolic_key + └─── env_type + .. automodule:: mindspore - :members: \ No newline at end of file + :members: + :exclude-members: Model, DatasetHelper, connect_network_with_dataset \ No newline at end of file diff --git a/api/source_en/api/python/mindspore/mindspore.train.rst b/api/source_en/api/python/mindspore/mindspore.train.rst index eb6753e672430d68f149e034791d0d7443125a78..3d24633055440776b8db533368c656d7a7a18fce 100644 --- a/api/source_en/api/python/mindspore/mindspore.train.rst +++ b/api/source_en/api/python/mindspore/mindspore.train.rst @@ -1,6 +1,18 @@ mindspore.train =============== +mindspore.train.model +--------------------- + +.. automodule:: mindspore.train.model + :members: + +mindspore.train.dataset_helper +------------------------------ + +.. automodule:: mindspore.train.dataset_helper + :members: + mindspore.train.summary ----------------------- diff --git a/api/source_en/index.rst b/api/source_en/index.rst index 12f19eaff2f8e9bdeec2f0977238dfd4d1be8238..906c16348330b1c84b432a8c697fb88587a76665 100644 --- a/api/source_en/index.rst +++ b/api/source_en/index.rst @@ -11,26 +11,22 @@ MindSpore API :caption: MindSpore Python API api/python/mindspore/mindspore - api/python/mindspore/mindspore.dtype api/python/mindspore/mindspore.common.initializer api/python/mindspore/mindspore.communication api/python/mindspore/mindspore.context - api/python/mindspore/mindspore.hub - api/python/mindspore/mindspore.nn - api/python/mindspore/mindspore.nn.dynamic_lr - api/python/mindspore/mindspore.nn.learning_rate_schedule - api/python/mindspore/mindspore.nn.probability - api/python/mindspore/mindspore.ops - api/python/mindspore/mindspore.ops.composite - api/python/mindspore/mindspore.ops.operations - api/python/mindspore/mindspore.train api/python/mindspore/mindspore.dataset api/python/mindspore/mindspore.dataset.config api/python/mindspore/mindspore.dataset.text api/python/mindspore/mindspore.dataset.transforms api/python/mindspore/mindspore.dataset.vision api/python/mindspore/mindspore.mindrecord + api/python/mindspore/mindspore.nn + api/python/mindspore/mindspore.nn.dynamic_lr + api/python/mindspore/mindspore.nn.probability + api/python/mindspore/mindspore.ops api/python/mindspore/mindspore.profiler + api/python/mindspore/mindspore.train + .. 
toctree:: :maxdepth: 1 diff --git a/api/source_zh_cn/_static/logo_source.png b/api/source_zh_cn/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/api/source_zh_cn/_static/logo_source.png and b/api/source_zh_cn/_static/logo_source.png differ diff --git a/api/source_zh_cn/api/python/mindspore/mindspore.dataset.config.rst b/api/source_zh_cn/api/python/mindspore/mindspore.dataset.config.rst index 55cf3631f462bd579c8b5764b0885a0a10b4b480..459497b6a090401cdc3bb53233a4d41671d2aeca 100644 --- a/api/source_zh_cn/api/python/mindspore/mindspore.dataset.config.rst +++ b/api/source_zh_cn/api/python/mindspore/mindspore.dataset.config.rst @@ -3,3 +3,4 @@ mindspore.dataset.config .. automodule:: mindspore.dataset.config :members: + \ No newline at end of file diff --git a/api/source_zh_cn/api/python/mindspore/mindspore.dtype.rst b/api/source_zh_cn/api/python/mindspore/mindspore.dtype.rst deleted file mode 100644 index 633cd1e23e5c3d54077db437deb063c78aa9a9a2..0000000000000000000000000000000000000000 --- a/api/source_zh_cn/api/python/mindspore/mindspore.dtype.rst +++ /dev/null @@ -1,112 +0,0 @@ -mindspore.dtype -=============== - -Data Type ----------- - -.. class:: mindspore.dtype - -Create a data type object of MindSpore. - -The actual path of ``dtype`` is ``/mindspore/common/dtype.py``. -Run the following command to import the package: - -.. code-block:: - - import mindspore.common.dtype as mstype - -or - -.. code-block:: - - from mindspore import dtype as mstype - -Numeric Type -~~~~~~~~~~~~ - -Currently, MindSpore supports ``Int`` type, ``Uint`` type and ``Float`` type. -The following table lists the details. - -============================================== ============================= -Definition Description -============================================== ============================= -``mindspore.int8`` , ``mindspore.byte`` 8-bit integer -``mindspore.int16`` , ``mindspore.short`` 16-bit integer -``mindspore.int32`` , ``mindspore.intc`` 32-bit integer -``mindspore.int64`` , ``mindspore.intp`` 64-bit integer -``mindspore.uint8`` , ``mindspore.ubyte`` unsigned 8-bit integer -``mindspore.uint16`` , ``mindspore.ushort`` unsigned 16-bit integer -``mindspore.uint32`` , ``mindspore.uintc`` unsigned 32-bit integer -``mindspore.uint64`` , ``mindspore.uintp`` unsigned 64-bit integer -``mindspore.float16`` , ``mindspore.half`` 16-bit floating-point number -``mindspore.float32`` , ``mindspore.single`` 32-bit floating-point number -``mindspore.float64`` , ``mindspore.double`` 64-bit floating-point number -============================================== ============================= - -Other Type -~~~~~~~~~~ - -For other defined types, see the following table. - -============================ ================= -Type Description -============================ ================= -``tensor`` MindSpore's ``tensor`` type. Data format uses NCHW. For details, see [tensor](https://www.gitee.com/mindspore/mindspore/blob/master/mindspore/common/tensor.py). -``MetaTensor`` A tensor only has data type and shape. For details, see [MetaTensor](https://www.gitee.com/mindspore/mindspore/blob/master/mindspore/common/parameter.py). -``bool_`` Boolean ``True`` or ``False``. -``int_`` Integer scalar. -``uint`` Unsigned integer scalar. -``float_`` Floating-point scalar. -``number`` Number, including ``int_`` , ``uint`` , ``float_`` and ``bool_`` . 
-``list_`` List constructed by ``tensor`` , such as ``List[T0,T1,...,Tn]`` , where the element ``Ti`` can be of different types. -``tuple_`` Tuple constructed by ``tensor`` , such as ``Tuple[T0,T1,...,Tn]`` , where the element ``Ti`` can be of different types. -``function`` Function. Return in two ways, when function is not None, returns Func directly, the other returns Func(args: List[T0,T1,...,Tn], retval: T) when function is None. -``type_type`` Type definition of type. -``type_none`` No matching return type, corresponding to the ``type(None)`` in Python. -``symbolic_key`` The value of a variable is used as a key of the variable in ``env_type`` . -``env_type`` Used to store the gradient of the free variable of a function, where the key is the ``symbolic_key`` of the free variable's node and the value is the gradient. -============================ ================= - -Tree Topology -~~~~~~~~~~~~~~ - -The relationships of the above types are as follows: - -.. code-block:: - - - └─── mindspore.dtype - ├─── number - │ ├─── bool_ - │ ├─── int_ - │ │ ├─── int8, byte - │ │ ├─── int16, short - │ │ ├─── int32, intc - │ │ └─── int64, intp - │ ├─── uint - │ │ ├─── uint8, ubyte - │ │ ├─── uint16, ushort - │ │ ├─── uint32, uintc - │ │ └─── uint64, uintp - │ └─── float_ - │ ├─── float16 - │ ├─── float32 - │ └─── float64 - ├─── tensor - │ ├─── Array[float32] - │ └─── ... - ├─── list_ - │ ├─── List[int32,float32] - │ └─── ... - ├─── tuple_ - │ ├─── Tuple[int32,float32] - │ └─── ... - ├─── function - │ ├─── Func - │ ├─── Func[(int32, float32), int32] - │ └─── ... - ├─── MetaTensor - ├─── type_type - ├─── type_none - ├─── symbolic_key - └─── env_type \ No newline at end of file diff --git a/api/source_zh_cn/api/python/mindspore/mindspore.hub.rst b/api/source_zh_cn/api/python/mindspore/mindspore.hub.rst deleted file mode 100644 index 458c704fc392ff12901a1324b719303c5098eeee..0000000000000000000000000000000000000000 --- a/api/source_zh_cn/api/python/mindspore/mindspore.hub.rst +++ /dev/null @@ -1,4 +0,0 @@ -mindspore.hub -============= - -.. autofunction:: mindspore.hub.load_weights diff --git a/api/source_zh_cn/api/python/mindspore/mindspore.nn.learning_rate_schedule.rst b/api/source_zh_cn/api/python/mindspore/mindspore.nn.learning_rate_schedule.rst deleted file mode 100644 index c95a61428fb8e91b830a469c5162bf61b07710b4..0000000000000000000000000000000000000000 --- a/api/source_zh_cn/api/python/mindspore/mindspore.nn.learning_rate_schedule.rst +++ /dev/null @@ -1,5 +0,0 @@ -mindspore.nn.learning_rate_schedule -=================================== - -.. automodule:: mindspore.nn.learning_rate_schedule - :members: diff --git a/api/source_zh_cn/api/python/mindspore/mindspore.ops.composite.rst b/api/source_zh_cn/api/python/mindspore/mindspore.ops.composite.rst deleted file mode 100644 index 4dc22f1dcf4fc899a211b5d1ec7114bea7680aa5..0000000000000000000000000000000000000000 --- a/api/source_zh_cn/api/python/mindspore/mindspore.ops.composite.rst +++ /dev/null @@ -1,5 +0,0 @@ -mindspore.ops.composite -======================= - -.. automodule:: mindspore.ops.composite - :members: diff --git a/api/source_zh_cn/api/python/mindspore/mindspore.ops.operations.rst b/api/source_zh_cn/api/python/mindspore/mindspore.ops.operations.rst deleted file mode 100644 index 29bf49176bf455593d3398d8e2f1af17ebfe21a4..0000000000000000000000000000000000000000 --- a/api/source_zh_cn/api/python/mindspore/mindspore.ops.operations.rst +++ /dev/null @@ -1,5 +0,0 @@ -mindspore.ops.operations -======================== - -.. 
automodule:: mindspore.ops.operations - :members: diff --git a/api/source_zh_cn/api/python/mindspore/mindspore.rst b/api/source_zh_cn/api/python/mindspore/mindspore.rst index 44c49e3df3e08d66f6f8d54c23891de30b85a922..5462d2c3f9274505ef70c3fcd0b3e36e727bfcc0 100644 --- a/api/source_zh_cn/api/python/mindspore/mindspore.rst +++ b/api/source_zh_cn/api/python/mindspore/mindspore.rst @@ -1,5 +1,109 @@ mindspore ========= +.. class:: mindspore.dtype + + Create a data type object of MindSpore. + + The actual path of ``dtype`` is ``/mindspore/common/dtype.py``. + Run the following command to import the package: + + .. code-block:: + + import mindspore.common.dtype as mstype + + or + + .. code-block:: + + from mindspore import dtype as mstype + + * **Numeric Type** + + Currently, MindSpore supports ``Int`` type, ``Uint`` type and ``Float`` type. + The following table lists the details. + + ============================================== ============================= + Definition Description + ============================================== ============================= + ``mindspore.int8`` , ``mindspore.byte`` 8-bit integer + ``mindspore.int16`` , ``mindspore.short`` 16-bit integer + ``mindspore.int32`` , ``mindspore.intc`` 32-bit integer + ``mindspore.int64`` , ``mindspore.intp`` 64-bit integer + ``mindspore.uint8`` , ``mindspore.ubyte`` unsigned 8-bit integer + ``mindspore.uint16`` , ``mindspore.ushort`` unsigned 16-bit integer + ``mindspore.uint32`` , ``mindspore.uintc`` unsigned 32-bit integer + ``mindspore.uint64`` , ``mindspore.uintp`` unsigned 64-bit integer + ``mindspore.float16`` , ``mindspore.half`` 16-bit floating-point number + ``mindspore.float32`` , ``mindspore.single`` 32-bit floating-point number + ``mindspore.float64`` , ``mindspore.double`` 64-bit floating-point number + ============================================== ============================= + + * **Other Type** + + For other defined types, see the following table. + + ============================ ================= + Type Description + ============================ ================= + ``tensor`` MindSpore's ``tensor`` type. Data format uses NCHW. For details, see [tensor](https://www.gitee.com/mindspore/mindspore/blob/master/mindspore/common/tensor.py). + ``MetaTensor`` A tensor only has data type and shape. For details, see [MetaTensor](https://www.gitee.com/mindspore/mindspore/blob/master/mindspore/common/parameter.py). + ``bool_`` Boolean ``True`` or ``False``. + ``int_`` Integer scalar. + ``uint`` Unsigned integer scalar. + ``float_`` Floating-point scalar. + ``number`` Number, including ``int_`` , ``uint`` , ``float_`` and ``bool_`` . + ``list_`` List constructed by ``tensor`` , such as ``List[T0,T1,...,Tn]`` , where the element ``Ti`` can be of different types. + ``tuple_`` Tuple constructed by ``tensor`` , such as ``Tuple[T0,T1,...,Tn]`` , where the element ``Ti`` can be of different types. + ``function`` Function. Return in two ways, when function is not None, returns Func directly, the other returns Func(args: List[T0,T1,...,Tn], retval: T) when function is None. + ``type_type`` Type definition of type. + ``type_none`` No matching return type, corresponding to the ``type(None)`` in Python. + ``symbolic_key`` The value of a variable is used as a key of the variable in ``env_type`` . + ``env_type`` Used to store the gradient of the free variable of a function, where the key is the ``symbolic_key`` of the free variable's node and the value is the gradient. 
+ ============================ ================= + + * **Tree Topology** + + The relationships of the above types are as follows: + + .. code-block:: + + + └─────── number + │ ├─── bool_ + │ ├─── int_ + │ │ ├─── int8, byte + │ │ ├─── int16, short + │ │ ├─── int32, intc + │ │ └─── int64, intp + │ ├─── uint + │ │ ├─── uint8, ubyte + │ │ ├─── uint16, ushort + │ │ ├─── uint32, uintc + │ │ └─── uint64, uintp + │ └─── float_ + │ ├─── float16 + │ ├─── float32 + │ └─── float64 + ├─── tensor + │ ├─── Array[Float32] + │ └─── ... + ├─── list_ + │ ├─── List[Int32,Float32] + │ └─── ... + ├─── tuple_ + │ ├─── Tuple[Int32,Float32] + │ └─── ... + ├─── function + │ ├─── Func + │ ├─── Func[(Int32, Float32), Int32] + │ └─── ... + ├─── MetaTensor + ├─── type_type + ├─── type_none + ├─── symbolic_key + └─── env_type + .. automodule:: mindspore - :members: \ No newline at end of file + :members: + :exclude-members: Model, DatasetHelper, connect_network_with_dataset \ No newline at end of file diff --git a/api/source_zh_cn/api/python/mindspore/mindspore.train.rst b/api/source_zh_cn/api/python/mindspore/mindspore.train.rst index eb6753e672430d68f149e034791d0d7443125a78..3d24633055440776b8db533368c656d7a7a18fce 100644 --- a/api/source_zh_cn/api/python/mindspore/mindspore.train.rst +++ b/api/source_zh_cn/api/python/mindspore/mindspore.train.rst @@ -1,6 +1,18 @@ mindspore.train =============== +mindspore.train.model +--------------------- + +.. automodule:: mindspore.train.model + :members: + +mindspore.train.dataset_helper +------------------------------ + +.. automodule:: mindspore.train.dataset_helper + :members: + mindspore.train.summary ----------------------- diff --git a/api/source_zh_cn/index.rst b/api/source_zh_cn/index.rst index 502e8495e2162735d542889ada4167c8e27fbf6d..1e2ac4a7dad79b521b683adb9c3acdcef010ecbc 100644 --- a/api/source_zh_cn/index.rst +++ b/api/source_zh_cn/index.rst @@ -11,32 +11,34 @@ MindSpore API :caption: 编程指南 programming_guide/api_structure + programming_guide/data_type + programming_guide/compute_component + programming_guide/data_pipeline + programming_guide/execution_management + programming_guide/auto_parallel + programming_guide/advanced_use + .. toctree:: :maxdepth: 1 :caption: MindSpore Python API api/python/mindspore/mindspore - api/python/mindspore/mindspore.dtype api/python/mindspore/mindspore.common.initializer api/python/mindspore/mindspore.communication api/python/mindspore/mindspore.context - api/python/mindspore/mindspore.hub - api/python/mindspore/mindspore.nn - api/python/mindspore/mindspore.nn.dynamic_lr - api/python/mindspore/mindspore.nn.learning_rate_schedule - api/python/mindspore/mindspore.nn.probability - api/python/mindspore/mindspore.ops - api/python/mindspore/mindspore.ops.composite - api/python/mindspore/mindspore.ops.operations - api/python/mindspore/mindspore.train api/python/mindspore/mindspore.dataset api/python/mindspore/mindspore.dataset.config api/python/mindspore/mindspore.dataset.text api/python/mindspore/mindspore.dataset.transforms api/python/mindspore/mindspore.dataset.vision api/python/mindspore/mindspore.mindrecord + api/python/mindspore/mindspore.nn + api/python/mindspore/mindspore.nn.dynamic_lr + api/python/mindspore/mindspore.nn.probability + api/python/mindspore/mindspore.ops api/python/mindspore/mindspore.profiler + api/python/mindspore/mindspore.train .. 
toctree:: :maxdepth: 1 diff --git a/api/source_zh_cn/programming_guide/advanced_use.rst b/api/source_zh_cn/programming_guide/advanced_use.rst new file mode 100644 index 0000000000000000000000000000000000000000..44d483602dcf4a8e700d770a31ce2a448854cb92 --- /dev/null +++ b/api/source_zh_cn/programming_guide/advanced_use.rst @@ -0,0 +1,12 @@ +进阶用法 +=========== + +.. toctree:: + :maxdepth: 1 + + train + infer + performance_optimization + user_defined + security_and_privacy + extension \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/api_structure.md b/api/source_zh_cn/programming_guide/api_structure.md index 9a42ef664223fdccb211cc09fa9034ce1f1a83a7..bf7c1f392ef2356309c94220d9f9ab50aa53dd11 100644 --- a/api/source_zh_cn/programming_guide/api_structure.md +++ b/api/source_zh_cn/programming_guide/api_structure.md @@ -3,6 +3,7 @@ - [MindSpore API概述](#mindsporeapi概述) + - [总体架构](#总体架构) - [设计理念](#设计理念) - [层次结构](#层次结构) @@ -10,13 +11,48 @@ +## 总体架构 +MindSpore是一个全场景深度学习框架,旨在实现易开发、高效执行、全场景覆盖三大目标,其中易开发表现为API友好、调试难度低以及额外的自动化属性,高效执行包括计算效率、数据预处理效率和分布式训练效率,全场景则指框架同时支持云、边缘以及端侧场景。 + +MindSpore总体架构分为前端表示层(Mind Expression,ME)、计算图引擎(Graph Engine,GE)和后端运行时三个部分。ME提供了用户级应用软件编程接口(Application Programming Interface,API),用于构建和训练神经网络,并将用户的Python代码转换为数据流图。GE是算子和硬件资源的管理器,负责控制从ME接收的数据流图的执行。后端运行时包含云、边、端上不同环境中的高效运行环境,例如CPU、GPU、Ascend AI处理器、 Android/iOS等。更多总体架构的相关内容请参见[总体架构](https://www.mindspore.cn/docs/zh-CN/master/architecture.html)。 + ## 设计理念 MindSpore源于全产业的最佳实践,向数据科学家和算法工程师提供了统一的模型训练、推理和导出等接口,支持端、边、云等不同场景下的灵活部署,推动深度学习和科学计算等领域繁荣发展。 -MindSpore提供了动态图和静态图统一的编码方式,用户无需开发多套代码,仅变更一行代码便可切换动态图/静态图模式,从而拥有更轻松的开发调试及性能体验。 +MindSpore提供了Python编程范式,用户使用Python原生控制逻辑即可构建复杂的神经网络模型,AI编程变得简单,具体示例请参见[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html)。 + +目前主流的深度学习框架的执行模式有两种,分别为静态图模式和动态图模式。静态图模式拥有较高的训练性能,但难以调试。动态图模式相较于静态图模式虽然易于调试,但难以高效执行。MindSpore提供了动态图和静态图统一的编码方式,大大增加了静态图和动态图的可兼容性,用户无需开发多套代码,仅变更一行代码便可切换动态图/静态图模式,例如设置`context.set_context(mode=context.PYNATIVE_MODE)`切换成动态图模式,设置`context.set_context(mode=context.GRAPH_MODE)`即可切换成静态图模式,用户可拥有更轻松的开发调试及性能体验。 + +神经网络模型通常基于梯度下降算法进行训练,但手动求导过程复杂且梯度难以计算。MindSpore的基于源码转换(Source Code Transformation,SCT)的自动微分(Automatic Differentiation)机制采用函数式可微分编程架构,在接口层提供Python编程接口,包括控制流的表达。用户可聚焦于模型算法的数学原生表达,无需手动进行求导,在动态图模式下自动微分的样例代码如下所示。 + +```python +import mindspore as ms +from mindspore.ops import composite as C +from mindspore import context +from mindspore.common import Tensor + + +context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") + + +def cost(x, y): return x * (x + y) + + +def test_grad(x, y): + return C.GradOperation(get_all=True)(cost)(Tensor(x, dtype=ms.float32), Tensor(y, dtype=ms.float32)) + + +def main(): + return test_grad(2, 1) + +``` + +其中,第一步定义了一个函数(计算图),第二步利用MindSpore提供的反向接口进行自动微分,定义了一个反向函数(计算图),最后给定一些输入就能获取第一步定义的函数在指定处的导数,求导结果为`(5, 2)`。 + +此外,SCT能够将Python代码转换为函数中间表达(Intermediate Representation,IR),函数中间表达构造出能够在不同设备解析和执行的计算图,并且在执行该计算图前,应用了多种软硬件协同优化技术,端、边、云等不同场景下的性能和效率得到针对性的提升。 -此外,由于MindSpore统一了单机和分布式训练的编码方式,开发者无需编写复杂的分布式策略,在单机代码中添加少量代码即可实现分布式训练,大大降低了AI开发门槛。 +随着神经网络模型和数据集的规模不断增加,分布式并行训练成为了神经网络训练的常见做法,但分布式并行训练的策略选择和编写十分复杂,这严重制约着深度学习模型的训练效率,阻碍深度学习的发展。MindSpore统一了单机和分布式训练的编码方式,开发者无需编写复杂的分布式策略,在单机代码中添加少量代码即可实现分布式训练,例如设置`context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL)`便可自动建立代价模型,为用户选择一种较优的并行模式,提高神经网络训练效率,大大降低了AI开发门槛,使用户能够快速实现模型思路。 ## 层次结构 @@ -26,12 +62,12 @@ MindSpore向用户提供了3个不同层次的API,支撑用户进行网络构 - Low-Level Python API - 第一层为低阶API,主要包括张量定义、基础算子、自动微分等模块,用户可使用低阶API轻松实现张量操作和求导计算。 + 
第一层为低阶API,主要包括张量定义、基础算子、自动微分等模块,用户可使用低阶API轻松实现张量定义和求导计算,例如用户可通过`Tensor`接口自定义张量,使用`ops.composite`模块下的`GradOperation`算子计算函数在指定处的导数。 - Medium-Level Python API - 第二层为中阶API,其封装了低价API,提供网络层、优化器、损失函数等模块,用户可通过中阶API灵活构建神经网络和控制执行流程,快速实现模型算法逻辑。 + 第二层为中阶API,其封装了低阶API,提供网络层、优化器、损失函数等模块,用户可通过中阶API灵活构建神经网络和控制执行流程,快速实现模型算法逻辑,例如用户可调用`Cell`接口构建神经网络模型和计算逻辑,通过使用`loss`模块和`Optimizer`接口为神经网络模型添加损失函数和优化方式。 - High-Level Python API - 第三层为高阶API,其在中阶API的基础上又提供了训练推理的管理、Callback、混合精度训练等高级接口,方便用户控制整网的执行流程和实现神经网络的训练及推理。 + 第三层为高阶API,其在中阶API的基础上又提供了训练推理的管理、Callback、混合精度训练等高级接口,方便用户控制整网的执行流程和实现神经网络的训练及推理,例如用户使用`Model`接口,指定要训练的神经网络模型和相关的训练设置,即可对神经网络模型进行训练。 diff --git a/api/source_zh_cn/programming_guide/augmentation.md b/api/source_zh_cn/programming_guide/augmentation.md index 74fd8b0ba812a83678302bdedf5be17cea0ea403..09a2de388ba8167873a990e3b3ea80dd277947bd 100644 --- a/api/source_zh_cn/programming_guide/augmentation.md +++ b/api/source_zh_cn/programming_guide/augmentation.md @@ -10,7 +10,7 @@ - [Resize](#resize) - [Invert](#invert) - [py_transforms](#py_transforms) - - [ComposeOp](#composeop) + - [Compose](#compose) - [使用说明](#使用说明) @@ -21,15 +21,14 @@ 在计算机视觉任务中,数据量过小或是样本场景单一等问题都会影响模型的训练效果,用户可以通过数据增强操作对图像进行预处理,从而提升模型的泛化性。 -MindSpore提供了c_transforms模块和py_transforms模块供用户进行数据增强操作,用户也可以自定义函数或者算子进行数据增强。 - -MindSpore目前支持的常用数据增强算子如下表所示,更多数据增强算子参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.vision.html)。 +MindSpore提供了`c_transforms`模块和`py_transforms`模块供用户进行数据增强操作,用户也可以自定义函数或者算子进行数据增强。 | 模块 | 实现 | 说明 | | ---- | ---- | ---- | | c_transforms | 基于C++的OpenCV实现 | 具有较高的性能。 | -| py_transforms | 基于Python的PIL实现 | 该模块提供了多种图像增强功能,并提供了PIL Image和numpy数组之间的传输方法。| +| py_transforms | 基于Python的PIL实现 | 该模块提供了多种图像增强功能,并提供了PIL Image和NumPy数组之间的传输方法。| +MindSpore目前支持的常用数据增强算子如下表所示,更多数据增强算子参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.vision.html)。 | 模块 | 算子 | 说明 | | ---- | ---- | ---- | @@ -40,46 +39,38 @@ MindSpore目前支持的常用数据增强算子如下表所示,更多数据 | py_transforms | RandomCrop | 在图像随机位置裁剪指定大小子图像。 | | | Resize | 将图像缩放到指定大小。 | | | Invert | 将图像进行反相。 | -| |ComposeOp | 将列表中的数据增强操作依次执行。 | +| |Compose | 将列表中的数据增强操作依次执行。 | ## c_transforms -下面将简要介绍几种常用的c_transforms模块数据增强算子的使用方法,更多的c_transforms模块数据增强算子参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.vision.html#module-mindspore.dataset.vision.c_transforms)。 +下面将简要介绍几种常用的`c_transforms`模块数据增强算子的使用方法。 ### RandomCrop 对输入图像进行在随机位置的裁剪。 **参数说明:** -- *size: 裁剪图像的尺寸。* -- *padding: 填充的像素数量。* -- *pad_if_needed: 原图小于裁剪尺寸时,是否需要填充。* -- *fill_value: 在常量填充模式时使用的填充值。* -- *padding_mode: 填充模式。* +- `size`:裁剪图像的尺寸。 +- `padding`:填充的像素数量。 +- `pad_if_needed`:原图小于裁剪尺寸时,是否需要填充。 +- `fill_value`:在常量填充模式时使用的填充值。 +- `padding_mode`:填充模式。 -```python -# 对输入图像进行在随机位置的裁剪 +下面的样例首先使用顺序采样器加载CIFAR-10数据集,然后对已加载的图片进行长宽均为10的随机裁剪,最后输出裁剪前后的图片形状及对应标签,并对图片进行了展示。 +```python import matplotlib.pyplot as plt import mindspore.dataset as ds -import mindspore.dataset.transforms.vision.c_transforms as c_trans +import mindspore.dataset.vision.c_transforms as c_trans -# 下载Cifar10数据集,将其解压到Cifar10Data目录 DATA_DIR = "../data/dataset/testCifar10Data2" -# 指定一个顺序采样器SequentialSampler,按照读取顺序获取3个样本数据 sampler = ds.SequentialSampler(num_samples=3) - -# 使用Cifar10Dataset读取数据集,指定sampler为上述采样器 dataset1 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -# 创建一个随机裁剪算子,裁剪后的长宽分为10个像素 random_crop = c_trans.RandomCrop([10, 10]) +dataset2 = dataset1.map(operations=random_crop, input_columns=["image"]) -# 使用map算子将其作用到数据管道的数据集中 -dataset2 = dataset1.map(input_columns=["image"], 
operations=random_crop) - -# 启动数据管道,输出3个样本数据 image_list1, label_list1 = [], [] image_list2, label_list2 = [], [] for data1, data2 in zip(dataset1.create_dict_iterator(), dataset2.create_dict_iterator()): @@ -89,33 +80,37 @@ for data1, data2 in zip(dataset1.create_dict_iterator(), dataset2.create_dict_it image_list2.append(data2['image']) label_list2.append(data2['label']) print("Cropped image Shape:", data2['image'].shape, ", Cropped label:", data2['label']) - print("") + print("------") -# 将原图与裁剪后的图可视化 num_samples = len(image_list1) + len(image_list2) for i in range(num_samples): if i < len(image_list1): plt.subplot(2, len(image_list1), i + 1) - plt.imshow(image_list1[i]) - plt.title(label_list1[i]) + plt.imshow(image_list1[i].asnumpy()) + plt.title(label_list1[i].asnumpy()) else: plt.subplot(2, len(image_list2), i + 1) - plt.imshow(image_list2[i % len(image_list2)]) - plt.title(label_list2[i % len(image_list2)]) + plt.imshow(image_list2[i % len(image_list2)].asnumpy()) + plt.title(label_list2[i % len(image_list2)].asnumpy()) plt.show() ``` +输出结果如下: + ``` Source image Shape : (32, 32, 3) , Source label : 6 Cropped image Shape: (10, 10, 3) , Cropped label: 6 - +------ Source image Shape : (32, 32, 3) , Source label : 9 Cropped image Shape: (10, 10, 3) , Cropped label: 9 - +------ Source image Shape : (32, 32, 3) , Source label : 9 Cropped image Shape: (10, 10, 3) , Cropped label: 9 +------ ``` +图片展示如下: + ![randomcrop](./images/randomcrop.png) ### RandomHorizontalFlip @@ -123,34 +118,25 @@ Cropped image Shape: (10, 10, 3) , Cropped label: 9 对输入图像进行随机水平翻转。 **参数说明:** -- *prob: 单张图片发生翻转的概率。* +- `prob`: 单张图片发生翻转的概率。 -```python -# 对输入图像进行随机水平翻转 +下面的样例首先使用随机采样器加载CIFAR-10数据集,然后对已加载的图片进行概率为0.8的随机水平翻转,最后输出翻转前后的图片形状及对应标签,并对图片进行了展示。 +```python import matplotlib.pyplot as plt import mindspore.dataset as ds -import mindspore.dataset.transforms.vision.c_transforms as c_trans +import mindspore.dataset.vision.c_transforms as c_trans -# 设置全局随机种子 ds.config.set_seed(6) -# 下载Cifar10数据集,将其解压到Cifar10Data目录 DATA_DIR = "../data/dataset/testCifar10Data2" -# 指定一个随机采样器RandomSampler,按照读取顺序获取4个样本数据 sampler = ds.RandomSampler(num_samples=4) - -# 使用Cifar10Dataset读取数据集,指定sampler为上述采样器 dataset1 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -# 创建一个随机翻转算子,设置翻转概率为0.8 random_horizontal_flip = c_trans.RandomHorizontalFlip(prob=0.8) +dataset2 = dataset1.map(operations=random_horizontal_flip, input_columns=["image"]) -# 使用map算子将其作用到数据管道的数据集中 -dataset2 = dataset1.map(input_columns=["image"], operations=random_horizontal_flip) - -# 启动数据管道,输出4个样本数据 image_list1, label_list1 = [], [] image_list2, label_list2 = [], [] for data1, data2 in zip(dataset1.create_dict_iterator(), dataset2.create_dict_iterator()): @@ -160,36 +146,40 @@ for data1, data2 in zip(dataset1.create_dict_iterator(), dataset2.create_dict_it image_list2.append(data2['image']) label_list2.append(data2['label']) print("Flipped image Shape:", data2['image'].shape, ", Flipped label:", data2['label']) - print("") + print("------") -# 将原图与裁剪后的图可视化 num_samples = len(image_list1) + len(image_list2) for i in range(num_samples): if i < len(image_list1): plt.subplot(2, len(image_list1), i + 1) - plt.imshow(image_list1[i]) - plt.title(label_list1[i]) + plt.imshow(image_list1[i].asnumpy()) + plt.title(label_list1[i].asnumpy()) else: plt.subplot(2, len(image_list2), i + 1) - plt.imshow(image_list2[i % len(image_list2)]) - plt.title(label_list2[i % len(image_list2)]) + plt.imshow(image_list2[i % len(image_list2)].asnumpy()) + plt.title(label_list2[i % len(image_list2)].asnumpy()) 
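+# 可视化:第一行子图为原始图片,第二行子图为随机水平翻转后的图片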
plt.show() ``` +输出结果如下: + ``` Source image Shape : (32, 32, 3) , Source label : 3 Flipped image Shape: (32, 32, 3) , Flipped label: 3 - +------ Source image Shape : (32, 32, 3) , Source label : 6 Flipped image Shape: (32, 32, 3) , Flipped label: 6 - +------ Source image Shape : (32, 32, 3) , Source label : 6 Flipped image Shape: (32, 32, 3) , Flipped label: 6 - +------ Source image Shape : (32, 32, 3) , Source label : 9 Flipped image Shape: (32, 32, 3) , Flipped label: 9 +------ ``` +图片展示如下: + ![randomhorizontalflip](./images/randomhorizontalflip.png) ### Resize @@ -197,29 +187,23 @@ Flipped image Shape: (32, 32, 3) , Flipped label: 9 对输入图像进行缩放。 **参数说明:** -- *self: 缩放的目标大小。* -- *interpolation: 缩放时采用的插值方式。* +- `self`:缩放的目标大小。 +- `interpolation`:缩放时采用的插值方式。 -```python -# 对输入图像进行指定大小缩放 +下面的样例首先加载MNIST数据集,然后将已加载的图片缩放至(101, 101)大小,最后输出缩放前后的图片形状及对应标签,并对图片进行了展示。 +```python import matplotlib.pyplot as plt import mindspore.dataset as ds -import mindspore.dataset.transforms.vision.c_transforms as c_trans +import mindspore.dataset.vision.c_transforms as c_trans -# 下载MNIST数据集,将其解压到MnistData目录 DATA_DIR = "../data/dataset/testMnistData2" -# 使用MnistDataset读取数据集 dataset1 = ds.MnistDataset(DATA_DIR, num_samples=4, shuffle=False) -# 创建一个缩放算子,将MNIST的图片从(28, 28)缩放到(101, 101) resize = c_trans.Resize(size=[101, 101]) +dataset2 = dataset1.map(operations=resize, input_columns=["image"]) -# 使用map算子将其作用到数据管道的数据集中 -dataset2 = dataset1.map(input_columns=["image"], operations=resize) - -# 启动数据管道 image_list1, label_list1 = [], [] image_list2, label_list2 = [], [] for data1, data2 in zip(dataset1.create_dict_iterator(), dataset2.create_dict_iterator()): @@ -229,68 +213,63 @@ for data1, data2 in zip(dataset1.create_dict_iterator(), dataset2.create_dict_it image_list2.append(data2['image']) label_list2.append(data2['label']) print("Flipped image Shape:", data2['image'].shape, ", Flipped label:", data2['label']) - print("") + print("------") -# 将原图与裁剪后的图可视化 num_samples = len(image_list1) + len(image_list2) for i in range(num_samples): if i < len(image_list1): plt.subplot(2, len(image_list1), i + 1) - plt.imshow(image_list1[i].squeeze(), cmap=plt.cm.gray) - plt.title(label_list1[i]) + plt.imshow(image_list1[i].asnumpy().squeeze(), cmap=plt.cm.gray) + plt.title(label_list1[i].asnumpy()) else: plt.subplot(2, len(image_list2), i + 1) - plt.imshow(image_list2[i % len(image_list2)].squeeze(), cmap=plt.cm.gray) - plt.title(label_list2[i % len(image_list2)]) + plt.imshow(image_list2[i % len(image_list2)].asnumpy().squeeze(), cmap=plt.cm.gray) + plt.title(label_list2[i % len(image_list2)].asnumpy()) plt.show() ``` +输出结果如下: + ``` Source image Shape : (28, 28, 1) , Source label : 5 Flipped image Shape: (101, 101, 1) , Flipped label: 5 - +------ Source image Shape : (28, 28, 1) , Source label : 0 Flipped image Shape: (101, 101, 1) , Flipped label: 0 - +------ Source image Shape : (28, 28, 1) , Source label : 4 Flipped image Shape: (101, 101, 1) , Flipped label: 4 - +------ Source image Shape : (28, 28, 1) , Source label : 1 Flipped image Shape: (101, 101, 1) , Flipped label: 1 +------ ``` +图片展示如下: + ![ctrans_resize](./images/ctrans_resize.png) ### Invert 对输入图像进行反相处理。 -```python -# 对输入图像进行反相处理 +下面的样例首先加载CIFAR-10数据集,然后同时定义缩放和反相操作并作用于已加载的图片,最后输出缩放与反相前后的图片形状及对应标签,并对图片进行了展示。 +```python import matplotlib.pyplot as plt import mindspore.dataset as ds -import mindspore.dataset.transforms.vision.c_transforms as c_trans +import mindspore.dataset.vision.c_transforms as c_trans -# 设置全局随机种子 ds.config.set_seed(8) -# 下载Cifar10数据集,将其解压到Cifar10Data目录 
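+# DATA_DIR为本地CIFAR-10数据集的示例路径,需提前下载并解压数据集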
DATA_DIR = "../data/dataset/testCifar10Data2" -# 使用Cifar10Dataset读取数据集 dataset1 = ds.Cifar10Dataset(DATA_DIR, num_samples=4, shuffle=True) -# 创建一个缩放算子,将图片缩放到(101, 101) resize = c_trans.Resize(size=[101, 101]) - -# 创建一个反相算子 invert = c_trans.Invert() +dataset2 = dataset1.map(operations=[resize, invert], input_columns=["image"]) -# 使用map算子将其作用到数据管道的数据集中(两个算子按顺序起作用) -dataset2 = dataset1.map(input_columns=["image"], operations=[resize, invert]) - -# 启动数据管道 image_list1, label_list1 = [], [] image_list2, label_list2 = [], [] for data1, data2 in zip(dataset1.create_dict_iterator(), dataset2.create_dict_iterator()): @@ -300,87 +279,88 @@ for data1, data2 in zip(dataset1.create_dict_iterator(), dataset2.create_dict_it image_list2.append(data2['image']) label_list2.append(data2['label']) print("Flipped image Shape:", data2['image'].shape, ", Flipped label:", data2['label']) - print("") + print("------") -# 将原图与裁剪后的图可视化 num_samples = len(image_list1) + len(image_list2) for i in range(num_samples): if i < len(image_list1): plt.subplot(2, len(image_list1), i + 1) - plt.imshow(image_list1[i].squeeze(), cmap=plt.cm.gray) - plt.title(label_list1[i]) + plt.imshow(image_list1[i].asnumpy().squeeze(), cmap=plt.cm.gray) + plt.title(label_list1[i].asnumpy()) else: plt.subplot(2, len(image_list2), i + 1) - plt.imshow(image_list2[i % len(image_list2)].squeeze(), cmap=plt.cm.gray) - plt.title(label_list2[i % len(image_list2)]) + plt.imshow(image_list2[i % len(image_list2)].asnumpy().squeeze(), cmap=plt.cm.gray) + plt.title(label_list2[i % len(image_list2)].asnumpy()) plt.show() ``` +输出结果如下: + ``` Source image Shape : (32, 32, 3) , Source label : 4 Flipped image Shape: (32, 32, 3) , Flipped label: 4 - +------ Source image Shape : (32, 32, 3) , Source label : 9 Flipped image Shape: (32, 32, 3) , Flipped label: 9 - +------ Source image Shape : (32, 32, 3) , Source label : 6 Flipped image Shape: (32, 32, 3) , Flipped label: 6 - +------ Source image Shape : (32, 32, 3) , Source label : 5 Flipped image Shape: (32, 32, 3) , Flipped label: 5 +------ ``` +图片展示如下: + ![ctrans_invert](./images/ctrans_invert.png) ## py_transforms -下面将简要介绍几种常用的py_transforms模块数据增强算子的使用方法,更多的py_transforms模块数据增强算子参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.vision.html#module-mindspore.dataset.vision.py_transforms)。 +下面将简要介绍几种常用的`py_transforms`模块数据增强算子的使用方法。 -### ComposeOp +### Compose -```python -# 对输入图像进行解码,缩放组合操作 +接收一个`transforms`列表,将列表中的数据增强操作依次作用于数据集图片。 +下面的样例首先加载一个图片数据集,然后同时定义解码、缩放和数据类型转换操作,并作用于已加载的图片,最后输出处理后的图片形状及对应标签,并对图片进行了展示。 + +```python import matplotlib.pyplot as plt import mindspore.dataset as ds -import mindspore.dataset.transforms.vision.py_transforms as py_trans +import mindspore.dataset.vision.py_transforms as py_trans +from mindspore.dataset.transforms.py_transforms import Compose -# 设置全局随机种子 ds.config.set_seed(8) -# 图像数据集目录 DATA_DIR = "../data/dataset/testPK/data" -# 使用ImageFolderDatasetV2读取数据集,获取5个样本 -dataset1 = ds.ImageFolderDatasetV2(DATA_DIR, num_samples=5, shuffle=True) +dataset1 = ds.ImageFolderDataset(DATA_DIR, num_samples=5, shuffle=True) -# 创建一组数据增强算子的集合 transforms_list = [ - py_trans.Decode(), # 解码图像到PIL格式 - py_trans.Resize(size=(200,200)), # 缩放图像到[200, 200]大小 - py_trans.ToTensor() # 将PIL图像转换到Numpy + py_trans.Decode(), + py_trans.Resize(size=(200,200)), + py_trans.ToTensor() ] -compose_trans = py_trans.ComposeOp(transforms_list) - -# 使用map算子将其作用到数据管道的数据集中 -dataset2 = dataset1.map(input_columns=["image"], operations=compose_trans()) +compose_trans = Compose(transforms_list) 
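+# Compose将transforms_list中的增强操作组合为单个变换,再通过map作用到image列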
+dataset2 = dataset1.map(operations=compose_trans, input_columns=["image"]) -# 启动数据管道,输出5个样本数据 image_list, label_list = [], [] for data in dataset2.create_dict_iterator(): image_list.append(data['image']) label_list.append(data['label']) print("Transformed image Shape:", data['image'].shape, ", Transformed label:", data['label']) -# 将原图与裁剪后的图可视化 num_samples = len(image_list) for i in range(num_samples): plt.subplot(1, len(image_list), i + 1) - plt.imshow(image_list[i].transpose(1, 2, 0)) - plt.title(label_list[i]) + plt.imshow(image_list[i].asnumpy().transpose(1, 2, 0)) + plt.title(label_list[i].asnumpy()) plt.show() ``` +输出结果如下: + ``` Transformed image Shape: (3, 200, 200) , Transformed label: 3 Transformed image Shape: (3, 200, 200) , Transformed label: 0 @@ -389,11 +369,13 @@ Transformed image Shape: (3, 200, 200) , Transformed label: 0 Transformed image Shape: (3, 200, 200) , Transformed label: 3 ``` +图片展示如下: + ![pytrans_compose](./images/pytrans_compose.png) ## 使用说明 -请勿混用c_transforms与py_transforms,因为c_transforms是在C++内维护buffer管理,py_transforms是在Python内维护buffer管理,两者混用会降低性能。 +请勿混用`c_transforms`与`py_transforms`,因为两者作用于图片的格式不同,混用会降低处理性能。 ![tranform_pipeline](./images/tranform_pipeline.png) @@ -401,15 +383,15 @@ Transformed image Shape: (3, 200, 200) , Transformed label: 3 **推荐的使用方式:** -- 单独使用py_transform或c_transform +- 单独使用`py_transform`或`c_transform` ![tranform_c_py](./images/tranform_good_1.png) -- 先使用py_transform,再使用c_transform +- 先使用`py_transform`,再使用`c_transform` ![tranform_c_py](./images/tranform_good_2.png) -- 先使用c_transform,再使用py_transform +- 先使用`c_transform`,再使用`py_transform` ![tranform_c_py](./images/tranform_good_3.png) diff --git a/api/source_zh_cn/programming_guide/auto_augmentation.md b/api/source_zh_cn/programming_guide/auto_augmentation.md index 57ec65060e666988f7e7d9fa6f7f783256374f33..64f86a16179fc57fdd65b6231f3f219374bd4267 100644 --- a/api/source_zh_cn/programming_guide/auto_augmentation.md +++ b/api/source_zh_cn/programming_guide/auto_augmentation.md @@ -3,110 +3,129 @@ - [自动数据增强](#自动数据增强) - - [基于概率动态调整数据增强策略](#基于概率动态调整数据增强策略) - - [基于训练结果信息动态调整数据增强策略](#基于训练结果信息动态调整数据增强策略) + - [概述](#概述) + - [基于概率的自动数据增强](#基于概率的自动数据增强) + - [RandomApply](#RandomApply) + - [RandomChoice](#RandomChoice) + - [RandomSelectSubpolicy](#RandomSelectSubpolicy) + - [基于回调参数的自动数据增强](#基于回调参数的自动数据增强) - + -## 基于概率动态调整数据增强策略 +## 概述 -MindSpore提供一系列基于概率的数据增强的API,用户可以对各种图像操作进行随机选择、组合,使数据增强更灵活。 +MindSpore除了可以让用户自定义数据增强的使用,还提供了一种自动数据增强方式,可以基于特定策略自动对图像进行数据增强处理。 -- [`RandomApply(transforms, prob=0.5)`](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.transforms.html?highlight=randomapply#mindspore.dataset.transforms.c_transforms.RandomApply) -以一定的概率指定`transforms`操作,即可能执行,也可以能不执行;`transform`可以是一个,也可以是一系列。 +自动数据增强主要分为基于概率的自动数据增强和基于回调参数的自动数据增强。 - ```python +## 基于概率的自动数据增强 - rand_apply_list = RandomApply([c_vision.RandomCrop(), c_vision.RandomColorAdjust()]) - ds = ds.map(operations=rand_apply_list) +MindSpore提供了一系列基于概率的自动数据增强API,用户可以对各种数据增强操作进行随机选择与组合,使数据增强更加灵活。 - ``` +关于API的详细说明,可以参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.transforms.html)。 - 按50%的概率来顺序执行`RandomCrop`和`RandomColorAdjust`操作,否则都不执行。 +### RandomApply -- [`RandomChoice(transforms)`](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.transforms.html?highlight=randomchoice#mindspore.dataset.transforms.c_transforms.RandomChoice) -从`transfrom`操作中随机选择一个执行。 +API接收一个数据增强操作列表`transforms`,以一定的概率顺序执行列表中各数据增强操作,默认概率为0.5,否则都不执行。 - ```python 
+在下面的代码示例中,以0.5的概率来顺序执行`RandomCrop`和`RandomColorAdjust`操作,否则都不执行。 - rand_choice = RandomChoice([c_vision.CenterCrop(), c_vision.RandomCrop()]) - ds = ds.map(operations=rand_choice) +```python +import mindspore.dataset.vision.c_transforms as c_vision +from mindspore.dataset.transforms.c_transforms import RandomApply - ``` +rand_apply_list = RandomApply([c_vision.RandomCrop(512), c_vision.RandomColorAdjust()]) +``` - 分别以50%概率来执行`CenterCrop`和`RandomCrop`操作。 +### RandomChoice -- [`RandomSelectSubpolicy(policy)`](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.transforms.vision.html?highlight=randomselectsubpolicy#mindspore.dataset.transforms.vision.c_transforms.RandomSelectSubpolicy) -用户可以预置策略(Policy),每次随机选择一个子策略(SubPolicy)组合,同一子策略中由若干个顺序执行的图像增强操作组成,每个操作与两个参数关联:1) 执行操作的概率 2)执行操作的幅度; -对于一个batch中的每张图像,随机选择子策略来变换图像。 +API接收一个数据增强操作列表`transforms`,从中随机选择一个数据增强操作执行。 - ```python +在下面的代码示例中,等概率地在`CenterCrop`和`RandomCrop`中选择一个操作执行。 - policy = [ - [(c_vision.RandomRotation((45, 45)), 0.5), (c_vision.RandomVerticalFlip(), 1.0), (c_vision.RandomColorAdjust(), 0.8)], - [(c_vision.RandomRotation((90, 90)), 1), (c_vision.RandomColorAdjust(), 0.2)] - ] - ds = ds.map(operations=c_vision.RandomSelectSubpolicy(policy), input_columns=["image"]) +```python +import mindspore.dataset.vision.c_transforms as c_vision +from mindspore.dataset.transforms.c_transforms import RandomChoice - ``` +rand_choice = RandomChoice([c_vision.CenterCrop(512), c_vision.RandomCrop(512)]) +``` - 示例中包括2条子策略,其中子策略1中包含`RandomRotation`、`RandomVerticalFlip`、`RandomColorAdjust`3个操作,概率分别为0.5、1.0、0.8;子策略2中包含`RandomRotation`和`RandomColorAdjust`,概率分别为1.0、2.0。 +### RandomSelectSubpolicy -## 基于训练结果信息动态调整数据增强策略 +API接收一个预置策略列表,包含一系列子策略组合,每一子策略由若干个顺序执行的数据增强操作及其执行概率组成。 -Mindspore的`sync_wait`接口支持按batch或epoch粒度来调整数据增强策略,实现训练过程中动态调整数据增强策略。 -`sync_wait`必须和`sync_update`配合使用实现数据pipeline上的同步回调。 +对各图像先等概率随机选择一种子策略,再依照子策略中的概率顺序执行各个操作。 -- [`sync_wait(condition_name, num_batch=1, callback=None)`](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html?highlight=sync_wait#mindspore.dataset.ImageFolderDatasetV2.sync_wait) -- [`sync_update(condition_name, num_batch=None, data=None)`](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html?highlight=sync_update#mindspore.dataset.ImageFolderDatasetV2.sync_update) +在下面的代码示例中,预置了两条子策略,子策略1中包含`RandomRotation`、`RandomVerticalFlip`和`RandomColorAdjust`三个操作,概率分别为0.5、1.0和0.8;子策略2中包含`RandomRotation`和`RandomColorAdjust`两个操作,概率分别为1.0和0.2。 -`sync_wait`将阻塞整个数据处理pipeline直到`sync_update`触发用户预先定义的`callback`函数。 +```python +import mindspore.dataset.vision.c_transforms as c_vision +from mindspore.dataset.vision.c_transforms import RandomSelectSubpolicy -1. 
用户预先定义class`Augment`,其中`preprocess`为`map`操作中的自定义数据增强函数,`update`为更新数据增强策略的回调函数。 +policy_list = [ + [(c_vision.RandomRotation((45, 45)), 0.5), (c_vision.RandomVerticalFlip(), 1.0), (c_vision.RandomColorAdjust(), 0.8)], + [(c_vision.RandomRotation((90, 90)), 1.0), (c_vision.RandomColorAdjust(), 0.2)] + ] +policy = RandomSelectSubpolicy(policy_list) +``` - ```python - import mindspore.dataset.transforms.vision.py_transforms as transforms - import mindspore.dataset as de - import numpy as np +## 基于回调参数的自动数据增强 - class Augment: - def __init__(self): - self.ep_num = 0 - self.step_num = 0 +MindSpore的`sync_wait`接口支持按batch或epoch粒度在训练过程中动态调整数据增强策略,用户可以设定阻塞条件触发特定的数据增强操作。 - def preprocess(self, input_): - return (np.array((input_ + self.step_num ** self.ep_num - 1), )) +`sync_wait`将阻塞整个数据处理pipeline直到`sync_update`触发用户预先定义的`callback`函数,两者需配合使用,对应说明如下: - def update(self, data): - self.ep_num = data['ep_num'] - self.step_num = data['step_num'] +- sync_wait(condition_name, num_batch=1, callback=None) - ``` + 该API为数据集添加一个阻塞条件`condition_name`,当`sync_update`调用时执行指定的`callback`函数。 -2. 数据处理pipeline先回调自定义的增强策略更新函数`auto_aug.update`,然后在`map`操作中按更新后的策略来执行`auto_aug.preprocess`中定义的数据增强。 +- sync_update(condition_name, num_batch=None, data=None) - ```python + 该API用于释放对应`condition_name`的阻塞,并对`data`触发指定的`callback`函数。 - arr = list(range(1, 4)) - ds = de.NumpySlicesDataset(arr, shuffle=False) - aug = Augment() - ds= ds.sync_wait(condition_name="policy", callback=aug.update) - ds = ds.map(operations=[aug.preprocess]) +下面将演示基于回调参数的自动数据增强的用法。 - ``` +1. 用户预先定义`Augment`类,其中`preprocess`为自定义的数据增强函数,`update`为更新数据增强策略的回调函数。 -3. 在每个step调用`sync_update`进行数据增强策略的更新。 + ```python + import mindspore.dataset.vision.py_transforms as transforms + import mindspore.dataset as ds + import numpy as np - ```python - epochs = 5 - itr = ds.create_tuple_iterator(num_epochs=epochs) - step_num = 0 - for ep_num in range(epochs): - for data in itr: - print("epcoh: {}, step:{}, data :{}".format(ep_num, step_num, data)) - step_num += 1 - ds.sync_update(condition_name="policy", data={'ep_num': ep_num, 'step_num': step_num}) + class Augment: + def __init__(self): + self.ep_num = 0 + self.step_num = 0 - ``` + def preprocess(self, input_): + return (np.array((input_ + self.step_num ** self.ep_num - 1), )) + + def update(self, data): + self.ep_num = data['ep_num'] + self.step_num = data['step_num'] + ``` + +2. 数据处理pipeline先回调自定义的增强策略更新函数`update`,然后在`map`操作中按更新后的策略来执行`preprocess`中定义的数据增强操作。 + + ```python + arr = list(range(1, 4)) + dataset = ds.NumpySlicesDataset(arr, shuffle=False) + aug = Augment() + dataset = dataset.sync_wait(condition_name="policy", callback=aug.update) + dataset = dataset.map(operations=[aug.preprocess]) + ``` + +3. 
在每个step中调用`sync_update`进行数据增强策略的更新。 + + ```python + epochs = 5 + itr = dataset.create_tuple_iterator(num_epochs=epochs) + step_num = 0 + for ep_num in range(epochs): + for data in itr: + print("epoch: {}, step:{}, data :{}".format(ep_num, step_num, data)) + step_num += 1 + dataset.sync_update(condition_name="policy", data={'ep_num': ep_num, 'step_num': step_num}) + ``` diff --git a/api/source_zh_cn/programming_guide/auto_parallel_context.md b/api/source_zh_cn/programming_guide/auto_parallel.md similarity index 79% rename from api/source_zh_cn/programming_guide/auto_parallel_context.md rename to api/source_zh_cn/programming_guide/auto_parallel.md index 14ce9bdab9d86ddc8e66279f2d92167cc2ae0f48..8001b6f2a761902d9af079d128207ed968b280d5 100644 --- a/api/source_zh_cn/programming_guide/auto_parallel_context.md +++ b/api/source_zh_cn/programming_guide/auto_parallel.md @@ -6,31 +6,34 @@ - [概述](#概述) - [分布式并行配置](#分布式并行配置) - [通用配置](#通用配置) - - [device_num](#device_num) - - [global_rank](#global_rank) - - [gradients_mean](#gradients_mean) - - [parallel_mode](#parallel_mode) + - [device_num](#device_num) + - [global_rank](#global_rank) + - [gradients_mean](#gradients_mean) + - [parallel_mode](#parallel_mode) + - [all_reduce_fusion_config](#all_reduce_fusion_config) - [自动并行配置](#自动并行配置) - - [gradient_fp32_sync](#gradient_fp32-sync) - - [loss_repeated_mean](#loss_repeated_mean) - - [auto_parallel_search_mode](#auto_parallel_search_mode) - - [strategy_ckpt_load_file](#strategy_ckpt_load_file) - - [strategy_ckpt_save_file](#strategy_ckpt_save_file) - - [full_batch](#full_batch) + - [gradient_fp32_sync](#gradient_fp32_sync) + - [auto_parallel_search_mode](#auto_parallel_search_mode) + - [strategy_ckpt_load_file](#strategy_ckpt_load_file) + - [strategy_ckpt_save_file](#strategy_ckpt_save_file) + - [full_batch](#full_batch) - [数据并行配置](#数据并行配置) - - [enable_parallel_optimizer](#enable_parallel_optimizer) + - [enable_parallel_optimizer](#enable_parallel_optimizer) - [混合并行配置](#混合并行配置) - - [layerwise_parallel](#layerwise_parallel) + - [layerwise_parallel](#layerwise_parallel) - [分布式通信接口](#分布式通信接口) - [init](#init) - - [get_rank](#get_rank) - [get_group_size](#get_group_size) + - [get_rank](#get_rank) + - [分布式属性配置](#分布式属性配置) + - [cross_batch](#cross_batch) + - [fusion](#fusion) - [数据并行](#数据并行) - [自动并行](#自动并行) - + ## 概述 @@ -43,7 +46,7 @@ MindSpore提供了分布式并行训练的功能,它支持了包括数据并 MindSpore的分布式并行配置通过`auto_parallel_context`来进行集中管理,用户可根据自身需求和实际情况来进行个性化的配置。这些配置可分为四大类: - 通用配置:对数据并行和自动并行均起作用的配置,如:`device_num`、`global_rank`。 -- 自动并行配置:仅在自动并行模式下起作用的配置,如:`gradient_fp32_sync`、`loss_repeated_mean`。 +- 自动并行配置:仅在自动并行模式下起作用的配置,如:`gradient_fp32_sync`。 - 数据并行配置:仅在数据并行模式下起作用的配置,如:`enable_parallel_optimizer`。 - 混合并行配置:仅在混合并行模式下起作用的配置,如:`layerwise_parallel`。 @@ -97,15 +100,21 @@ context.get_auto_parallel_context("gradients_mean") - `stand_alone`:单机模式。 - `data_parallel`:数据并行模式。 - `hybrid_parallel`:混合并行模式。 -- `semi_auto_parallel`:半自动并行模式,即用户可通过`set_strategy`方法给算子配置切分策略,若不配置策略,则默认是数据并行策略。 +- `semi_auto_parallel`:半自动并行模式,即用户可通过`shard`方法给算子配置切分策略,若不配置策略,则默认是数据并行策略。 - `auto_parallel`:自动并行模式,即框架会自动建立代价模型,为用户选择最优的切分策略。 +其中,`auto_parallel`和`data_parallel`在MindSpore教程中有完整样例: + +。 + 代码样例如下: ```python -from mindspore import context +from mindspore import context +from mindspore.ops import operations as P -context.set_auto_parallel_context(parallel_mode=“auto_parallel”) +context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") +mul = P.Mul().shard(((2, 1), (2, 1))) context.get_auto_parallel_context("parallel_mode") ``` @@ -140,29 +149,16 @@ 
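# gradient_fp32_sync的默认值为True,以下样例将其关闭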
context.set_auto_parallel_context(gradient_fp32_sync=False)
context.get_auto_parallel_context("gradient_fp32_sync")
```
-#### loss_repeated_mean
-
-`loss_repeated_mean`表示在loss重复计算的场景下,反向是否进行均值操作,其值为bool类型,默认为True。loss存在重复计算的场景下,反向进行均值操作能使分布式逻辑和单机保持一致。但在某些场景下,不进行均值操作可能会使网络收敛的速度更快。因此,MindSpore提供`loss_repeated_mean`接口,让用户自由配置。
-
-代码样例如下:
-
-```python
-from mindspore import context
-
-context.set_auto_parallel_context(loss_repeated_mean=False)
-context.get_auto_parallel_context("loss_repeated_mean")
-```
-
#### auto_parallel_search_mode
-MindSpore提供了`dynamic_programming`和`recursive_programming`两种搜索策略的算法。`dynamic_programming`能够搜索出代价模型刻画的最优策略,但在搜索巨大网络模型的并行策略时耗时较长;而`recursive_programming`能较快搜索出并行策略,但搜索出来的策略可能不是运行性能最优的。为此,MindSpore提供了参数,让用户自由选择搜索算法。
+MindSpore提供了`dynamic_programming`和`recursive_programming`两种搜索策略的算法。`dynamic_programming`能够搜索出代价模型刻画的最优策略,但在搜索巨大网络模型的并行策略时耗时较长;而`recursive_programming`能较快搜索出并行策略,但搜索出来的策略可能不是运行性能最优的。为此,MindSpore提供了参数,让用户自由选择搜索算法,默认是`dynamic_programming`。
代码样例如下:
```python
from mindspore import context
-context.set_auto_parallel_context(auto_parallel_search_mode=“dynamic_programming”)
+context.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
context.get_auto_parallel_context("auto_parallel_search_mode")
```
@@ -175,7 +171,7 @@ context.get_auto_parallel_context("auto_parallel_search_mode")
```python
from mindspore import context
-context.set_auto_parallel_context(strategy_ckpt_load_file=“./”)
+context.set_auto_parallel_context(strategy_ckpt_load_file="./")
context.get_auto_parallel_context("strategy_ckpt_load_file")
```
@@ -188,7 +184,7 @@ context.get_auto_parallel_context("strategy_ckpt_load_file")
```python
from mindspore import context
-context.set_auto_parallel_context(strategy_ckpt_save_file=“./”)
+context.set_auto_parallel_context(strategy_ckpt_save_file="./")
context.get_auto_parallel_context("strategy_ckpt_save_file")
```
@@ -285,7 +281,34 @@ init()
rank_id = get_rank()
-### 数据并行
+## 分布式属性配置
+
+### cross_batch
+
+在特定场景下,`data_parallel`的计算逻辑和`stand_alone`是不一样的,`auto_parallel`在任何场景下都是和`stand_alone`的计算逻辑保持一致。而`data_parallel`的收敛效果可能更好,因此MindSpore提供了`cross_batch`这个参数,可以使`auto_parallel`的计算逻辑和`data_parallel`保持一致,用户可通过`add_prim_attr`方法进行配置,默认值是False。
+
+代码样例如下:
+
+```python
+from mindspore.ops import operations as P
+
+mul = P.Mul().add_prim_attr("cross_batch", True)
+```
+
+### fusion
+
+出于性能考虑,MindSpore提供了`AllGather`和`AllReduce`算子的融合功能,`fusion`值相同的同类算子(算子类型以及通信域相同)会融合在一起,`fusion`的值必须大于等于0,且当`fusion`值为0时,表示不融合。
+
+代码样例如下:
+
+```python
+from mindspore.ops import operations as P
+
+allreduce1 = P.AllReduce().add_prim_attr("fusion", 1)
+allreduce2 = P.AllReduce().add_prim_attr("fusion", 1)
+```
+
+## 数据并行
数据并行是对数据进行切分的并行模式,一般按照batch维度切分,将数据分配到各个计算单元(worker)中,进行模型计算。在数据并行模式下,数据集要以数据并行的方式导入,并且`parallel_mode`要设置为`data_parallel`。
@@ -293,10 +316,11 @@ rank_id = get_rank()
。
-### 自动并行
+## 自动并行
自动并行是融合了数据并行、模型并行及混合并行的一种分布式并行模式,可以自动建立代价模型,为用户选择一种并行模式。其中,代价模型指基于内存的计算开销和通信开销对训练时间建模,并设计高效的算法找到训练时间较短的并行策略。在自动并行模式下,数据集也要以数据并行的方式导入,并且`parallel_mode`要设置为`auto_parallel`。
具体用例请参考MindSpore分布式并行训练教程:
-。
\ No newline at end of file
+。
+
diff --git a/api/source_zh_cn/programming_guide/callback.md b/api/source_zh_cn/programming_guide/callback.md
index 5717a72a47b5e06350f4f9379e215ebdc8f0411c..e971af34901a6eaf1a22974efa451ddb46c9a134 100644
--- a/api/source_zh_cn/programming_guide/callback.md
+++ b/api/source_zh_cn/programming_guide/callback.md
@@ -1,10 +1,11 @@
-# MindSpore Callback回调函数机制
+# Callback机制
-- [概述](#概述)
-- [MindSpore内置回调函数](#mindspore内置回调函数)
-- [MindSpore自定义回调函数](#mindspore自定义回调函数)
+- [Callback机制](#callback机制)
+ - [概述](#概述)
+ - [MindSpore内置回调函数](#mindspore内置回调函数)
+ - [MindSpore自定义回调函数](#mindspore自定义回调函数)
@@ -13,17 +14,13 @@
## 概述
Callback回调函数在MindSpore中被实现为一个类,Callback机制类似于一种监控模式,可以帮助用户观察网络训练过程中各种参数的变化情况和网络内部的状态,还可以根据用户的指定,在达到特定条件后执行相应的操作,在训练过程中,Callback列表会按照定义的顺序执行Callback函数。Callback机制让用户可以及时有效地掌握网络模型的训练状态,并根据需要随时作出调整,可以极大地提升用户的开发效率。
-在MindSpore中,Callback机制一般用在网络训练过程`model.train`中,用户可以通过配置不同的内置回调函数传入不同的参数,从而实现各种功能。例如,可以通过`LossMonitor`监控每一个epoch的loss变化情况,通过checkpoint保存网络参数和模型进行再训练或推理,通过`TimeMonitor`监控每一个epoch,每一个step的训练时间,以及提前终止训练,动态调整参数等。
+在MindSpore中,Callback机制一般用在网络训练过程`model.train`中,用户可以通过配置不同的内置回调函数传入不同的参数,从而实现各种功能。例如,可以通过`LossMonitor`监控每一个epoch的loss变化情况,通过`ModelCheckpoint`保存网络参数和模型进行再训练或推理,通过`TimeMonitor`监控每一个epoch,每一个step的训练时间,以及提前终止训练,动态调整参数等。
## MindSpore内置回调函数
- ModelCheckpoint
- 与模型训练过程相结合,保存训练后的模型和网络参数,方便进行再推理或再训练。
-
-- CheckpointConfig
-
- 一般与`ModelCheckpoint`配合使用,可自定义配置checkpoint的保存策略。
+ 与模型训练过程相结合,保存训练后的模型和网络参数,方便进行再推理或再训练。`ModelCheckpoint`一般与`CheckpointConfig`配合使用,`CheckpointConfig`是一个参数配置类,可自定义配置checkpoint的保存策略。
详细内容,请参考[Checkpoint官网教程](https://www.mindspore.cn/tutorial/zh-CN/master/use/saving_and_loading_model_parameters.html)。
diff --git a/api/source_zh_cn/programming_guide/cell.md b/api/source_zh_cn/programming_guide/cell.md
index 8570e80c0a9d38524dcd8e21bd196ab4ef1da9b1..eb6d8a88488bc3115e349cdc9f7d8a259927d24a 100644
--- a/api/source_zh_cn/programming_guide/cell.md
+++ b/api/source_zh_cn/programming_guide/cell.md
@@ -1,58 +1,72 @@
-# cell模块概述
-- [cell模块概述](#cell模块概述)
- - [概念用途](#概念用途)
- - [关键成员函数](#关键成员函数)
- - [模型层](#模型层)
- - [损失函数](#损失函数)
- - [网络构造](#Cell构造自定义网络)
+# Cell构建及其子类
+
+- [Cell构建及其子类](#cell构建及其子类)
+ - [概述](#概述)
+ - [关键成员函数](#关键成员函数)
+ - [construct方法](#construct方法)
+ - [parameters_dict](#parameters_dict)
+ - [cells_and_names](#cells_and_names)
+ - [set_grad](#set_grad)
+ - [nn模块与ops模块的关系](#nn模块与ops模块的关系)
+ - [模型层](#模型层)
+ - [内置模型层](#内置模型层)
+ - [应用实例](#应用实例)
+ - [损失函数](#损失函数)
+ - [内置损失函数](#内置损失函数)
+ - [应用实例](#应用实例-1)
+ - [优化算法](#优化算法)
+ - [构建自定义网络](#构建自定义网络)
-## 概念用途
+
-MindSpore的Cell类是构建所有网络的基类,也是网络的基本单元。当用户需要自定义网络时,需要继承Cell类,并重写__init_方法和contruct方法。
+## 概述
-损失函数,优化器和模型层等本质上也属于网络结构,也需要继承Cell类才能实现功能,同样用户也可以根据业务需求自定义这部分内容。
+MindSpore的`Cell`类是构建所有网络的基类,也是网络的基本单元。当用户需要自定义网络时,需要继承`Cell`类,并重写`__init__`方法和`construct`方法。
-本节内容首先将会介绍Cell类的关键成员函数,然后介绍基于Cell实现的MindSpore内置损失函数,优化器和模型层及使用方法,最后通过实例介绍
-如何利用Cell类构建自定义网络。
+损失函数、优化器和模型层等本质上也属于网络结构,也需要继承`Cell`类才能实现功能,同样用户也可以根据业务需求自定义这部分内容。
+
+本节内容首先将会介绍`Cell`类的关键成员函数,然后介绍基于`Cell`实现的MindSpore内置损失函数、优化器和模型层及使用方法,最后通过实例介绍如何利用`Cell`类构建自定义网络。
## 关键成员函数
### construct方法
-Cell类重写了__call__方法,在cell类的实例被调用时,会执行contruct方法。网络结构在contruct方法里面定义。
+`Cell`类重写了`__call__`方法,在`Cell`类的实例被调用时,会执行`construct`方法。网络结构在`construct`方法里面定义。
+
+下面的样例中,我们构建了一个简单的网络实现卷积计算功能。构成网络的算子在`__init__`中定义,在`construct`方法里面使用,用例的网络结构为`Conv2d`->`BiasAdd`。
-下面的样例中,我们构建了一个简单的网络。用例的网络结构为Conv2d->BatchNorm2d->ReLU->Flatten->Dense。
-在construct方法中,x为输入数据, out是经过网络的每层计算后得到的计算结果。
+在`construct`方法中,`x`为输入数据,`output`是经过网络结构计算后得到的计算结果。
```
+import mindspore.nn as nn
+from mindspore.ops import operations as P
+from mindspore.common.parameter import Parameter
+from mindspore.common.initializer import initializer
+
class Net(nn.Cell):
-    def __init__(self):
+    def __init__(self, in_channels=10, out_channels=20, kernel_size=3):
        super(Net, self).__init__()
-        self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal')
-        self.bn = nn.BatchNorm2d(64)
-        self.relu = nn.ReLU()
-        self.flatten = nn.Flatten()
-        self.fc = nn.Dense(64 * 222 * 222, 3)
+        self.conv2d = P.Conv2D(out_channels, kernel_size)
+        self.bias_add = P.BiasAdd()
+        self.weight = Parameter(
+            initializer('normal', [out_channels, in_channels, kernel_size, kernel_size]),
+            name='conv.weight')
+        self.bias = Parameter(initializer('zeros', [out_channels]), name='conv.bias')

    def construct(self, x):
-        x = self.conv(x)
-        x = self.bn(x)
-        x = self.relu(x)
-        x = self.flatten(x)
-        out = self.fc(x)
-        return out
+        output = self.conv2d(x, self.weight)
+        output = self.bias_add(output, self.bias)
+        return output
```
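+
+为直观起见,这里补充一个示意用法(补充示例,输入张量的形状为任意选取的假设值):实例化上面的`Net`并传入输入数据,即可触发`construct`中定义的计算逻辑。
+
+```
+import numpy as np
+from mindspore import Tensor
+
+net = Net()
+# 输入形状为[N, C_in, H, W],其中C_in须与in_channels(此处为10)一致
+input_data = Tensor(np.ones([1, 10, 32, 32]).astype(np.float32))
+output = net(input_data)
+print(output.shape)
+```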
### parameters_dict
-parameters_dict方法识别出网络结构中所有的参数,返回一个以key为参数名,value为参数值的OrderedDict()。
+`parameters_dict`方法识别出网络结构中所有的参数,返回一个以key为参数名,value为参数值的`OrderedDict`。
-Cell类中返回参数的方法还有许多,例如get_parameters(),trainable_params()等, 具体使用方法可以参见MindSpore API手册。
+`Cell`类中返回参数的方法还有许多,例如`get_parameters`、`trainable_params`等,具体使用方法可以参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Cell)。
代码样例如下:
```
@@ -63,120 +77,191 @@
print(result.keys())
print(result['conv.weight'])
```
-样例中的Net()采用上文构造网络的用例,打印了网络中是所有参数的名字和conv.weight参数的结果。
+样例中的`Net`采用上文构造网络的用例,打印了网络中所有参数的名字和`conv.weight`参数的结果。
-运行结果如下:
+输出如下:
```
-odict_keys(['conv.weight', 'bn.moving_mean', 'bn.moving_variance', 'bn.gamma', 'bn.beta', 'fc.weight', 'fc.bias'])
-Parameter (name=conv.weight, value=[[[[ 1.07402597e-02 7.70052336e-03 5.55867562e-03]
- [-3.21971579e-03 -3.75304517e-04 -8.73021083e-04]
-...
-[-1.81201510e-02 -1.31190736e-02 -4.27651079e-03]]]])
+odict_keys(['conv.weight', 'conv.bias'])
+Parameter (name=conv.weight, value=[[[[-3.95042636e-03 1.08830128e-02 -6.51786150e-03]
+ [ 8.66129529e-03 7.36288540e-03 -4.32638079e-03]
+ [-1.47628486e-02 8.24100431e-03 -2.71035335e-03]]
+ ......
+ [ 1.58852488e-02 -1.03505487e-02 1.72988791e-02]]]])
```
### cells_and_names
-cells_and_names方法是一个迭代器,返回网络中每个cell的名字和它的内容本身。
+`cells_and_names`方法是一个迭代器,返回网络中每个`Cell`的名字和它的内容本身。
+
+用例简单实现了获取与打印每个`Cell`名字的功能,其中根据网络结构可知,存在1个`Cell`为`nn.Conv2d`。
-用例简单实现了网络的cell获取与打印每个cell名字的功能,其中根据上文网络结构可知,存在五个cell分别是'conv','bn','relu','flatten','fc'。
+其中`nn.Conv2d`是MindSpore以`Cell`为基类封装好的一个卷积层,其具体内容将在"模型层"中进行介绍。
代码样例如下:
```
-net = Net()
+import mindspore.nn as nn
+
+class Net1(nn.Cell):
+    def __init__(self):
+        super(Net1, self).__init__()
+        self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal')
+
+    def construct(self, x):
+        out = self.conv(x)
+        return out
+
+net = Net1()
names = []
for m in net.cells_and_names():
+    print(m)
    names.append(m[0]) if m[0] else None
+print('-------names-------')
print(names)
```
-运行结果:
+输出如下:
```
-['conv', 'bn', 'relu', 'flatten', 'fc']
+('', Net1<
+ (conv): Conv2d
+ >)
+('conv', Conv2d)
+-------names-------
+['conv']
```
+
+### set_grad
+
+`set_grad`接口用于构建反向网络:在不传入参数调用时,默认设置`requires_grad`为True,需要在计算网络反向的场景中使用。
+
+以`TrainOneStepCell`为例,其接口功能是使网络进行单步训练,需要计算网络反向,因此初始化方法里需要使用`set_grad`。
+
+`TrainOneStepCell`部分代码如下:
+```
+class TrainOneStepCell(Cell):
+    def __init__(self, network, optimizer, sens=1.0):
+        super(TrainOneStepCell, self).__init__(auto_prefix=False)
+        self.network = network
+        self.network.set_grad()
+        ......
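+        # 补充注释:set_grad()在此将requires_grad置为True,
+        # 框架据此为self.network构建反向图,供后续梯度计算使用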
+```
+
+如果用户使用`TrainOneStepCell`等类似接口,则无需使用`set_grad`,内部已封装实现。
+
+若用户需要自定义此类训练功能的接口,需要在其内部调用,或者在外部设置`network.set_grad`。
+
+## nn模块与ops模块的关系
+
+MindSpore的nn模块是Python实现的模型组件,是对低阶API的封装,主要包括各种模型层、损失函数、优化器等。
+
+同时nn也提供了部分与`Primitive`算子同名的接口,主要作用是对`Primitive`算子进行进一步封装,为用户提供更友好的API。
+
+重新分析上文介绍`construct`方法的用例,此用例是MindSpore的`nn.Conv2d`源码简化内容,内部会调用`P.Conv2D`。`nn.Conv2d`卷积API增加输入参数校验功能并判断是否需要`bias`等,是一个高级封装的模型层。
+```
+import mindspore.nn as nn
+from mindspore.ops import operations as P
+from mindspore.common.parameter import Parameter
+from mindspore.common.initializer import initializer
+
+class Net(nn.Cell):
+    def __init__(self, in_channels=10, out_channels=20, kernel_size=3):
+        super(Net, self).__init__()
+        self.conv2d = P.Conv2D(out_channels, kernel_size)
+        self.bias_add = P.BiasAdd()
+        self.weight = Parameter(
+            initializer('normal', [out_channels, in_channels, kernel_size, kernel_size]),
+            name='conv.weight')
+        self.bias = Parameter(initializer('zeros', [out_channels]), name='conv.bias')
+
+    def construct(self, x):
+        output = self.conv2d(x, self.weight)
+        output = self.bias_add(output, self.bias)
+        return output
+```
+
## 模型层
-在讲述了Cell的使用方法后可知,MindSpore能够以Cell为基类构造网络结构。
+在讲述了`Cell`的使用方法后可知,MindSpore能够以`Cell`为基类构造网络结构。
-为了方便业界需求及用户使用方便,MindSpore框架内置了大量的模型层,用户可以通过接口直接调用。
+为了方便用户的使用,MindSpore框架内置了大量的模型层,用户可以通过接口直接调用。
-同样,用户也可以自定义模型,此内容在cell自定义构建中介绍。
+同样,用户也可以自定义模型,此内容在"构建自定义网络"中介绍。
### 内置模型层
-MindSpore框架在nn的layer层内置了丰富的接口,主要内容如下:
+MindSpore框架在`mindspore.nn`的layer层内置了丰富的接口,主要内容如下:
-- 激活层:
+- 激活层
 激活层内置了大量的激活函数,在定义网络结构中经常使用。激活函数为网络加入了非线性运算,使得网络能够拟合效果更好。
- 主要接口有Softmax,Relu,Elu,Tanh,Sigmoid等。
+ 主要接口有`Softmax`、`Relu`、`Elu`、`Tanh`、`Sigmoid`等。
-- 基础层:
+- 基础层
- 基础层实现了网络中一些常用的基础结构,例如全连接层,Onehot编码,Dropout,平铺层等都在此部分实现。
+ 基础层实现了网络中一些常用的基础结构,例如全连接层、Onehot编码、Dropout、平铺层等都在此部分实现。
- 主要接口有Dense,Flatten,Dropout,Norm,OneHot等。
+ 主要接口有`Dense`、`Flatten`、`Dropout`、`Norm`、`OneHot`等。
-- 容器层:
+- 容器层
- 容器层主要功能是实现一些存储多个cell的数据结构。
+ 容器层主要功能是实现一些存储多个Cell的数据结构。
- 主要接口有SequentialCell,CellList等。
+ 主要接口有`SequentialCell`、`CellList`等。
-- 卷积层:
+- 卷积层
- 卷积层提供了一些卷积计算的功能,如普通卷积,深度卷积和卷积转置等。
+ 卷积层提供了一些卷积计算的功能,如普通卷积、深度卷积和卷积转置等。
- 主要接口有Conv2d,Conv1d,Conv2dTranspose,DepthwiseConv2d,Conv1dTranspose等。
+ 主要接口有`Conv2d`、`Conv1d`、`Conv2dTranspose`、`Conv1dTranspose`等。
-- 池化层:
+- 池化层
 池化层提供了平均池化和最大池化等计算的功能。
- 主要接口有AvgPool2d,MaxPool2d,AvgPool1d。
+ 主要接口有`AvgPool2d`、`MaxPool2d`和`AvgPool1d`。
-- 嵌入层:
+- 嵌入层
 嵌入层提供word embedding的计算功能,将输入的单词映射为稠密向量。
- 主要接口有:Embedding,EmbeddingLookup,EmbeddingLookUpSplitMode等。
+ 主要接口有`Embedding`、`EmbeddingLookup`、`EmbeddingLookUpSplitMode`等。
-- 长短记忆循环层:
+- 长短记忆循环层
- 长短记忆循环层提供LSTM计算功能。其中LSTM内部会调用LSTMCell接口, LSTMCell是一个LSTM单元,
- 对一个LSTM层做运算,当涉及多LSTM网络层运算时,使用LSTM接口。
+ 长短记忆循环层提供LSTM计算功能。其中`LSTM`内部会调用`LSTMCell`接口,`LSTMCell`是一个LSTM单元,对一个LSTM层做运算,当涉及多LSTM网络层运算时,使用`LSTM`接口。
- 主要接口有:LSTM,LSTMCell。
+ 主要接口有`LSTM`和`LSTMCell`。
-- 标准化层:
+- 标准化层
 标准化层提供了一些标准化的方法,即通过线性变换等方式将数据转换成均值和标准差。
- 主要接口有:BatchNorm1d,BatchNorm2d,LayerNorm,GroupNorm,GlobalBatchNorm等。
+ 主要接口有`BatchNorm1d`、`BatchNorm2d`、`LayerNorm`、`GroupNorm`、`GlobalBatchNorm`等。
-- 数学计算层:
+- 数学计算层
 数据计算层提供一些算子拼接而成的计算功能,例如数据生成和一些数学计算等。
- 主要接口有ReduceLogSumExp,Range,LinSpace,LGamma等。
+ 主要接口有`ReduceLogSumExp`、`Range`、`LinSpace`、`LGamma`等。
-- 图片层:
+- 图片层
 图片计算层提供了一些矩阵计算相关的功能,将图片数据进行一些变换与计算。
- 主要接口有ImageGradients,SSIM,MSSSIM,PSNR,CentralCrop等。
+ 主要接口有`ImageGradients`、`SSIM`、`MSSSIM`、`PSNR`、`CentralCrop`等。
-- 量化层:
+- 量化层
 量化是指将数据从float的形式转换成一段数据范围的int类型,所以量化层提供了一些数据量化的方法和模型层结构封装。
-
- 主要接口有Conv2dBnAct,DenseBnAct,Conv2dBnFoldQuant,LeakyReLUQuant等。
+
+ 主要接口有`Conv2dBnAct`、`DenseBnAct`、`Conv2dBnFoldQuant`、`LeakyReLUQuant`等。
### 应用实例
-MindSpore的模型层在mindspore.nn下,使用方法如下所示:
+MindSpore的模型层在`mindspore.nn`下,使用方法如下所示: ``` +import mindspore.nn as nn + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -195,44 +280,37 @@ class Net(nn.Cell): return out ``` -依然是上述网络构造的用例,从这个用例中可以看出,程序调用了Conv2d,BatchNorm2d,ReLU,Flatten和Dense模型层的接口。 -在Net初始化方法里面被定义,然后在construct方法里面真正运行,这些模型层接口有序的连接,形成一个可执行的网络。 +依然是上述网络构造的用例,从这个用例中可以看出,程序调用了`Conv2d`、`BatchNorm2d`、`ReLU`、`Flatten`和`Dense`模型层的接口。 + +在`Net`初始化方法里被定义,然后在`construct`方法里真正运行,这些模型层接口有序的连接,形成一个可执行的网络。 ## 损失函数 -目前MindSpore主要支持的损失函数有L1Loss,MSELoss,SmoothL1Loss,SoftmaxCrossEntropyWithLogits,SoftmaxCrossEntropyExpand -和CosineEmbeddingLoss。 +目前MindSpore主要支持的损失函数有`L1Loss`、`MSELoss`、`SmoothL1Loss`、`SoftmaxCrossEntropyWithLogits`和`CosineEmbeddingLoss`。 -MindSpore的损失函数全部是Cell的子类实现,所以也支持用户自定义损失函数,其构造方法在cell自定义构建中进行介绍。 +MindSpore的损失函数全部是`Cell`的子类实现,所以也支持用户自定义损失函数,其构造方法在“构建自定义网络”中进行介绍。 ### 内置损失函数 -- L1Loss: +- L1Loss - 计算两个输入数据的绝对值误差,用于回归模型。reduction参数默认值为mean,返回loss平均值结果, -若reduction值为sum,返回loss累加结果,若reduction值为none,返回每个loss的结果。 + 计算两个输入数据的绝对值误差,用于回归模型。`reduction`参数默认值为mean,返回loss平均值结果,若`reduction`值为sum,返回loss累加结果,若`reduction`值为none,返回每个loss的结果。 -- MSELoss: +- MSELoss - 计算两个输入数据的平方误差,用于回归模型。reduction参数默认值为mean,返回loss平均值结果, -若reduction值为sum,返回loss累加结果,若reduction值为none,返回每个loss的结果。 + 计算两个输入数据的平方误差,用于回归模型。`reduction`参数同`L1Loss`。 -- SmoothL1Loss: +- SmoothL1Loss - SmoothL1Loss为平滑L1损失函数,用于回归模型,阈值sigma默认参数为1。 + `SmoothL1Loss`为平滑L1损失函数,用于回归模型,阈值`sigma`默认参数为1。 +` +- SoftmaxCrossEntropyWithLogits -- SoftmaxCrossEntropyWithLogits: - - 交叉熵损失函数,用于分类模型。当标签数据不是one-hot编码形式时,需要输入参数sparse为True。reduction参数 - 与L1Loss一致。 + 交叉熵损失函数,用于分类模型。当标签数据不是one-hot编码形式时,需要输入参数`sparse`为True。`reduction`参数默认值为none,其参数含义同`L1Loss`。 -- SoftmaxCrossEntropyExpand: - - 交叉熵扩展损失函数,用于分类模型。当标签数据不是one-hot编码形式时,需要输入参数sparse为True。 +- CosineEmbeddingLoss -- CosineEmbeddingLoss: - - CosineEmbeddingLoss用于衡量两个输入相似程度,用于分类模型。margin默认为0.0,reduction参数与L1Loss一致 + `CosineEmbeddingLoss`用于衡量两个输入相似程度,用于分类模型。`margin`默认为0.0,`reduction`参数同`L1Loss`。 ### 应用实例 @@ -249,28 +327,33 @@ target_data = Tensor(np.array([[0, 2, 5], [3, 1, 1]]).astype(np.float32)) print(loss(input_data, target_data)) ``` -此用例构造了两个Tensor数据,利用nn.L1Loss()接口定义了L1Loss,将input_data和target_data传入loss, -执行L1Loss的计算,结果为1.5。若loss = nn.L1Loss(reduction='sum'),则结果为9.0。 -若loss = nn.L1Loss(reduction='none'),结果为[[1. 0. 2.] [1. 2. 3.]] +输出结果: +``` +1.5 +``` + +此用例构造了两个Tensor数据,利用`nn.L1Loss`接口定义了loss,将`input_data`和`target_data`传入loss,执行L1Loss的计算,结果为1.5。若loss = nn.L1Loss(reduction='sum'),则结果为9.0。若loss = nn.L1Loss(reduction='none'),结果为[[1. 0. 2.] [1. 2. 
3.]]。 + +## 优化算法 +`mindspore.nn.optim`是MindSpore框架中实现各种优化算法的模块,详细说明参见[优化算法](https://www.mindspore.cn/api/zh-CN/master/programming_guide/optim.html)。 -## Cell构造自定义网络 +## 构建自定义网络 -无论是网络结构,还是前文提到的模型层,损失函数和优化器等,本质上都是一个Cell,因此都可以自定义实现。 +无论是网络结构,还是前文提到的模型层、损失函数和优化器等,本质上都是一个`Cell`,因此都可以自定义实现。 -首先构造一个继承cell的子类,然后在__init__方法里面定义算子和模型层等,然后在construct方法里面构造网络结构。 +首先构造一个继承`Cell`的子类,然后在`__init__`方法里面定义算子和模型层等,在`construct`方法里面构造网络结构。 -以lenet5网络为例,在__init__方法中定义了卷积层,池化层和全连接层等结构单元,然后在construct方法将定义的内容连接在一起, -形成一个完整lenet5的网络结构。 +以LeNet网络为例,在`__init__`方法中定义了卷积层,池化层和全连接层等结构单元,然后在`construct`方法将定义的内容连接在一起,形成一个完整LeNet的网络结构。 -lenet5网络实现方式如下所示: +LeNet网络实现方式如下所示: ``` import mindspore.nn as nn class LeNet5(nn.Cell): def __init__(self): super(LeNet5, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid") + self.conv1 = nn.Conv2d(1, 6, 5, pad_mode="valid") self.conv2 = nn.Conv2d(6, 16, 5, pad_mode="valid") self.fc1 = nn.Dense(16 * 5 * 5, 120) self.fc2 = nn.Dense(120, 84) diff --git a/api/source_zh_cn/programming_guide/compute_component.rst b/api/source_zh_cn/programming_guide/compute_component.rst new file mode 100644 index 0000000000000000000000000000000000000000..58d676eacf136ff3e6af3cdd0e85053ee616db62 --- /dev/null +++ b/api/source_zh_cn/programming_guide/compute_component.rst @@ -0,0 +1,10 @@ +计算组件 +=========== + +.. toctree:: + :maxdepth: 1 + + operator + parameter + cell + network_component \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/context.md b/api/source_zh_cn/programming_guide/context.md new file mode 100644 index 0000000000000000000000000000000000000000..c36eec545d07ce9b0f3d962fa496d623c535e5a3 --- /dev/null +++ b/api/source_zh_cn/programming_guide/context.md @@ -0,0 +1,137 @@ +# 运行管理 + + + +- [运行管理](#运行管理) + - [概述](#概述) + - [执行模式管理](#执行模式管理) + - [模式选择](#模式选择) + - [模式切换](#模式切换) + - [硬件管理](#硬件管理) + - [分布式管理](#分布式管理) + - [维测管理](#维测管理) + - [采集profiling数据](#采集profiling数据) + - [异步数据dump功能](#异步数据dump功能) + - [print算子落盘](#print算子落盘) + + + + + +## 概述 +初始化网络之前要配置context参数,用于控制程序执行的策略。比如选择执行模式、选择执行后端、配置分布式相关参数等。按照context参数设置实现的不同功能,可以将其分为执行模式管理、硬件管理、分布式管理和维测管理等。 + +## 执行模式管理 +MindSpore支持PyNative和Graph这两种运行模式: + +- `PYNATIVE_MODE`:动态图模式,将神经网络中的各个算子逐一下发执行,方便用户编写和调试神经网络模型。 + +- `GRAPH_MODE`:静态图模式或者图模式,将神经网络模型编译成一整张图,然后下发执行。该模式利用图优化等技术提高运行性能,同时有助于规模部署和跨平台运行。 + +### 模式选择 +通过设置可以控制程序运行的模式,默认情况下,MindSpore处于PyNative模式。 + +代码样例如下: +```python +from mindspore import context +context.set_context(mode=context.GRAPH_MODE) +``` + +### 模式切换 +实现两种模式之间的切换。 + +MindSpore处于PYNATIVE模式时,可以通过`context.set_context(mode=context.GRAPH_MODE)`切换为Graph模式;同样地,MindSpore处于Graph模式时,可以通过 `context.set_context(mode=context.PYNATIVE_MODE)`切换为PyNative模式。 + +代码样例如下: +```python +import numpy as np +import mindspore.nn as nn +from mindspore import context, Tensor + +context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + +conv = nn.Conv2d(3, 4, 3, bias_init='zeros') +input_data = Tensor(np.ones([1, 3, 5, 5]).astype(np.float32)) +conv(input_data) +context.set_context(mode=context.PYNATIVE_MODE) +conv(input_data) +``` + +上面的例子先将运行模式设置为`GRAPH_MODE`模式,然后将模式切换为`PYNATIVE_MODE`模式,实现了模式的切换。 + +## 硬件管理 +硬件管理部分主要包括`device_target`和`device_id`两个参数。 + +- `device_target`: 用于设置目标设备,支持Ascend、GPU和CPU,可以根据实际环境情况设置。 + +- `device_id`: 表示卡物理序号,即卡所在机器中的实际序号。如果目标设备为Ascend,且规格为N*Ascend(其中N>1,如8*Ascend),在非分布式模式执行的情况下,为了避免设备的使用冲突,可以通过设置`device_id`决定程序执行的device编号,该编号范围为:0 ~ 服务器总设备数量-1,服务器总设备数量不能超过4096,默认为设备0。 + +> 在GPU和CPU上,设置`device_id`参数无效。 + +代码样例如下: +```python +from mindspore import context 
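+# 补充注释:device_id=6表示使用本机上物理序号为6的Ascend卡,
+# 非分布式执行时可借此避免多个程序抢占同一设备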
+context.set_context(device_target="Ascend", device_id=6) +``` + +## 分布式管理 +context中有专门用于配置并行训练参数的接口:context.set_auto_parallel_context,该接口必须在初始化网络之前调用。 + +- `parallel_mode`:分布式并行模式,默认为单机模式`ParallelMode.STAND_ALONE`。可选数据并行`ParallelMode.DATA_PARALLEL`及自动并行`ParallelMode.AUTO_PARALLEL`。 + +- `gradients_mean`:反向计算时,框架内部会将数据并行参数分散在多台机器的梯度值进行收集,得到全局梯度值后再传入优化器中更新。默认值为`False`,设置为True对应`allreduce_mean`操作,False对应`allreduce_sum`操作。 + +- `enable_parallel_optimizer`:开发中特性。打开优化器模型并行开关,通过拆分权重到各卡分别进行更新再同步的方式以提升性能。该参数目前只在数据并行模式和参数量大于机器数时有效,支持`Lamb`和`Adam`优化器。 + +> `device_num`和`global_rank`建议采用默认值,框架内会调用HCCL接口获取。 + +代码样例如下: +```python +from mindspore import context +from mindspore.context import ParallelMode +context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL, gradients_mean=True) +``` + +> 分布式并行训练详细介绍可以查看[分布式并行训练](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/distributed_training_tutorials.html)。 + +## 维测管理 +为了方便维护和定位问题,context提供了大量维测相关的参数配置,如采集profiling数据、异步数据dump功能和print算子落盘等。 + +### 采集profiling数据 +系统支持在训练过程中采集profiling数据,然后通过profiling工具进行性能分析。当前支持采集的profiling数据包括: + +- `enable_profiling`:是否开启profiling功能。设置为True,表示开启profiling功能,从enable_options读取profiling的采集选项;设置为False,表示关闭profiling功能,仅采集training_trace。 + +- `enable_options`:profiling采集选项,取值如下,支持采集多项数据。training_trace:采集迭代轨迹数据,即训练任务及AI软件栈的软件信息,实现对训练任务的性能分析,重点关注数据增强、前后向计算、梯度聚合更新等相关数据;task_trace:采集任务轨迹数据,即昇腾910处理器HWTS/AICore的硬件信息,分析任务开始、结束等信息;op_trace:采集单算子性能数据。格式:['op_trace','task_trace','training_trace'] + +代码样例如下: +```python +from mindspore import context +context.set_context(enable_profiling=True, profiling_options="training_trace") +``` + +### 异步数据dump功能 +在Ascend环境上执行训练,当训练结果和预期有偏差时,可以通过异步数据dump功能保存算子的输入输出进行调试。 + +代码样例如下: +```python +from mindspore import context +context.set_context(save_graphs=True) +``` + +> 详细的调试方法可以查看[异步数据Dump功能介绍](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/customized_debugging_information.html#dump)。 + +### print算子落盘 +默认情况下,MindSpore的自研print算子可以将用户输入的Tensor或字符串信息打印出来,支持多字符串输入,多Tensor输入和字符串与Tensor的混合输入,输入参数以逗号隔开。 + +> Print打印功能可以查看[Print算子功能介绍](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/customized_debugging_information.html#print)。 + +- `print_file_path`:可以将print算子数据保存到文件,同时关闭屏幕打印功能。如果保存的文件已经存在,则会给文件添加时间戳后缀。数据保存到文件可以解决数据量较大时屏幕打印数据丢失的问题。 + +代码样例如下: +```python +from mindspore import context +context.set_context(print_file_path="print.pb") +``` + +> context接口详细介绍可以查看[mindspore.context](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.context.html)。 \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/data_pipeline.rst b/api/source_zh_cn/programming_guide/data_pipeline.rst new file mode 100644 index 0000000000000000000000000000000000000000..62f55a9e10f09862341f28c075c606735ad82081 --- /dev/null +++ b/api/source_zh_cn/programming_guide/data_pipeline.rst @@ -0,0 +1,13 @@ +数据管道 +=========== + +.. 
toctree:: + :maxdepth: 1 + + dataset_loading + sampler + pipeline + augmentation + tokenizer + dataset_conversion + auto_augmentation diff --git a/tutorials/source_zh_cn/advanced_use/auto_data_acceleration.rst b/api/source_zh_cn/programming_guide/data_type.rst similarity index 30% rename from tutorials/source_zh_cn/advanced_use/auto_data_acceleration.rst rename to api/source_zh_cn/programming_guide/data_type.rst index 003693d04b59d55bc6673bd1fe49c7550a296bac..ee35ad4275c43a2c79162e5661fcfb6934c6edf4 100644 --- a/tutorials/source_zh_cn/advanced_use/auto_data_acceleration.rst +++ b/api/source_zh_cn/programming_guide/data_type.rst @@ -1,8 +1,8 @@ -自动数据加速 -======== +数据类型 +=========== .. toctree:: :maxdepth: 1 - data_processing_acceleration - cache + dtype + tensor \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/dataset_conversion.md b/api/source_zh_cn/programming_guide/dataset_conversion.md index 59e68a061ff4a680539cd85a9b9973622b189ee5..3230b4c19669e0b7f9c9670ea55dec658d156d29 100644 --- a/api/source_zh_cn/programming_guide/dataset_conversion.md +++ b/api/source_zh_cn/programming_guide/dataset_conversion.md @@ -4,16 +4,14 @@ - [MindSpore数据格式转换](#mindspore数据格式转换) - [概述](#概述) - - [非标准数据集转换MindRecord](#非标准数据集转换mindrecord) - - [CV类数据集](#cv类数据集) - - [NLP类数据集](#nlp类数据集) - - [常用数据集转换MindRecord](#常用数据集转换mindrecord) - - [转换CIFAR-10数据集](#转换cifar-10数据集) - - [转换CIFAR-100数据集](#转换cifar-100数据集) - - [转换ImageNet数据集](#转换imagenet数据集) - - [转换MNIST数据集](#转换mnist数据集) - - [转换CSV数据集](#转换csv数据集) - - [转换TFRecord数据集](#转换tfrecord数据集) + - [非标准数据集转换MindRecord](#非标准数据集转换mindrecord) + - [转换CV类数据集](#转换cv类数据集) + - [转换NLP类数据集](#转换nlp类数据集) + - [常用数据集转换MindRecord](#常用数据集转换mindrecord) + - [转换CIFAR-10数据集](#转换cifar-10数据集) + - [转换ImageNet数据集](#转换imagenet数据集) + - [转换CSV数据集](#转换csv数据集) + - [转换TFRecord数据集](#转换tfrecord数据集) @@ -21,162 +19,136 @@ ## 概述 -用户可以将非标准的数据集和常见的经典数据集转换为MindSpore数据格式即MindRecord,从而方便地加载到MindSpore中进行训练。同时,MindSpore在部分场景做了性能优化,使用MindSpore数据格式可以获得更好的性能体验。 +用户可以将非标准的数据集和常用的数据集转换为MindSpore数据格式,即MindRecord,从而方便地加载到MindSpore中进行训练。同时,MindSpore在部分场景做了性能优化,使用MindRecord可以获得更好的性能。 ## 非标准数据集转换MindRecord -主要介绍如何将CV类数据和NLP类数据转换为MindRecord格式,并通过MindDataset实现MindRecoed格式文件的读取。 +下面主要介绍如何将CV类数据和NLP类数据转换为MindRecord,并通过`MindDataset`实现MindRecord文件的读取。 -### CV类数据集 +### 转换CV类数据集 - ```python - """ - 示例说明:本示例主要介绍用户如何将自己的CV类数据集转换成MindRecoed格式,并使用MindDataset读取。 - 详细步骤: - 1. 创建一个包含100条记录的MindRecord文件,其样本包含file_name(字符串), label(整形), data(二进制)三个字段; - 2. 
使用MindDataset读取MindRecord文件。 - """ +本示例主要介绍用户如何将自己的CV类数据集转换成MindRecord,并使用`MindDataset`读取。 - from io import BytesIO - import os - import mindspore.dataset as ds - from mindspore.mindrecord import FileWriter - import mindspore.dataset.transforms.vision.c_transforms as vision - from PIL import Image +本示例首先创建一个包含100条记录的MindRecord文件,其样本包含`file_name`(字符串)、 +`label`(整形)、 `data`(二进制)三个字段,然后使用`MindDataset`读取该MindRecord文件。 - ################################ 生成MindRecord文件 ################################ +```python +from io import BytesIO +import os +import mindspore.dataset as ds +from mindspore.mindrecord import FileWriter +import mindspore.dataset.vision.c_transforms as vision +from PIL import Image - mindrecord_filename = "test.mindrecord" +mindrecord_filename = "test.mindrecord" - # 如果存在MindRecord文件,则需要先删除 - if os.path.exists(mindrecord_filename): - os.remove(mindrecord_filename) - os.remove(mindrecord_filename + ".db") +if os.path.exists(mindrecord_filename): + os.remove(mindrecord_filename) + os.remove(mindrecord_filename + ".db") - # 创建写对象,将会生成 mindrecord_filename 和 mindrecord_filename.db 两个文件 - writer = FileWriter(file_name=mindrecord_filename, shard_num=1) +writer = FileWriter(file_name=mindrecord_filename, shard_num=1) - # 定义数据集Schema - cv_schema = {"file_name": {"type": "string"}, "label": {"type": "int32"}, "data": {"type": "bytes"}} - writer.add_schema(cv_schema, "it is a cv dataset") +cv_schema = {"file_name": {"type": "string"}, "label": {"type": "int32"}, "data": {"type": "bytes"}} +writer.add_schema(cv_schema, "it is a cv dataset") - # [可选]定义索引字段,只能是标量字段 - writer.add_index(["file_name", "label"]) +writer.add_index(["file_name", "label"]) - # 按Schema方式组织训练数据,并将其写入MindRecord文件 - # 此处使用Image.new(...)模拟图片数据,真实场景可以使用io接口读取磁盘上的图像数据 - data = [] - for i in range(100): # 模拟数据集有100个样本 - i += 1 +data = [] +for i in range(100): + i += 1 - sample = {} - white_io = BytesIO() - Image.new('RGB', (i*10, i*10), (255, 255, 255)).save(white_io, 'JPEG') # 图片大小可以不同 - image_bytes = white_io.getvalue() - sample['file_name'] = str(i) + ".jpg" # 对应file_name字段 - sample['label'] = i # 对应label字段 - sample['data'] = white_io.getvalue() # 对应data字段 + sample = {} + white_io = BytesIO() + Image.new('RGB', (i*10, i*10), (255, 255, 255)).save(white_io, 'JPEG') + image_bytes = white_io.getvalue() + sample['file_name'] = str(i) + ".jpg" + sample['label'] = i + sample['data'] = white_io.getvalue() - data.append(sample) - if i % 10 == 0: # 每10条样本做一次写操作 - writer.write_raw_data(data) - data = [] + data.append(sample) + if i % 10 == 0: + writer.write_raw_data(data) + data = [] - if data: # 写入可能剩余的数据 - writer.write_raw_data(data) +if data: + writer.write_raw_data(data) + +writer.commit() + +data_set = ds.MindDataset(dataset_file=mindrecord_filename) +decode_op = vision.Decode() +data_set = data_set.map(operations=decode_op, input_columns=["data"], num_parallel_workers=2) +count = 0 +for item in data_set.create_dict_iterator(output_numpy=True): + print("sample: {}".format(item)) + count += 1 +print("Got {} samples".format(count)) +``` + +### 转换NLP类数据集 + +本示例主要介绍用户如何将自己的NLP类数据集转换成MindRecord,并使用`MindDataset`读取。为了方便展示,此处略去了将文本转换成字典序的预处理过程。 + +本示例首先创建一个包含100条记录的MindRecord文件,其样本包含八个字段,均为整形数组,然后使用`MindDataset`读取该MindRecord文件。 + +```python +import os +import numpy as np +import mindspore.dataset as ds +from mindspore.mindrecord import FileWriter + +mindrecord_filename = "test.mindrecord" + +if os.path.exists(mindrecord_filename): + os.remove(mindrecord_filename) + os.remove(mindrecord_filename + ".db") + +writer = 
FileWriter(file_name=mindrecord_filename, shard_num=1) - writer.commit() # 关闭写入操作 - - ################################ 读取MindRecord文件 ################################ - - data_set = ds.MindDataset(dataset_file=mindrecord_filename) # 创建读取对象,默认开启shuffle - decode_op = vision.Decode() - data_set = data_set.map(input_columns=["data"], operations=decode_op, num_parallel_workers=2) # 解码data字段 - count = 0 - for item in data_set.create_dict_iterator(): # 循环读取MindRecord中所有数据 - print("sample: {}".format(item)) - count += 1 - print("Got {} samples".format(count)) - ``` - -### NLP类数据集 - -> 因为NLP类数据一般会经过预处理转换为字典序,此预处理过程不在本示例范围,该示例只演示转换后的字典序数据如何写入MindRecord。 - - ```python - """ - 示例说明:本示例主要介绍用户如何将自己的NLP类数据集转换成MindRecoed格式,并使用MindDataset读取。 - 详细步骤: - 1. 创建一个包含100条记录的MindRecord文件,其样本包含八个字段,均为整形数组; - 2. 使用MindDataset读取MindRecord文件。 - """ - - import os - import numpy as np - import mindspore.dataset as ds - from mindspore.mindrecord import FileWriter - - ################################ 生成MindRecord文件 ################################ - - mindrecord_filename = "test.mindrecord" - - # 如果存在MindRecord文件,则需要先删除 - if os.path.exists(mindrecord_filename): - os.remove(mindrecord_filename) - os.remove(mindrecord_filename + ".db") - - # 创建写对象,将会生成 mindrecord_filename 和 mindrecord_filename.db 两个文件 - writer = FileWriter(file_name=mindrecord_filename, shard_num=1) - - # 定义数据集Schema,此处认为文本已经转为字典序 - nlp_schema = {"source_sos_ids": {"type": "int64", "shape": [-1]}, - "source_sos_mask": {"type": "int64", "shape": [-1]}, - "source_eos_ids": {"type": "int64", "shape": [-1]}, - "source_eos_mask": {"type": "int64", "shape": [-1]}, - "target_sos_ids": {"type": "int64", "shape": [-1]}, - "target_sos_mask": {"type": "int64", "shape": [-1]}, - "target_eos_ids": {"type": "int64", "shape": [-1]}, - "target_eos_mask": {"type": "int64", "shape": [-1]}} - writer.add_schema(nlp_schema, "it is a preprocessed nlp dataset") - - # 按Schema方式组织训练数据,并将其写入MindRecord文件 - data = [] - for i in range(100): # 模拟数据集有100个样本 - i += 1 - - # 组织训练数据 - sample = {"source_sos_ids": np.array([i, i+1, i+2, i+3, i+4], dtype=np.int64), - "source_sos_mask": np.array([i*1, i*2, i*3, i*4, i*5, i*6, i*7], dtype=np.int64), - "source_eos_ids": np.array([i+5, i+6, i+7, i+8, i+9, i+10], dtype=np.int64), - "source_eos_mask": np.array([19, 20, 21, 22, 23, 24, 25, 26, 27], dtype=np.int64), - "target_sos_ids": np.array([28, 29, 30, 31, 32], dtype=np.int64), - "target_sos_mask": np.array([33, 34, 35, 36, 37, 38], dtype=np.int64), - "target_eos_ids": np.array([39, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64), - "target_eos_mask": np.array([48, 49, 50, 51], dtype=np.int64)} - - data.append(sample) - if i % 10 == 0: # 每10条样本做一次写操作 - writer.write_raw_data(data) - data = [] - - if data: # 写入可能剩余的数据 +nlp_schema = {"source_sos_ids": {"type": "int64", "shape": [-1]}, + "source_sos_mask": {"type": "int64", "shape": [-1]}, + "source_eos_ids": {"type": "int64", "shape": [-1]}, + "source_eos_mask": {"type": "int64", "shape": [-1]}, + "target_sos_ids": {"type": "int64", "shape": [-1]}, + "target_sos_mask": {"type": "int64", "shape": [-1]}, + "target_eos_ids": {"type": "int64", "shape": [-1]}, + "target_eos_mask": {"type": "int64", "shape": [-1]}} +writer.add_schema(nlp_schema, "it is a preprocessed nlp dataset") + +data = [] +for i in range(100): + i += 1 + + sample = {"source_sos_ids": np.array([i, i+1, i+2, i+3, i+4], dtype=np.int64), + "source_sos_mask": np.array([i*1, i*2, i*3, i*4, i*5, i*6, i*7], dtype=np.int64), + "source_eos_ids": np.array([i+5, i+6, i+7, i+8, i+9, i+10], 
dtype=np.int64), + "source_eos_mask": np.array([19, 20, 21, 22, 23, 24, 25, 26, 27], dtype=np.int64), + "target_sos_ids": np.array([28, 29, 30, 31, 32], dtype=np.int64), + "target_sos_mask": np.array([33, 34, 35, 36, 37, 38], dtype=np.int64), + "target_eos_ids": np.array([39, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64), + "target_eos_mask": np.array([48, 49, 50, 51], dtype=np.int64)} + + data.append(sample) + if i % 10 == 0: writer.write_raw_data(data) + data = [] - writer.commit() # 关闭写入操作 +if data: + writer.write_raw_data(data) - ################################ 读取MindRecord文件 ################################ +writer.commit() - data_set = ds.MindDataset(dataset_file=mindrecord_filename) # 创建读取对象,默认开启shuffle - count = 0 - for item in data_set.create_dict_iterator(): # 循环读取MindRecord中所有数据 - print("sample: {}".format(item)) - count += 1 - print("Got {} samples".format(count)) - ``` +data_set = ds.MindDataset(dataset_file=mindrecord_filename) +count = 0 +for item in data_set.create_dict_iterator(): + print("sample: {}".format(item)) + count += 1 +print("Got {} samples".format(count)) +``` ## 常用数据集转换MindRecord -MindSpore提供转换常见数据集的工具类,能够将常见的经典数据集转换为MindRecord格式。常见数据集及其对应的工具类列表如下。 +MindSpore提供转换常用数据集的工具类,能够将常用的数据集转换为MindRecord。常用数据集及其对应的工具类列表如下。 | 数据集 | 格式转换工具类 | | -------- | ------------ | @@ -191,9 +163,9 @@ MindSpore提供转换常见数据集的工具类,能够将常见的经典数 ### 转换CIFAR-10数据集 -用户可以通过`Cifar10ToMR`类,将CIFAR-10原始数据转换为MindRecord格式。 +用户可以通过`Cifar10ToMR`类,将CIFAR-10原始数据转换为MindRecord,并使用`MindDataset`读取。 -1. 下载[CIFAR-10数据集](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz)并解压,目录结构如下所示。 +1. 下载[CIFAR-10数据集](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz)并解压,其目录结构如下所示。 ``` └─cifar-10-batches-py @@ -213,10 +185,10 @@ MindSpore提供转换常见数据集的工具类,能够将常见的经典数 from mindspore.mindrecord import Cifar10ToMR ``` -3. 创建`Cifar10ToMR`对象,调用`transform`接口,将CIFAR-10数据集转换为MindRecord格式。 +3. 创建`Cifar10ToMR`对象,调用`transform`接口,将CIFAR-10数据集转换为MindRecord。 ```python - CIFAR10_DIR = "./cifar10/cifar-10-batches-py" + CIFAR10_DIR = "./cifar-10-batches-py" MINDRECORD_FILE = "./cifar10.mindrecord" cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, MINDRECORD_FILE) cifar10_transformer.transform(['label']) @@ -224,52 +196,30 @@ MindSpore提供转换常见数据集的工具类,能够将常见的经典数 **参数说明:** - `CIFAR10_DIR`:CIFAR-10数据集的文件夹路径。 - - `MINDRECORD_FILE`:输出的MindSpore数据格式文件路径。 - -### 转换CIFAR-100数据集 + - `MINDRECORD_FILE`:输出的MindRecord文件路径。 -用户可以通过`Cifar100ToMR`类,将CIFAR-100原始数据转换为MindRecord格式。 - -1. 准备好CIFAR-100数据集,将文件解压至指定的目录(示例中将数据集保存到`cifar100`目录),如下所示。 - - ``` - % ll cifar100/cifar-100-python/ - meta - test - train - ``` - > CIFAR-100数据集下载地址: - -2. 导入转换数据集的工具类`Cifar100ToMR`。 +4. 通过`MindDataset`读取MindRecord。 ```python - from mindspore.mindrecord import Cifar100ToMR + import mindspore.dataset as ds + import mindspore.dataset.vision.c_transforms as vision + + data_set = ds.MindDataset(dataset_file=MINDRECORD_FILE) + decode_op = vision.Decode() + data_set = data_set.map(operations=decode_op, input_columns=["data"], num_parallel_workers=2) + count = 0 + for item in data_set.create_dict_iterator(output_numpy=True): + print("sample: {}".format(item)) + count += 1 + print("Got {} samples".format(count)) ``` -3. 
实例化`Cifar100ToMR`对象,调用`transform`接口,将CIFAR-100数据集转换为MindSpore数据格式。 - - ```python - CIFAR100_DIR = "./cifar100/cifar-100-python" - MINDRECORD_FILE = "./cifar100.mindrecord" - cifar100_transformer = Cifar100ToMR(CIFAR100_DIR, MINDRECORD_FILE) - cifar100_transformer.transform(['fine_label', 'coarse_label']) - ``` - - **参数说明:** - - `CIFAR100_DIR`:CIFAR-100数据集的文件夹路径。 - - `MINDRECORD_FILE`:输出的MindSpore数据格式文件路径。 - ### 转换ImageNet数据集 -用户可以通过`ImageNetToMR`类,将ImageNet原始数据(图片、标注)转换为MindSpore数据格式。 - -1. 下载并按照要求准备好ImageNet数据集。 +用户可以通过`ImageNetToMR`类,将ImageNet原始数据(图片、标注)转换为MindRecord,并使用`MindDataset`读取。 - > ImageNet数据集下载地址: +1. 下载[ImageNet数据集](http://image-net.org/download),将所有图片存放在同一文件夹,用一个映射文件记录图片和标签的对应关系。映射文件包含2列,分别为各类别图片目录和标签ID,用空格隔开,映射文件示例如下: - 对下载后的ImageNet数据集,整理数据集组织形式为一个包含所有图片的文件夹,以及一个记录图片对应标签的映射文件。 - - 标签映射文件包含2列,分别为各类别图片目录、标签ID,用空格隔开,映射文件示例如下: ``` n01440760 0 n01443537 1 @@ -279,243 +229,188 @@ MindSpore提供转换常见数据集的工具类,能够将常见的经典数 n01496331 5 ``` -2. 导入转换数据集的工具类`ImageNetToMR`。 +2. 导入数据集转换工具类`ImageNetToMR`。 ```python from mindspore.mindrecord import ImageNetToMR ``` -3. 实例化`ImageNetToMR`对象,调用`transform`接口,将数据集转换为MindSpore数据格式。 +3. 创建`ImageNetToMR`对象,调用`transform`接口,将数据集转换为MindRecord。 + ```python IMAGENET_MAP_FILE = "./testImageNetDataWhole/labels_map.txt" IMAGENET_IMAGE_DIR = "./testImageNetDataWhole/images" MINDRECORD_FILE = "./testImageNetDataWhole/imagenet.mindrecord" - PARTITION_NUMBER = 4 + PARTITION_NUMBER = 8 imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE, IMAGENET_IMAGE_DIR, MINDRECORD_FILE, PARTITION_NUMBER) imagenet_transformer.transform() ``` - 其中, - `IMAGENET_MAP_FILE`:ImageNetToMR数据集的标签映射文件路径。 - `IMAGENET_IMAGE_DIR`:包含ImageNet所有图片的文件夹路径。 - `MINDRECORD_FILE`:输出的MindSpore数据格式文件路径。 -### 转换MNIST数据集 + **参数说明:** + - `IMAGENET_MAP_FILE`:ImageNet数据集标签映射文件的路径。 + - `IMAGENET_IMAGE_DIR`:包含ImageNet所有图片的文件夹路径。 + - `MINDRECORD_FILE`:输出的MindRecord文件路径。 -用户可以通过`MnistToMR`类,将MNIST原始数据转换为MindSpore数据格式。 +4. 通过`MindDataset`读取MindRecord。 -1. 准备MNIST数据集,将下载好的文件放至指定的目录,如下所示: - - ``` - % ll mnist_data/ - train-images-idx3-ubyte.gz - train-labels-idx1-ubyte.gz - t10k-images-idx3-ubyte.gz - t10k-labels-idx1-ubyte.gz + ```python + import mindspore.dataset as ds + import mindspore.dataset.vision.c_transforms as vision + + data_set = ds.MindDataset(dataset_file=MINDRECORD_FILE + "0") + decode_op = vision.Decode() + data_set = data_set.map(operations=decode_op, input_columns=["data"], num_parallel_workers=2) + count = 0 + for item in data_set.create_dict_iterator(output_numpy=True): + print("sample: {}".format(item)) + count += 1 + print("Got {} samples".format(count)) ``` - > MNIST数据集下载地址: +### 转换CSV数据集 -2. 导入转换数据集的工具类`MnistToMR`。 +本示例首先创建一个包含5条记录的CSV文件,然后通过`CsvToMR`工具类将CSV文件转换为MindRecord,并最终通过`MindDataset`将其读取出来。 - ```python - from mindspore.mindrecord import MnistToMR - ``` +```python +import csv +import os +import mindspore.dataset as ds +from mindspore.mindrecord import CsvToMR -3. 
实例化`MnistToMR`对象,调用`transform`接口,将MNIST数据集转换为MindSpore数据格式。 +CSV_FILE_NAME = "test.csv" +MINDRECORD_FILE_NAME = "test.mindrecord" +PARTITION_NUM = 1 - ```python - MNIST_DIR = "./mnist_data" - MINDRECORD_FILE = "./mnist.mindrecord" - mnist_transformer = MnistToMR(MNIST_DIR, MINDRECORD_FILE) - mnist_transformer.transform() - ``` +def generate_csv(): + headers = ["id", "name", "math", "english"] + rows = [(1, "Lily", 78.5, 90), + (2, "Lucy", 99, 85.2), + (3, "Mike", 65, 71), + (4, "Tom", 95, 99), + (5, "Jeff", 85, 78.5)] + with open(CSV_FILE_NAME, 'w', encoding='utf-8') as f: + writer = csv.writer(f) + writer.writerow(headers) + writer.writerows(rows) - ***参数说明:*** - - `MNIST_DIR`:MNIST数据集的文件夹路径。 - - `MINDRECORD_FILE`:输出的MindSpore数据格式文件路径。 +generate_csv() +if os.path.exists(MINDRECORD_FILE_NAME): + os.remove(MINDRECORD_FILE_NAME) + os.remove(MINDRECORD_FILE_NAME + ".db") -### 转换CSV数据集 +csv_transformer = CsvToMR(CSV_FILE_NAME, MINDRECORD_FILE_NAME, partition_number=PARTITION_NUM) + +csv_transformer.transform() + +assert os.path.exists(MINDRECORD_FILE_NAME) +assert os.path.exists(MINDRECORD_FILE_NAME + ".db") - ```python - """ - 示例说明:本示例首先创建一个CSV文件,然后通过MindSpore中CsvToMR工具, - 将Csv文件转换为MindRecord文件,并最终通过MindDataset将其读取出来。 - 详细步骤: - 1. 创建一个包含5条记录的CSV文件; - 2. 使用CsvToMR工具将CSV转换为MindRecord; - 3. 使用MindDataset读取MindRecord文件。 - """ - - import csv - import os - import mindspore.dataset as ds - from mindspore.mindrecord import CsvToMR - - CSV_FILE_NAME = "test.csv" # 创建的CSV文件 - MINDRECORD_FILE_NAME = "test.mindrecord" # 转换后的MindRecord文件 - PARTITION_NUM = 1 - - ################################ 创建CSV文件 ################################ - - # 生成CSV文件 - def generate_csv(): - headers = ["id", "name", "math", "english"] - rows = [(1, "Lily", 78.5, 90), - (2, "Lucy", 99, 85.2), - (3, "Mike", 65, 71), - (4, "Tom", 95, 99), - (5, "Jeff", 85, 78.5)] - with open(CSV_FILE_NAME, 'w', encoding='utf-8') as f: - writer = csv.writer(f) - writer.writerow(headers) - writer.writerows(rows) - - generate_csv() - - if os.path.exists(MINDRECORD_FILE_NAME): - os.remove(MINDRECORD_FILE_NAME) - os.remove(MINDRECORD_FILE_NAME + ".db") - - ################################ CSV 转 MindRecord文件 ################################ - - # 调用CsvToMR工具,初始化 - csv_transformer = CsvToMR(CSV_FILE_NAME, MINDRECORD_FILE_NAME, partition_number=PARTITION_NUM) - # 执行转换操作 - csv_transformer.transform() - - assert os.path.exists(MINDRECORD_FILE_NAME) - assert os.path.exists(MINDRECORD_FILE_NAME + ".db") - - ############################### 读取MindRecord文件 ################################ - - data_set = ds.MindDataset(dataset_file=MINDRECORD_FILE_NAME) # 创建读取对象,默认开启shuffle - count = 0 - for item in data_set.create_dict_iterator(): # 循环读取MindRecord中所有数据 - print("sample: {}".format(item)) - count += 1 - print("Got {} samples".format(count)) - ``` +data_set = ds.MindDataset(dataset_file=MINDRECORD_FILE_NAME) +count = 0 +for item in data_set.create_dict_iterator(output_numpy=True): + print("sample: {}".format(item)) + count += 1 +print("Got {} samples".format(count)) +``` ### 转换TFRecord数据集 - ```python - """ - 示例说明:本示例通过TF创建一个TFRecord文件,然后通过MindSpore中TFRecordToMR工具, - 将TFRecord文件转换为MindRecord文件,并最终通过MindDataset将其读取出来。 - 详细步骤: - 1. 
创建一个包含10条记录,且样本格式为: - feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string), - "image_bytes": tf.io.FixedLenFeature([], tf.string), - "int64_scalar": tf.io.FixedLenFeature([], tf.int64), - "float_scalar": tf.io.FixedLenFeature([], tf.float32), - "int64_list": tf.io.FixedLenFeature([6], tf.int64), - "float_list": tf.io.FixedLenFeature([7], tf.float32)} - 的TFRecord文件; - 2. 使用TFRecordToMR工具将TFRecord转换为MindRecord; - 3. 使用MindDataset读取MindRecord文件,并通过Decode算子对其image_bytes字段进行解码。 - """ - - import collections - from io import BytesIO - import os - import mindspore.dataset as ds - from mindspore.mindrecord import TFRecordToMR - import mindspore.dataset.transforms.vision.c_transforms as vision - from PIL import Image - import tensorflow as tf # 需要tensorflow >= 2.1.0 - - TFRECORD_FILE_NAME = "test.tfrecord" # 创建的TFRecord文件 - MINDRECORD_FILE_NAME = "test.mindrecord" # 转换后的MindRecord文件 - PARTITION_NUM = 1 - - ################################ 创建TFRecord文件 ################################ - - # 生成TFRecord文件 - def generate_tfrecord(): - def create_int_feature(values): - if isinstance(values, list): - feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) # values: [int, int, int] - else: - feature = tf.train.Feature(int64_list=tf.train.Int64List(value=[values])) # values: int - return feature - - def create_float_feature(values): - if isinstance(values, list): - feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) # values: [float, float] - else: - feature = tf.train.Feature(float_list=tf.train.FloatList(value=[values])) # values: float - return feature - - def create_bytes_feature(values): - if isinstance(values, bytes): - white_io = BytesIO() - Image.new('RGB', (10, 10), (255, 255, 255)).save(white_io, 'JPEG') # 图片大小可以不同 - image_bytes = white_io.getvalue() - feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])) # values: bytes - else: - # values: string - feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(values, encoding='utf-8')])) - return feature - - writer = tf.io.TFRecordWriter(TFRECORD_FILE_NAME) - - example_count = 0 - for i in range(10): - file_name = "000" + str(i) + ".jpg" - image_bytes = bytes(str("aaaabbbbcccc" + str(i)), encoding="utf-8") - int64_scalar = i - float_scalar = float(i) - int64_list = [i, i+1, i+2, i+3, i+4, i+1234567890] - float_list = [float(i), float(i+1), float(i+2.8), float(i+3.2), - float(i+4.4), float(i+123456.9), float(i+98765432.1)] - - features = collections.OrderedDict() - features["file_name"] = create_bytes_feature(file_name) - features["image_bytes"] = create_bytes_feature(image_bytes) - features["int64_scalar"] = create_int_feature(int64_scalar) - features["float_scalar"] = create_float_feature(float_scalar) - features["int64_list"] = create_int_feature(int64_list) - features["float_list"] = create_float_feature(float_list) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - writer.write(tf_example.SerializeToString()) - example_count += 1 - writer.close() - print("Write {} rows in tfrecord.".format(example_count)) - - generate_tfrecord() - - ################################ TFRecord 转 MindRecord文件 ################################ - - feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string), - "image_bytes": tf.io.FixedLenFeature([], tf.string), - "int64_scalar": tf.io.FixedLenFeature([], tf.int64), - "float_scalar": tf.io.FixedLenFeature([], tf.float32), - "int64_list": tf.io.FixedLenFeature([6], tf.int64), - 
"float_list": tf.io.FixedLenFeature([7], tf.float32), - } - - if os.path.exists(MINDRECORD_FILE_NAME): - os.remove(MINDRECORD_FILE_NAME) - os.remove(MINDRECORD_FILE_NAME + ".db") - - # 调用TFRecordToMR工具,初始化 - tfrecord_transformer = TFRecordToMR(TFRECORD_FILE_NAME, MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"]) - # 执行转换操作 - tfrecord_transformer.transform() - - assert os.path.exists(MINDRECORD_FILE_NAME) - assert os.path.exists(MINDRECORD_FILE_NAME + ".db") - - ############################### 读取MindRecord文件 ################################ - - data_set = ds.MindDataset(dataset_file=MINDRECORD_FILE_NAME) # 创建读取对象,默认开启shuffle - decode_op = vision.Decode() - data_set = data_set.map(input_columns=["image_bytes"], operations=decode_op, num_parallel_workers=2) # 解码图像字段 - count = 0 - for item in data_set.create_dict_iterator(): # 循环读取MindRecord中所有数据 - print("sample: {}".format(item)) - count += 1 - print("Got {} samples".format(count)) - ``` +> 目前只支持TensorFlow 2.1.0及以上版本。 + +本示例首先通过TensorFlow创建一个TFRecord文件,然后通过`TFRecordToMR`工具类将TFRecord文件转换为MindRecord,最后通过`MindDataset`将其读取出来,并使用`Decode`算子对`image_bytes`字段进行解码。 + +```python +import collections +from io import BytesIO +import os +import mindspore.dataset as ds +from mindspore.mindrecord import TFRecordToMR +import mindspore.dataset.vision.c_transforms as vision +from PIL import Image +import tensorflow as tf + +TFRECORD_FILE_NAME = "test.tfrecord" +MINDRECORD_FILE_NAME = "test.mindrecord" +PARTITION_NUM = 1 + +def generate_tfrecord(): + def create_int_feature(values): + if isinstance(values, list): + feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + else: + feature = tf.train.Feature(int64_list=tf.train.Int64List(value=[values])) + return feature + + def create_float_feature(values): + if isinstance(values, list): + feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + else: + feature = tf.train.Feature(float_list=tf.train.FloatList(value=[values])) + return feature + + def create_bytes_feature(values): + if isinstance(values, bytes): + white_io = BytesIO() + Image.new('RGB', (10, 10), (255, 255, 255)).save(white_io, 'JPEG') + image_bytes = white_io.getvalue() + feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])) + else: + feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(values, encoding='utf-8')])) + return feature + + writer = tf.io.TFRecordWriter(TFRECORD_FILE_NAME) + + example_count = 0 + for i in range(10): + file_name = "000" + str(i) + ".jpg" + image_bytes = bytes(str("aaaabbbbcccc" + str(i)), encoding="utf-8") + int64_scalar = i + float_scalar = float(i) + int64_list = [i, i+1, i+2, i+3, i+4, i+1234567890] + float_list = [float(i), float(i+1), float(i+2.8), float(i+3.2), + float(i+4.4), float(i+123456.9), float(i+98765432.1)] + + features = collections.OrderedDict() + features["file_name"] = create_bytes_feature(file_name) + features["image_bytes"] = create_bytes_feature(image_bytes) + features["int64_scalar"] = create_int_feature(int64_scalar) + features["float_scalar"] = create_float_feature(float_scalar) + features["int64_list"] = create_int_feature(int64_list) + features["float_list"] = create_float_feature(float_list) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + writer.write(tf_example.SerializeToString()) + example_count += 1 + writer.close() + print("Write {} rows in tfrecord.".format(example_count)) + +generate_tfrecord() + +feature_dict = {"file_name": tf.io.FixedLenFeature([], tf.string), + 
"image_bytes": tf.io.FixedLenFeature([], tf.string), + "int64_scalar": tf.io.FixedLenFeature([], tf.int64), + "float_scalar": tf.io.FixedLenFeature([], tf.float32), + "int64_list": tf.io.FixedLenFeature([6], tf.int64), + "float_list": tf.io.FixedLenFeature([7], tf.float32), + } + +if os.path.exists(MINDRECORD_FILE_NAME): + os.remove(MINDRECORD_FILE_NAME) + os.remove(MINDRECORD_FILE_NAME + ".db") + +tfrecord_transformer = TFRecordToMR(TFRECORD_FILE_NAME, MINDRECORD_FILE_NAME, feature_dict, ["image_bytes"]) +tfrecord_transformer.transform() + +assert os.path.exists(MINDRECORD_FILE_NAME) +assert os.path.exists(MINDRECORD_FILE_NAME + ".db") + +data_set = ds.MindDataset(dataset_file=MINDRECORD_FILE_NAME) +decode_op = vision.Decode() +data_set = data_set.map(operations=decode_op, input_columns=["image_bytes"], num_parallel_workers=2) +count = 0 +for item in data_set.create_dict_iterator(output_numpy=True): + print("sample: {}".format(item)) + count += 1 +print("Got {} samples".format(count)) +``` diff --git a/api/source_zh_cn/programming_guide/dataset_loading.md b/api/source_zh_cn/programming_guide/dataset_loading.md index e3695494d5c24c92daa030459d8fe16f72931d7c..fc04d1c9efcd09048723d79708bee4b814cc8039 100644 --- a/api/source_zh_cn/programming_guide/dataset_loading.md +++ b/api/source_zh_cn/programming_guide/dataset_loading.md @@ -4,8 +4,7 @@ - [数据集加载](#数据集加载) - [概述](#概述) - - [经典数据集加载](#经典数据集加载) - - [MNIST数据集](#mnist数据集) + - [常用数据集加载](#常用数据集加载) - [CIFAR10/100数据集](#cifar10100数据集) - [VOC数据集](#voc数据集) - [COCO数据集](#coco数据集) @@ -13,8 +12,7 @@ - [MindRecord数据格式](#mindrecord数据格式) - [Manifest数据格式](#manifest数据格式) - [TFRecord数据格式](#tfrecord数据格式) - - [Numpy数据格式](#numpy数据格式) - - [text数据格式](#text数据格式) + - [NumPy数据格式](#numpy数据格式) - [CSV数据格式](#csv数据格式) - [自定义数据集加载](#自定义数据集加载) - [构造数据集生成函数](#构造数据集生成函数) @@ -27,15 +25,15 @@ ## 概述 -MindSpore支持加载图像领域常用的经典数据集,用户可以直接使用`mindspore.dataset`中对应的类实现数据集的加载。目前支持的经典数据集及对应的数据集类如下表所示。 +MindSpore支持加载图像领域常用的数据集,用户可以直接使用`mindspore.dataset`中对应的类实现数据集的加载。目前支持的常用数据集及对应的数据集类如下表所示。 | 图像数据集 | 数据集类 | 数据集简介 | | ---- | ---- | ---- | | MNIST | MnistDataset | MNIST是一个大型手写数字图像数据集,拥有60,000张训练图像和10,000张测试图像,常用于训练各种图像处理系统。 | | CIFAR-10 | Cifar10Dataset | CIFAR-10是一个微小图像数据集,包含10种类别下的60,000张32x32大小彩色图像,平均每种类别6,000张,其中5,000张为训练集,1,000张为测试集。 | | CIFAR-100 | Cifar100Dataset | CIFAR-100与CIFAR-10类似,但拥有100种类别,平均每种类别600张,其中500张为训练集,100张为测试集。 | -|CelebA | CelebADataset | CelebA是一个大型人脸图像数据集,包含超过200,000张名人人脸图像,每张图像拥有40个特征标记。 | -| PASCAL-VOC | VOCDataset | PASCAL-VOC是一个经典图像数据集,被广泛用于目标检测、图像分割等计算机视觉领域。 | +| CelebA | CelebADataset | CelebA是一个大型人脸图像数据集,包含超过200,000张名人人脸图像,每张图像拥有40个特征标记。 | +| PASCAL-VOC | VOCDataset | PASCAL-VOC是一个常用图像数据集,被广泛用于目标检测、图像分割等计算机视觉领域。 | | COCO | CocoDataset | COCO是一个大型目标检测、图像分割、姿态估计数据集。 | | CLUE | CLUEDataset | CLUE是一个大型中文语义理解数据集。 | @@ -45,65 +43,39 @@ MindSpore还支持加载多种数据存储格式下的数据集,用户可以 | ---- | ---- | ---- | | MindRecord | MindDataset | MindRecord是MindSpore的自研数据格式,具有读写高效、易于分布式处理等优势。 | | Manifest | ManifestDataset | Manifest是华为ModelArts支持的一种数据格式,描述了原始文件和标注信息,可用于标注、训练、推理场景。 | -| TFRecord | TFRecordDataset | TFRecord是Tensorflow定义的一种二进制数据文件格式。 | -| Numpy | NumpySlicesDataset | Numpy数据源指的是已经读入内存中的Numpy arrays格式数据集。 | +| TFRecord | TFRecordDataset | TFRecord是TensorFlow定义的一种二进制数据文件格式。 | +| NumPy | NumpySlicesDataset | NumPy数据源指的是已经读入内存中的NumPy arrays格式数据集。 | | Text File | TextFileDataset | Text File指的是常见的文本格式数据。 | | CSV File | CSVDataset | CSV指逗号分隔值,其文件以纯文本形式存储表格数据。 | -MindSpore也同样支持使用GeneratorDataset自定义数据集的加载方式,用户可以根据需要实现自己的数据集类。 
+MindSpore也同样支持使用`GeneratorDataset`自定义数据集的加载方式,用户可以根据需要实现自己的数据集类。 -更多详细的数据集加载接口说明,参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html)。 +> 更多详细的数据集加载接口说明,参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html)。 -## 经典数据集加载 +## 常用数据集加载 -### MNIST数据集 +下面将介绍几种常用数据集的加载方式。 -```python -# 通过MNIST API读取、解析MNIST数据集,并构建数据管道 - -import mindspore.dataset as ds - -# 下载MNIST数据集,将其解压到MnistData目录 -DATA_DIR = "MnistData/" - -# 使用MnistDataset读取数据集,指定num_samples以获取5个样本数据 -# shuffle参数为True时,是随机获取5个样本,每次运行的label结果可能不一致 -dataset = ds.MnistDataset(DATA_DIR, num_samples=5, shuffle=True) - -# 启动数据管道,输出5个样本数据 -for data in dataset.create_dict_iterator(): - print("Image shape:", data['image'].shape, ", Label:", data['label']) -``` +### CIFAR10/100数据集 -``` -Image shape: (28, 28, 1) , Label: 4 -Image shape: (28, 28, 1) , Label: 9 -Image shape: (28, 28, 1) , Label: 4 -Image shape: (28, 28, 1) , Label: 0 -Image shape: (28, 28, 1) , Label: 9 -``` +下面的样例通过`Cifar10Dataset`接口加载CIFAR-10数据集,使用顺序采样器获取其中5个样本,然后展示了对应图片的形状和标签。 -### CIFAR10/100数据集 +CIFAR-100数据集和MNIST数据集的加载方式也与之类似。 ```python -# 通过Cifar API读取、解析CIFAR数据集,并构建数据管道(以CIFAR10数据集为例) - import mindspore.dataset as ds -# 下载CIFAR10数据集,将其解压到CIFAR10Data目录 DATA_DIR = "Cifar10Data/" -# 指定一个顺序采样器SequentialSampler,按照读取顺序获取5个样本数据 sampler = ds.SequentialSampler(num_samples=5) - -# 使用CIFAR10Dataset读取数据集,指定sampler为上述采样器 dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -# 启动数据管道,输出5个样本数据 for data in dataset.create_dict_iterator(): print("Image shape:", data['image'].shape, ", Label:", data['label']) ``` +输出结果如下: + ``` Image shape: (32, 32, 3) , Label: 0 Image shape: (32, 32, 3) , Label: 1 @@ -114,34 +86,30 @@ Image shape: (32, 32, 3) , Label: 4 ### VOC数据集 -```python -# 通过VOC API读取、解析VOC数据集,并构建数据管道 +下面的样例通过`VOCDataset`接口加载VOC2012数据集,分别演示了将任务指定为分割(Segmentation)和检测(Detection)时的原始图像形状和目标形状。 +```python import mindspore.dataset as ds -# 下载VOC数据集,将其解压到VOC2012目录 DATA_DIR = "VOC2012/" -# 使用VOCDataset读取数据集,指定为Segmentation任务,同时指定num_samples以获取2个样本数据 -# decode参数会将读取的图像解码 -dataset = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", num_samples=2, decode=True, shuffle=False) +dataset = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", num_samples=2, decode=True, shuffle=False) + print("[Segmentation]:") for data in dataset.create_dict_iterator(): - # 原图像 print("image shape:", data["image"].shape) - # 分割后图像 print("target shape:", data["target"].shape) -# 接下来是Detection任务 -dataset = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", num_samples=1, decode=True, shuffle=False) +dataset = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", num_samples=1, decode=True, shuffle=False) + print("[Detection]:") for data in dataset.create_dict_iterator(): - # 原图像 print("image shape:", data["image"].shape) - # 目标框 print("bbox shape:", data["bbox"].shape) ``` +输出结果如下: + ``` [Segmentation]: image shape: (281, 500, 3) @@ -155,39 +123,35 @@ bbox shape: (2, 4) ### COCO数据集 -```python -# 通过Coco API读取、解析Coco数据集,并构建数据管道 +下面的样例通过`CocoDataset`接口加载COCO数据集,分别演示了将任务指定为目标检测(Detection)、背景分割(Stuff)、关键点检测(Keypoint)和全景分割(Panoptic)时获取到的不同数据。 +```python import mindspore.dataset as ds -# 下载Coco数据集,将其解压到CocoData目录 DATA_DIR = "COCO/train/" ANNOTATION_FILE = "COCO/annotations/train.json" KEYPOINT_FILE = "COCO/annotations/key_point.json" PANOPTIC_FILE = "COCO/annotations/panoptic.json" -# 使用CocoDataset读取数据集,指定为Detection任务,同时指定num_samples以获取1个样本数据 dataset = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, 
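+                         # 补充注释:task还可取值Stuff、Keypoint、Panoptic,见下文样例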
task="Detection", num_samples=1) for data in dataset.create_dict_iterator(): print("Detection:", data.keys()) -# 让我们来观察一下,在指定Coco不同任务时,我们获取到的不同数据 -# Stuff 任务 dataset = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Stuff", num_samples=1) for data in dataset.create_dict_iterator(): print("Stuff:", data.keys()) -# Keypoint 任务 dataset = ds.CocoDataset(DATA_DIR, annotation_file=KEYPOINT_FILE, task="Keypoint", num_samples=1) for data in dataset.create_dict_iterator(): print("Keypoint:", data.keys()) -# Panoptic 任务 dataset = ds.CocoDataset(DATA_DIR, annotation_file=PANOPTIC_FILE, task="Panoptic", num_samples=1) for data in dataset.create_dict_iterator(): print("Panoptic:", data.keys()) ``` +输出结果如下: + ``` Detection: dict_keys(['bbox', 'image', 'iscrowd', 'category_id']) Stuff: dict_keys(['segmentation', 'iscrowd', 'image']) @@ -195,49 +159,51 @@ Keypoint: dict_keys(['keypoints', 'num_keypoints', 'image']) Panoptic: dict_keys(['bbox', 'image', 'area', 'category_id', 'iscrowd']) ``` -> 更多经典数据集加载接口说明,参见对应[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html)。 - ## 特定格式数据集加载 +下面将介绍几种特定格式数据集文件的加载方式。 + ### MindRecord数据格式 -MindRecord是MindSpore的自研数据格式,具有更好的性能和特性。 +MindRecord是MindSpore定义的一种数据格式,使用MindRecord能够获得更好的性能提升。 + +> 阅读[数据格式转换](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_conversion.html)章节,了解如何将数据集转化为MindSpore数据格式。 ->阅读[数据格式转换](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_conversion.html)章节,了解如何将数据集转化为MindSpore数据格式。 +下面的样例通过`MindDataset`接口加载MindRecord文件,并展示已加载数据的标签。 ```python import mindspore.dataset as ds -# 指定MindRecord数据格式地址 DATA_DIR = "mindrecord_dataset_path" mindrecord_dataset = ds.MindDataset(DATA_DIR) -# 启动数据管道读取 -for data in mindrecord_dataset.create_dict_iterator(): +for data in mindrecord_dataset.create_dict_iterator(output_numpy=True): print(data["label"]) ``` ### Manifest数据格式 -Manifest是华为ModelArts支持的数据格式文件,详细说明请参见相关[文档](https://support.huaweicloud.com/engineers-modelarts/modelarts_23_0009.html)。 +Manifest是华为ModelArts支持的数据格式文件,详细说明请参见[Manifest文档](https://support.huaweicloud.com/engineers-modelarts/modelarts_23_0009.html)。 + +下面的样例通过`ManifestDataset`接口加载Manifest文件,并展示已加载数据的标签。 ```python import mindspore.dataset as ds -# 指定Manifest数据集地址 DATA_DIR = "manifest_dataset_path" manifest_dataset = ds.ManifestDataset(DATA_DIR) -# 启动数据管道读取 for data in manifest_dataset.create_dict_iterator(): print(data["label"]) ``` ### TFRecord数据格式 -TFRecord是Tensorflow定义的一种二进制数据文件格式。 +TFRecord是TensorFlow定义的一种二进制数据文件格式。 + +下面的样例通过`TFRecordDataset`接口加载TFRecord文件,并介绍了两种不同的数据集格式设定方案。 -1. 传入数据集路径或`.tfrecord`文件列表,创建TFRecordDataset对象。 +1. 传入数据集路径或TFRecord文件列表,创建`TFRecordDataset`对象。 ```python import mindspore.dataset as ds @@ -246,77 +212,76 @@ TFRecord是Tensorflow定义的一种二进制数据文件格式。 dataset = ds.TFRecordDataset(DATA_DIR) ``` -2. 用户可以选择通过创建Schema文件或Schema类,设定数据集格式及特征。 - - - 创建Schema文件 - - Schema文件示例: - - ``` - { - "datasetType": "TF", - "numRows": 3, - "columns": { - "image": { - "type": "uint8", - "rank": 1 - }, - "label" : { - "type": "int64", - "rank": 1 +2. 
用户可以通过编写Schema文件或创建Schema对象,设定数据集格式及特征。
+
+    - 编写Schema文件
+
+        将数据集格式和特征按JSON格式写入Schema文件,示例如下:
+
+        ```
+        {
+            "columns": {
+                "image": {
+                    "type": "uint8",
+                    "rank": 1
+                },
+                "label" : {
+                    "type": "string",
+                    "rank": 1
+                },
+                "id" : {
+                    "type": "int64",
+                    "rank": 0
+                }
+            }
+        }
-            }
-        ```
+        ```
-        - `datasetType`: 数据格式的类型,这里`TF`是指TFrecord数据格式。
+    - `columns`:列信息字段,需要根据数据集的实际列名定义。上面的示例中,数据集列为`image`、`label`和`id`。
-        - `columns`:列信息字段,需要根据数据集的实际列名定义,上面Schema文件示例中,数据集列为`image`和`label`两列。
+    然后在创建`TFRecordDataset`时将Schema文件路径传入。
-        - `numRows`:行数信息字段,控制加载数据的最大行数。如果定义的行数大于实际行数,加载时则以实际行数为准。
+
+    ```python
+    DATA_DIR = "tfrecord_dataset_path"
+    SCHEMA_DIR = "dataset_schema_path/schema.json"
+    dataset = ds.TFRecordDataset(DATA_DIR, schema=SCHEMA_DIR)
+    ```
-    在创建TFRecordDataset时将Schema文件路径传入。
+    - 创建Schema对象
-    ```python
-    DATA_DIR = "tfrecord_dataset_path"
-    SCHEMA_DIR = "dataset_schema_path/schema.json"
-    dataset = ds.TFRecordDataset(DATA_DIR, schema=SCHEMA_DIR)
-    ```
+        创建Schema对象,为其添加自定义字段,然后在创建数据集对象时传入。
-    - 创建Schema类
+
+        ```python
+        import mindspore.common.dtype as mstype
+        schema = ds.Schema()
+        schema.add_column('image', de_type=mstype.uint8)
+        schema.add_column('label', de_type=mstype.int32)
+        dataset = ds.TFRecordDataset(DATA_DIR, schema=schema)
+        ```
-    ```python
-    import mindspore.common.dtype as mstype
-    schema = ds.Schema()
-    schema.add_column('image', de_type=mstype.uint8)
-    schema.add_column('label', de_type=mstype.int32)
-    dataset = ds.TFRecordDataset(DATA_DIR, schema=schema)
-    ```

+### NumPy数据格式
-### Numpy数据格式

+如果所有数据已经读入内存,可以直接使用`NumpySlicesDataset`类将其加载。
-如果所有数据已经读入内存,可以直接使用NumpySlicesDataset类将其加载。

+下面的样例分别介绍了通过`NumpySlicesDataset`加载arrays数据、list数据和dict数据的方式;本节末尾还补充了一个关于默认列名行为的示意样例。

-- 加载Numpy arrays数据
+- 加载NumPy arrays数据

    ```python
-    # 从Numpy arrays构建数据管道
-
    import numpy as np
    import mindspore.dataset as ds

-    # 使用numpy构建一个数组
    features, labels = np.random.sample((5, 2)), np.random.sample((5, 1))

-    # 从numpy中构建数据管道
-    # 注意:传入参数需要是一个tuple,即是(features, labels);column_names用于指定生成的数据集名称为col1, col2
+
    data = (features, labels)
    dataset = ds.NumpySlicesDataset(data, column_names=["col1", "col2"], shuffle=False)

-    # 启动数据管道
    for data in dataset:
        print(data[0], " ", data[1])
    ```

+    输出结果如下:
+
    ```
    [0.49893939 0.36348882] [0.15234002]
    [0.83845534 0.19721032] [0.94602561]
@@ -328,22 +293,19 @@ TFRecord是Tensorflow定义的一种二进制数据文件格式。
- 加载Python list数据

    ```python
-    # 从Python list构建数据管道
-
    import mindspore.dataset as ds

-    # 构建一个list
    data1 = [[1, 2], [3, 4]]

-    # 从list中构建数据管道
-    # column_names用于指定生成的数据集名称为col1
    dataset = ds.NumpySlicesDataset(data1, column_names=["col1"], shuffle=False)

-    # 启动数据管道
    for data in dataset:
        print(data[0])
    ```

+    输出结果如下:
+
    ```
    [1 2]
    [3 4]
@@ -352,60 +314,42 @@ TFRecord是Tensorflow定义的一种二进制数据文件格式。
- 加载Python dict数据

    ```python
-    # 从Python dict构建数据管道
-
    import mindspore.dataset as ds

-    # 构建一个dict
    data1 = {"a": [1, 2], "b": [3, 4]}

-    # 从dict中构建数据管道
-    # column_names用于指定生成的数据集名称为col1, col2
    dataset = ds.NumpySlicesDataset(data1, column_names=["col1", "col2"], shuffle=False)

-    # 启动数据管道
    for data in dataset.create_dict_iterator():
        print(data)
    ```

+    输出结果如下:
+
    ```
-    {'col1': array(1, dtype=int64), 'col2': array(3, dtype=int64)}
-    {'col1': array(2, dtype=int64), 'col2': array(4, dtype=int64)}
+    {'col1': Tensor(shape=[], dtype=Int64, value= 1), 'col2': Tensor(shape=[], dtype=Int64, value= 3)}
+    {'col1': Tensor(shape=[], dtype=Int64, value= 2), 'col2': Tensor(shape=[], dtype=Int64, value= 4)}
    ```
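+下面补充一个示意性的样例(作为上面加载dict数据方式的延伸):若不指定`column_names`,`NumpySlicesDataset`一般会直接使用dict的键名作为数据列名,具体行为以所用版本的API文档为准。
+
+```python
+import mindspore.dataset as ds
+
+data1 = {"a": [1, 2], "b": [3, 4]}
+
+# 不指定column_names,数据列名由dict的键名"a"、"b"确定
+dataset = ds.NumpySlicesDataset(data1, shuffle=False)
+
+for data in dataset.create_dict_iterator():
+    print(data)
+```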
-### text数据格式
-
-```python
-import mindspore.dataset as ds
-
-# 指定text数据格式地址
-DATA_DIR = "text_file_path"
-text_dataset = ds.TextFileDataset(DATA_DIR)
+### CSV数据格式
-# 启动数据管道读取
-for data in text_dataset.create_dict_iterator():
-    print(data["label"])
-```
+
+下面的样例通过`CSVDataset`加载CSV格式数据集文件,并展示了已加载数据的标签。
-### CSV数据格式
+
+Text格式数据集文件的加载方式与CSV文件类似。

```python
import mindspore.dataset as ds

-# 指定CSV数据格式地址
DATA_DIR = "csv_file_path"
csv_dataset = ds.CSVDataset(DATA_DIR)

-# 启动数据管道读取
-for data in csv_dataset.create_dict_iterator():
-    print(data["label"])
+for data in csv_dataset.create_dict_iterator(output_numpy=True):
+    print(data["1"])
```

->更多数据格式文件加载说明,参见对应[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html)。
-
## 自定义数据集加载

-对于目前MindSpore不支持直接加载的数据集,可以通过构造GeneratorDataset对象实现自定义方式的加载,或者将其转换成MindRecord数据格式。目前自定义数据集加载有以下几种方式。
+对于目前MindSpore不支持直接加载的数据集,可以通过构造`GeneratorDataset`对象实现自定义方式的加载,或者将其转换成MindRecord数据格式。目前自定义数据集加载有以下几种方式。

### 构造数据集生成函数

@@ -415,23 +359,22 @@
import numpy as np
import mindspore.dataset as ds

-# 随机生成一个数据集
np.random.seed(58)
data = np.random.sample((5, 2))
label = np.random.sample((5, 1))

-# 自定义数据返回方式
def GeneratorFunc():
    for i in range(5):
        yield (data[i], label[i])

-# 构建自定义数据集对象
dataset = ds.GeneratorDataset(GeneratorFunc, ["data", "label"])

for data in dataset.create_dict_iterator():
    print(data["data"], data["label"])
```

+输出结果如下:
+
```
[0.36510558 0.45120592] [0.78888122]
[0.49606035 0.07562207] [0.38068183]
@@ -476,6 +419,8 @@
    print(data["data"], data["label"])
```

+输出结果如下:
+
```
[0.36510558 0.45120592] [0.78888122]
[0.49606035 0.07562207] [0.38068183]
@@ -511,6 +456,8 @@
    print(data["data"], data["label"])
```

+输出结果如下:
+
```
[0.36510558 0.45120592] [0.78888122]
[0.49606035 0.07562207] [0.38068183]
@@ -549,6 +496,8 @@
    print(data["data"], data["label"])
```

+输出结果如下:
+
```
[0.36510558 0.45120592] [0.78888122]
[0.57176158 0.28963401] [0.16271622]
diff --git a/api/source_zh_cn/programming_guide/dtype.md b/api/source_zh_cn/programming_guide/dtype.md
new file mode 100644
index 0000000000000000000000000000000000000000..b11e74e003f24414e584825092a8fa032f8f3eac
--- /dev/null
+++ b/api/source_zh_cn/programming_guide/dtype.md
@@ -0,0 +1,64 @@
+# dtype
+
+
+
+- [dtype](#dtype)
+  - [概述](#概述)
+  - [数据类型转换接口](#数据类型转换接口)
+
+
+
+
+
+## 概述
+
+MindSpore张量支持不同的数据类型,包含`int8`、`int16`、`int32`、`int64`、`uint8`、`uint16`、`uint32`、`uint64`、`float16`、`float32`、`float64`、`bool_`,与NumPy的数据类型一一对应。
+
+在MindSpore的运算处理流程中,Python中的`int`数会被转换为定义的`int64`类型,`float`数会被转换为定义的`float32`类型。
+
+详细的类型支持情况请参考。
+
+以下代码打印MindSpore的数据类型int32:
+
+```
+from mindspore import dtype as mstype
+
+data_type = mstype.int32
+print(data_type)
+```
+
+输出如下:
+
+```
+Int32
+```
+
+
+## 数据类型转换接口
+
+MindSpore提供了以下几个接口,实现与NumPy数据类型和Python内置的数据类型间的转换。
+
+- `dtype_to_nptype`:将MindSpore的数据类型转换为NumPy对应的数据类型。
+- `dtype_to_pytype`:将MindSpore的数据类型转换为Python对应的内置数据类型。
+- `pytype_to_dtype`:将Python内置的数据类型转换为MindSpore对应的数据类型。
+
+以下代码实现了不同数据类型间的转换,并打印转换后的类型。
+
+```
+from mindspore import dtype as mstype
+
+np_type = mstype.dtype_to_nptype(mstype.int32)
+ms_type = mstype.pytype_to_dtype(int)
+py_type = mstype.dtype_to_pytype(mstype.float64)
+
+print(np_type)
+print(ms_type)
+print(py_type)
+```
+
+输出如下:
+
+```
+<class 'numpy.int32'>
+Int64
+<class 'float'>
+```
diff --git a/api/source_zh_cn/programming_guide/execution_management.rst b/api/source_zh_cn/programming_guide/execution_management.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b57742c1c72acd6a5f4e25b8fd9ee7dba5cd6dfc
--- /dev/null
+++ 
b/api/source_zh_cn/programming_guide/execution_management.rst @@ -0,0 +1,9 @@ +执行管理 +=========== + +.. toctree:: + :maxdepth: 1 + + context + run + callback \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/extension.rst b/api/source_zh_cn/programming_guide/extension.rst new file mode 100644 index 0000000000000000000000000000000000000000..ffba7b0682c05c45a45ee9f9784935b35e874b33 --- /dev/null +++ b/api/source_zh_cn/programming_guide/extension.rst @@ -0,0 +1,7 @@ +功能扩展 +=========== + +.. toctree:: + :maxdepth: 1 + + probability \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/images/batch.png b/api/source_zh_cn/programming_guide/images/batch.png index cce0f467eac154d0633543e5c69613ce7bdbbdcc..ee974652d361b4085033a08789a036d331c2bec8 100644 Binary files a/api/source_zh_cn/programming_guide/images/batch.png and b/api/source_zh_cn/programming_guide/images/batch.png differ diff --git a/api/source_zh_cn/programming_guide/images/concat.png b/api/source_zh_cn/programming_guide/images/concat.png index 742aa2a0203f078ee7d06549c3372ce271cea455..7a28ff7826cc2a1c6334e2ff15eeaaffd6b67c06 100644 Binary files a/api/source_zh_cn/programming_guide/images/concat.png and b/api/source_zh_cn/programming_guide/images/concat.png differ diff --git a/api/source_zh_cn/programming_guide/images/ctrans_invert.png b/api/source_zh_cn/programming_guide/images/ctrans_invert.png index a27301d28dd11b037ab973cc97d1b3042f24f3b0..b73f9bd1abed0b4064d10461cc360160591ef4e3 100644 Binary files a/api/source_zh_cn/programming_guide/images/ctrans_invert.png and b/api/source_zh_cn/programming_guide/images/ctrans_invert.png differ diff --git a/api/source_zh_cn/programming_guide/images/ctrans_resize.png b/api/source_zh_cn/programming_guide/images/ctrans_resize.png index f4f2b23642cc8d87f3ad5684205c968c79bd794d..e5275e371cbe0b668a0f6f1d699ea67efa09956f 100644 Binary files a/api/source_zh_cn/programming_guide/images/ctrans_resize.png and b/api/source_zh_cn/programming_guide/images/ctrans_resize.png differ diff --git a/api/source_zh_cn/programming_guide/images/map.png b/api/source_zh_cn/programming_guide/images/map.png index abe704717045e3816f3ffe4d10a8b023ec983b3d..275631c1c5f0ea256be00004251e61c382748487 100644 Binary files a/api/source_zh_cn/programming_guide/images/map.png and b/api/source_zh_cn/programming_guide/images/map.png differ diff --git a/api/source_zh_cn/programming_guide/images/pytrans_compose.png b/api/source_zh_cn/programming_guide/images/pytrans_compose.png index 66221a4f5e7a9f985475fa2dd68f1994903636c3..6d74fc231a7253393f98a645c0c68c7b2c517fb2 100644 Binary files a/api/source_zh_cn/programming_guide/images/pytrans_compose.png and b/api/source_zh_cn/programming_guide/images/pytrans_compose.png differ diff --git a/api/source_zh_cn/programming_guide/images/randomcrop.png b/api/source_zh_cn/programming_guide/images/randomcrop.png index 8095bceb67cd3643dda1dce6c060a98ccb40373f..ef62fe1a08f221a2c4ce81f9e60ba5c9e0d93a61 100644 Binary files a/api/source_zh_cn/programming_guide/images/randomcrop.png and b/api/source_zh_cn/programming_guide/images/randomcrop.png differ diff --git a/api/source_zh_cn/programming_guide/images/randomhorizontalflip.png b/api/source_zh_cn/programming_guide/images/randomhorizontalflip.png index f127d7ab479851049262fc3713dba7d14b2c908a..2d851183a8f858c54a26b636703b9177df4ec80e 100644 Binary files a/api/source_zh_cn/programming_guide/images/randomhorizontalflip.png and b/api/source_zh_cn/programming_guide/images/randomhorizontalflip.png differ diff --git 
a/api/source_zh_cn/programming_guide/images/repeat.png b/api/source_zh_cn/programming_guide/images/repeat.png index 7cb40834c41b8d17e37cf2da8ba368ad72212f48..9717ec81c52f23615e236d27e0f7c96bd6ac1155 100644 Binary files a/api/source_zh_cn/programming_guide/images/repeat.png and b/api/source_zh_cn/programming_guide/images/repeat.png differ diff --git a/api/source_zh_cn/programming_guide/images/shuffle.png b/api/source_zh_cn/programming_guide/images/shuffle.png index d4af0f38c4ecbff6fb80ad3c06b974ef71adeb56..4464cefad03beefac6bb413da22eebeffaf8fe41 100644 Binary files a/api/source_zh_cn/programming_guide/images/shuffle.png and b/api/source_zh_cn/programming_guide/images/shuffle.png differ diff --git a/api/source_zh_cn/programming_guide/images/tranform_bad.png b/api/source_zh_cn/programming_guide/images/tranform_bad.png index 2d3ee60ccffdbe7c9ad3f5adb4235cdc8f3532d2..0f659a14be8d8af05cee1757adc3d67664e1c259 100644 Binary files a/api/source_zh_cn/programming_guide/images/tranform_bad.png and b/api/source_zh_cn/programming_guide/images/tranform_bad.png differ diff --git a/api/source_zh_cn/programming_guide/images/tranform_good_1.png b/api/source_zh_cn/programming_guide/images/tranform_good_1.png index 3c4b373ead883539b6d4673c68665bec20034e18..e147f019e9211ee888e58172469ff1fabe4fa776 100644 Binary files a/api/source_zh_cn/programming_guide/images/tranform_good_1.png and b/api/source_zh_cn/programming_guide/images/tranform_good_1.png differ diff --git a/api/source_zh_cn/programming_guide/images/tranform_good_2.png b/api/source_zh_cn/programming_guide/images/tranform_good_2.png index 066a5d082387206a01ceb6ad54cc9dd7e074c672..f6ed65482d233d88f46b108c3fe4e21bd00df4c5 100644 Binary files a/api/source_zh_cn/programming_guide/images/tranform_good_2.png and b/api/source_zh_cn/programming_guide/images/tranform_good_2.png differ diff --git a/api/source_zh_cn/programming_guide/images/tranform_good_3.png b/api/source_zh_cn/programming_guide/images/tranform_good_3.png index 500b36c18eb53253c58f84515d5b90b1136d23c0..575d8038bb7f8260b86d226033c30e15c3b20e0e 100644 Binary files a/api/source_zh_cn/programming_guide/images/tranform_good_3.png and b/api/source_zh_cn/programming_guide/images/tranform_good_3.png differ diff --git a/api/source_zh_cn/programming_guide/images/tranform_pipeline.png b/api/source_zh_cn/programming_guide/images/tranform_pipeline.png index 07906d4751f286de989a4c873d9fd422207eb5eb..7278418d5ebfb3db921627f213ceb455aba53794 100644 Binary files a/api/source_zh_cn/programming_guide/images/tranform_pipeline.png and b/api/source_zh_cn/programming_guide/images/tranform_pipeline.png differ diff --git a/api/source_zh_cn/programming_guide/images/zip.png b/api/source_zh_cn/programming_guide/images/zip.png index 2839b2c36f00533917b2406d7f215249ad8dbc6b..f0052435898ae6a3546dfea9c50711ab3f303699 100644 Binary files a/api/source_zh_cn/programming_guide/images/zip.png and b/api/source_zh_cn/programming_guide/images/zip.png differ diff --git a/api/source_zh_cn/programming_guide/infer.md b/api/source_zh_cn/programming_guide/infer.md new file mode 100644 index 0000000000000000000000000000000000000000..0da170636b96a9cfd31e89b91fefe9d3edbacaa0 --- /dev/null +++ b/api/source_zh_cn/programming_guide/infer.md @@ -0,0 +1,19 @@ +# 推理 + + + +- [推理](#推理) + + + + + +基于MindSpore训练后的模型,支持在Ascend 910 AI处理器、Ascend 310 AI处理器、GPU、CPU、端侧等多种不同的平台上执行推理。使用方法可参考如下教程: + +- [在Ascend 910 AI处理器上执行推理](https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html#ascend-910-ai) +- [在Ascend 310 
AI处理器上执行推理](https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html#ascend-310-ai) +- [在GPU上执行推理](https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html#gpu) +- [在CPU上执行推理](https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html#cpu) +- [在端侧执行推理](https://www.mindspore.cn/lite/tutorial/zh-CN/master/quick_start/quick_start.html) + +同时,MindSpore提供了一个轻量级、高性能的服务模块,称为MindSpore Serving,可帮助MindSpore开发者在生产环境中高效部署在线推理服务,使用方法可参考[部署推理服务](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/serving.html)。 \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/component.md b/api/source_zh_cn/programming_guide/network_component.md similarity index 78% rename from api/source_zh_cn/programming_guide/component.md rename to api/source_zh_cn/programming_guide/network_component.md index dd92ce5515f63c12a6e11d84c3b8648734f94c8f..8ccdd7610254881a961962ab34c34ee73007e72c 100644 --- a/api/source_zh_cn/programming_guide/component.md +++ b/api/source_zh_cn/programming_guide/network_component.md @@ -4,24 +4,23 @@ - [常用网络组件](#常用网络组件) - [概述](#概述) - - [GradOperation](#GradOperation) - - [WithLossCell](#WithLossCell) - - [TrainOneStepCell](#TrainOneStepCell) + - [GradOperation](#gradoperation) + - [WithLossCell](#withlosscell) + - [TrainOneStepCell](#trainonestepcell) ## 概述 -MindSpore封装一些常用的网络组件,用于网络的训练,推理,求梯度和数据处理等。 +MindSpore封装了一些常用的网络组件,用于网络的训练、推理、求梯度和数据处理等操作。 这些网络组件可以直接被用户使用,同样也会在`model.train`和`model.eval`等更高级的封装接口内部进行使用。 -本节内容将会介绍三个网络组件,分别是`GradOperation`,`WithLossCell`和`TrainOneStepCell`,将会从功能,用户使用和内部使用三个方面来进行介绍。 +本节内容将会介绍三个网络组件,分别是`GradOperation`、`WithLossCell`和`TrainOneStepCell`,将会从功能、用户使用和内部使用三个方面来进行介绍。 ## GradOperation -GradOperation组件用于生成输入函数的梯度,利用`get_all`,`get_by_list`和`sens_param`参数 -控制梯度的计算方式,细节内容详见API文档。 +GradOperation组件用于生成输入函数的梯度,利用`get_all`、`get_by_list`和`sens_param`参数控制梯度的计算方式,细节内容详见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GradOperation)。 GradOperation的使用实例如下: @@ -59,9 +58,7 @@ y = Tensor([[0.01, 0.3, 1.1], [0.1, 0.2, 1.3], [2.1, 1.2, 3.3]], dtype=mstype.fl GradNetWrtX(Net())(x, y) ``` -上面的例子是计算`Net`相对与x的梯度值,首先需要定义网络`Net`作为`GradOperation`的输入, -实例创建了包含梯度运算的`GradNetWrtX`。调用`GradNetWrtX`是将网络传入`GradOperation`生成梯度函数, -将输入数据传入梯度函数中返回最终结果。 +上面的例子是计算`Net`相对与x的梯度值,首先需要定义网络`Net`作为`GradOperation`的输入,实例创建了包含梯度运算的`GradNetWrtX`。调用`GradNetWrtX`是将网络传入`GradOperation`生成梯度函数,将输入数据传入梯度函数中返回最终结果。 输出如下: @@ -76,7 +73,7 @@ MindSpore涉及梯度计算的其他组件,例如`WithGradCell`和`TrainOneSte ## WithLossCell -`WithLossCell`本质上是一个包含损失函数的`Cell`, 构造`WithLossCell`需要事先定义好网络和损失函数。 +`WithLossCell`本质上是一个包含损失函数的`Cell`,构造`WithLossCell`需要事先定义好网络和损失函数。 下面通过一个实例来介绍其具体的使用, 首先需要构造一个网络,内容如下: @@ -124,20 +121,19 @@ class LeNet(nn.Cell): return output ``` -下面是`WithLossCell`的使用实例,分别定义好网络和损失函数,然后创建一个`WithLossCell`, -然后传入输入数据和标签数据,`WithLossCell`内部根据网络和损失函数返回计算结果 +下面是`WithLossCell`的使用实例,分别定义好网络和损失函数,然后创建一个`WithLossCell`,传入输入数据和标签数据,`WithLossCell`内部根据网络和损失函数返回计算结果。 ``` data = Tensor(np.ones([32, 1, 32, 32]).astype(np.float32) * 0.01) label = Tensor(np.ones([32]).astype(np.int32)) net = LeNet() -criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) +criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') net_with_criterion = WithLossCell(net, criterion) loss = net_with_criterion(data, label) print("+++++++++Loss+++++++++++++") print(loss) ``` -输出结果如下: +输出如下: ``` +++++++++Loss+++++++++++++ 2.302585 @@ -157,7 +153,7 @@ learning_rate = 0.01 momentum = 0.9 optimizer = 
Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum) -criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) +criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') net_with_criterion = WithLossCell(net, criterion) train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer for i in range(5): @@ -167,10 +163,9 @@ for i in range(5): print(res) ``` -用例中构造了优化器和一个`WithLossCell`的实例,然后传入`TrainOneStepCell` 中初始化一个训练网络,用例循环五次,相当于网络训练了五次, -并输出每次的loss结果,由结果可以看出每次训练后loss值在逐渐减小。 +用例中构造了优化器和一个`WithLossCell`的实例,然后传入`TrainOneStepCell`中初始化一个训练网络,用例循环五次,相当于网络训练了五次,并输出每次的loss结果,由结果可以看出每次训练后loss值在逐渐减小。 -输出结果如下: +输出如下: ``` +++++++++result:0++++++++++++ 2.302585 diff --git a/api/source_zh_cn/programming_guide/operator.md b/api/source_zh_cn/programming_guide/operator.md index a62e2250259863469be22349a058727d7b8e12dc..9f6db01e8810b39b9aef08f11183ea4f9ef4e533 100644 --- a/api/source_zh_cn/programming_guide/operator.md +++ b/api/source_zh_cn/programming_guide/operator.md @@ -1,210 +1,227 @@ -# 算子组件 - -算子组件指常用的算子及其操作,按功能大致可分为张量操作,网络操作,数组操作,图像操作,编码操作,调试操作,量化操作等七个模块。所有的算子在Ascend芯片或者CPU, GPU的支持情况,参见[这里](https://www.mindspore.cn/docs/zh-CN/master/operator_list.html "list") - - -这七类算子操作的相互关系见下: +# 算子 -- [算子组件](#算子组件) - - [张量操作](#张量操作) - - [标量运算](#标量运算) - - [加法](#加法) - - [Element-wise 除法](#element-wise-除法) - - [Element-wise 乘](#element-wise-乘) - - [三角函数](#求三角函数) - - [向量运算](#向量运算) - - [Concat](#concat-算子) - - [Squeeze](#squeeze) - - [Sparse2Dense](#求sparse2dense改变tensor维度使其变稠密) - - [ScalarCast](#scalarcast) - - [矩阵运算](#矩阵运算) - - [矩阵乘法](#矩阵乘法) - - [常见范数](#常见范数) - - [广播机制](#广播机制) - - [网络操作](#网络操作) - - [特征提取](#特征提取) - - [卷积操作](#卷积操作) - - [卷积的反向传播操作](#卷积的反向传播算子操作) - - [激活函数](#激活函数) - - [LossFunction](#lossfunction) - - [L1 Loss](#l1loss) - - [优化算法](#优化算法) - - [SGD](#sgd) - - [数组操作](#数组操作) - - [DType](#dtype) - - [Cast](#cast) - - [Shape](#shape) - - [图像操作](#图像操作) - - [编码运算](#编码运算) - - [BoundingBoxEncode](#boundingboxencode) - - [BoundingBoxDecode](#boundingboxdecode) - - [IOU](#iou-计算) - - [调试操作](#调试操作) - - [Debug](#debug) - - [HookBackward](#hookbackward) - - [量化操作](#量化操作) - - [MinMaxUpdatePerLayer](#minmaxupdateperlayer) +- [算子](#算子) + - [概述](#概述) + - [张量操作](#张量操作) + - [标量运算](#标量运算) + - [加法](#加法) + - [Element-wise乘法](#element-wise乘法) + - [求三角函数](#求三角函数) + - [向量运算](#向量运算) + - [Squeeze](#squeeze) + - [求Sparse2Dense](#求sparse2dense) + - [矩阵运算](#矩阵运算) + - [矩阵乘法](#矩阵乘法) + - [广播机制](#广播机制) + - [网络操作](#网络操作) + - [特征提取](#特征提取) + - [卷积操作](#卷积操作) + - [卷积的反向传播算子操作](#卷积的反向传播算子操作) + - [激活函数](#激活函数) + - [LossFunction](#lossfunction) + - [L1Loss](#l1loss) + - [优化算法](#优化算法) + - [SGD](#sgd) + - [数组操作](#数组操作) + - [DType](#dtype) + - [Cast](#cast) + - [Shape](#shape) + - [图像操作](#图像操作) + - [编码运算](#编码运算) + - [BoundingBoxEncode](#boundingboxencode) + - [BoundingBoxDecode](#boundingboxdecode) + - [IOU计算](#iou计算) + - [调试操作](#调试操作) + - [Debug](#debug) + - [HookBackward](#hookbackward) + + +## 概述 +算子组件包含了常用的算子及其操作,按功能大致可分为张量操作、网络操作、数组操作、图像操作、编码操作、调试操作和量化操作七个模块。所有的算子在Ascend AI处理器、GPU和CPU的支持情况,参见[算子支持列表](https://www.mindspore.cn/docs/zh-CN/master/operator_list.html)。 ## 张量操作 - -主要包括张量的结构操作和张量的数学运算。 -张量结构操作诸如:张量创建,索引切片,维度变换,合并分割。 -张量数学运算主要有:标量运算,向量运算,矩阵运算。另外我们会介绍张量运算的广播机制。 -本篇我们介绍张量的数学运算。 - - +张量操作包括张量的结构操作和张量的数学运算。 + +张量结构操作有:张量创建、索引切片、维度变换和合并分割。 + +张量数学运算有:标量运算、向量运算和矩阵运算。 + +这里以张量的数学运算和运算的广播机制为例,介绍使用方法。 ### 标量运算 -张量的数学运算符可以分为标量运算符、向量运算符、以及矩阵运算符。 -加减乘除乘方,以及三角函数,指数,对数等常见函数,逻辑比较运算符等都是标量运算符。 + +张量的数学运算符可以分为标量运算符、向量运算符以及矩阵运算符。 + 
+加减乘除乘方,以及三角函数、指数、对数等常见函数,逻辑比较运算符等都是标量运算符。
+
+标量运算符的特点是对张量实施逐元素运算。
-有些标量运算符对常用的数学运算符进行了重载。并且支持类似numpy的广播特性。
-举例说明:
+有些标量运算符对常用的数学运算符进行了重载,并且支持类似NumPy的广播特性。
+
+以下代码实现了对input_x逐元素求乘方的操作,指数为input_y:

```python
import numpy as np
-import mindspore # 导入mindspore包
-from mindspore import Tensor # 导入mindspore下的Tensor包
+import mindspore
+from mindspore import Tensor
import mindspore.ops.operations as P

input_x = mindspore.Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
input_y = 3.0
-input_x**input_y
+print(input_x**input_y)
```

-真实输入为:
-```python
-print(input_x)
-[ 1. 8. 64.]
+输出如下:
```
-
-真实输出为:
-```python
-print(input_x**input_y)
[ 1. 8. 64.]
```

#### 加法

-```python
-input_x + input_y
-[4.0 5.0 7.0]
-```
-除普通加外,还有element-wise加法:
+上述代码中`input_x`和`input_y`的相加实现方式如下:

```python
-net = NetAddN()
-input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
-input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
-net(input_x, input_y, input_x, input_y)[10.0, 14.0, 18.0]
+print(input_x + input_y)
```

-#### Element-wise 除法
-```python
-input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
-input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
-div = P.Div()
-div(input_x, input_y)
+输出如下:
```
-
-求FloorDiv:
-```python
-input_x = Tensor(np.array([2, 4, -1]), mindspore.int32))
-input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
-floor_div = P.FloorDiv()
-floor_div(input_x, input_y)[0, 1, -1]
+[4.0 5.0 7.0]
```

-#### Element-wise 乘
+#### Element-wise乘法
+
+以下代码实现了Element-wise乘法:

```python
+import numpy as np
+import mindspore
+from mindspore import Tensor
+import mindspore.ops.operations as P
+
input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
mul = P.Mul()
-mul(input_x, input_y)
+res = mul(input_x, input_y)
+
+print(res)
```

-真实输出:
-```python
-[4, 10, 18]
+输出如下:
+```
+[ 4. 10. 18.]
```

-#### 求三角函数:
+#### 求三角函数
+
+以下代码实现了反余弦函数Acos的逐元素计算:

```python
+import numpy as np
+import mindspore
+from mindspore import Tensor
+import mindspore.ops.operations as P
+
acos = P.ACos()
input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
output = acos(input_x)
+print(output)
```

+输出如下:
+```
+[0.7377037 1.5307858 1.2661037 0.97641146]
+```

### 向量运算

-向量运算符只在一个特定轴上运算,将一个向量映射到一个标量或者另外一个向量。
-#### Concat 算子:
-```python
-data1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
-data2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
-op = P.Concat()
-output = op((data1, data2))
-```
+向量运算符只在一个特定轴上运算,将一个向量映射到一个标量或者另外一个向量。

#### Squeeze
+
+以下代码实现了压缩第3个维度(该维度大小为1)的操作:

```python
+import numpy as np
+import mindspore
+from mindspore import Tensor
+import mindspore.ops.operations as P
+
input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
squeeze = P.Squeeze(2)
output = squeeze(input_tensor)
+
+print(output)
+```
+
+输出如下:
+```
+[[1. 1.]
+ [1. 1.]
+ [1. 1.]]
+```
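+除Squeeze外,向量运算中还常用归约类算子。下面补充一个`ReduceSum`的示意样例(补充说明),沿指定轴求和,将每个向量映射为一个标量:
+
+```python
+import numpy as np
+import mindspore
+from mindspore import Tensor
+import mindspore.ops.operations as P
+
+input_x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
+reduce_sum = P.ReduceSum()
+# 沿axis=1求和,每一行被归约为一个标量
+output = reduce_sum(input_x, 1)
+
+print(output)
+```
+
+输出如下:
+```
+[ 6. 15.]
+```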
+#### 求Sparse2Dense
-#### 求Sparse2Dense(改变tensor维度使其变稠密):
+以下代码实现了Sparse2Dense示例,将稀疏表示转换为稠密的张量:

```python
+import numpy as np
+import mindspore as ms
+from mindspore import Tensor
+import mindspore.ops.operations as P
+
indices = Tensor([[0, 1], [1, 2]])
values = Tensor([1, 2], dtype=ms.float32)
dense_shape = (3, 4)
out = P.SparseToDense()(indices, values, dense_shape)
+
+print(out)
```

-#### ScalarCast:
-```python
-scalar_cast = P.ScalarCast()
-output = scalar_cast(255.0, mindspore.int32)
+输出如下:
+```
+[[0. 1. 0. 0.]
+ [0. 0. 2. 0.]
+ [0. 0. 0. 0.]]
```

### 矩阵运算

-矩阵运算包括: 矩阵乘法,矩阵范数,矩阵行列式,矩阵求特征值,矩阵分解等运算。
-#### 矩阵乘法:
+矩阵运算包括矩阵乘法、矩阵范数、矩阵行列式、矩阵求特征值、矩阵分解等运算。
+
+#### 矩阵乘法
+
+以下代码实现了input_x和input_y的矩阵乘法:

```python
+import numpy as np
+import mindspore
+from mindspore import Tensor
+import mindspore.ops.operations as P
+
input_x = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
input_y = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
matmul = P.MatMul()
output = matmul(input_x, input_y)
-```
-#### 常见范数:
+print(output)
+```

-```python
-input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
-scale = Tensor(np.ones([64]), mindspore.float32)
-bias = Tensor(np.ones([64]), mindspore.float32)
-mean = Tensor(np.ones([64]), mindspore.float32)
-variance = Tensor(np.ones([64]), mindspore.float32)
-batch_norm = P.BatchNorm()
-output = batch_norm(input_x, scale, bias, mean, variance)
+输出如下:
+```
+[[3. 3. 3. 3.]]
```

#### 广播机制

-Broadcast 广播一个tensor到整个group
-举例说明:
+广播(Broadcast)是集合通信操作之一,将一个张量从通信组内的某个设备广播到组内所有设备。
+
+以下代码实现了广播机制的示例:

```python
from mindspore import Tensor
from mindspore.communication import init
import mindspore.nn as nn
import mindspore.ops.operations as P
-init()
+import numpy as np
+
class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
@@ -220,62 +237,137 @@
output = net(input_)
```

## 网络操作

-
-网络操作包括特征提取, 激活函数, LossFunction, 优化算法等:
+网络操作包括特征提取、激活函数、LossFunction、优化算法等。

### 特征提取

+特征提取是机器学习中的常见操作,核心是提取比原输入更具代表性的Tensor。
+
#### 卷积操作
-举例说明:
+
+以下代码实现了常见卷积操作之一的2D convolution操作:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
-weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32))
+weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
conv2d = P.Conv2D(out_channel=32, kernel_size=3)
-conv2d(input, weight)
+res = conv2d(input, weight)
+
+print(res)
+```

+输出如下:
```
+[[[[288. 288. 288. ... 288. 288. 288.]
+   [288. 288. 288. ... 288. 288. 288.]
+   [288. 288. 288. ... 288. 288. 288.]
+   ...
+   [288. 288. 288. ... 288. 288. 288.]
+   [288. 288. 288. ... 288. 288. 288.]
+   [288. 288. 288. ... 288. 288. 288.]]
+
+   ...
+   [288. 288. 288. ... 288. 288. 288.]
+   [288. 288. 288. ... 288. 288. 288.]
+   [288. 288. 288. ... 288. 288. 288.]]]]
+```
+
+#### 卷积的反向传播算子操作
+
+以下代码实现了卷积的反向传播算子操作,根据输出的梯度dout和卷积核weight反推输入的梯度res:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+import mindspore.ops.functional as F
+
dout = Tensor(np.ones([10, 32, 30, 30]), mindspore.float32)
weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
x = Tensor(np.ones([10, 32, 32, 32]))
conv2d_backprop_input = P.Conv2DBackpropInput(out_channel=32, kernel_size=3)
-conv2d_backprop_input(dout, weight, F.shape(x))
+res = conv2d_backprop_input(dout, weight, F.shape(x))
+
+print(res)
+```

+输出如下:
+```
+[[[[ 32. 64. 96. ... 96. 64. 32.]
+   [ 64. 128. 192. ... 192. 128. 64.]
+   [ 96. 192. 288. ... 288. 192. 96.]
+   ...
+   [ 96. 192. 288. ... 288. 192. 96.]
+   [ 64. 128. 192. ... 192. 128. 64.]
+   [ 32. 64. 96. ... 96. 64. 32.]]
+
+  [[ 32. 64. 96. ... 96. 64. 32.]
+   [ 64. 128. 192. ... 192. 128. 64.]
+   [ 96. 192. 288. ... 288. 192. 96.]
+   ...
+   [ 96. 192. 288. ... 288. 192. 96.]
+   [ 64. 128. 192. ... 192. 128. 64.]
+   [ 32. 64. 96. ... 96. 64. 32.]]]]
+```
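+特征提取中还经常用到池化操作。下面补充一个最大池化的示意样例(补充说明;`ksize`、`strides`为本文对应版本的参数名,以实际版本API为准):
+
+```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
+# 构造一个NCHW格式的输入,取2x2窗口、步长2做最大池化
+input_x = Tensor(np.arange(16).reshape((1, 1, 4, 4)), mindspore.float32)
+max_pool = P.MaxPool(ksize=2, strides=2)
+output = max_pool(input_x)
+
+print(output)
+```
+
+输出如下:
+```
+[[[[ 5.  7.]
+   [13. 15.]]]]
+```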
### 激活函数
-举例说明:
+
+以下代码实现Softmax激活函数计算:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
softmax = P.Softmax()
-softmax(input_x)
+res = softmax(input_x)
+
+print(res)
```

-输出结果:
-```python
-[0.01165623, 0.03168492, 0.08612854, 0.23412167, 0.6364086]
+输出如下:
+```
+[0.01165623 0.03168492 0.08612854 0.23412167 0.6364086]
```

### LossFunction

-#### L1Loss:
-举例说明:
+#### L1Loss
+
+以下代码使用`SmoothL1Loss`算子实现了平滑L1损失的计算:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
loss = P.SmoothL1Loss()
input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
-loss(input_data, target_data)
+res = loss(input_data, target_data)
+print(res)
```

-输出结果:
-```python
-[0, 0, 0.5]
+输出如下:
+```
+[0. 0. 0.5]
```

### 优化算法

+
#### SGD
+
+以下代码调用`SGD`算子执行一次参数更新,结果保存在result中:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
sgd = P.SGD()
parameters = Tensor(np.array([2, -0.5, 1.7, 4]), mindspore.float32)
gradient = Tensor(np.array([1, -1, 0.5, 2]), mindspore.float32)
@@ -284,55 +376,103 @@
accum = Tensor(np.array([0.1, 0.3, -0.2, -0.1]), mindspore.float32)
momentum = Tensor(0.1, mindspore.float32)
stat = Tensor(np.array([1.5, -0.3, 0.2, -0.7]), mindspore.float32)
result = sgd(parameters, gradient, learning_rate, accum, momentum, stat)
+
+print(result)
```

-## 数组操作
+输出如下:
+```
+[0. 0. 0. 0.]
+```
-

## 数组操作

数组操作指操作对象是一些数组的操作。

-### DType
-返回跟输入的数据类型一致的并且适配Mindspore的tensor变量, 常用于Mindspore 工程内。
-举例说明:
+### DType
+
+返回与输入数据类型一致、且适配MindSpore的数据类型,常用于MindSpore工程内。
+

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
-type = P.DType()(input_tensor)
+typea = P.DType()(input_tensor)
+
+print(typea)
+```
+
+输出如下:
+```
+Float32
```

### Cast

-转换输入的数据类型并且输出与目标数据类型相同的变量
-举例说明:
+
+转换输入的数据类型并且输出与目标数据类型相同的变量。
+

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
input_x = Tensor(input_np)
type_dst = mindspore.float16
cast = P.Cast()
result = cast(input_x, type_dst)
+print(type(result))
+```
+
+输出如下:
+```
+<class 'mindspore.common.tensor.Tensor'>
```

-### Shape
-返回输入数据的形状
-举例说明:
+### Shape
+
+返回输入数据的形状。
+
+以下代码实现了返回输入数据input_tensor形状的操作:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
shape = P.Shape()
output = shape(input_tensor)
+print(output)
+```
+
+输出如下:
+```
+(3, 2, 1)
```

## 图像操作

-
-图像操作包括图像预处理操作, 如图像剪切(Crop,便于得到大量训练样本)和大小变化(Reise,用于构建图像金子塔等):
+图像操作包括图像预处理操作,如图像剪切(Crop,便于得到大量训练样本)和大小变换(Resize,用于构建图像金字塔等)。
+
+以下代码实现了Crop和Resize操作:

```python
+from mindspore import Tensor
+import mindspore.nn as nn
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore.common.dtype as mstype
+from mindspore.ops import composite as C
+
class CropAndResizeNet(nn.Cell):
    def __init__(self, crop_size):
        super(CropAndResizeNet, self).__init__()
        self.crop_and_resize = P.CropAndResize()
        self.crop_size = crop_size
-    @ms_function
+
    def construct(self, x, boxes, box_index):
        return self.crop_and_resize(x, boxes, box_index, self.crop_size)

@@ -350,79 +490,146 @@
output = crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_index))
print(output.asnumpy())
```

+输出如下:
+```
+[[[[ 6.51672244e-01 -1.85958534e-01 5.19907832e-01]
+[ 1.53466597e-01 4.10562098e-01 6.26138210e-01]
+[ 6.62892580e-01 3.81776541e-01 4.69261825e-01]
+...
+[-5.83377600e-01 -3.53377648e-02 -6.01786733e-01]
+[ 1.36125124e+00 5.84172308e-02 -6.41442612e-02]
+[-9.11651254e-01 -1.19495761e+00 1.96810793e-02]]
+
+[[ 6.06956100e-03 -3.73778701e-01 1.88935513e-03]
+[-1.06859171e+00 2.00272346e+00 1.37180305e+00]
+[ 1.69524819e-01 2.90421434e-02 -4.12243098e-01]
+...
+
+[[-2.04489112e-01 2.36615837e-01 1.33802962e+00]
+[ 1.08329034e+00 -9.00492966e-01 -8.21497202e-01]
+[ 7.54147097e-02 -3.72897685e-01 -2.91040149e-02]
+...
+[ 1.12317121e+00 8.98950577e-01 4.22795087e-01]
+[ 5.13781667e-01 5.12095273e-01 -3.68211865e-01]
+[-7.04941899e-02 -1.09924078e+00 6.89047515e-01]]]]
+```
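+除组合的Crop加Resize外,也可以单独做大小变换。下面补充一个示意性的样例(补充说明,假设环境中提供`ResizeBilinear`算子),对NCHW格式的输入做双线性插值放大:
+
+```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
+# 构造一个1x1x4x4的输入图像,放大为8x8
+x = Tensor(np.random.rand(1, 1, 4, 4), mindspore.float32)
+resize_bilinear = P.ResizeBilinear((8, 8))
+output = resize_bilinear(x)
+
+print(output.asnumpy().shape)
+```
+
+输出如下:
+```
+(1, 1, 8, 8)
+```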
+
## 编码运算

-
-编码运算包括 BoundingBox Encoding和 BoundingBox Decoding, IOU计算等。
+编码运算包括BoundingBox Encoding、BoundingBox Decoding、IOU计算等。

### BoundingBoxEncode
+
对物体所在区域方框进行编码,得到类似PCA的更精简信息,以便做后续类似特征提取、物体检测、图像恢复等任务。

-举例说明:
+以下代码实现了对anchor_box和groundtruth_box的boundingbox encode:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32)
groundtruth_box = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
boundingbox_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0))
-boundingbox_encode(anchor_box, groundtruth_box)
+res = boundingbox_encode(anchor_box, groundtruth_box)
+print(res)
+```
+
+输出如下:
```
-输出结果为:
-```python
[[5.0000000e-01 5.0000000e-01 -6.5504000e+04 6.9335938e-01]
 [-1.0000000e+00 2.5000000e-01 0.0000000e+00 4.0551758e-01]]
```

-### BoundingBoxDecode
+### BoundingBoxDecode
+
编码器对区域位置信息编码之后,用此算子进行解码。

-举例说明:
+以下代码实现了对anchor_box和deltas的boundingbox decode:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32)
-deltas = Tensor([[3,1,2,2],[1,s2,1,4]],mindspore.float32)
+deltas = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0), max_shape=(768, 1280), wh_ratio_clip=0.016)
-boundingbox_decode(anchor_box, deltas)
+res = boundingbox_decode(anchor_box, deltas)
+print(res)
+```
+
+输出如下:
```
-输出结果:
-```python
[[4.1953125 0. 0. 5.1953125]
 [2.140625 0. 3.859375 60.59375]]
```

-### IOU 计算:
-计算预测的物体所在方框和真实物体所在方框的交集区域与并集区域的占比大小。其常作为一种损失函数,用以优化模型。
+### IOU计算
+
+计算预测的物体所在方框和真实物体所在方框的交集区域与并集区域的占比大小,常作为一种损失函数,用以优化模型。
+
+以下代码实现了计算两个变量anchor_boxes和gt_boxes之间的IOU,以out输出:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
iou = P.IOU()
anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
+out = iou(anchor_boxes, gt_boxes)
+print(out)
+```
+
+输出如下:
+```
+[[0. -0. 0.]
+ [0. -0. 0.]
+ [0. 0. 0.]]
+```
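+上例使用随机生成的方框,IOU多为0,结果不够直观。下面补充一个使用固定方框的示意样例(补充说明;方框格式为[x0, y0, x1, y1],具体数值与算子的坐标约定有关,此处不给出固定输出):
+
+```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore
+
+iou = P.IOU()
+# 两个有明确重叠关系的方框
+anchor_boxes = Tensor(np.array([[0, 0, 2, 2]]), mindspore.float16)
+gt_boxes = Tensor(np.array([[1, 1, 3, 3]]), mindspore.float16)
+out = iou(anchor_boxes, gt_boxes)
+
+print(out)
+```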
## 调试操作
-
+调试操作指的是用于调试网络的一些常用算子及其操作,例如Debug等。这些操作便于用户在网络执行过程中观察中间结果,定位网络中的问题。

### Debug
-输出tensor变量的数值, 方便用户随时随地打印想了解或者debug必需的某变量数值。
-参考示例:
+输出Tensor变量的数值,方便用户随时随地打印想了解或者debug必需的某变量数值。
+
+以下代码实现了输出x这一变量的值:

```python
+from mindspore import nn
+import mindspore.ops.operations as P
+
class DebugNN(nn.Cell):
    def __init__(self,):
+        super(DebugNN, self).__init__()
+        self.add = P.TensorAdd()
        self.debug = nn.Debug()

    def construct(self, x, y):
        x = self.add(x, y)
        self.debug(x)
        return x
```

### HookBackward

-打印中间变量的梯度,这一算子特别常用,遂举例在此,虽目前仅支持Pynative 形式
-参考示例:
+打印中间变量的梯度,是比较常用的算子,目前仅支持PyNative模式。
+
+以下代码实现了打印中间变量(例中x,y)的梯度:

```python
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore.common.dtype as mstype
+from mindspore.ops import composite as C
+
def hook_fn(grad_out):
    print(grad_out)

-grad_all = GradOperation(get_all=True)
+grad_all = C.GradOperation(get_all=True)
hook = P.HookBackward(hook_fn)

def hook_test(x, y):
@@ -432,23 +639,11 @@ def hook_test(x, y):
    return z

def backward(x, y):
-    return grad_all(hook_test)(x, y)
+    return grad_all(hook_test)(Tensor(x, mstype.float32), Tensor(y, mstype.float32))

backward(1, 2)
```
-
-## 量化操作
-
-
-量化操作指对tensor做量化或者反量化操作。 量化操作指将浮点数用整数的加和表示,利用整数加和并行加速时速度快的优点, 实
-现在可接受精度损失下的性能提升。反量化指其反过程,其在精度要求高的地方常被用到。
-
-### MinMaxUpdatePerLayer
-完成在训练时的量化和反量化操作
-举例说明:
-```python
-input_tensor = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
-min_tensor = Tensor(np.array([-6]), mstype.float32)
-max_tensor = Tensor(np.array([6]), mstype.float32)
-output_tensor = FakeQuantPerLayer(num_bits=8)(input_tensor, min_tensor, max_tensor)
+输出如下:
```
+(Tensor(shape=[], dtype=Float32, value=2),)
+```
\ No newline at end of file
diff --git a/api/source_zh_cn/programming_guide/ops.md b/api/source_zh_cn/programming_guide/ops.md
index 53bb69f5dfd011be5b4ed47d1f440da45df482a6..c459e7b716272a2249c09c76e5a6a3812e5f92b9 100644
--- a/api/source_zh_cn/programming_guide/ops.md
+++ b/api/source_zh_cn/programming_guide/ops.md
@@ -123,4 +123,4 @@ tensor [[2.4, 4.2]
scalar 3
```
-此外,高阶函数`GradOperation`提供了根据输入的函数,求这个函数对应的梯度函数的方式,详细可以参阅[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.composite.html#mindspore.ops.composite.GradOperation)。
\ No newline at end of file
+此外,高阶函数`GradOperation`提供了根据输入的函数,求这个函数对应的梯度函数的方式,详细可以参阅[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GradOperation)。
\ No newline at end of file
diff --git a/api/source_zh_cn/programming_guide/optim.md b/api/source_zh_cn/programming_guide/optim.md
index 7a8ebb7096ae41b44fce8fcf3b4796033658431b..e8a58e7b6e77f6baa7ea5a531af5805fe8080f15 100644
--- a/api/source_zh_cn/programming_guide/optim.md
+++ b/api/source_zh_cn/programming_guide/optim.md
@@ -1,15 +1,15 @@
-# optim模块
+# 优化算法

-- [优化器](#优化器)
-  - [概述](#概述)
-  - [学习率](#学习率)
-    - [dynamic_lr](#dynamic_lr)
-    - [learning_rate_schedule](#learning_rate_schedule)
-  - [optimzer](#optimzer)
-    - [如何使用](#如何使用)
-    - [内置优化器](#内置优化器)
+- [优化算法](#优化算法)
+  - [概述](#概述)
+  - [学习率](#学习率)
+    - [dynamic_lr](#dynamic_lr)
+    - [learning_rate_schedule](#learning_rate_schedule)
+  - [Optimizer](#optimizer)
+    - [如何使用](#如何使用)
+    - [内置优化器](#内置优化器)

## 概述

-mindSpore.nn.optim是Mindspore框架中实现各种优化算法的模块,包含常用的优化器,学习率等,并且接口具备足够的通用性,可以将以后更新、更复杂的方法集成到模块里。
+`mindspore.nn.optim`是MindSpore框架中实现各种优化算法的模块,包含常用的优化器、学习率等,并且接口具备足够的通用性,可以将以后更新、更复杂的方法集成到模块里。

-mindspore.nn.optim为模型提供常用的优化器,如SGD、ADAM、Momentum。优化器用于计算和更新梯度,模型优化算法的选择直接关系到最终模型的性能,如果有时候效果不好,未必是特征或者模型设计的问题,很有可能是优化算法的问题;
-同时还有mindspore.nn提供的学习率的模块,学习率learing_rate分为dynamic_lr和learning_rate_schedule,都是动态学习率,但是实现方式不同,学习率最为监督学习以及深度学习中重要的参数,其决定着目标函数是否能收敛到局部最小值以及何时能收敛到最小值。
-合适的学习率能够使目标函数在合适的的时间内收敛到局部最小值。
+`mindspore.nn.optim`为模型提供常用的优化器,如`SGD`、`ADAM`、`Momentum`。优化器用于计算和更新梯度,模型优化算法的选择直接关系到最终模型的性能,如果有时候效果不好,未必是特征或者模型设计的问题,很有可能是优化算法的问题;同时还有`mindspore.nn`提供的学习率的模块,学习率分为`dynamic_lr`和`learning_rate_schedule`,都是动态学习率,但是实现方式不同。学习率作为监督学习以及深度学习中重要的参数,决定着目标函数是否能收敛到局部最小值以及何时能收敛到最小值。合适的学习率能够使目标函数在合适的时间内收敛到局部最小值。

> 本文档中的所有示例,支持CPU,GPU,Ascend环境。

## 学习率

### dynamic_lr

-mindspore.nn.dynamic_lr模块有以下几个类,piecewise_constant_lr类是得到分段不变的学习速率,exponential_decay_lr类是基于指数衰减函数计算学习率,natural_exp_decay_lr类是基于自然指数衰减函数计算学习率,inverse_decay_lr类是基于反时间衰减函数计算学习速率,cosine_decay_lr类是基于余弦衰减函数计算学习率,polynomial_decay_lr类是基于多项式衰减函数计算学习率,warmup_lr类是提高学习率,它们是属于dynamic_lr的不同实现方式。
+`mindspore.nn.dynamic_lr`模块有以下几个类:

-例如piecewise_constant_lr类代码样例如下:
+- `piecewise_constant_lr`类:得到分段不变的学习速率。
+- `exponential_decay_lr`类:基于指数衰减函数计算学习率。
+- `natural_exp_decay_lr`类:基于自然指数衰减函数计算学习率。
+- `inverse_decay_lr`类:基于反时间衰减函数计算学习速率。
+- `cosine_decay_lr`类:基于余弦衰减函数计算学习率。
+- `polynomial_decay_lr`类:基于多项式衰减函数计算学习率。
+- `warmup_lr`类:提高学习率。
-
-```
-class mindspore.nn.dynamic_lr.piecewise_constant_lr(milestone, learning_rates)
+它们是属于`dynamic_lr`的不同实现方式。

-Parameters:
-    milestone (Union[list[int], tuple[int]]) – A list of milestone. This list is a monotone increasing list. Every element is a milestone step, and must be greater than 0.
-    learning_rates (Union[list[float], tuple[float]]) – A list of learning rates.
+例如`piecewise_constant_lr`类代码样例如下:

-Returns:
-    list[float]. The size of list
```
+from mindspore.nn.dynamic_lr import piecewise_constant_lr

-```
-milestone = [2, 5, 10]
-learning_rates = [0.1, 0.05, 0.01]
-piecewise_constant_lr(milestone, learning_rates)
+def test_dynamic_lr():
+    milestone = [2, 5, 10]
+    learning_rates = [0.1, 0.05, 0.01]
+    lr = piecewise_constant_lr(milestone, learning_rates)
+    print(lr)
+
+
+if __name__ == '__main__':
+    test_dynamic_lr()
```

返回结果如下:
@@ -56,49 +61,50 @@ piecewise_constant_lr(milestone, learning_rates)

### learning_rate_schedule

-mindspore.nn.learning_rate_schedule模块下有以下几个类。ExponentialDecayLR类,NaturalExpDecayLR类,InverseDecayLR类,CosineDecayLR类,PolynomialDecayLR类,WarmUpLR类。它们都属于learning_rate_schedule,只是实现方式不同。
+`mindspore.nn.learning_rate_schedule`模块下有以下几个类:`ExponentialDecayLR`类、`NaturalExpDecayLR`类、`InverseDecayLR`类、`CosineDecayLR`类、`PolynomialDecayLR`类和`WarmUpLR`类。它们都属于`learning_rate_schedule`,只是实现方式不同,各自含义如下:

-ExponentialDecayLR类是基于指数衰减函数计算学习率,NaturalExpDecayLR类是基于自然指数衰减函数巨酸学习率,InverseDecayLR类是基于反时间衰减函数计算学习速率,CosineDecayLR类是基于余弦衰减函数计算学习率,PolynomialDecayLR类是基于多项式衰减函数计算学习率,WarmUpLR类是提高学习率,它们是属于learning_rate_schedule的不同实现方式。
+- `ExponentialDecayLR`类:基于指数衰减函数计算学习率。
+- `NaturalExpDecayLR`类:基于自然指数衰减函数计算学习率。
+- `InverseDecayLR`类:基于反时间衰减函数计算学习速率。
+- `CosineDecayLR`类:基于余弦衰减函数计算学习率。
+- `PolynomialDecayLR`类:基于多项式衰减函数计算学习率。
+- `WarmUpLR`类:提高学习率。
+
+它们是属于`learning_rate_schedule`的不同实现方式。

例如ExponentialDecayLR类代码样例如下:

```
-class ExponentialDecayLR(learning_rate, decay_rate, decay_steps, is_stair=False)
+from mindspore.common import dtype as mstype
+from mindspore import Tensor
+from mindspore.nn.learning_rate_schedule import ExponentialDecayLR

-Parameters:
-    learning_rate(float) - The initial value of learning rate.
-    decay_rate(float) - The decay rate.
-    decay_steps(int) - A value used to calculate decayed learning rate.
-    is_stair(bool) - if true, learning rate decay once every decay_steps times. Default: False.
+def test_learning_rate_schedule():
+    learning_rate = 0.1   # learning_rate(float) - The initial value of learning rate.
+    decay_rate = 0.9   # decay_rate(float) - The decay rate.
+    decay_steps = 4   # decay_steps(int) - A value used to calculate decayed learning rate.
+    global_step = Tensor(2, mstype.int32)
+    exponential_decay_lr = ExponentialDecayLR(learning_rate, decay_rate, decay_steps)
+    res = exponential_decay_lr(global_step)
+    print(res)

-inputs:
-    Tensor.The current step number.
-Returns:
-    Tensor. The learning rate value for the current step.
+
+if __name__ == '__main__':
+    test_learning_rate_schedule()
```

+返回结果如下:
```
-from mindspore.common import dtype as mstype
-from mindspore import Tensor
-
-
-learning_rate = 0.1 # learning_rate(float) - The initial value of learning rate.
-decay_rate = 0.9 # decay_rate(float) - The decay rate.
-decay_steps = 4 # decay_steps(int) - A value used to calculate decayed learning rate.
-global_step = Tensor(2, mystype.int32)
-exponential_decay_lr = ExponentialDecayLR(learning_rate, decay_rate, decay_steps)
-exponential_decay_lr(global_step)
-
+0.094868325
```
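+作为补充,下面再给出一个`NaturalExpDecayLR`类的示意样例(补充说明,参数含义与上例类似;`is_stair`设置为True时,每经过decay_steps步衰减一次):
+
+```
+from mindspore.common import dtype as mstype
+from mindspore import Tensor
+from mindspore.nn.learning_rate_schedule import NaturalExpDecayLR
+
+learning_rate = 0.1
+decay_rate = 0.9
+decay_steps = 4
+global_step = Tensor(2, mstype.int32)
+natural_exp_decay_lr = NaturalExpDecayLR(learning_rate, decay_rate, decay_steps, True)
+res = natural_exp_decay_lr(global_step)
+print(res)
+```
+
+返回结果如下:
+
+```
+0.1
+```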
-## optimzer
+## Optimizer

### 如何使用

-为了使用mindspore.nn.optim,我们需要构建一个optimizer对象。这个对象能够保持当前参数状态并基于计算得到的梯度进行参数更新。
+为了使用`mindspore.nn.optim`,我们需要构建一个`Optimizer`对象。这个对象能够保持当前参数状态并基于计算得到的梯度进行参数更新。

- 构建

-为了构建一个Optimizer,我们需要给它一个包含可需要优化的参数(必须是Variable对象)的iterable。然后,你可以设置optimizer的参数选项,比如学习率,权重衰减等等。
+为了构建一个`Optimizer`,我们需要给它一个包含需要优化的参数(必须是Variable对象)的iterable。然后,你可以设置Optimizer的参数选项,比如学习率、权重衰减等等。

代码样例如下:

@@ -117,7 +123,7 @@ optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)

优化器也支持为每个参数单独设置选项。若想这么做,不要直接传入变量Variable,而是传入一个字典的iterable。每一个字典都分别定义了一组参数,并且包含一个key键,这个key键对应相应的参数value值。其他的key键应该是优化器所接受的其他参数,并且会被用于对这组参数的优化。

我们仍然能够传递选项作为关键字参数,在未重写这些选项的组中,它们会被用作默认值。当你只想改动一个参数组的选项,但其他参数组的选项不变时,这是非常有用的。

-例如,当我们想制定每一层的学习率时,以SGD为例:
+例如,当我们想指定每一层的学习率时,以`SGD`为例:

```
from mindspore import nn

@@ -132,22 +138,37 @@ optim = nn.SGD([{'params': conv_params, 'weight_decay': 0.01},

### 内置优化器

-深度学习优化算法大概常用的有SGD、Adam、Ftrl、lazyadam、Momentum、RMSprop、Lars、Proximal_ada_grad和lamb这几种。
-在mindspore.nn.optim模块中,他们都有对应的类实现。例如:
+深度学习优化算法常用的有`SGD`、`Adam`、`Ftrl`、`lazyadam`、`Momentum`、`RMSprop`、`Lars`、`Proximal_ada_grad`和`lamb`这几种。
+在`mindspore.nn.optim`模块中,它们都有对应的类实现。例如:

-- SGD,默认参数为纯SGD,设置momentum参数不为0,考虑了一阶动量,设置nesterov为True后变成NAG,即Nesterov Accelerated Gradient,在计算梯度时计算的是向前走一步所在位置的梯度。
+- `SGD`,默认参数为纯SGD,设置`momentum`参数不为0,考虑了一阶动量,设置`nesterov`为True后变成`NAG`,即`Nesterov Accelerated Gradient`,在计算梯度时计算的是向前走一步所在位置的梯度。

-- RMSprop,考虑了二阶动量,对于不同的参数有不同的学习率,即自适应学习率,对Adagrad进行了优化,通过指数平滑只考虑一定窗口内的二阶动量。
+- `RMSprop`,考虑了二阶动量,对于不同的参数有不同的学习率,即自适应学习率,对`Adagrad`进行了优化,通过指数平滑只考虑一定窗口内的二阶动量。

-- Adam,同时考虑了一阶动量和二阶动量,可以看成RMSprop上进一步考虑了一阶动量。
+- `Adam`,同时考虑了一阶动量和二阶动量,可以看成`RMSprop`上进一步考虑了一阶动量。

-例如SGD的代码样例如下:
+例如`SGD`的代码样例如下:

```
from mindspore import nn
from mindspore.train import Model
from .optimizer import Optimizer
-
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+import mindspore.common.dtype as mstype
+from mindspore.ops import composite as C
+from mindspore.common.parameter import Parameter
+
+class Net(nn.Cell):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.matmul = P.MatMul()
+        self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')
+
+    def construct(self, x, y):
+        x = x * self.z
+        out = self.matmul(x, y)
+        return out

net = Net()
optim = nn.SGD(params=net.trainable_params())
diff --git a/api/source_zh_cn/programming_guide/parameter.md b/api/source_zh_cn/programming_guide/parameter.md
index 
4e6bf70213746377f7f22d2107e006205143f2b4..e75b65546987bb534dc934cc47d6555cfaaf3a97 100644 --- a/api/source_zh_cn/programming_guide/parameter.md +++ b/api/source_zh_cn/programming_guide/parameter.md @@ -6,36 +6,36 @@ - [概述](#概述) - [初始化](#初始化) - [属性](#属性) - - [接口](#方法) + - [方法](#方法) - [ParameterTuple](#parametertuple) - - - + ## 概述 -Parameter是变量张量,代表在训练网络时,需要被更新的参数,是MetaTensor的一个子类。 +`Parameter`是变量张量,代表在训练网络时,需要被更新的参数。本章主要介绍了`Parameter`的初始化以及属性和方法的使用,同时介绍了`ParameterTuple`。 ## 初始化 ``` -def __init__(self, default_input, name, requires_grad=True, layerwise_parallel=False) +mindspore.Parameter(default_input, name, requires_grad=True, layerwise_parallel=False) ``` 初始化一个`Parameter`对象,传入的数据支持`Tensor`、`Initializer`、`int`和`float`四种类型。 -`Initializer`是初始化器,保存了shape和dtype信息,可调用`to_tensor`方法生成存有数据的Tensor。 +`Initializer`是初始化器,保存了shape和dtype信息,提供`to_tensor`方法生成存有数据的`Tensor`,可调用`initializer`接口生成`Initializer`对象。 -当网络采用半自动或者全自动并行策略,并且使用`Initializer`初始化`Parameter`时, -`Parameter`里保存的不是`Tensor`,而是`MetaTensor`。 +当网络采用半自动或者全自动并行策略,并且使用`Initializer`初始化`Parameter`时,`Parameter`里保存的不是`Tensor`,而是`MetaTensor`。 -`MetaTensor`与`Tensor`不同,`MetaTensor`仅保存张量的形状和类型,而不保存实际数据, -所以不会占用任何内存,可调用`init_data`接口将`Parameter`里保存的`MetaTensor`转化为`Tensor`。 +`MetaTensor`与`Tensor`不同,`MetaTensor`仅保存张量的形状和类型,而不保存实际数据,所以不会占用任何内存,可调用`init_data`接口将`Parameter`里保存的`MetaTensor`转化为`Tensor`。 可为每个`Parameter`指定一个名称,便于后续操作和更新。 -当`layerwise_parallel`为`True`时,参数广播和参数梯度聚合时会过滤掉该参数。 +当参数需要被更新时,需要将`requires_grad`设置为`True`。 + +当`layerwise_parallel`(混合并行)配置为True时,参数广播和参数梯度聚合时会过滤掉该参数。 + +有关分布式并行的相关配置,可以参考文档:。 下例通过三种不同的数据类型构造了`Parameter`,三个`Parameter`都需要更新,都不采用layerwise并行。如下: ``` @@ -64,18 +64,17 @@ Parameter (name=z, value=2.0) ``` ## 属性 + - `inited_param`:返回保存了实际数据的`Parameter`,如果`Parameter`原本保存的是`MetaTensor`,会将其转换为`Tensor`。 - `name`:实例化`Parameter`时,为其指定的名字。 -- `cast_type`:用于`PyNative`模式下的混合精度,如果设置了`cast_type`,会在训练前,将`Parameter`自动转换成我们设置的`cast_type`, - `cast_type`仅支持设置为`float32`、`float16`和`None`,设置为`None`,就不做转换。 - - `sliced`:用在自动并行场景下,表示`Parameter`里保存的数据是否是分片数据。 如果是,就不再对其进行切分,如果不是,需要根据网络并行策略确认是否对其进行切分。 -- `is_init`:`Parameter`的初始化状态。 +- `is_init`:`Parameter`的初始化状态。在GE后端,Parameter需要一个`init graph`来从主机同步数据到设备侧,该标志表示数据是否已同步到设备。 + 此标志仅在GE后端起作用,其他后端将被设置为False。 - `layerwise_parallel`:`Parameter`是否支持layerwise并行。如果支持,参数就不会进行广播和梯度聚合,反之则需要。 @@ -95,7 +94,6 @@ x = Parameter(default_input=Tensor(np.arange(2*3).reshape((2, 3))), name="x") print("name: ", x.name, "\n", "sliced: ", x.sliced, "\n", "is_init: ", x.is_init, "\n", - "cast_type: ", x.cast_type, "\n", "inited_param: ", x.inited_param, "\n", "requires_grad: ", x.requires_grad, "\n", "layerwise_parallel: ", x.layerwise_parallel, "\n", @@ -108,7 +106,6 @@ print("name: ", x.name, "\n", name: x sliced: False is_init: False -cast_type: None inited_param: None requires_grad: True layerwise_parallel: False @@ -122,9 +119,9 @@ data: Parameter (name=x, value=[[0 1 2] 当初始化`Parameter`传入的数据是`Initializer`时,可调用该接口将`Parameter`保存的数据转换为`Tensor`。 - `set_data`:设置`Parameter`保存的数据,支持传入`Tensor`、`Initializer`、`int`和`float`进行设置, - 将slice_shape设置为True时,可改变`Parameter`的shape,反之,设置的数据shape必须与`Parameter`原来的shape保持一致。 + 将方法的入参`slice_shape`设置为True时,可改变`Parameter`的shape,反之,设置的数据shape必须与`Parameter`原来的shape保持一致。 -- `set_param_ps`:控制训练参数是否通过[Parameter Server](https://gitee.com/mindspore/docs/blob/master/tutorials/source_zh_cn/advanced_use/parameter_server_training.md)进行训练。 +- `set_param_ps`:控制训练参数是否通过[Parameter Server](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/parameter_server_training.html)进行训练。 - `clone`:克隆`Parameter`,需要指定克隆之后的参数名称。 @@ 
-137,13 +134,13 @@ from mindspore import Tensor, Parameter from mindspore import dtype as mstype from mindspore.common.initializer import initializer -x = Parameter(data=initializer('ones', [1, 2, 3], mstype.float32), name='x') +x = Parameter(default_input=initializer('ones', [1, 2, 3], mstype.float32), name='x') print(x) print(x.clone(prefix="x_c")) print(x.init_data()) print(x.set_param_ps()) -print(x.set_parameter_data(data=Tensor(np.arange(2*3).reshape((1, 2, 3))))) +print(x.set_data(default_input=Tensor(np.arange(2*3).reshape((1, 2, 3))))) ``` 输出如下: @@ -171,9 +168,9 @@ from mindspore import Tensor, Parameter, ParameterTuple from mindspore.common import dtype as mstype from mindspore.common.initializer import initializer -x = Parameter(data=Tensor(np.arange(2*3).reshape((2, 3))), name="x") -y = Parameter(data=initializer('ones', [1, 2, 3], mstype.float32), name='y') -z = Parameter(data=2.0, name='z') +x = Parameter(default_input=Tensor(np.arange(2*3).reshape((2, 3))), name="x") +y = Parameter(default_input=initializer('ones', [1, 2, 3], mstype.float32), name='y') +z = Parameter(default_input=2.0, name='z') params = ParameterTuple((x, y, z)) params_copy = params.clone("params_copy") print(params, "\n") diff --git a/api/source_zh_cn/programming_guide/performance_optimization.md b/api/source_zh_cn/programming_guide/performance_optimization.md new file mode 100644 index 0000000000000000000000000000000000000000..c86cb40e45b3a81c4663ce54f92c3a961b820f23 --- /dev/null +++ b/api/source_zh_cn/programming_guide/performance_optimization.md @@ -0,0 +1,13 @@ +# 性能优化 + + + +MindSpore提供了多种性能优化方法,用户可根据实际情况,利用它们来提升训练和推理的性能。 + +| 优化阶段 | 优化方法 | 支持情况 | +| --- | --- | --- | +| 训练 | [分布式并行训练](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/distributed_training_tutorials.html) | Ascend、GPU | +| | [混合精度](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/mixed_precision.html) | Ascend、GPU | +| | [图算融合](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/graph_kernel_fusion.html) | Ascend | +| | [梯度累积](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/gradient_accumulation.html) | GPU | +| 推理 | [训练后量化](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/post_training_quantization.html) | Lite | \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/pipeline.md b/api/source_zh_cn/programming_guide/pipeline.md index bd8af3408a74be9594ed762832502cdcbf10cec9..47c821046b13fa2b76b99bfdb06ae00cbcaf858d 100644 --- a/api/source_zh_cn/programming_guide/pipeline.md +++ b/api/source_zh_cn/programming_guide/pipeline.md @@ -11,7 +11,6 @@ - [repeat](#repeat) - [zip](#zip) - [concat](#concat) - - [project](#project) @@ -45,37 +44,33 @@ MindSpore目前支持的常用数据处理算子如下表所示,更多数据 ![shuffle](./images/shuffle.png) -```python -# 将数据集进行混洗操作 +下面的样例先构建了一个随机数据集,然后对其进行混洗操作,最后展示了混洗后的数据结果。 +```python import numpy as np import mindspore.dataset as ds -# 设置全局随机种子,确保shuffle的行为可预测 ds.config.set_seed(0) -# 构建一个generator def generator_func(): for i in range(5): yield (np.array([i, i+1, i+2]),) -# 从generator中构建数据管道 dataset1 = ds.GeneratorDataset(generator_func, ["data"]) -# 为数据集创建一个混洗操作 -# buffer_size代表创建一个存放size个样本的容器,再从此容器中随机采样样本进行输出 -# 当buffer_size设置为dataset的长度时,是全局混洗 dataset1 = dataset1.shuffle(buffer_size=2) for data in dataset1.create_dict_iterator(): print(data) ``` +输出结果如下: + ``` -{'data': array([0, 1, 2], dtype=int64)} -{'data': array([2, 3, 4], dtype=int64)} -{'data': array([3, 4, 5], dtype=int64)} -{'data': array([1, 2, 3], dtype=int64)} -{'data': array([4, 5, 6], dtype=int64)} 
+{'data': Tensor(shape=[3], dtype=Int64, value=[0, 1, 2])} +{'data': Tensor(shape=[3], dtype=Int64, value=[2, 3, 4])} +{'data': Tensor(shape=[3], dtype=Int64, value=[3, 4, 5])} +{'data': Tensor(shape=[3], dtype=Int64, value=[1, 2, 3])} +{'data': Tensor(shape=[3], dtype=Int64, value=[4, 5, 6])} ``` ### map @@ -86,13 +81,12 @@ for data in dataset1.create_dict_iterator(): ![map](./images/map.png) -```python -# 将数据集进行映射操作 +下面的样例先构建了一个随机数据集,然后定义了数据翻倍的映射函数并将其作用于数据集,最后对比展示了映射前后的数据结果。 +```python import numpy as np import mindspore.dataset as ds -# 构建一个generator def generator_func(): for i in range(5): yield (np.array([i, i+1, i+2]),) @@ -100,36 +94,33 @@ def generator_func(): def pyfunc(x): return x*2 -# 从generator中构建数据管道 dataset = ds.GeneratorDataset(generator_func, ["data"]) -# 创建数据管道,输出原始数据 for data in dataset.create_dict_iterator(): print(data) -print("") +print("------ after processing ------") -# 为数据集创建一个映射操作 -# input_columns指定要处理的列,operation指定映射函数 -dataset = dataset.map(input_columns=["data"], operations=pyfunc) +dataset = dataset.map(operations=pyfunc, input_columns=["data"]) -# 创建数据管道,输出映射后的数据 for data in dataset.create_dict_iterator(): print(data) ``` +输出结果如下: + ``` -{'data': array([0, 1, 2], dtype=int64)} -{'data': array([1, 2, 3], dtype=int64)} -{'data': array([2, 3, 4], dtype=int64)} -{'data': array([3, 4, 5], dtype=int64)} -{'data': array([4, 5, 6], dtype=int64)} - -{'data': array([0, 2, 4], dtype=int64)} -{'data': array([2, 4, 6], dtype=int64)} -{'data': array([4, 6, 8], dtype=int64)} -{'data': array([ 6, 8, 10], dtype=int64)} -{'data': array([ 8, 10, 12], dtype=int64)} +{'data': Tensor(shape=[3], dtype=Int64, value=[0, 1, 2])} +{'data': Tensor(shape=[3], dtype=Int64, value=[1, 2, 3])} +{'data': Tensor(shape=[3], dtype=Int64, value=[2, 3, 4])} +{'data': Tensor(shape=[3], dtype=Int64, value=[3, 4, 5])} +{'data': Tensor(shape=[3], dtype=Int64, value=[4, 5, 6])} +------ after processing ------ +{'data': Tensor(shape=[3], dtype=Int64, value=[0, 2, 4])} +{'data': Tensor(shape=[3], dtype=Int64, value=[2, 4, 6])} +{'data': Tensor(shape=[3], dtype=Int64, value=[4, 6, 8])} +{'data': Tensor(shape=[3], dtype=Int64, value=[ 6, 8, 10])} +{'data': Tensor(shape=[3], dtype=Int64, value=[ 8, 10, 12])} ``` ### batch @@ -138,45 +129,40 @@ for data in dataset.create_dict_iterator(): ![batch](./images/batch.png) -```python -# 将数据集进行分批操作 +下面的样例先构建了一个随机数据集,然后分别展示了保留多余数据与否的数据集分批结果,其中批大小为2。 +```python import numpy as np import mindspore.dataset as ds -# 构建一个generator def generator_func(): for i in range(5): yield (np.array([i, i+1, i+2]),) -# 从generator中构建数据管道 dataset1 = ds.GeneratorDataset(generator_func, ["data"]) -# 为数据集划分批次,batch_size代表每2个样本为一个批次 -# drop_remainder代表是否丢弃最后不能完整构成批次的样本 -# 在此例子中,5%2=1,但因为drop_remainder=False,因此保留最后一个单独的样本 dataset1 = dataset1.batch(batch_size=2, drop_remainder=False) for data in dataset1.create_dict_iterator(): print(data) -print("") +print("------ drop remainder ------") -# 从generator中构建数据管道 dataset2 = ds.GeneratorDataset(generator_func, ["data"]) -# 丢弃最后不能完整构成批次的样本 dataset2 = dataset2.batch(batch_size=2, drop_remainder=True) for data in dataset2.create_dict_iterator(): print(data) ``` -``` -{'data': array([[0, 1, 2], [1, 2, 3]], dtype=int64)} -{'data': array([[2, 3, 4], [3, 4, 5]], dtype=int64)} -{'data': array([[4, 5, 6]], dtype=int64)} +输出结果如下: -{'data': array([[0, 1, 2], [1, 2, 3]], dtype=int64)} -{'data': array([[2, 3, 4], [3, 4, 5]], dtype=int64)} +``` +{'data': Tensor(shape=[2, 3], dtype=Int64, value=[[0, 1, 2], [1, 2, 3]])} +{'data': Tensor(shape=[2, 3], 
dtype=Int64, value=[[2, 3, 4], [3, 4, 5]])} +{'data': Tensor(shape=[1, 3], dtype=Int64, value=[[4, 5, 6]])} +------ drop remainder ------ +{'data': Tensor(shape=[2, 3], dtype=Int64, value=[[0, 1, 2], [1, 2, 3]])} +{'data': Tensor(shape=[2, 3], dtype=Int64, value=[[2, 3, 4], [3, 4, 5]])} ``` ### repeat @@ -187,38 +173,36 @@ for data in dataset2.create_dict_iterator(): ![repeat](./images/repeat.png) -```python -# 将数据集进行加倍操作 +下面的样例先构建了一个随机数据集,然后将其重复2次,最后展示了重复后的数据结果。 +```python import numpy as np import mindspore.dataset as ds -# 构建一个generator def generator_func(): for i in range(5): yield (np.array([i, i+1, i+2]),) -# 从generator中构建数据管道 dataset1 = ds.GeneratorDataset(generator_func, ["data"]) -# 为数据集创建一个加倍操作 -# count参数代表将数据集内容扩充为原来的count倍 dataset1 = dataset1.repeat(count=2) for data in dataset1.create_dict_iterator(): print(data) ``` +输出结果如下: + ``` -{'data': array([0, 1, 2], dtype=int64)} -{'data': array([1, 2, 3], dtype=int64)} -{'data': array([2, 3, 4], dtype=int64)} -{'data': array([3, 4, 5], dtype=int64)} -{'data': array([4, 5, 6], dtype=int64)} -{'data': array([0, 1, 2], dtype=int64)} -{'data': array([1, 2, 3], dtype=int64)} -{'data': array([2, 3, 4], dtype=int64)} -{'data': array([3, 4, 5], dtype=int64)} -{'data': array([4, 5, 6], dtype=int64)} +{'data': Tensor(shape=[3], dtype=Int64, value=[0, 1, 2])} +{'data': Tensor(shape=[3], dtype=Int64, value=[1, 2, 3])} +{'data': Tensor(shape=[3], dtype=Int64, value=[2, 3, 4])} +{'data': Tensor(shape=[3], dtype=Int64, value=[3, 4, 5])} +{'data': Tensor(shape=[3], dtype=Int64, value=[4, 5, 6])} +{'data': Tensor(shape=[3], dtype=Int64, value=[0, 1, 2])} +{'data': Tensor(shape=[3], dtype=Int64, value=[1, 2, 3])} +{'data': Tensor(shape=[3], dtype=Int64, value=[2, 3, 4])} +{'data': Tensor(shape=[3], dtype=Int64, value=[3, 4, 5])} +{'data': Tensor(shape=[3], dtype=Int64, value=[4, 5, 6])} ``` ### zip @@ -230,39 +214,36 @@ for data in dataset1.create_dict_iterator(): ![zip](./images/zip.png) -```python -# 将数据集进行合并操作 +下面的样例先构建了两个不同样本数的随机数据集,然后将其进行列拼接,最后展示了拼接后的数据结果。 +```python import numpy as np import mindspore.dataset as ds -# 构建一个generator def generator_func(): for i in range(7): yield (np.array([i, i+1, i+2]),) -# 构建另一个generator def generator_func2(): for i in range(4): yield (np.array([1, 2]),) -# 从generator中构建数据管道 dataset1 = ds.GeneratorDataset(generator_func, ["data1"]) dataset2 = ds.GeneratorDataset(generator_func2, ["data2"]) -# 为数据集创建一个合并操作 -# 新的dataset3会拥有2个列名,分别为data1,data2,同时因为data2的数据较少,会与data2的数据长度对齐 dataset3 = ds.zip((dataset1, dataset2)) for data in dataset3.create_dict_iterator(): print(data) ``` +输出结果如下: + ``` -{'data1': array([0, 1, 2], dtype=int64), 'data2': array([1, 2], dtype=int64)} -{'data1': array([1, 2, 3], dtype=int64), 'data2': array([1, 2], dtype=int64)} -{'data1': array([2, 3, 4], dtype=int64), 'data2': array([1, 2], dtype=int64)} -{'data1': array([3, 4, 5], dtype=int64), 'data2': array([1, 2], dtype=int64)} +{'data1': Tensor(shape=[3], dtype=Int64, value= [0, 1, 2]), 'data2': Tensor(shape=[2], dtype=Int64, value= [1, 2])} +{'data1': Tensor(shape=[3], dtype=Int64, value= [1, 2, 3]), 'data2': Tensor(shape=[2], dtype=Int64, value= [1, 2])} +{'data1': Tensor(shape=[3], dtype=Int64, value= [2, 3, 4]), 'data2': Tensor(shape=[2], dtype=Int64, value= [1, 2])} +{'data1': Tensor(shape=[3], dtype=Int64, value= [3, 4, 5]), 'data2': Tensor(shape=[2], dtype=Int64, value= [1, 2])} ``` ### concat @@ -273,84 +254,34 @@ for data in dataset3.create_dict_iterator(): ![concat](./images/concat.png) -```python -# 将数据集进行连接操作 
+下面的样例先构建了两个随机数据集,然后将其进行行拼接,最后展示了拼接后的数据结果。值得一提的是,使用`+`运算符也能达到同样的效果。 +```python import numpy as np import mindspore.dataset as ds -# 构建一个generator def generator_func(): for i in range(2): yield (np.array([0, 0, 0]),) -# 构建另一个generator def generator_func2(): for i in range(2): yield (np.array([1, 2, 3]),) -# 从generator中构建数据管道 dataset1 = ds.GeneratorDataset(generator_func, ["data1"]) dataset2 = ds.GeneratorDataset(generator_func2, ["data1"]) -# 为数据集创建一个连接操作,将dataset2合并到dataset1的data1列中 dataset3 = dataset1.concat(dataset2) -# 值得一提的是,使用'+'运算符可以达到上面同样的效果 -# dataset3 = dataset1 + dataset2 - for data in dataset3.create_dict_iterator(): print(data) - -``` - -``` -{'data1': array([0, 0, 0], dtype=int64)} -{'data1': array([0, 0, 0], dtype=int64)} -{'data1': array([1, 2, 3], dtype=int64)} -{'data1': array([1, 2, 3], dtype=int64)} ``` -### project +输出结果如下: -对数据集列进行映射,将指定列按顺序保留并向下传递到数据管道中,其余列将被丢弃。 - ->`project`还可以用于改变column排列的顺序! - -![project](./images/project.png) - -```python -# 将数据集进行投影操作 - -import numpy as np -import mindspore.dataset as ds - -# 构建一个generator -def generator_func(): - for i in range(2): - yield (np.array([1, 2, 3]), np.array([7, 8, 9]), ) - -# 从generator中构建数据管道 -dataset = ds.GeneratorDataset(generator_func, ["data1", "data2"]) - -# 构建数据管道,获得原始数据 -for data in dataset.create_dict_iterator(): - print(data) - -print("") - -# 为数据集创建一个投影操作,只保留data1的数据 -dataset = dataset.project(columns=["data1"]) - -# 构建数据管道,获得投影后的数据 -for data in dataset.create_dict_iterator(): - print(data) ``` - -``` -{'data1': array([1, 2, 3], dtype=int64), 'data2': array([7, 8, 9], dtype=int64)} -{'data1': array([1, 2, 3], dtype=int64), 'data2': array([7, 8, 9], dtype=int64)} - -{'data1': array([1, 2, 3], dtype=int64)} -{'data1': array([1, 2, 3], dtype=int64)} +{'data1': Tensor(shape=[3], dtype=Int64, value= [0, 0, 0])} +{'data1': Tensor(shape=[3], dtype=Int64, value= [0, 0, 0])} +{'data1': Tensor(shape=[3], dtype=Int64, value= [1, 2, 3])} +{'data1': Tensor(shape=[3], dtype=Int64, value= [1, 2, 3])} ``` diff --git a/api/source_zh_cn/programming_guide/probability.md b/api/source_zh_cn/programming_guide/probability.md new file mode 100644 index 0000000000000000000000000000000000000000..f9362f115e98acbb449b46c656af1f6efaf5ff22 --- /dev/null +++ b/api/source_zh_cn/programming_guide/probability.md @@ -0,0 +1,965 @@ +# 深度概率编程库 + + + +- [深度概率编程库](#深度概率编程库) + - [概率分布](#概率分布) + - [概率分布类](#概率分布类) + - [Distribution基类](#distribution基类) + - [伯努利分布(Bernoulli)](#伯努利分布bernoulli) + - [指数分布(Exponential)](#指数分布exponential) + - [几何分布(Geometric)](#几何分布geometric) + - [正态分布(Normal)](#正态分布normal) + - [均匀分布(Uniform)](#均匀分布uniform) + - [概率分布类在PyNative模式下的应用](#概率分布类在pynative模式下的应用) + - [概率分布类在图模式下的应用](#概率分布类在图模式下的应用) + - [TransformedDistribution类接口设计](#transformeddistribution类接口设计) + - [PyNative模式下调用TransformedDistribution实例](#pynative模式下调用transformeddistribution实例) + - [图模式下调用TransformedDistribution实例](#图模式下调用transformeddistribution实例) + - [概率分布映射](#概率分布映射) + - [Bijector类接口设计](#bijector类接口设计) + - [Bijector基类](#bijector基类) + - [幂函数变换映射(PowerTransform)](#幂函数变换映射powertransform) + - [指数变换映射(Exp)](#指数变换映射exp) + - [标量仿射变换映射(ScalarAffine)](#标量仿射变换映射scalaraffine) + - [Softplus变换映射(Softplus)](#softplus变换映射softplus) + - [PyNative模式下调用Bijector实例](#pynative模式下调用bijector实例) + - [图模式下调用Bijector实例](#图模式下调用bijector实例) + - [深度概率网络](#深度概率网络) + - [VAE](#vae) + - [ConditionalVAE](#conditionalvae) + - [概率推断算法](#概率推断算法) + - [贝叶斯层](#贝叶斯层) + - [贝叶斯转换](#贝叶斯转换) + - [贝叶斯工具箱](#贝叶斯工具箱) + + + + + 
+MindSpore深度概率编程的目标是将深度学习和贝叶斯学习结合,包括概率分布、概率分布映射、深度概率网络、概率推断算法、贝叶斯层、贝叶斯转换和贝叶斯工具箱,面向不同的开发者。对于专业的贝叶斯学习用户,提供概率采样、推理算法和模型构建库;另一方面,为不熟悉贝叶斯深度学习的用户提供了高级的API,从而不用更改深度学习编程逻辑,即可利用贝叶斯模型。 + +## 概率分布 + +概率分布(`mindspore.nn.probability.distribution`)是概率编程的基础。`Distribution` 类提供多样的概率统计接口,例如概率密度函数 *pdf* 、累积密度函数 *cdf* 、散度计算 *kl_loss* 、抽样 *sample* 等。现有的概率分布实例包括高斯分布,伯努利分布,指数型分布,几何分布和均匀分布。 + +### 概率分布类 + +- `Distribution`:所有概率分布的基类。 + +- `Bernoulli`:伯努利分布。参数为试验成功的概率。 + +- `Exponential`: 指数型分布。参数为率参数。 + +- `Geometric`:几何分布。参数为一次伯努利试验成功的概率。 + +- `Normal`:正态(高斯)分布。参数为均值和标准差。 + +- `Uniform`:均匀分布。参数为数轴上的最小值和最大值。 + +#### Distribution基类 + +`Distribution` 是所有概率分布的基类。 + +接口介绍:`Distribution` 类支持的函数包括 `prob`、`log_prob`、`cdf`、`log_cdf`、`survival_function`、`log_survival`、`mean`、`sd`、`var`、`entropy`、`kl_loss`、`cross_entropy` 和 `sample` 。分布不同,所需传入的参数也不同。只有在派生类中才能使用,由派生类的函数实现决定参数。 + +- `prob` :概率密度函数(PDF)/ 概率质量函数(PMF)。 +- `log_prob` :对数似然函数。 +- `cdf` :累积分布函数(CDF)。 +- `log_cdf` :对数累积分布函数。 +- `survival_function` :生存函数。 +- `log_survival` :对数生存函数。 +- `mean` :均值。 +- `sd` :标准差。 +- `var` :方差。 +- `entropy` :熵。 +- `kl_loss` :Kullback-Leibler 散度。 +- `cross_entropy` :两个概率分布的交叉熵。 +- `sample` :概率分布的随机抽样。 + +#### 伯努利分布(Bernoulli) + +伯努利分布,继承自 `Distribution` 类。 + +属性: +- `Bernoulli.probs`:伯努利试验成功的概率。 + +`Distribution` 基类调用 `Bernoulli` 中私有接口以实现基类中的公有接口。`Bernoulli` 支持的公有接口为: + +- `mean`,`mode`,`var`:可选择传入 试验成功的概率 *probs1* 。 +- `entropy`:可选择传入 试验成功的概率 *probs1* 。 +- `cross_entropy`,`kl_loss`:必须传入 *dist* 和 *probs1_b* 。*dist* 为另一分布的类型,目前只支持此处为 *‘Bernoulli’* 。 *probs1_b* 为分布 *b* 的试验成功概率。可选择传入分布 *a* 的参数 *probs1_a* 。 +- `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入试验成功的概率 *probs* 。 +- `sample`:可选择传入样本形状 *shape* 和试验成功的概率 *probs1* 。 + +#### 指数分布(Exponential) + +指数分布,继承自 `Distribution` 类。 + +属性: +- `Exponential.rate`:率参数。 + +`Distribution` 基类调用 `Exponential` 私有接口以实现基类中的公有接口。`Exponential` 支持的公有接口为: + +- `mean`,`mode`,`var`:可选择传入率参数 *rate* 。 +- `entropy`:可选择传入率参数 *rate* 。 +- `cross_entropy`,`kl_loss`:必须传入 *dist* 和 *rate_b* 。 *dist* 为另一分布的类型的名称, 目前只支持此处为 *‘Exponential’* 。*rate_b* 为分布 *b* 的率参数。可选择传入分布 *a* 的参数 *rate_a* 。 +- `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入率参数 *rate* 。 +- `sample`:可选择传入样本形状 *shape* 和率参数 *rate* 。 + +#### 几何分布(Geometric) + +几何分布,继承自 `Distribution` 类。 + +属性: +- `Geometric.probs`:伯努利试验成功的概率。 + +`Distribution` 基类调用 `Geometric` 中私有接口以实现基类中的公有接口。`Geometric` 支持的公有接口为: + +- `mean`,`mode`,`var`:可选择传入 试验成功的概率 *probs1* 。 +- `entropy`:可选择传入 试验成功的概率 *probs1* 。 +- `cross_entropy`,`kl_loss`:必须传入 *dist* 和 *probs1_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘Geometric’* 。 *probs1_b* 为分布 *b* 的试验成功概率。可选择传入分布 *a* 的参数 *probs1_a* 。 +- `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入试验成功的概率 *probs1* 。 +- `sample`:可选择传入样本形状 *shape* 和试验成功的概率 *probs1* 。 + +#### 正态分布(Normal) + +正态(高斯)分布,继承自 `Distribution` 类。 + +`Distribution` 基类调用 `Normal` 中私有接口以实现基类中的公有接口。`Normal` 支持的公有接口为: +- `mean`,`mode`,`var`:可选择传入分布的参数均值 *mean* 和标准差 *sd* 。 +- `entropy`:可选择传入分布的参数均值 *mean* 和标准差 *sd* 。 +- `cross_entropy`,`kl_loss`:必须传入 *dist* ,*mean_b* 和 *sd_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘Normal’* 。*mean_b* 和 *sd_b* 为分布 *b* 的均值和标准差。可选择传入分布的参数 *a* 均值 *mean_a* 和标准差 *sd_a* 。 +- `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择分布的参数包括均值 *mean_a* 和标准差 *sd_a* 。 +- `sample`:可选择传入样本形状 *shape* 和分布的参数包括均值 *mean_a* 和标准差 *sd_a* 。 + +#### 均匀分布(Uniform) + +均匀分布,继承自 `Distribution` 类。 + +属性: +- `Uniform.low`:最小值。 +- `Uniform.high`:最大值。 + 
+`Distribution` 基类调用 `Uniform` 以实现基类中的公有接口。`Uniform` 支持的公有接口为: + +- `mean`,`mode`,`var`:可选择传入分布的参数最大值 *high* 和最小值 *low* 。 +- `entropy`:可选择传入分布的参数最大值 *high* 和最小值 *low* 。 +- `cross_entropy`,`kl_loss`:必须传入 *dist* ,*high_b* 和 *low_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘Uniform’* 。 *high_b* 和 *low_b* 为分布 *b* 的参数。可选择传入分布 *a* 的参数即最大值 *high_a* 和最小值 *low_a* 。 +- `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入分布的参数最大值 *high* 和最小值 *low* 。 +- `sample`:可选择传入 *shape* 和分布的参数即最大值 *high* 和最小值 *low* 。 + +### 概率分布类在PyNative模式下的应用 + +`Distribution` 子类可在 **PyNative** 模式下使用。 + +导入相关模块: + +```python +from mindspore import Tensor +from mindspore import dtype as mstype +import mindspore.context as context +import mindspore.nn.probability.distribution as msd +context.set_context(mode=context.PYNATIVE_MODE) +``` +以 `Normal` 为例, 创建一个均值为0.0、标准差为1.0的正态分布: +```python +my_normal = msd.Normal(0.0, 1.0, dtype=mstype.float32) +``` +计算均值: +```python +mean = my_normal.mean() +print(mean) +``` +输出为: +```python +0.0 +``` +计算方差: +```python +var = my_normal.var() +print(var) +``` +输出为: +```python +1.0 +``` +计算熵: +```python +entropy = my_normal.entropy() +print(entropy) +``` +输出为: +```python +1.4189385 +``` +计算概率密度函数: +```python +value = Tensor([-0.5, 0.0, 0.5], dtype=mstype.float32) +prob = my_normal.prob(value) +print(prob) +``` +输出为: +```python +[0.35206532, 0.3989423, 0.35206532] +``` +计算累积分布函数: +```python +cdf = my_normal.cdf(value) +print(cdf) +``` +输出为: +```python +[0.30852754, 0.5, 0.69146246] +``` +计算 Kullback-Leibler 散度: +```python +mean_b = Tensor(1.0, dtype=mstype.float32) +sd_b = Tensor(2.0, dtype=mstype.float32) +kl = my_normal.kl_loss('Normal', mean_b, sd_b) +print(kl) +``` +输出为: +```python +0.44314718 +``` + +### 概率分布类在图模式下的应用 + +在图模式下,`Distribution` 子类可用在网络中。 + +导入相关模块: +```python +import mindspore.nn as nn +from mindspore import Tensor +from mindspore import dtype as mstype +import mindspore.context as context +import mindspore.nn.probability.distribution as msd +context.set_context(mode=context.GRAPH_MODE) +``` +创建网络: +```python +# 网络继承nn.Cell +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.normal = msd.Normal(0.0, 1.0, dtype=mstype.float32) + + def construct(self, value, mean, sd): + pdf = self.normal.prob(value) + kl = self.normal.kl_loss("Normal", mean, sd) + return pdf, kl +``` +调用网络: +```python +net = Net() +value = Tensor([-0.5, 0.0, 0.5], dtype=mstype.float32) +mean = Tensor(1.0, dtype=mstype.float32) +sd = Tensor(1.0, dtype=mstype.float32) +pdf, kl = net(value, mean, sd) +print("pdf: ", pdf) +print("kl: ", kl) +``` +输出为: +```python +pdf: [0.3520653, 0.39894226, 0.3520653] +kl: 0.5 +``` + +### TransformedDistribution类接口设计 + +`TransformedDistribution` 继承自 `Distribution` ,是可通过映射f(x)变化得到的数学分布的基类。其接口包括: + +1. 类特征函数 + + - `bijector`:无参函数,返回分布的变换方法。 + - `distribution`:无参函数,返回原始分布。 + - `is_linear_transformation`:无参函数,返回线性变换标志。 + +2. 
接口函数(以下接口函数的参数与构造函数中 `distribution` 的对应接口的参数相同)。
+
+   - `cdf`:累积分布函数(CDF)。
+   - `log_cdf`:对数累积分布函数。
+   - `survival_function`:生存函数。
+   - `log_survival`:对数生存函数。
+   - `prob`:概率密度函数(PDF)/ 概率质量函数(PMF)。
+   - `log_prob`:对数似然函数。
+   - `sample`:随机取样。
+   - `mean`:无参数。只有当 `Bijector.is_constant_jacobian=true` 时可调用。
+
+### PyNative模式下调用TransformedDistribution实例
+
+`TransformedDistribution` 子类可在 **PyNative** 模式下使用。在执行之前,需要先导入所需的模块。
+
+导入相关模块:
+```python
+import numpy as np
+import mindspore.nn as nn
+import mindspore.nn.probability.bijector as msb
+import mindspore.nn.probability.distribution as msd
+import mindspore.context as context
+from mindspore import Tensor
+from mindspore import dtype
+context.set_context(mode=context.PYNATIVE_MODE)
+```
+
+构造一个 `TransformedDistribution` 实例,使用 `Normal` 分布作为需要变换的分布类,使用 `Exp` 作为映射变换,即可生成 `LogNormal` 分布。
+```python
+normal = msd.Normal(0.0, 1.0, dtype=dtype.float32)
+exp = msb.Exp()
+LogNormal = msd.TransformedDistribution(exp, normal, dtype=dtype.float32, seed=0, name="LogNormal")
+print(LogNormal)
+```
+
+输出为:
+```python
+TransformedDistribution<
+  (_bijector): Exp
+  (_distribution): Normal
+  >
+```
+
+可以对 `LogNormal` 进行概率分布计算。例如:
+
+计算累积分布函数:
+```python
+x = np.array([2.0, 5.0, 10.0], dtype=np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+cdf = LogNormal.cdf(tx)
+print(cdf)
+```
+
+输出为:
+```python
+[7.55891383e-01, 9.46239710e-01, 9.89348888e-01]
+```
+
+计算对数累积分布函数:
+```python
+x = np.array([2.0, 5.0, 10.0], dtype=np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+log_cdf = LogNormal.log_cdf(tx)
+print(log_cdf)
+```
+
+输出为:
+```python
+[-2.79857576e-01, -5.52593507e-02, -1.07082408e-02]
+```
+
+计算生存函数:
+```python
+x = np.array([2.0, 5.0, 10.0], dtype=np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+survival_function = LogNormal.survival_function(tx)
+print(survival_function)
+```
+
+输出为:
+```python
+[2.44108617e-01, 5.37602901e-02, 1.06511116e-02]
+```
+
+计算对数生存函数:
+```python
+x = np.array([2.0, 5.0, 10.0], dtype=np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+log_survival = LogNormal.log_survival(tx)
+print(log_survival)
+```
+
+输出为:
+```python
+[-1.41014194e+00, -2.92322016e+00, -4.54209089e+00]
+```
+
+计算概率密度函数:
+```python
+x = np.array([2.0, 5.0, 10.0], dtype=np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+prob = LogNormal.prob(tx)
+print(prob)
+```
+
+输出为:
+```python
+[1.56874031e-01, 2.18507163e-02, 2.81590177e-03]
+```
+
+计算对数概率密度函数:
+```python
+x = np.array([2.0, 5.0, 10.0], dtype=np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+log_prob = LogNormal.log_prob(tx)
+print(log_prob)
+```
+
+输出为:
+```python
+[-1.85231221e+00, -3.82352161e+00, -5.87247276e+00]
+```
+
+调用取样函数 `sample` 抽样:
+```python
+shape = (3, 2)
+sample = LogNormal.sample(shape)
+print(sample)
+```
+
+输出为:
+```python
+[[7.64315844e-01, 3.01435232e-01],
+ [1.17166102e+00, 2.60277224e+00],
+ [7.02699006e-01, 3.91564220e-01]]
+```
+
+当构造 `TransformedDistribution` 时,若传入的映射变换满足 `is_constant_jacobian = true`(如 `ScalarAffine`),构造出的 `TransformedDistribution` 实例可以直接使用 `mean` 接口计算均值,例如:
+```python
+normal = msd.Normal(0.0, 1.0, dtype=dtype.float32)
+scalaraffine = msb.ScalarAffine(1.0, 2.0)
+trans_dist = msd.TransformedDistribution(scalaraffine, normal, dtype=dtype.float32, seed=0)
+mean = trans_dist.mean()
+print(mean)
+```
+输出为:
+```python
+2.0
+```
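+
+作为交叉验证,上面 `prob` 和 `cdf` 的输出也可以脱离MindSpore,只用Python标准库按对数正态分布的解析公式复算(以下为本文补充的示意代码,仅用于核对数值,不是MindSpore接口):
+```python
+from math import erf, exp, log, pi, sqrt
+
+# LogNormal(mu=0, sigma=1)的解析公式:
+#   pdf(x) = exp(-(ln x)^2 / 2) / (x * sqrt(2 * pi))
+#   cdf(x) = 0.5 * (1 + erf(ln x / sqrt(2)))
+for v in [2.0, 5.0, 10.0]:
+    pdf = exp(-log(v) ** 2 / 2) / (v * sqrt(2 * pi))
+    cdf = 0.5 * (1 + erf(log(v) / sqrt(2)))
+    print(v, pdf, cdf)
+# 结果约为 (2, 0.1569, 0.7559)、(5, 0.0219, 0.9462)、(10, 0.0028, 0.9893),
+# 与上面prob、cdf的输出一致
+```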
+### 图模式下调用TransformedDistribution实例
+
+在图模式下,`TransformedDistribution` 类可用在网络中。
+
+导入相关模块:
+```python
+import numpy as np
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore import dtype
+import mindspore.context as context
+import mindspore.nn.probability.bijector as msb
+import mindspore.nn.probability.distribution as msd
+context.set_context(mode=context.GRAPH_MODE)
+```
+
+创建网络:
+```python
+class Net(nn.Cell):
+    def __init__(self, shape, dtype=dtype.float32, seed=0, name='transformed_distribution'):
+        super(Net, self).__init__()
+        # 创建TransformedDistribution实例
+        self.exp = msb.Exp()
+        self.normal = msd.Normal(0.0, 1.0, dtype=dtype)
+        self.lognormal = msd.TransformedDistribution(self.exp, self.normal, dtype=dtype, seed=seed, name=name)
+        self.shape = shape
+
+    def construct(self, value):
+        cdf = self.lognormal.cdf(value)
+        sample = self.lognormal.sample(self.shape)
+        return cdf, sample
+```
+
+调用网络:
+```python
+shape = (2, 3)
+net = Net(shape=shape, name="LogNormal")
+x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+cdf, sample = net(tx)
+print("cdf: ", cdf)
+print("sample: ", sample)
+```
+输出为:
+```python
+cdf:  [0.7558914 0.8640314 0.9171715 0.9462397]
+sample:  [[0.21036398 0.44932044 0.5669641 ]
+ [1.4103683  6.724116   0.97894996]]
+```
+
+## 概率分布映射
+
+Bijector(`mindspore.nn.probability.bijector`)是概率编程的基本组成部分。Bijector描述了一种随机变量的变换方法,可以通过一个已有的随机变量X和一个映射函数f生成一个新的随机变量 $Y = f(X)$ 。
+`Bijector` 提供了映射相关的四种变换方法。它可以当作算子直接使用,也可以作用在某个随机变量 `Distribution` 类实例上,生成新的随机变量的 `Distribution` 类实例。
+
+### Bijector类接口设计
+
+#### Bijector基类
+
+`Bijector` 类是所有概率分布映射的基类。其接口包括:
+
+1. 类特征函数
+   - `name`:无参函数,返回 `name` 的值。
+   - `is_dtype`:无参函数,返回 `dtype` 的值。
+   - `parameter`:无参函数,返回 `parameter` 的值。
+   - `is_constant_jacobian`:无参函数,返回 `is_constant_jacobian` 的值。
+   - `is_injective`:无参函数,返回 `is_injective` 的值。
+
+2. 映射函数
+   - `forward`:正向映射,创建派生类后由派生类的 `_forward` 决定参数。
+   - `inverse`:反向映射,创建派生类后由派生类的 `_inverse` 决定参数。
+   - `forward_log_jacobian`:正向映射的导数的对数,创建派生类后由派生类的 `_forward_log_jacobian` 决定参数。
+   - `inverse_log_jacobian`:反向映射的导数的对数,创建派生类后由派生类的 `_inverse_log_jacobian` 决定参数。
+
+此外,当 `Bijector` 作为函数被调用、且输入是一个 `Distribution` 类实例时,将生成一个 `TransformedDistribution` **(不可在图内调用)**。
+
+#### 幂函数变换映射(PowerTransform)
+`PowerTransform` 做如下变量替换:$Y = g(X) = {(1 + X * c)}^{1 / c}$。其接口包括:
+
+1. 类特征函数
+   - `power`:无参函数,返回 `power` 的值。
+
+2. 映射函数
+   - `forward`:正向映射,输入为 `Tensor` 。
+   - `inverse`:反向映射,输入为 `Tensor` 。
+   - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。
+   - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。
+
+#### 指数变换映射(Exp)
+`Exp` 做如下变量替换:$Y = g(X) = \exp(X)$。其接口包括:
+
+映射函数
+- `forward`:正向映射,输入为 `Tensor` 。
+- `inverse`:反向映射,输入为 `Tensor` 。
+- `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。
+- `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。
+
+#### 标量仿射变换映射(ScalarAffine)
+`ScalarAffine` 做如下变量替换:$Y = g(X) = a * X + b$。其接口包括:
+
+1. 类特征函数
+   - `scale`:无参函数,返回 `scale` 的值。
+   - `shift`:无参函数,返回 `shift` 的值。
+
+2. 映射函数
+   - `forward`:正向映射,输入为 `Tensor` 。
+   - `inverse`:反向映射,输入为 `Tensor` 。
+   - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。
+   - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。
+
+#### Softplus变换映射(Softplus)
+`Softplus` 做如下变量替换:$Y = g(X) = \log(1 + e^{kX}) / k$。其接口包括:
+
+1. 类特征函数
+   - `sharpness`:无参函数,返回 `sharpness` 的值。
+
+2. 映射函数
+   - `forward`:正向映射,输入为 `Tensor` 。
+   - `inverse`:反向映射,输入为 `Tensor` 。
+   - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。
+   - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。
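+
+在进入调用示例之前,可以先只用NumPy按上面的公式做一个数值示意,直观感受 `Softplus` 与 `ScalarAffine` 的正反向映射(以下代码为本文补充,按公式直接计算,并非MindSpore接口):
+```python
+import numpy as np
+
+x = np.array([0.5, 1.0, 2.0])
+
+# Softplus(sharpness=k):正向 Y = log(1 + exp(k*X)) / k,反向 X = log(exp(k*Y) - 1) / k
+k = 2.0
+y = np.log1p(np.exp(k * x)) / k
+x_back = np.log(np.expm1(k * y)) / k
+
+# ScalarAffine(scale=a, shift=b):正向 Y = a*X + b,反向 X = (Y - b) / a
+a, b = 2.0, 1.0
+z = a * x + b
+
+print(y)       # Softplus正向结果
+print(x_back)  # 与x一致,验证forward与inverse互为反函数
+print(z)       # 仿射正向结果
+```
+
+可以看到,仿射变换的导数恒为 $a$,因此 `ScalarAffine` 的 `forward_log_jacobian` 是常数 $\log|a|$,这也是前文 `is_constant_jacobian` 为真的含义。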
+
+### PyNative模式下调用Bijector实例
+
+在执行之前,我们需要导入需要的模块。双射类最主要的库是 `mindspore.nn.probability.bijector`,导入后我们使用 `msb` 作为库的缩写并进行调用。
+
+导入相关模块:
+```python
+import numpy as np
+import mindspore.nn as nn
+import mindspore.nn.probability.bijector as msb
+import mindspore.context as context
+from mindspore import Tensor
+from mindspore import dtype
+context.set_context(mode=context.PYNATIVE_MODE)
+```
+
+下面我们以 `PowerTransform` 为例,创建一个指数为2的 `PowerTransform` 对象。
+
+构造 `PowerTransform`:
+```python
+powertransform = msb.PowerTransform(power=2)
+print(powertransform)
+```
+
+输出:
+```python
+PowerTransform
+```
+
+接下来可以使用映射函数进行运算。
+
+调用 `forward` 方法,计算正向映射:
+```python
+x = np.array([2.0, 3.0, 4.0, 5.0], dtype=np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+forward = powertransform.forward(tx)
+print(forward)
+```
+
+输出为:
+```python
+[2.23606801e+00, 2.64575124e+00, 3.00000000e+00, 3.31662488e+00]
+```
+
+调用 `inverse` 方法,计算反向映射:
+```python
+inverse = powertransform.inverse(tx)
+print(inverse)
+```
+
+输出为:
+```python
+[1.50000000e+00, 4.00000048e+00, 7.50000000e+00, 1.20000010e+01]
+```
+
+调用 `forward_log_jacobian` 方法,计算正向映射导数的对数:
+```python
+forward_log_jaco = powertransform.forward_log_jacobian(tx)
+print(forward_log_jaco)
+```
+
+输出:
+```python
+[-8.04718971e-01, -9.72955048e-01, -1.09861231e+00, -1.19894767e+00]
+```
+
+调用 `inverse_log_jacobian` 方法,计算反向映射导数的对数:
+```python
+inverse_log_jaco = powertransform.inverse_log_jacobian(tx)
+print(inverse_log_jaco)
+```
+
+输出为:
+```python
+[6.93147182e-01 1.09861231e+00 1.38629436e+00 1.60943794e+00]
+```
+
+### 图模式下调用Bijector实例
+
+在图模式下,`Bijector` 子类可用在网络中。
+
+导入相关模块:
+```python
+import numpy as np
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore import dtype
+import mindspore.context as context
+import mindspore.nn.probability.bijector as msb
+context.set_context(mode=context.GRAPH_MODE)
+```
+
+创建网络:
+```python
+class Net(nn.Cell):
+    def __init__(self):
+        super(Net, self).__init__()
+        # 创建PowerTransform实例
+        self.powertransform = msb.PowerTransform(power=2)
+
+    def construct(self, value):
+        forward = self.powertransform.forward(value)
+        inverse = self.powertransform.inverse(value)
+        forward_log_jaco = self.powertransform.forward_log_jacobian(value)
+        inverse_log_jaco = self.powertransform.inverse_log_jacobian(value)
+        return forward, inverse, forward_log_jaco, inverse_log_jaco
+```
+调用网络:
+```python
+net = Net()
+x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32)
+tx = Tensor(x, dtype=dtype.float32)
+forward, inverse, forward_log_jaco, inverse_log_jaco = net(tx)
+print("forward: ", forward)
+print("inverse: ", inverse)
+print("forward_log_jaco: ", forward_log_jaco)
+print("inverse_log_jaco: ", inverse_log_jaco)
+```
+输出为:
+```python
+forward:  [2.236068  2.6457512 3.        3.3166249]
+inverse:  [ 1.5        4.0000005  7.5       12.000001 ]
+forward_log_jaco:  [-0.804719   -0.97295505 -1.0986123  -1.1989477 ]
+inverse_log_jaco:  [0.6931472 1.0986123 1.3862944 1.609438 ]
+```
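+
+上面四组输出同样可以脱离MindSpore,只用NumPy按 `PowerTransform` 的公式(此处 $c=2$ )复算,作为数值上的交叉验证(以下为本文补充的示意代码):
+```python
+import numpy as np
+
+x = np.array([2.0, 3.0, 4.0, 5.0])
+c = 2.0
+
+forward = (1 + c * x) ** (1 / c)                    # [2.2361 2.6458 3.     3.3166]
+inverse = (x ** c - 1) / c                          # [ 1.5  4.   7.5 12. ]
+forward_log_jaco = (1 / c - 1) * np.log(1 + c * x)  # 即 log d/dX (1+cX)^(1/c)
+inverse_log_jaco = (c - 1) * np.log(x)              # 即 log d/dY (Y^c-1)/c
+
+print(forward, inverse, forward_log_jaco, inverse_log_jaco)
+```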
+
+## 深度概率网络
+
+使用MindSpore深度概率编程库(`mindspore.nn.probability.dpn`)来构造变分自编码器(VAE)进行推理尤为简单。我们只需要自定义编码器和解码器(DNN模型),调用VAE或CVAE接口形成其派生网络,然后调用ELBO接口进行优化,最后使用SVI接口进行变分推理。这样做的好处是,不熟悉变分推理的用户可以像构建DNN模型一样来构建概率模型,而熟悉的用户可以调用这些接口来构建更为复杂的概率模型。VAE的接口在`mindspore.nn.probability.dpn`下面,dpn代表的是Deep Probabilistic Network,这里提供了一些基本的深度概率网络的接口,例如VAE。
+
+### VAE
+
+首先,我们需要自定义encoder和decoder,调用`mindspore.nn.probability.dpn.VAE`接口来构建VAE网络。除了传入encoder和decoder之外,还需要传入encoder输出变量的维度hidden size,以及VAE网络存储潜在变量的维度latent size,一般latent size会小于hidden size。
+
+```python
+import mindspore.nn as nn
+from mindspore.ops import operations as P
+from mindspore.nn.probability.dpn import VAE
+
+IMAGE_SHAPE = (-1, 1, 32, 32)
+
+
+class Encoder(nn.Cell):
+    def __init__(self):
+        super(Encoder, self).__init__()
+        self.fc1 = nn.Dense(1024, 800)
+        self.fc2 = nn.Dense(800, 400)
+        self.relu = nn.ReLU()
+        self.flatten = nn.Flatten()
+
+    def construct(self, x):
+        x = self.flatten(x)
+        x = self.fc1(x)
+        x = self.relu(x)
+        x = self.fc2(x)
+        x = self.relu(x)
+        return x
+
+
+class Decoder(nn.Cell):
+    def __init__(self):
+        super(Decoder, self).__init__()
+        self.fc1 = nn.Dense(400, 1024)
+        self.sigmoid = nn.Sigmoid()
+        self.reshape = P.Reshape()
+
+    def construct(self, z):
+        z = self.fc1(z)
+        z = self.reshape(z, IMAGE_SHAPE)
+        z = self.sigmoid(z)
+        return z
+
+
+encoder = Encoder()
+decoder = Decoder()
+vae = VAE(encoder, decoder, hidden_size=400, latent_size=20)
+```
+### ConditionalVAE
+
+类似地,ConditionalVAE与VAE的使用方法比较相近。不同的是,ConditionalVAE利用了数据集的标签信息,属于有监督学习算法,其生成效果一般会比VAE好。
+
+首先,自定义encoder和decoder,并调用`mindspore.nn.probability.dpn.ConditionalVAE`接口来构建ConditionalVAE网络。这里的encoder与VAE中的有所不同,因为需要传入数据集的标签信息;decoder则和上述一样。调用ConditionalVAE接口时,还需要传入数据集的标签类别个数,其余参数和VAE接口一样。
+
+```python
+import mindspore.nn as nn
+from mindspore.ops import operations as P
+from mindspore.nn.probability.dpn import ConditionalVAE
+
+IMAGE_SHAPE = (-1, 1, 32, 32)
+
+
+class Encoder(nn.Cell):
+    def __init__(self, num_classes):
+        super(Encoder, self).__init__()
+        self.fc1 = nn.Dense(1024 + num_classes, 400)
+        self.relu = nn.ReLU()
+        self.flatten = nn.Flatten()
+        self.concat = P.Concat(axis=1)
+        self.one_hot = nn.OneHot(depth=num_classes)
+
+    def construct(self, x, y):
+        x = self.flatten(x)
+        y = self.one_hot(y)
+        input_x = self.concat((x, y))
+        input_x = self.fc1(input_x)
+        input_x = self.relu(input_x)
+        return input_x
+
+
+class Decoder(nn.Cell):
+    def __init__(self):
+        super(Decoder, self).__init__()
+        self.fc1 = nn.Dense(400, 1024)
+        self.sigmoid = nn.Sigmoid()
+        self.reshape = P.Reshape()
+
+    def construct(self, z):
+        z = self.fc1(z)
+        z = self.reshape(z, IMAGE_SHAPE)
+        z = self.sigmoid(z)
+        return z
+
+
+encoder = Encoder(num_classes=10)
+decoder = Decoder()
+cvae = ConditionalVAE(encoder, decoder, hidden_size=400, latent_size=20, num_classes=10)
+```
+
+加载数据集,这里使用MNIST数据集,具体的数据加载和预处理过程可以参考[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html),其中的create_dataset函数用于创建数据迭代器。
+
+```python
+ds_train = create_dataset(image_path, 128, 1)
+```
+接下来,需要用到infer接口进行VAE网络的变分推断。
+
+## 概率推断算法
+
+调用ELBO接口(`mindspore.nn.probability.infer.ELBO`)来定义VAE网络的损失函数,调用`WithLossCell`封装VAE网络和损失函数,并定义优化器,之后传入SVI接口(`mindspore.nn.probability.infer.SVI`)。SVI的`run`函数可理解为VAE网络的训练,可以指定训练的`epochs`,返回结果为训练好的网络;`get_train_loss`函数可以返回训练好后模型的loss。
+
+```python
+from mindspore import Tensor
+from mindspore.common import dtype as mstype
+from mindspore.nn.probability.infer import ELBO, SVI
+
+net_loss = ELBO(latent_prior='Normal', output_prior='Normal')
+net_with_loss = nn.WithLossCell(vae, net_loss)
+optimizer = nn.Adam(params=vae.trainable_params(), learning_rate=0.001)
+
+vi = SVI(net_with_loss=net_with_loss, optimizer=optimizer)
+vae = vi.run(train_dataset=ds_train, epochs=10)
+trained_loss = vi.get_train_loss()
+```
+最后,得到训练好的VAE网络后,我们可以使用`vae.generate_sample`生成新样本,需要传入待生成样本的个数及生成样本的shape,shape需要保持和原数据集中的样本shape一样;当然,我们也可以使用`vae.reconstruct_sample`重构原来数据集中的样本,来测试VAE网络的重建能力。
+```python
+generated_sample = vae.generate_sample(64, IMAGE_SHAPE)
+for sample in ds_train.create_dict_iterator():
+    sample_x = Tensor(sample['image'], dtype=mstype.float32)
+    reconstructed_sample = vae.reconstruct_sample(sample_x)
+print('The shape of the generated sample is ', generated_sample.shape)
+```
+我们可以看一下新生成样本的shape:
+```python
+The shape of the generated sample is  (64, 1, 32, 32)
+```
+ConditionalVAE的训练过程和VAE类似。但需要注意的是,使用训练好的ConditionalVAE网络生成新样本和重建样本时,需要输入标签信息,例如下面生成的新样本就是64个0-7的数字。
+
+```python
+sample_label = Tensor([i for i in range(0, 8)] * 8, dtype=mstype.int32)
+generated_sample = cvae.generate_sample(sample_label, 64, IMAGE_SHAPE)
+for sample in ds_train.create_dict_iterator():
+    sample_x = Tensor(sample['image'], dtype=mstype.float32)
+    sample_y = Tensor(sample['label'], dtype=mstype.int32)
+    reconstructed_sample = cvae.reconstruct_sample(sample_x, sample_y)
+print('The shape of the generated sample is ', generated_sample.shape)
+```
+查看一下新生成的样本的shape:
+```python
+The shape of the generated sample is  (64, 1, 32, 32)
+```
+
+如果希望新生成的样本更清晰,用户可以自定义更复杂的encoder和decoder,这里的示例只用了两层全连接层,仅作示例指导。
+
+## 贝叶斯层
+
+可以使用MindSpore的`nn.probability.bnn_layers`中的API实现BNN图片分类模型。`nn.probability.bnn_layers`中的API包括`NormalPrior`、`NormalPosterior`、`ConvReparam`、`DenseReparam`和`WithBNNLossCell`。BNN与DNN的最大区别在于,BNN层的weight和bias不再是确定的值,而是服从一个分布。其中,`NormalPrior`和`NormalPosterior`分别用来生成服从正态分布的先验分布和后验分布;`ConvReparam`和`DenseReparam`分别是使用重参数化(reparameterization)方法实现的贝叶斯卷积层和全连接层;`WithBNNLossCell`用来封装BNN和损失函数。
+
+如何使用`nn.probability.bnn_layers`中的API构建贝叶斯神经网络并实现图片分类,可以参考教程[使用贝叶斯网络](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/deep_probability_program.html#id3)。
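+
+在进入下一节之前,这里给出一个构造贝叶斯全连接层的最小示意(输入输出维度等取值仅为演示假设,完整用法以上述教程为准):
+```python
+import numpy as np
+from mindspore import Tensor
+from mindspore.nn.probability import bnn_layers
+
+# DenseReparam的用法与nn.Dense类似,但其权重服从可学习的正态分布
+bnn_dense = bnn_layers.DenseReparam(in_channels=1024, out_channels=10, activation='relu')
+x = Tensor(np.random.randn(32, 1024).astype(np.float32))
+out = bnn_dense(x)  # 每次前向计算都会对权重重新采样
+print(out.shape)    # (32, 10)
+```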
+
+## 贝叶斯转换
+
+对于不熟悉贝叶斯模型的研究人员,MDP提供了贝叶斯转换接口(`mindspore.nn.probability.transform`),支持DNN (Deep Neural Network)模型一键转换成BNN (Bayesian Neural Network)模型。
+
+其中的模型转换API`TransformToBNN`的`__init__`函数定义如下:
+
+```python
+class TransformToBNN:
+    def __init__(self, trainable_dnn, dnn_factor=1, bnn_factor=1):
+        net_with_loss = trainable_dnn.network
+        self.optimizer = trainable_dnn.optimizer
+        self.backbone = net_with_loss.backbone_network
+        self.loss_fn = getattr(net_with_loss, "_loss_fn")
+        self.dnn_factor = dnn_factor
+        self.bnn_factor = bnn_factor
+        self.bnn_loss_file = None
+```
+参数`trainable_dnn`是经过`TrainOneStepCell`包装的可训练DNN模型,`dnn_factor`和`bnn_factor`分别为由损失函数计算得到的网络整体损失的系数和每个贝叶斯层的KL散度的系数。
+API`TransformToBNN`主要实现了两个功能:
+- 功能一:转换整个模型
+
+  `transform_to_bnn_model`方法可以将整个DNN模型转换为BNN模型。其定义如下:
+
+  ```python
+  def transform_to_bnn_model(self,
+                             get_dense_args=lambda dp: {"in_channels": dp.in_channels, "has_bias": dp.has_bias,
+                                                        "out_channels": dp.out_channels, "activation": dp.activation},
+                             get_conv_args=lambda dp: {"in_channels": dp.in_channels, "out_channels": dp.out_channels,
+                                                       "pad_mode": dp.pad_mode, "kernel_size": dp.kernel_size,
+                                                       "stride": dp.stride, "has_bias": dp.has_bias,
+                                                       "padding": dp.padding, "dilation": dp.dilation,
+                                                       "group": dp.group},
+                             add_dense_args=None,
+                             add_conv_args=None):
+      r"""
+      Transform the whole DNN model to BNN model, and wrap BNN model by TrainOneStepCell.
+
+      Args:
+          get_dense_args (function): The arguments gotten from the DNN full connection layer. Default: lambda dp:
+              {"in_channels": dp.in_channels, "out_channels": dp.out_channels, "has_bias": dp.has_bias}.
+          get_conv_args (function): The arguments gotten from the DNN convolutional layer. Default: lambda dp:
+              {"in_channels": dp.in_channels, "out_channels": dp.out_channels, "pad_mode": dp.pad_mode,
+              "kernel_size": dp.kernel_size, "stride": dp.stride, "has_bias": dp.has_bias}.
+          add_dense_args (dict): The new arguments added to BNN full connection layer. Default: {}.
+          add_conv_args (dict): The new arguments added to BNN convolutional layer. Default: {}.
+
+      Returns:
+          Cell, a trainable BNN model wrapped by TrainOneStepCell.
+      """
+  ```
+  参数`get_dense_args`指定从DNN模型的全连接层中获取哪些参数,默认值是DNN模型的全连接层和BNN的全连接层所共有的参数,参数具体的含义可以参考[API说明文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Dense);`get_conv_args`指定从DNN模型的卷积层中获取哪些参数,默认值是DNN模型的卷积层和BNN的卷积层所共有的参数,参数具体的含义可以参考[API说明文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2d);参数`add_dense_args`和`add_conv_args`分别指定了要为BNN层指定哪些新的参数值。需要注意的是,`add_dense_args`中的参数不能与`get_dense_args`重复,`add_conv_args`和`get_conv_args`也是如此。
+
+- 功能二:转换指定类型的层
+
+  `transform_to_bnn_layer`方法可以将DNN模型中指定类型的层(`nn.Dense`或者`nn.Conv2d`)转换为对应的贝叶斯层。其定义如下:
+
+  ```python
+  def transform_to_bnn_layer(self, dnn_layer, bnn_layer, get_args=None, add_args=None):
+      r"""
+      Transform a specific type of layers in DNN model to corresponding BNN layer.
+
+      Args:
+          dnn_layer_type (Cell): The type of DNN layer to be transformed to BNN layer. The optional values are
+              nn.Dense, nn.Conv2d.
+          bnn_layer_type (Cell): The type of BNN layer to be transformed to. The optional values are
+              DenseReparam, ConvReparam.
+          get_args (dict): The arguments gotten from the DNN layer. Default: None.
+          add_args (dict): The new arguments added to BNN layer. Default: None.
+
+      Returns:
+          Cell, a trainable model wrapped by TrainOneStepCell, whose specific type of layers is transformed to the corresponding bayesian layer.
+      """
+  ```
+  参数`dnn_layer`指定将哪个类型的DNN层转换成BNN层,`bnn_layer`指定DNN层将转换成哪个类型的BNN层,`get_args`和`add_args`分别指定从DNN层中获取哪些参数和要为BNN层的哪些参数重新赋值。两种转换的调用方式见下面的示意。
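+
+下面给出一个调用上述两个接口的最小示意(假设已定义好DNN模型`LeNet5`;此处假设转换接口位于`mindspore.nn.probability.transforms`模块,`dnn_factor`、`bnn_factor`的取值仅为演示):
+```python
+import mindspore.nn as nn
+from mindspore.nn import WithLossCell, TrainOneStepCell
+from mindspore.nn.probability import transforms, bnn_layers
+
+network = LeNet5()  # 假设LeNet5已按常规方式定义好
+criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
+optimizer = nn.AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0001)
+
+# TransformToBNN要求传入经TrainOneStepCell包装的可训练模型
+net_with_loss = WithLossCell(network, criterion)
+train_network = TrainOneStepCell(net_with_loss, optimizer)
+
+bnn_transformer = transforms.TransformToBNN(train_network, dnn_factor=60000, bnn_factor=0.000001)
+
+# 功能一:把整个DNN模型转换为BNN模型
+train_bnn_network = bnn_transformer.transform_to_bnn_model()
+
+# 功能二:只把nn.Dense层转换为DenseReparam贝叶斯层
+train_bnn_network = bnn_transformer.transform_to_bnn_layer(nn.Dense, bnn_layers.DenseReparam)
+```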
+
+如何在MindSpore中使用API`TransformToBNN`,可以参考教程[DNN一键转换成BNN](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/deep_probability_program.html#dnnbnn)。
+
+## 贝叶斯工具箱
+
+贝叶斯神经网络的优势之一就是可以获取不确定性。MDP在上层提供了不确定性估计的工具箱(`mindspore.nn.probability.toolbox`),用户可以很方便地使用该工具箱计算不确定性。不确定性意味着深度学习模型对预测结果的不确定程度。目前,大多数深度学习算法只能给出高置信度的预测结果,而不能判断预测结果的确定性。不确定性主要有两种类型:偶然不确定性和认知不确定性。
+
+- 偶然不确定性(Aleatoric Uncertainty):描述数据中的内在噪声,即无法避免的误差,这个现象不能通过增加采样数据来削弱。
+- 认知不确定性(Epistemic Uncertainty):模型自身对输入数据的估计可能因为训练不佳、训练数据不够等原因而不准确,可以通过增加训练数据等方式来缓解。
+
+不确定性评估工具箱的接口如下:
+- `model`:待评估不确定性的已训练好的模型。
+- `train_dataset`:用于训练的数据集,迭代器类型。
+- `task_type`:模型的类型,字符串,输入“regression”或者“classification”。
+- `num_classes`:如果是分类模型,需要指定类别的标签数量。
+- `epochs`:用于训练不确定模型的迭代数。
+- `epi_uncer_model_path`:用于存储或加载计算认知不确定性的模型的路径。
+- `ale_uncer_model_path`:用于存储或加载计算偶然不确定性的模型的路径。
+- `save_model`:布尔类型,是否需要存储模型。
+
+在使用前,需要先训练好模型。以LeNet5为例,使用方式如下:
+```python
+from mindspore import Tensor
+from mindspore.common import dtype as mstype
+from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+
+if __name__ == '__main__':
+    # get trained model
+    network = LeNet5()
+    param_dict = load_checkpoint('checkpoint_lenet.ckpt')
+    load_param_into_net(network, param_dict)
+    # get train and eval dataset
+    ds_train = create_dataset('workspace/mnist/train')
+    ds_eval = create_dataset('workspace/mnist/test')
+    evaluation = UncertaintyEvaluation(model=network,
+                                       train_dataset=ds_train,
+                                       task_type='classification',
+                                       num_classes=10,
+                                       epochs=1,
+                                       epi_uncer_model_path=None,
+                                       ale_uncer_model_path=None,
+                                       save_model=False)
+    for eval_data in ds_eval.create_dict_iterator():
+        eval_data = Tensor(eval_data['image'], mstype.float32)
+        epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data)
+        aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data)
+    print('The shape of epistemic uncertainty is ', epistemic_uncertainty.shape)
+    print('The shape of aleatoric uncertainty is ', aleatoric_uncertainty.shape)
+```
+`eval_epistemic_uncertainty`计算的是认知不确定性,也叫模型不确定性,对于每一个样本的每个预测标签都会有一个不确定值;`eval_aleatoric_uncertainty`计算的是偶然不确定性,也叫数据不确定性,对于每一个样本都会有一个不确定值。
+所以输出为:
+
+```python
+The shape of epistemic uncertainty is  (32, 10)
+The shape of aleatoric uncertainty is  (32,)
+```
+uncertainty的值位于[0,1]之间,越大表示不确定性越高。
diff --git a/api/source_zh_cn/programming_guide/run.md b/api/source_zh_cn/programming_guide/run.md
new file mode 100644
index 0000000000000000000000000000000000000000..e3c4d59a4785ec62599de23db0c34599c5d8ce70
--- /dev/null
+++ b/api/source_zh_cn/programming_guide/run.md
@@ -0,0 +1,381 @@
+# 运行方式
+
+
+
+- [运行方式](#运行方式)
+    - [概述](#概述)
+    - [执行单算子](#执行单算子)
+    - [执行普通函数](#执行普通函数)
+    - [执行网络模型](#执行网络模型)
+        - [执行训练模型](#执行训练模型)
+        - [执行推理模型](#执行推理模型)
+
+
+
+
+
+## 概述
+执行主要有三种方式:单算子、普通函数和网络训练模型。
+
+
+## 执行单算子
+
+执行单个算子,并打印相关结果。
+
+代码样例如下:
+```python
+import numpy as np
+import mindspore.nn as nn
+from mindspore import context, Tensor
+
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+
+conv = nn.Conv2d(3, 4, 3, bias_init='zeros')
+input_data = Tensor(np.ones([1, 3, 5, 5]).astype(np.float32))
+output = conv(input_data)
+print(output.asnumpy())
+```
+
+输出如下:
+```python
+[[[[ 0.06022915 0.06149777 0.06149777 0.06149777 0.01145121]
+   [ 0.06402162 0.05889071 0.05889071 0.05889071 -0.00933781]
+   [ 0.06402162 0.05889071 0.05889071 0.05889071 -0.00933781]
+   [ 0.06402162 0.05889071 0.05889071
 0.05889071 -0.00933781]
+   [ 0.02712326 0.02096302 0.02096302 0.02096302 -0.01119636]]
+
+  [[-0.0258286 -0.03362969 -0.03362969 -0.03362969 -0.00799183]
+   [-0.0513729 -0.06778982 -0.06778982 -0.06778982 -0.03168458]
+   [-0.0513729 -0.06778982 -0.06778982 -0.06778982 -0.03168458]
+   [-0.0513729 -0.06778982 -0.06778982 -0.06778982 -0.03168458]
+   [-0.04186669 -0.07266843 -0.07266843 -0.07266843 -0.04836193]]
+
+  [[-0.00840744 -0.03043237 -0.03043237 -0.03043237 0.00172079]
+   [ 0.00401019 -0.03755453 -0.03755453 -0.03755453 -0.00851137]
+   [ 0.00401019 -0.03755453 -0.03755453 -0.03755453 -0.00851137]
+   [ 0.00401019 -0.03755453 -0.03755453 -0.03755453 -0.00851137]
+   [ 0.00270888 -0.03718876 -0.03718876 -0.03718876 -0.03043662]]
+
+  [[-0.00982172 0.02009856 0.02009856 0.02009856 0.03327979]
+   [ 0.02529106 0.04035065 0.04035065 0.04035065 0.01782833]
+   [ 0.02529106 0.04035065 0.04035065 0.04035065 0.01782833]
+   [ 0.02529106 0.04035065 0.04035065 0.04035065 0.01782833]
+   [ 0.01015155 0.00781826 0.00781826 0.00781826 -0.02884173]]]]
+```
+
+
+## 执行普通函数
+
+将若干算子组合成一个函数,然后直接通过函数调用的方式执行这些算子,并打印相关结果。
+
+代码样例如下:
+```python
+import numpy as np
+from mindspore import context, Tensor
+from mindspore.ops import functional as F
+
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+
+def tensor_add_func(x, y):
+    z = F.tensor_add(x, y)
+    z = F.tensor_add(z, x)
+    return z
+
+x = Tensor(np.ones([3, 3], dtype=np.float32))
+y = Tensor(np.ones([3, 3], dtype=np.float32))
+output = tensor_add_func(x, y)
+print(output.asnumpy())
+```
+
+输出如下:
+```python
+[[3. 3. 3.]
+ [3. 3. 3.]
+ [3. 3. 3.]]
+```
+
+## 执行网络模型
+MindSpore的Model接口是用于训练和验证的高级接口。可以将有训练或推理功能的layers组合成一个对象,通过调用train、eval、predict接口可以分别实现训练、推理和预测功能。
+
+用户可以根据实际需要传入网络、损失函数和优化器等初始化Model接口,还可以通过配置amp_level实现混合精度,配置metrics实现模型评估。
+
+### 执行训练模型
+通过调用Model的train接口可以实现训练。
+
+代码样例如下:
+```python
+import os
+
+import mindspore.dataset as ds
+import mindspore.dataset.transforms.c_transforms as CT
+import mindspore.dataset.vision.c_transforms as CV
+import mindspore.nn as nn
+from mindspore import context
+from mindspore.common import dtype as mstype
+from mindspore.common.initializer import Normal
+from mindspore.common.initializer import TruncatedNormal
+from mindspore.dataset.vision import Inter
+from mindspore.train import Model
+from mindspore.train.callback import LossMonitor
+
+
+def create_dataset(data_path, batch_size=32, repeat_size=1,
+                   num_parallel_workers=1):
+    """
+    create dataset for train or test
+    """
+    # define dataset
+    mnist_ds = ds.MnistDataset(data_path)
+
+    resize_height, resize_width = 32, 32
+    rescale = 1.0 / 255.0
+    shift = 0.0
+    rescale_nml = 1 / 0.3081
+    shift_nml = -1 * 0.1307 / 0.3081
+
+    # define map operations
+    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Bilinear mode
+    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
+    rescale_op = CV.Rescale(rescale, shift)
+    hwc2chw_op = CV.HWC2CHW()
+    type_cast_op = CT.TypeCast(mstype.int32)
+
+    # apply map operations on images
+    mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op,
num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers) + + # apply DatasetOps + buffer_size = 10000 + mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script + mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True) + mnist_ds = mnist_ds.repeat(repeat_size) + + return mnist_ds + + +def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): + """weight initial for conv layer""" + weight = weight_variable() + return nn.Conv2d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + weight_init=weight, has_bias=False, pad_mode="valid") + + +def fc_with_initialize(input_channels, out_channels): + """weight initial for fc layer""" + weight = weight_variable() + bias = weight_variable() + return nn.Dense(input_channels, out_channels, weight, bias) + + +def weight_variable(): + """weight initial""" + return TruncatedNormal(0.02) + + +class LeNet5(nn.Cell): + """ + Lenet network + + Args: + num_class (int): Num classes. Default: 10. + num_channel (int): Num channels. Default: 1. + + Returns: + Tensor, output tensor + Examples: + >>> LeNet(num_class=10) + + """ + + def __init__(self, num_class=10, num_channel=1): + super(LeNet5, self).__init__() + self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid') + self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid') + self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02)) + self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02)) + self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02)) + self.relu = nn.ReLU() + self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) + self.flatten = nn.Flatten() + + def construct(self, x): + x = self.max_pool2d(self.relu(self.conv1(x))) + x = self.max_pool2d(self.relu(self.conv2(x))) + x = self.flatten(x) + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return x + + +if __name__ == "__main__": + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + ds_train = create_dataset(os.path.join("/home/workspace/mindspore_dataset/MNIST_Data/", "train"), 32) + + network = LeNet5(10) + net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") + net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) + model = Model(network, net_loss, net_opt) + + print("============== Starting Training ==============") + model.train(1, ds_train, callbacks=[LossMonitor()], dataset_sink_mode=True) +``` + +> 示例中用到的MNIST数据集的获取方法,可以参照[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html)的下载数据集部分,下同。 + +输出如下: +```python +epoch: 1 step: 1, loss is 2.300784 +epoch: 1 step: 2, loss is 2.3076947 +epoch: 1 step: 3, loss is 2.2993166 +... 
+epoch: 1 step: 1873, loss is 0.13014838
+epoch: 1 step: 1874, loss is 0.0346688
+epoch: 1 step: 1875, loss is 0.017264696
+```
+
+> 使用PyNative模式调试,请参考[使用PyNative模式调试](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/debugging_in_pynative_mode.html),包括单算子、普通函数和网络训练模型的执行。
+
+### 执行推理模型
+通过调用Model的eval接口可以实现推理。为了方便评估模型的好坏,可以在Model接口初始化的时候设置评估指标Metric。
+
+Metric是用于评估模型好坏的指标。常见的主要有Accuracy、Fbeta、Precision、Recall和TopKCategoricalAccuracy等。通常情况下,一种模型指标无法全面地评估模型的好坏,一般会结合多个指标共同作用对模型进行评估。
+
+常用的内置评估指标:
+- `Accuracy`(准确率):是一个用于评估分类模型的指标。通俗来说,准确率是指我们的模型预测正确的结果所占的比例。公式:$$Accuracy = (TP+TN)/(TP+TN+FP+FN)$$
+
+- `Precision`(精确率):在被识别为正类别的样本中,确实为正类别的比例。公式:$$Precision = TP/(TP+FP)$$
+
+- `Recall`(召回率):在所有正类别样本中,被正确识别为正类别的比例。公式:$$Recall = TP/(TP+FN)$$
+
+- `Fbeta`(调和均值):综合考虑precision和recall的调和均值。
+公式:$$F_\beta = (1 + \beta^2) \cdot \frac{precision \cdot recall}{(\beta^2 \cdot precision) + recall}$$
+
+- `TopKCategoricalAccuracy`(多分类TopK准确率):计算TopK分类准确率。
+
+代码样例如下:
+```python
+import os
+
+import mindspore.dataset as ds
+import mindspore.dataset.transforms.c_transforms as CT
+import mindspore.dataset.vision.c_transforms as CV
+import mindspore.nn as nn
+from mindspore import context
+from mindspore.common import dtype as mstype
+from mindspore.common.initializer import Normal
+from mindspore.dataset.vision import Inter
+from mindspore.nn.metrics import Accuracy, Precision
+from mindspore.train import Model
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+
+
+class LeNet5(nn.Cell):
+    """
+    Lenet network
+
+    Args:
+        num_class (int): Num classes. Default: 10.
+        num_channel (int): Num channels. Default: 1.
+
+    Returns:
+        Tensor, output tensor
+    Examples:
+        >>> LeNet(num_class=10)
+
+    """
+
+    def __init__(self, num_class=10, num_channel=1):
+        super(LeNet5, self).__init__()
+        self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
+        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
+        self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
+        self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
+        self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
+        self.relu = nn.ReLU()
+        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+        self.flatten = nn.Flatten()
+
+    def construct(self, x):
+        x = self.max_pool2d(self.relu(self.conv1(x)))
+        x = self.max_pool2d(self.relu(self.conv2(x)))
+        x = self.flatten(x)
+        x = self.relu(self.fc1(x))
+        x = self.relu(self.fc2(x))
+        x = self.fc3(x)
+        return x
+
+
+def create_dataset(data_path, batch_size=32, repeat_size=1,
+                   num_parallel_workers=1):
+    """
+    create dataset for train or test
+    """
+    # define dataset
+    mnist_ds = ds.MnistDataset(data_path)
+
+    resize_height, resize_width = 32, 32
+    rescale = 1.0 / 255.0
+    shift = 0.0
+    rescale_nml = 1 / 0.3081
+    shift_nml = -1 * 0.1307 / 0.3081
+
+    # define map operations
+    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Bilinear mode
+    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
+    rescale_op = CV.Rescale(rescale, shift)
+    hwc2chw_op = CV.HWC2CHW()
+    type_cast_op = CT.TypeCast(mstype.int32)
+
+    # apply map operations on images
+    mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(input_columns="image",
operations=rescale_nml_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers) + + # apply DatasetOps + buffer_size = 10000 + mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script + mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True) + mnist_ds = mnist_ds.repeat(repeat_size) + + return mnist_ds + + +if __name__ == "__main__": + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + + network = LeNet5(10) + net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") + repeat_size = 10 + net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) + model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy(), "Precision": Precision()}) + + print("============== Starting Testing ==============") + param_dict = load_checkpoint("./ckpt/checkpoint_lenet-1_1875.ckpt") + load_param_into_net(network, param_dict) + ds_eval = create_dataset(os.path.join("/home/workspace/mindspore_dataset/MNIST_Data", "test"), 32, 1) + acc = model.eval(ds_eval, dataset_sink_mode=True) + print("============== {} ==============".format(acc)) +``` + +其中, +`load_checkpoint`:通过该接口加载CheckPoint模型参数文件,返回一个参数字典。 +`checkpoint_lenet-1_1875.ckpt`:保存的CheckPoint模型文件名称。 +`load_param_into_net`:通过该接口把参数加载到网络中。 + +> `checkpoint_lenet-1_1875.ckpt`文件的保存方法,可以参考[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html)的训练网络部分。 + +输出如下: +```python +============== {'Accuracy': 0.96875, 'Precision': array([0.97782258, 0.99451052, 0.98031496, 0.92723881, 0.98352214, + 0.97165533, 0.98726115, 0.9472196 , 0.9394551 , 0.98236515])} ============== +``` \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/sampler.md b/api/source_zh_cn/programming_guide/sampler.md index dc76658852fda5925547c3898657fe64b5a69a0a..0d910a10fa605f85f1e5ec1fd99def2e79bf13d3 100644 --- a/api/source_zh_cn/programming_guide/sampler.md +++ b/api/source_zh_cn/programming_guide/sampler.md @@ -5,7 +5,6 @@ - [采样器](#采样器) - [概述](#概述) - [MindSpore采样器](#mindspore采样器) - - [SequentialSampler](#sequentialsampler) - [RandomSampler](#randomsampler) - [WeightedRandomSampler](#weightedrandomsampler) - [SubsetRandomSampler](#subsetrandomsampler) @@ -19,7 +18,7 @@ ## 概述 -MindSpore提供了多种用途的采样器,帮助用户对数据集进行不同形式的采样,以满足训练需求,能够解决诸如数据集过大或样本类别分布不均等问题。只需在加载数据集时将采样器对象传入,即可实现数据的采样。 +MindSpore提供了多种用途的采样器(Sampler),帮助用户对数据集进行不同形式的采样,以满足训练需求,能够解决诸如数据集过大或样本类别分布不均等问题。只需在加载数据集时传入采样器对象,即可实现数据的采样。 MindSpore目前提供的采样器类别如下表所示。此外,用户也可以根据需要实现自定义的采样器类。 @@ -34,124 +33,45 @@ MindSpore目前提供的采样器类别如下表所示。此外,用户也可 ## MindSpore采样器 -下面以[CIFAR10数据集](https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz)为例,介绍MindSpore采样器使用方法。 - -### SequentialSampler - -从指定的索引位置开始顺序采样指定数目的数据。 - -```python -# 通过SequentialSampler定义一个顺序采样器,并作用于数据集 - -import mindspore.dataset as ds - -# CIFAR10数据集路径 -DATA_DIR = "Cifar10Data/" - -# 1. 定义一个顺序采样器SequentialSampler,按照读取顺序获取5个样本数据 -sampler = ds.SequentialSampler(num_samples=5) -dataset1 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) - -# 启动数据管道,输出5个样本数据 -for data in dataset1.create_dict_iterator(): - print("Image shape:", data['image'].shape, ", Label:", data['label']) - -print("") - -# 2. 
定义一个顺序采样器SequentialSampler,跳过前2个数据,继续按照读取顺序获取5个样本数据 -sampler = ds.SequentialSampler(start_index=2, num_samples=5) -dataset2 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) - -# 启动数据管道,输出5个样本数据 -for data in dataset2.create_dict_iterator(): - print("Image shape:", data['image'].shape, ", Label:", data['label']) - -print("") - -# 3. 同类用法,指定数据集中的num_samples参数为5,shuffle参数为False,同样可以达到1的效果 -dataset3 = ds.Cifar10Dataset(DATA_DIR, num_samples=5, shuffle=False) - -# 启动数据管道,输出5个样本数据 -for data in dataset3.create_dict_iterator(): - print("Image shape:", data['image'].shape, ", Label:", data['label']) -``` - -``` -Image shape: (32, 32, 3) , Label: 0 -Image shape: (32, 32, 3) , Label: 1 -Image shape: (32, 32, 3) , Label: 2 -Image shape: (32, 32, 3) , Label: 3 -Image shape: (32, 32, 3) , Label: 4 - -Image shape: (32, 32, 3) , Label: 2 -Image shape: (32, 32, 3) , Label: 3 -Image shape: (32, 32, 3) , Label: 4 -Image shape: (32, 32, 3) , Label: 5 -Image shape: (32, 32, 3) , Label: 6 - -Image shape: (32, 32, 3) , Label: 0 -Image shape: (32, 32, 3) , Label: 1 -Image shape: (32, 32, 3) , Label: 2 -Image shape: (32, 32, 3) , Label: 3 -Image shape: (32, 32, 3) , Label: 4 -``` +下面以[CIFAR-10数据集](https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz)为例,介绍几种常用MindSpore采样器的使用方法。 ### RandomSampler 从索引序列中随机采样指定数目的数据。 -```python -# 通过RandomSampler定义一个随机采样器,并作用于数据集 +下面的样例使用随机采样器分别从CIFAR-10数据集中有放回和无放回地随机采样5个数据,并展示已加载数据的形状和标签。 +```python import mindspore.dataset as ds -# 设置全局随机种子,确保RandomSampler的行为可预测 ds.config.set_seed(0) -# CIFAR数据集路径 DATA_DIR = "Cifar10Data/" -# 1. 定义一个随机采样器SequentialSampler,随机获取5个样本数据 sampler = ds.RandomSampler(num_samples=5) dataset1 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -# 启动数据管道,输出5个样本数据 for data in dataset1.create_dict_iterator(): print("Image shape:", data['image'].shape, ", Label:", data['label']) -print("") +print("------------") -# 2. 定义一个随机采样器RandomSampler,replacement=True意味着有放回抽样 sampler = ds.RandomSampler(replacement=True, num_samples=5) dataset2 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -# 启动数据管道,输出5个样本数据 for data in dataset2.create_dict_iterator(): print("Image shape:", data['image'].shape, ", Label:", data['label']) - -print("") - -# 3. 
同类用法,指定数据集中的num_samples参数为5,shuffle参数为True,同样可以达到2的效果 -dataset3 = ds.Cifar10Dataset(DATA_DIR, num_samples=5, shuffle=True) - -# 启动数据管道,输出5个样本数据 -for data in dataset3.create_dict_iterator(): - print("Image shape:", data['image'].shape, ", Label:", data['label']) ``` +输出结果如下: + ``` Image shape: (32, 32, 3) , Label: 0 Image shape: (32, 32, 3) , Label: 2 Image shape: (32, 32, 3) , Label: 6 Image shape: (32, 32, 3) , Label: 4 Image shape: (32, 32, 3) , Label: 6 - -Image shape: (32, 32, 3) , Label: 8 -Image shape: (32, 32, 3) , Label: 8 -Image shape: (32, 32, 3) , Label: 1 -Image shape: (32, 32, 3) , Label: 2 -Image shape: (32, 32, 3) , Label: 7 - +------------ Image shape: (32, 32, 3) , Label: 8 Image shape: (32, 32, 3) , Label: 8 Image shape: (32, 32, 3) , Label: 1 @@ -163,29 +83,25 @@ Image shape: (32, 32, 3) , Label: 7 指定每种类别的采样概率,按照概率在各类别中随机采样指定数目的数据。 -```python -# 通过WeightedRandomSampler定义一个带权重的随机采样器,并作用于数据集 +下面的样例使用带权随机采样器从CIFAR-10数据集的10个类别中按概率获取6个样本,并展示已读取数据的形状和标签。 +```python import mindspore.dataset as ds -# 设置全局随机种子,确保WeightedRandomSampler的行为可预测 ds.config.set_seed(1) -# CIFAR数据集路径 DATA_DIR = "Cifar10Data/" -# 定义一个带权重的随机采样器WeightedRandomSampler -# weights代表CIFAR10中10类数据的采样概率,num_samples表示随机获取6个样本数据 -# replacement参数与RandomSampler中一致 weights = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0] sampler = ds.WeightedRandomSampler(weights, num_samples=6) -dataset1 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) +dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -# 启动数据管道,输出6个样本数据 -for data in dataset1.create_dict_iterator(): +for data in dataset.create_dict_iterator(): print("Image shape:", data['image'].shape, ", Label:", data['label']) ``` +输出结果如下: + ``` Image shape: (32, 32, 3) , Label: 1 Image shape: (32, 32, 3) , Label: 1 @@ -199,28 +115,25 @@ Image shape: (32, 32, 3) , Label: 0 从指定索引子序列中随机采样指定数目的数据。 -```python -# 通过SubsetRandomSampler定义一个子集随机采样器,并作用于数据集 +下面的样例使用子序列随机采样器从CIFAR-10数据集的指定子序列中抽样3个样本,并展示已读取数据的形状和标签。 +```python import mindspore.dataset as ds -# 设置全局随机种子,确保SubsetRandomSampler的行为可预测 ds.config.set_seed(2) -# CIFAR数据集路径 DATA_DIR = "Cifar10Data/" -# 定义一个带采样集合的随机采样器SubsetRandomSampler -# indice代表可采样的集合,num_samples表示获取3个样本数据,即从可采样集合中(0~5)随机获取3个值,作为下标访问数据集的数据 indices = [0, 1, 2, 3, 4, 5] sampler = ds.SubsetRandomSampler(indices, num_samples=3) -dataset1 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) +dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -# 启动数据管道,输出3个样本数据 -for data in dataset1.create_dict_iterator(): +for data in dataset.create_dict_iterator(): print("Image shape:", data['image'].shape, ", Label:", data['label']) ``` +输出结果如下: + ``` Image shape: (32, 32, 3) , Label: 5 Image shape: (32, 32, 3) , Label: 0 @@ -231,29 +144,24 @@ Image shape: (32, 32, 3) , Label: 3 在指定的数据集类别P中,每种类别各采样K条数据。 -```python -# 通过PKSampler定义一个针对各个类别随机采样器,并作用于数据集 +下面的样例使用PK采样器从CIFAR-10数据集中每种类别抽样2个样本,最多20个样本,并展示已读取数据的形状和标签。 +```python import mindspore.dataset as ds -# 设置全局随机种子,确保PKSampler的shuffle参数行为可预测 ds.config.set_seed(3) -# CIFAR数据集路径 DATA_DIR = "Cifar10Data/" -# 定义一个针对类别采样的随机采样器PKSampler -# num_val代表从每个类别采样K个样本,class_column代表针对特定的数据列采样(一般是label) -# num_samples代表输出的样本数,设置num_samples = num_val*class_nums,确保每个类别平均采样 -# shuffle代表样本是否需要被混洗 sampler = ds.PKSampler(num_val=2, class_column='label', num_samples=20) -dataset1 = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) +dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler) -# 启动数据管道,输出20个样本数据 -for data in dataset1.create_dict_iterator(): +for data in dataset.create_dict_iterator(): print("Image shape:", data['image'].shape, ", Label:", data['label']) ``` +输出结果如下: + 
``` Image shape: (32, 32, 3) , Label: 0 Image shape: (32, 32, 3) , Label: 0 @@ -281,66 +189,53 @@ Image shape: (32, 32, 3) , Label: 9 在分布式训练中,对数据集分片进行采样。 -```python -# 通过DistributedSampler定义一个将数据集进行分片操作,并获取某个分片进行采样的采样器,并作用于数据集 +下面的样例使用分布式采样器将构建的数据集分为3片,在每个分片中采样3个数据样本,并展示已读取的数据。 +```python import numpy as np import mindspore.dataset as ds -# 构建一个list data_source = [0, 1, 2, 3, 4, 5, 6, 7, 8] -# 定义一个采样器DistributedSampler -# num_shards代表将CIFAR数据集拆分成n个分片 -# shard_id代表获取第m个分片 -# num_samples代表获取该分片的10个样本 -# shuffle代表样本是否需要被混洗 sampler = ds.DistributedSampler(num_shards=3, shard_id=0, shuffle=False, num_samples=3) - -# 从list中构建数据管道 dataset = ds.NumpySlicesDataset(data_source, column_names=["data"], sampler=sampler) -# 经过DistributedSampler分片后,数据集的内容为 -# shard_id 0: 0, 3, 6 -# shard_id 1: 1, 4, 7 -# shard_id 2: 2, 5, 8 -# 因此第0个分片拥有数据为0, 3, 6 for data in dataset.create_dict_iterator(): print(data) ``` +输出结果如下: + ``` -{'data': array(0, dtype=int64)} -{'data': array(3, dtype=int64)} -{'data': array(6, dtype=int64)} +{'data': Tensor(shape=[], dtype=Int64, value= 0)} +{'data': Tensor(shape=[], dtype=Int64, value= 3)} +{'data': Tensor(shape=[], dtype=Int64, value= 6)} ``` ## 自定义采样器 -用户可以继承Sampler基类,通过实现`__iter__`方法来自定义采样器的采样方式。 +用户可以继承`Sampler`基类,通过实现`__iter__`方法来自定义采样器的采样方式。 -```python -# 继承Sampler基类,重载__iter__成为新的采样器 +下面的样例定义了一个从下标0至下标9间隔为2采样的采样器,将其作用于CIFAR-10数据集,并展示已读取数据的形状和标签。 +```python import mindspore.dataset as ds class MySampler(ds.Sampler): def __iter__(self): - # 采样器的行为是,从下标0开始到下标9,以2为间隔采样 for i in range(0, 10, 2): yield i -# CIFAR数据集路径 DATA_DIR = "Cifar10Data/" -# 将自定义构建的采样器传入到sampler参数 -dataset1 = ds.Cifar10Dataset(DATA_DIR, sampler=MySampler()) +dataset = ds.Cifar10Dataset(DATA_DIR, sampler=MySampler()) -# 启动数据管道,输出5个样本数据 -for data in dataset1.create_dict_iterator(): +for data in dataset.create_dict_iterator(): print("Image shape:", data['image'].shape, ", Label:", data['label']) ``` +输出结果如下: + ``` Image shape: (32, 32, 3) , Label: 0 Image shape: (32, 32, 3) , Label: 2 diff --git a/api/source_zh_cn/programming_guide/security_and_privacy.md b/api/source_zh_cn/programming_guide/security_and_privacy.md index 8b6d6c20c9846c8847f76851223ad04ba451fa14..7fb6c5d187c7169ae46a8384291bdb41234fb6d8 100644 --- a/api/source_zh_cn/programming_guide/security_and_privacy.md +++ b/api/source_zh_cn/programming_guide/security_and_privacy.md @@ -2,60 +2,60 @@ -- [AI安全与隐私保护](AI安全与隐私保护) +- [AI安全与隐私保护](#ai安全与隐私保护) - [概述](#概述) - [对抗鲁棒性](#对抗鲁棒性) - - [Attack](#Attack) - - [Defense](#Defense) - - [Detector](#Detector) + - [Attack](#attack) + - [Defense](#defense) + - [Detector](#detector) - [模型安全测试](#模型安全测试) - - [Fuzzer](#Fuzzer) + - [Fuzzer](#fuzzer) - [差分隐私训练](#差分隐私训练) - - [DPModel](#DPModel) + - [DPModel](#dpmodel) - [隐私泄露风险评估](#隐私泄露风险评估) - - [MembershipInference](#MembershipInference) + - [MembershipInference](#membershipinference) -## 概述 + -本篇是AI安全与隐私保护的编程指南。 +## 概述 -AI作为一种通用技术,在带来巨大机遇和效益的同时也面临着新的安全与隐私保护的挑战。MindArmour是MindSpore的一个子项目,为MindSpore提供安全与隐私保护能力,主要包括对抗鲁棒性、模型安全测试、差分隐私训练、隐私泄露风险评估等技术。 +本篇主要介绍AI安全与隐私保护。AI作为一种通用技术,在带来巨大机遇和效益的同时也面临着新的安全与隐私保护的挑战。MindArmour是MindSpore的一个子项目,为MindSpore提供安全与隐私保护能力,主要包括对抗鲁棒性、模型安全测试、差分隐私训练、隐私泄露风险评估等技术。 ## 对抗鲁棒性 ### Attack -Attack基类定义了对抗样本生成的使用接口,其子类实现了各种具体的生成算法,支持安全工作人员快速高效地生成对抗样本,用于攻击AI模型,以评估模型的鲁棒性。 +`Attack`基类定义了对抗样本生成的使用接口,其子类实现了各种具体的生成算法,支持安全工作人员快速高效地生成对抗样本,用于攻击AI模型,以评估模型的鲁棒性。 ### Defense -Defense基类定义了对抗训练的使用接口,其子类实现了各种具体的对抗训练算法,增强模型的对抗鲁棒性。 +`Defense`基类定义了对抗训练的使用接口,其子类实现了各种具体的对抗训练算法,增强模型的对抗鲁棒性。 ### Detector -Detector基类定义了对抗样本检测的使用借口,其子类实现了各种具体的检测算法,增强模型的对抗鲁棒性。 
+`Detector`基类定义了对抗样本检测的使用接口,其子类实现了各种具体的检测算法,增强模型的对抗鲁棒性。
 
-详细内容,请参考[对抗鲁棒性官网教程](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/model_security.html)
+详细内容,请参考[对抗鲁棒性官网教程](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/model_security.html)。
 
 ## 模型安全测试
 
 ### Fuzzer
 
-Fuzzer类基于神经元覆盖率增益控制fuzzing流程,采用自然扰动和对抗样本生成方法作为变异策略,激活更多的神经元,从而探索不同类型的模型输出结果、错误行为,指导用户增强模型鲁棒性。
+`Fuzzer`类基于神经元覆盖率增益控制fuzzing流程,采用自然扰动和对抗样本生成方法作为变异策略,激活更多的神经元,从而探索不同类型的模型输出结果、错误行为,指导用户增强模型鲁棒性。
 
-详细内容,请参考[模型安全测试官网教程](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/fuzzer.html)
+详细内容,请参考[模型安全测试官网教程](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/fuzzer.html)。
 
 ## 差分隐私训练
 
 ### DPModel
 
-DPModel继承了mindspore.Model,提供了差分隐私训练的入口函数。
+`DPModel`继承了`mindspore.Model`,提供了差分隐私训练的入口函数。
 
-详细内容,请参考[差分隐私官网教程](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/differential_privacy.html)
+详细内容,请参考[差分隐私官网教程](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/differential_privacy.html)。
 
 ## 隐私泄露风险评估
 
 ### MembershipInference
 
-MembershipInference类提供了一种模型逆向分析方法,能够基于模型对样本的预测信息,推测某个样本是否在模型的训练集中,以此评估模型的隐私泄露风险。
+`MembershipInference`类提供了一种模型逆向分析方法,能够基于模型对样本的预测信息,推测某个样本是否在模型的训练集中,以此评估模型的隐私泄露风险。
 
-详细内容,请参考[隐私泄露风险评估官方教程](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/membership_inference.html)
+详细内容,请参考[隐私泄露风险评估官方教程](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/membership_inference.html)。
diff --git a/api/source_zh_cn/programming_guide/tensor.md b/api/source_zh_cn/programming_guide/tensor.md
index 3959362bb808dba14c31328977e11075d25c773c..875a819479bd007e1a905a2978c819280ebe6049 100644
--- a/api/source_zh_cn/programming_guide/tensor.md
+++ b/api/source_zh_cn/programming_guide/tensor.md
@@ -1,8 +1,8 @@
-# 张量
+# Tensor
 
 
 
-- [张量](#张量)
+- [Tensor](#tensor)
     - [概述](#概述)
     - [张量构造](#张量构造)
     - [张量的属性和方法](#张量的属性和方法)
@@ -15,20 +15,17 @@
 
 ## 概述
 
-张量是MindSpore网络运算中的基本数据结构,即为多维数组。张量里的数据分为不同的类型,
-支持的类型有`int8`、`int16`、`int32`、`int64`、`uint8`、`uint16`、`uint32`、`uint64`、`float16`、`float32`、`float64`、`bool_`,
-与NumPy里的数据类型一一对应。
+张量(Tensor)是MindSpore网络运算中的基本数据结构。张量中的数据类型可参考[dtype](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dtype.html)。
 
 不同维度的张量分别表示不同的数据,0维张量表示标量,1维张量表示向量,2维张量表示矩阵,3维张量可以表示彩色图像的RGB三通道等等。
 
-> 本文档中的所有示例,都是在PyNative模式下运行的,暂不支持CPU。
+> 本文中的所有示例,支持在PyNative模式下运行,暂不支持CPU。
 
 ## 张量构造
 
-构造张量时支持传入`Tensor`、`float`、`int`、`bool`、`tuple`、`list`和`NumPy.array`。
+构造张量时,支持传入`Tensor`、`float`、`int`、`bool`、`tuple`、`list`和`NumPy.array`类型。
 
-`Tensor`作为初始值可指定dtype,如果没有指定dtype,`int`、`float`、`bool`分别对应`int32`、`float32`、`bool_`,
-`tuple`和`list`生成的1维`Tensor`数据类型与`tuple`和`list`里存放数据的类型相对应。
+`Tensor`作为初始值时,可指定dtype,如果没有指定dtype,`int`、`float`、`bool`分别对应`int32`、`float32`、`bool_`,`tuple`和`list`生成的1维`Tensor`数据类型与`tuple`和`list`里存放数据的类型相对应。
 
 代码样例如下:
 
@@ -65,6 +62,7 @@
 True
 ```
 
 ## 张量的属性和方法
+
 ### 属性
 
 张量的属性包括形状(shape)和数据类型(dtype)。
@@ -93,9 +91,9 @@
 print(x_shape, x_dtype)
 ```
 
 ### 方法
 
-张量的方法包括`all`、`any`和`asnumpy`。
-- `all(axis, keep_dims)`:在指定维度上通过`and`操作进行归约,axis代表归约维度,keep_dims表示是否保留归约后的维度。
-- `any(axis, keep_dims)`:在指定维度上通过`or`操作进行归约,axis代表归约维度,keep_dims表示是否保留归约后的维度。
+张量的方法包括`all`、`any`和`asnumpy`,`all`和`any`方法目前只支持Ascend。
+- `all(axis, keep_dims)`:在指定维度上通过`and`操作进行归约,`axis`代表归约维度,`keep_dims`表示是否保留归约后的维度。
+- `any(axis, keep_dims)`:在指定维度上通过`or`操作进行归约,参数含义同`all`。
 - `asnumpy()`:将`Tensor`转换为NumPy的array。
 
 代码样例如下:
diff --git a/api/source_zh_cn/programming_guide/tokenizer.md b/api/source_zh_cn/programming_guide/tokenizer.md
index 8ac94e6a54d453785728c66681b78d410f1288c0..7106aa3d20fb059b3a59317c13cb1c5fb0d7e543 100644
--- 
a/api/source_zh_cn/programming_guide/tokenizer.md +++ b/api/source_zh_cn/programming_guide/tokenizer.md @@ -5,6 +5,12 @@ - [分词器](#分词器) - [概述](#概述) - [MindSpore分词器](#mindspore分词器) + - [BertTokenizer](#BertTokenizer) + - [JiebaTokenizer](#JiebaTokenizer) + - [SentencePieceTokenizer](#SentencePieceTokenizer) + - [UnicodeCharTokenizer](#UnicodeCharTokenizer) + - [WhitespaceTokenizer](#WhitespaceTokenizer) + - [WordpieceTokenizer](#WordpieceTokenizer) @@ -14,7 +20,7 @@ 分词就是将连续的字序列按照一定的规范重新组合成词序列的过程,合理的进行分词有助于语义的理解。 -MindSpore提供了多种用途的分词器,能够帮助用户高性能地处理文本,用户可以构建自己的字典,使用适当的标记器将句子拆分为不同的标记,并通过查找操作获取字典中标记的索引。 +MindSpore提供了多种用途的分词器(Tokenizer),能够帮助用户高性能地处理文本,用户可以构建自己的字典,使用适当的标记器将句子拆分为不同的标记,并通过查找操作获取字典中标记的索引。 MindSpore目前提供的分词器如下表所示。此外,用户也可以根据需要实现自定义的分词器。 @@ -34,105 +40,47 @@ MindSpore目前提供的分词器如下表所示。此外,用户也可以根 ## MindSpore分词器 -### BasicTokenizer - -`BasicTokenizer`是通过大小写折叠、编码统一、去除重音符,按照正则匹配模式来分词的。 - -```python -import mindspore.dataset as ds -import mindspore.dataset.text as text - -# 构建输入的数据列表 -input_list = ["Welcome to Beijing北京欢迎您", "長風破浪會有時,直掛雲帆濟滄海","😀嘿嘿😃哈哈😄大笑😁嘻嘻", - "明朝(1368—1644年)和清朝(1644—1911年),是中国封建王朝史上最后两个朝代", - "明代(1368-1644)と清代(1644-1911)は、中国の封建王朝の歴史における最後の2つの王朝でした", - "명나라 (1368-1644)와 청나라 (1644-1911)는 중국 봉건 왕조의 역사에서 마지막 두 왕조였다"] - -dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False) - -print("------------------------before tokenize----------------------------") - -# 输出分词之前的数据 -for data in dataset.create_dict_iterator(): - print(text.to_str(data['text'])) - -#打印分词后的数据输出 -print("------------------------after tokenize-----------------------------") - -# 输出分词之后的数据 -# BasicTokenizer为分词的函数 -basic_tokenizer = text.BasicTokenizer() - -dataset = dataset.map(operations=basic_tokenizer) - -for i in dataset.create_dict_iterator(num_epochs=1): - token = text.to_str(i['text']) - print(token) -``` - -``` -------------------------before tokenize---------------------------- -Welcome to Beijing北京欢迎您 -長風破浪會有時,直掛雲帆濟滄海 -😀嘿嘿😃哈哈😄大笑😁嘻嘻 -明朝(1368—1644年)和清朝(1644—1911年),是中国封建王朝史上最后两个朝代 -明代(1368-1644)と清代(1644-1911)は、中国の封建王朝の歴史における最後の2つの王朝でした -명나라 (1368-1644)와 청나라 (1644-1911)는 중국 봉건 왕조의 역사에서 마지막 두 왕조였다 -------------------------after tokenize----------------------------- -['Welcome' 'to' 'Beijing' '北' '京' '欢' '迎' '您'] -['長' '風' '破' '浪' '會' '有' '時' ',' '直' '掛' '雲' '帆' '濟' '滄' '海'] -['😀' '嘿' '嘿' '😃' '哈' '哈' '😄' '大' '笑' '😁' '嘻' '嘻'] -['明' '朝' '(' '1368' '—' '1644' '年' ')' '和' '清' '朝' '(' '1644' '—' '1911' '年' ')' ',' '是' '中' '国' '封' '建' '王' '朝' '史' '上' '最' '后' '两' '个' '朝' '代'] -['明' '代' '(' '1368' '-' '1644' ')' 'と' '清' '代' '(' '1644' '-' '1911' ')' 'は' '、' '中' '国' 'の' '封' '建' '王' '朝' 'の' '歴' '史' 'における' '最' '後' 'の2つの' '王' '朝' 'でした'] -['명나라' '(' '1368' '-' '1644' ')' '와' '청나라' '(' '1644' '-' '1911' ')' '는' '중국' '봉건' '왕조의' '역사에서' '마지막' '두' '왕조였다'] -``` +下面介绍几种常用分词器的使用方法。 ### BertTokenizer `BertTokenizer`是通过调用`BasicTokenizer`和`WordpieceTokenizer`来进行分词的。 +下面的样例首先构建了一个文本数据集和字符串列表,然后通过`BertTokenizer`对数据集进行分词,并展示了分词前后的文本结果。 + ```python import mindspore.dataset as ds import mindspore.dataset.text as text -# 构建输入的数据列表 input_list = ["床前明月光", "疑是地上霜", "举头望明月", "低头思故乡", "I am making small mistakes during working hours", "😀嘿嘿😃哈哈😄大笑😁嘻嘻", "繁體字"] - dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False) -print("------------------------before tokenize----------------------------") +print("------------------------before tokenization----------------------------") -# 输出分词之前的数据 -for data in dataset.create_dict_iterator(): +for data in dataset.create_dict_iterator(output_numpy=True): 
    print(text.to_str(data['text']))

-# 字符串列表,其中每个元素都是字符串类型的单词。
vocab_list = [
    "床", "前", "明", "月", "光", "疑", "是", "地", "上", "霜", "举", "头",
    "望", "低", "思", "故", "乡", "繁", "體", "字", "嘿", "哈", "大", "笑",
    "嘻", "i", "am", "mak", "make", "small", "mistake", "##s", "during",
    "work", "##ing", "hour", "😀", "😃", "😄", "😁", "+", "/", "-", "=", "12",
    "28", "40", "16", " ", "I", "[CLS]", "[SEP]", "[UNK]", "[PAD]", "[MASK]", "[unused1]", "[unused10]"]

-# 从单词列表中构建一个vocab对象
vocab = text.Vocab.from_list(vocab_list)
-
-# 输出分词之后的数据
-# BertTokenizer为分词的函数
tokenizer_op = text.BertTokenizer(vocab=vocab)
-
-#打印分词后的数据输出
-print("------------------------after tokenize-----------------------------")
-
dataset = dataset.map(operations=tokenizer_op)

-for i in dataset.create_dict_iterator(num_epochs=1):
-    token = text.to_str(i['text'])
-    print(token)
+print("------------------------after tokenization-----------------------------")
+
+for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+    print(text.to_str(i['text']))
```

+输出结果如下:
+
```
-------------------------before tokenize----------------------------
+------------------------before tokenization----------------------------
床前明月光
疑是地上霜
举头望明月
@@ -140,7 +88,7 @@
I am making small mistakes during working hours
😀嘿嘿😃哈哈😄大笑😁嘻嘻
繁體字
-------------------------after tokenize-----------------------------
+------------------------after tokenization-----------------------------
['床' '前' '明' '月' '光']
['疑' '是' '地' '上' '霜']
['举' '头' '望' '明' '月']
@@ -154,123 +102,75 @@

`JiebaTokenizer`是基于jieba的中文分词。

+下面的样例首先构建了一个文本数据集,然后使用HMM与MP字典文件创建`JiebaTokenizer`对象,并对数据集进行分词,最后展示了分词前后的文本结果。
+
```python
import mindspore.dataset as ds
import mindspore.dataset.text as text

-# 构建输入的数据列表
-input_list = ["床前明月光", "疑是地上霜", "举头望明月", "低头思故乡", "I am making small mistakes during working hours", "😀嘿嘿😃哈哈😄大笑😁嘻嘻", "繁體字"]
-
-# 字典文件由HMMSegment算法和MPSegment算法使用,该字典可在cppjieba的官方网站上获得。
-HMM_FILE = "hmm_model.utf8"
-MP_FILE = "jieba.dict.utf8"
-
+input_list = ["今天天气太好了我们一起去外面玩吧"]
dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False)

-print("------------------------before tokenize----------------------------")
+print("------------------------before tokenization----------------------------")

-# 输出分词之前的数据
-for data in dataset.create_dict_iterator():
+for data in dataset.create_dict_iterator(output_numpy=True):
    print(text.to_str(data['text']))

-tokenizer_op = text.JiebaTokenizer(HMM_FILE, MP_FILE)
-
-#打印分词后的数据输出
-print("------------------------after tokenize-----------------------------")
+HMM_FILE = "hmm_model.utf8"
+MP_FILE = "jieba.dict.utf8"
+jieba_op = text.JiebaTokenizer(HMM_FILE, MP_FILE)
+dataset = dataset.map(operations=jieba_op, input_columns=["text"], num_parallel_workers=1)

-dataset = dataset.map(input_columns=["text"], operations=jieba_op, num_parallel_workers=1)
+print("------------------------after tokenization-----------------------------")

-for i in dataset.create_dict_iterator(num_epochs=1):
-    token = text.to_str(i['text'])
-    print(token)
+for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+    print(text.to_str(i['text']))
```

+输出结果如下:
+
```
-------------------------before tokenize----------------------------
+------------------------before tokenization----------------------------
今天天气太好了我们一起去外面玩吧
-------------------------after tokenize-----------------------------
+------------------------after tokenization-----------------------------
['今天天气' '太好了' '我们' '一起' '去' '外面' '玩吧']
```

-### RegexTokenizer
-
-`RegexTokenizer`是通正则表达式匹配模式来进行分词的。
-
-```python
-import mindspore.dataset as ds
-import mindspore.dataset.text as text
-
-# 构建输入的数据列表
-input_list = ["Welcome to Shenzhen!"]
-
-# 原始字符串将由匹配的元素分隔。
-delim_pattern = "\\s+"
-
-dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False)
-
-print("------------------------before tokenize----------------------------")
-
-# 输出分词之前的数据
-for data in dataset.create_dict_iterator():
-    print(text.to_str(data['text']))
-
-tokenizer_op = text.RegexTokenizer(delim_pattern)
-
-#打印分词后的数据输出
-print("------------------------after tokenize-----------------------------")
-
-dataset = dataset.map(operations=tokenizer_op)
-
-for i in dataset.create_dict_iterator(num_epochs=1):
-    token = text.to_str(i['text']).tolist()
-    print(token)
-```
-
-```
-------------------------before tokenize----------------------------
-Welcome to Shenzhen!
-------------------------after tokenize-----------------------------
-['Welcome', 'to', 'Shenzhen!']
-```
-
### SentencePieceTokenizer

-`SentencePieceTokenizer`是基于SentencePiece这个开源的自然语言处理工具包。
+`SentencePieceTokenizer`是基于[SentencePiece](https://github.com/google/sentencepiece)这个开源的自然语言处理工具包。
+
+下面的样例首先构建了一个文本数据集,然后从数据集中构建一个`vocab`对象,再通过`SentencePieceTokenizer`对数据集进行分词,并展示了分词前后的文本结果。

```python
import mindspore.dataset as ds
import mindspore.dataset.text as text
+from mindspore.dataset.text import SentencePieceModel, SPieceTokenizerOutType

-# 构建输入的数据列表
input_list = ["I saw a girl with a telescope."]
-
dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False)

-print("------------------------before tokenize----------------------------")
+print("------------------------before tokenization----------------------------")

-# 输出分词之前的数据
-for data in dataset.create_dict_iterator():
+for data in dataset.create_dict_iterator(output_numpy=True):
    print(text.to_str(data['text']))

-# 从文件数据中构建一个vocab对象
-vocab = text.SentencePieceVocab.from_file([VOCAB_FILE], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
+vocab = text.SentencePieceVocab.from_dataset(dataset, ["text"], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
tokenizer_op = text.SentencePieceTokenizer(vocab, out_type=SPieceTokenizerOutType.STRING)
-
-#打印分词后的数据输出
-print("------------------------after tokenize-----------------------------")
-
dataset = dataset.map(operations=tokenizer_op)

-for i in dataset.create_dict_iterator(num_epochs=1):
-    token = text.to_str(i['text'])
-    print(token)
+print("------------------------after tokenization-----------------------------")
+
+for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+    print(text.to_str(i['text']))
```

+输出结果如下:
+
```
-------------------------before tokenize----------------------------
+------------------------before tokenization----------------------------
I saw a girl with a telescope.
-------------------------after tokenize-----------------------------
+------------------------after tokenization-----------------------------
['▁I' '▁sa' 'w' '▁a' '▁girl' '▁with' '▁a' '▁te' 'les' 'co' 'pe' '.']
```
@@ -278,124 +178,77 @@
`UnicodeCharTokenizer`是根据Unicode字符集来分词的。 +下面的样例首先构建了一个文本数据集,然后通过`UnicodeCharTokenizer`对数据集进行分词,并展示了分词前后的文本结果。 + ```python import mindspore.dataset as ds import mindspore.dataset.text as text -# 构建输入的数据列表 input_list = ["Welcome to Beijing!", "北京欢迎您!", "我喜欢English!"] - dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False) -print("------------------------before tokenize----------------------------") +print("------------------------before tokenization----------------------------") -# 输出分词之前的数据 -for data in dataset.create_dict_iterator(): +for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text'])) tokenizer_op = text.UnicodeCharTokenizer() - -#打印分词后的数据输出 -print("------------------------after tokenize-----------------------------") - dataset = dataset.map(operations=tokenizer_op) -for i in dataset.create_dict_iterator(num_epochs=1): - token = text.to_str(i['text']).tolist() - print(token) +print("------------------------after tokenization-----------------------------") + +for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True): + print(text.to_str(i['text']).tolist()) ``` +输出结果如下: + ``` -------------------------before tokenize---------------------------- +------------------------before tokenization---------------------------- Welcome to Beijing! 北京欢迎您! 我喜欢English! -------------------------after tokenize----------------------------- +------------------------after tokenization----------------------------- ['W', 'e', 'l', 'c', 'o', 'm', 'e', ' ', 't', 'o', ' ', 'B', 'e', 'i', 'j', 'i', 'n', 'g', '!'] ['北', '京', '欢', '迎', '您', '!'] ['我', '喜', '欢', 'E', 'n', 'g', 'l', 'i', 's', 'h', '!'] ``` -### UnicodeScriptTokenizer - -`UnicodeScriptTokenizer`是根据不同的Unicode的边界来进行分词的。 - -```python -import mindspore.dataset as ds -import mindspore.dataset.text as text - -# 构建输入的数据列表 -input_list = ["Welcome to Beijing!", "北京欢迎您!", "我喜欢English!"] - -dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False) - -print("------------------------before tokenize----------------------------") - -# 输出分词之前的数据 -for data in dataset.create_dict_iterator(): - print(text.to_str(data['text'])) - -tokenizer_op = text.UnicodeScriptTokenizer() - -#打印分词后的数据输出 -print("------------------------after tokenize-----------------------------") - -dataset = dataset.map(operations=tokenizer_op) - -for i in dataset.create_dict_iterator(num_epochs=1): - token = text.to_str(i['text']).tolist() - print(token) -``` - -``` -------------------------before tokenize---------------------------- -Welcome to Beijing! -北京欢迎您! -我喜欢English! 
-------------------------after tokenize----------------------------- -['Welcome', 'to', 'Beijing', '!'] -['北京欢迎您', '!'] -['我喜欢', 'English', '!'] -``` - ### WhitespaceTokenizer `WhitespaceTokenizer`是根据空格来进行分词的。 +下面的样例首先构建了一个文本数据集,然后通过`WhitespaceTokenizer`对数据集进行分词,并展示了分词前后的文本结果。 + ```python import mindspore.dataset as ds import mindspore.dataset.text as text -# 构建输入的数据列表 input_list = ["Welcome to Beijing!", "北京欢迎您!", "我喜欢English!"] - dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False) -print("------------------------before tokenize----------------------------") +print("------------------------before tokenization----------------------------") -# 输出分词之前的数据 -for data in dataset.create_dict_iterator(): +for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text'])) tokenizer_op = text.WhitespaceTokenizer() - -#打印分词后的数据输出 -print("------------------------after tokenize-----------------------------") - dataset = dataset.map(operations=tokenizer_op) -for i in dataset.create_dict_iterator(num_epochs=1): - token = text.to_str(i['text']).tolist() - print(token) +print("------------------------after tokenization-----------------------------") + +for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True): + print(text.to_str(i['text']).tolist()) ``` +输出结果如下: + ``` ->> Tokenize Result -------------------------before tokenize---------------------------- +------------------------before tokenization---------------------------- Welcome to Beijing! 北京欢迎您! 我喜欢English! -------------------------after tokenize----------------------------- +------------------------after tokenization----------------------------- ['Welcome', 'to', 'Beijing!'] ['北京欢迎您!'] ['我喜欢English!'] @@ -405,40 +258,34 @@ Welcome to Beijing! 
`WordpieceTokenizer`是基于单词集来划分的,单词集里没有的,但是有组合的也会划分出来。 +下面的样例首先构建了一个文本数据集,然后从单词列表中构建`vocab`对象,通过`WordpieceTokenizer`对数据集进行分词,并展示了分词前后的文本结果。 + ```python import mindspore.dataset as ds import mindspore.dataset.text as text -# 构建输入的数据列表 -input_list = ["my", "favorite", "book", "is", "love", "during", "the", "cholera", "era", "what", "我", "最", "喜", "欢", "的", "书", "是", "霍", "乱", "时", "期", "的", "爱", "情", "您"] - +input_list = ["my", "favorite", "book", "is", "love", "during", "the", "cholera", "era", "what", "我", "最", "喜", "欢", "书", "是", "霍", "乱", "时", "期", "的", "爱", "情", "您"] dataset = ds.NumpySlicesDataset(input_list, column_names=["text"], shuffle=False) -print("------------------------before tokenize----------------------------") +print("------------------------before tokenization----------------------------") -# 输出分词之前的数据 -for data in dataset.create_dict_iterator(): +for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text'])) -#打印分词后的数据输出 -print("------------------------after tokenize-----------------------------") - -# 从单词列表中构建一个vocab对象 -vocab = text.Vocab.from_list(vocab_list) - -# 输出分词之后的数据 -# BasicTokenizer为分词的函数 +vocab = text.Vocab.from_list(input_list) tokenizer_op = text.WordpieceTokenizer(vocab=vocab) - dataset = dataset.map(operations=tokenizer_op) -for i in dataset.create_dict_iterator(num_epochs=1): - token = text.to_str(i['text']) - print(token) +print("------------------------after tokenization-----------------------------") + +for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True): + print(text.to_str(i['text'])) ``` +输出结果如下: + ``` -------------------------before tokenize---------------------------- +------------------------before tokenization---------------------------- my favorite book @@ -464,7 +311,7 @@ what 爱 情 您 -------------------------after tokenize----------------------------- +------------------------after tokenization----------------------------- ['my'] ['favor' '##ite'] ['book'] diff --git a/api/source_zh_cn/programming_guide/train.md b/api/source_zh_cn/programming_guide/train.md new file mode 100644 index 0000000000000000000000000000000000000000..448fa3c6d168171c0ccdeeed589c3fe5b4725552 --- /dev/null +++ b/api/source_zh_cn/programming_guide/train.md @@ -0,0 +1,444 @@ +# 训练 + + + +- [训练](#训练) + - [概述](#概述) + - [自定义训练网络](#自定义训练网络) + - [自定义训练循环](#自定义训练循环) + - [边训练边推理](#边训练边推理) + - [on-device执行](#on-device执行) + - [计算图下沉](#计算图下沉) + - [数据下沉](#数据下沉) + + + + + +## 概述 +MindSpore在Model_zoo也已经提供了大量的目标检测、自然语言处理等多种网络模型,供用户直接使用,但是对于某些高级用户而言可能想要自行设计网络或者自定义训练循环,下面就对自定义训练网络、自定义训练循环和边训练边推理三种场景进行介绍,另外对On device执行方式进行详细介绍。 + +## 自定义训练网络 +在自定义训练网络前,需要先了解下MindSpore的网络支持、Python源码构造网络约束和算子支持情况。 + +- 网络支持:当前MindSpore已经支持多种网络,按类型分为计算机视觉、自然语言处理、推荐和图神经网络,可以通过[网络支持](https://www.mindspore.cn/docs/zh-CN/master/network_list.html)查看具体支持的网络情况。如果现有网络无法满足用户需求,用户可以根据实际需要定义自己的网络。 + +- Python源码构造网络约束:MindSpore暂不支持将任意Python源码转换成计算图,所以对于用户源码支持的写法有所限制,主要包括语法约束和网络定义约束两方面。详细情况可以查看[Python源码构造网络约束](https://www.mindspore.cn/docs/zh-CN/master/constraints_on_network_construction.html)了解。随着MindSpore的演进,这些约束可能会发生变化。 + +- 算子支持:顾名思义,网络的基础是算子,所以用户自定义训练网络前要对MindSpore当前支持的算子有所了解,可以通过查看[算子支持](https://www.mindspore.cn/docs/zh-CN/master/operator_list.html)了解不同的后端(Ascend、GPU和CPU)的算子实现情况。 + +> 当开发网络遇到内置算子不足以满足需求时,用户也可以参考[自定义算子](https://www.mindspore.cn/tutorial/zh-CN/master/use/custom_operator.html),方便快捷地扩展昇腾AI处理器的自定义算子。 + +代码样例如下: +```python +import numpy as np + +from mindspore.common.tensor import Tensor +from mindspore.nn import Cell, Dense, SoftmaxCrossEntropyWithLogits, 
Momentum, TrainOneStepCell, WithLossCell +from mindspore.ops import operations as P + + +class ReLUReduceMeanDense(Cell): + def __init__(self, kernel, bias, in_channel, num_class): + super().__init__() + self.relu = P.ReLU() + self.mean = P.ReduceMean(keep_dims=False) + self.dense = Dense(in_channel, num_class, kernel, bias) + + def construct(self, x): + x = self.relu(x) + x = self.mean(x, (2, 3)) + x = self.dense(x) + return x + + +if __name__ == "__main__": + weight_np = np.ones((1000, 2048)).astype(np.float32) + weight = Tensor(weight_np.copy()) + bias_np = np.ones((1000,)).astype(np.float32) + bias = Tensor(bias_np.copy()) + net = ReLUReduceMeanDense(weight, bias, 2048, 1000) + criterion = SoftmaxCrossEntropyWithLogits(sparse=False) + optimizer = Momentum(learning_rate=0.1, momentum=0.1, + params=filter(lambda x: x.requires_grad, net.get_parameters())) + net_with_criterion = WithLossCell(net, criterion) + train_network = TrainOneStepCell(net_with_criterion, optimizer) + train_network.set_train() + input_np = np.random.randn(32, 2048, 7, 7).astype(np.float32) + input = Tensor(input_np.copy()) + label_np_onehot = np.zeros(shape=(32, 1000)).astype(np.float32) + label = Tensor(label_np_onehot.copy()) + for i in range(1): + loss = train_network(input, label) + print("-------loss------", loss) +``` + +输出如下: +```python +-------loss------ [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. + 0. 0. 0. 0. 0. 0. 0. 0.] +``` + +## 自定义训练循环 +用户如果不想使用MindSpore提供的Model接口,可以将模仿Model的train接口自由控制循环的迭代次数和每个epoch的step数量。 + +代码样例如下: +```python +import os + +import mindspore.dataset as ds +import mindspore.dataset.transforms.c_transforms as CT +import mindspore.dataset.vision.c_transforms as CV +import mindspore.nn as nn +from mindspore import context +from mindspore.common import dtype as mstype +from mindspore.common.initializer import TruncatedNormal +from mindspore.common.parameter import ParameterTuple +from mindspore.dataset.vision import Inter +from mindspore.nn.wrap.cell_wrapper import WithLossCell +from mindspore.ops import composite as C +from mindspore.ops import functional as F +from mindspore.ops import operations as P +from mindspore.train.dataset_helper import DatasetHelper, connect_network_with_dataset + + +def create_dataset(data_path, batch_size=32, repeat_size=1, + num_parallel_workers=1): + """ + create dataset for train or test + """ + # define dataset + mnist_ds = ds.MnistDataset(data_path) + + resize_height, resize_width = 32, 32 + rescale = 1.0 / 255.0 + shift = 0.0 + rescale_nml = 1 / 0.3081 + shift_nml = -1 * 0.1307 / 0.3081 + + # define map operations + resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode + rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) + rescale_op = CV.Rescale(rescale, shift) + hwc2chw_op = CV.HWC2CHW() + type_cast_op = CT.TypeCast(mstype.int32) + + # apply map operations on images + mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers) + + # apply DatasetOps + buffer_size = 10000 
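+    # shuffle before batch and repeat so that every epoch sees freshly shuffled batches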
+ mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script + mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True) + mnist_ds = mnist_ds.repeat(repeat_size) + + return mnist_ds + + +def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): + """weight initial for conv layer""" + weight = weight_variable() + return nn.Conv2d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + weight_init=weight, has_bias=False, pad_mode="valid") + + +def fc_with_initialize(input_channels, out_channels): + """weight initial for fc layer""" + weight = weight_variable() + bias = weight_variable() + return nn.Dense(input_channels, out_channels, weight, bias) + + +def weight_variable(): + """weight initial""" + return TruncatedNormal(0.02) + + +class LeNet5(nn.Cell): + """ + Lenet network + Args: + num_class (int): Num classes. Default: 10. + + Returns: + Tensor, output tensor + + Examples: + >>> LeNet(num_class=10) + """ + + def __init__(self, num_class=10): + super(LeNet5, self).__init__() + self.num_class = num_class + self.batch_size = 32 + self.conv1 = conv(1, 6, 5) + self.conv2 = conv(6, 16, 5) + self.fc1 = fc_with_initialize(16 * 5 * 5, 120) + self.fc2 = fc_with_initialize(120, 84) + self.fc3 = fc_with_initialize(84, self.num_class) + self.relu = nn.ReLU() + self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) + self.reshape = P.Reshape() + + def construct(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.conv2(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.reshape(x, (self.batch_size, -1)) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.relu(x) + x = self.fc3(x) + return x + + +class TrainOneStepCell(nn.Cell): + def __init__(self, network, optimizer, sens=1.0): + super(TrainOneStepCell, self).__init__(auto_prefix=False) + self.network = network + self.weights = ParameterTuple(network.trainable_params()) + self.optimizer = optimizer + self.grad = C.GradOperation(get_by_list=True, sens_param=True) + self.sens = sens + + def set_sens(self, value): + self.sens = value + + def construct(self, data, label): + weights = self.weights + loss = self.network(data, label) + sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens) + grads = self.grad(self.network, weights)(data, label, sens) + return F.depend(loss, self.optimizer(grads)) + + +if __name__ == "__main__": + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + ds_train = create_dataset(os.path.join("/home/workspace/mindspore_dataset/MNIST_Data/", "train"), 32) + + network = LeNet5(10) + net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") + net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) + net = WithLossCell(network, net_loss) + net = TrainOneStepCell(net, net_opt) + dataset_helper = DatasetHelper(ds_train, dataset_sink_mode=True, sink_size=100, epoch_num=10) + net = connect_network_with_dataset(net, dataset_helper) + network.set_train() + print("============== Starting Training ==============") + epoch = 10 + for step in range(epoch): + for inputs in dataset_helper: + output = net(*inputs) + print("epoch: {0}/{1}, losses: {2}".format(step + 1, epoch, output.asnumpy(), flush=True)) +``` + +> 示例中用到的MNIST数据集的获取方法,可以参照[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html)的下载数据集部分,下同。 + +输出如下: +```python +epoch: 1/10, losses: 2.294034719467163 +epoch: 2/10, losses: 2.3150298595428467 +epoch: 3/10, losses: 
2.3107073307037354 +epoch: 4/10, losses: 2.3155436515808105 +epoch: 5/10, losses: 2.28973388671875 +epoch: 6/10, losses: 2.3108928203582764 +epoch: 7/10, losses: 2.293713092803955 +epoch: 8/10, losses: 2.29837703704834 +epoch: 9/10, losses: 2.305952548980713 +epoch: 10/10, losses: 1.4282708168029785 +``` + +> 典型的使用场景是梯度累积,详细查看[梯度累积](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/gradient_accumulation.html)。 + +## 边训练边推理 +对于某些数据量较大、训练时间较长的复杂网络,为了能掌握训练的不同阶段模型精度的指标变化情况,可以通过边训练边推理的方式跟踪精度的变化情况。具体可以参考[同步训练和验证模型](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/synchronization_training_and_evaluation.html)。 + +## on-device执行 +当前MindSpore支持的后端包括Ascend、GPU、CPU,所谓On Device中的Device通常指Ascend(昇腾)AI处理器。 + +昇腾芯片上集成了AICORE、AICPU和CPU。其中,AICORE负责大型Tensor Vector运算,AICPU负责标量运算,CPU负责逻辑控制和任务分发。 + +Host侧CPU负责将图或算子下发到昇腾芯片。昇腾芯片由于具备了运算、逻辑控制和任务分发的功能,所以不需要与Host侧的CPU进行频繁的交互,只需要将计算完的最终结果返回给Host侧,实现整图下沉到Device执行,避免Host-Device频繁交互,减小了开销。 + +以下是Device的主要组成结构: +- 片上32G内存:5G(parameter) + 26G(feature map) + 1G(HCCL) +- 多流水线并行:6条流水线 +- AICORE&带宽:32Cores、读写带宽128GBps +- 通信协议:HCCS、PCIe4.0、RoCEv2 + +### 计算图下沉 +计算图整图下沉到Device上执行,减少Host-Device交互开销。可以结合循环下沉实现多个Step下沉,进一步减少Host和Device的交互次数。 + +循环下沉是在On Device执行的基础上的优化,目的是进一步减少Host侧和Device侧之间的交互次数。通常情况下,每个step都返回一个结果,循环下沉是控制每隔多少个step返回一次结果。 + +默认配置下是每一个epoch返回一次结果,这样每个epoch里,Host侧和Device侧只需要进行一次数据交互。 + +也可以结合`train`接口的`dataset_sink_mode`和`sink_size`控制每个epoch的下沉数据量。 + +### 数据下沉 +`Model`的`train`接口参数`dataset_sink_mode`可以控制数据是否下沉。`dataset_sink_mode`为True表示数据下沉,否则为非下沉。所谓下沉即数据通过通道直接传送到Device上。 + +dataset_sink_mode参数可以配合`sink_size`控制每个`epoch`下沉的数据量大小。当`dataset_sink_mode`设置为True,即数据下沉模式时: + +如果`sink_size`为默认值-1,则每一个`epoch`下沉的数据量为原始的整个数据集大小; + +如果`sink_size`>0,此时原始数据集可以被无限次遍历,每个`epoch`下沉`sink_size`大小的数据量,下一个`epoch`继续从上次遍历的结束位置继续遍历。 + +下沉的总数据量由`epoch`和`sink_size`两个变量共同控制,即总数据量=`epoch`*`sink_size`。 + +代码样例如下: +```python +import os + +import mindspore.dataset as ds +import mindspore.dataset.transforms.c_transforms as CT +import mindspore.dataset.vision.c_transforms as CV +import mindspore.nn as nn +from mindspore import context +from mindspore.common import dtype as mstype +from mindspore.common.initializer import TruncatedNormal +from mindspore.dataset.vision import Inter +from mindspore.nn.metrics import Accuracy +from mindspore.ops import operations as P +from mindspore.train import Model +from mindspore.train.callback import LossMonitor + + +def create_dataset(data_path, batch_size=32, repeat_size=1, + num_parallel_workers=1): + """ + create dataset for train or test + """ + # define dataset + mnist_ds = ds.MnistDataset(data_path) + + resize_height, resize_width = 32, 32 + rescale = 1.0 / 255.0 + shift = 0.0 + rescale_nml = 1 / 0.3081 + shift_nml = -1 * 0.1307 / 0.3081 + + # define map operations + resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode + rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) + rescale_op = CV.Rescale(rescale, shift) + hwc2chw_op = CV.HWC2CHW() + type_cast_op = CT.TypeCast(mstype.int32) + + # apply map operations on images + mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers) + mnist_ds = 
mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers) + + # apply DatasetOps + buffer_size = 10000 + mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script + mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True) + mnist_ds = mnist_ds.repeat(repeat_size) + + return mnist_ds + + +def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): + """weight initial for conv layer""" + weight = weight_variable() + return nn.Conv2d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + weight_init=weight, has_bias=False, pad_mode="valid") + + +def fc_with_initialize(input_channels, out_channels): + """weight initial for fc layer""" + weight = weight_variable() + bias = weight_variable() + return nn.Dense(input_channels, out_channels, weight, bias) + + +def weight_variable(): + """weight initial""" + return TruncatedNormal(0.02) + + +class LeNet5(nn.Cell): + """ + Lenet network + Args: + num_class (int): Num classes. Default: 10. + + Returns: + Tensor, output tensor + + Examples: + >>> LeNet(num_class=10) + """ + + def __init__(self, num_class=10): + super(LeNet5, self).__init__() + self.num_class = num_class + self.batch_size = 32 + self.conv1 = conv(1, 6, 5) + self.conv2 = conv(6, 16, 5) + self.fc1 = fc_with_initialize(16 * 5 * 5, 120) + self.fc2 = fc_with_initialize(120, 84) + self.fc3 = fc_with_initialize(84, self.num_class) + self.relu = nn.ReLU() + self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) + self.reshape = P.Reshape() + + def construct(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.conv2(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.reshape(x, (self.batch_size, -1)) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.relu(x) + x = self.fc3(x) + return x + + +if __name__ == "__main__": + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + ds_train = create_dataset(os.path.join("/home/workspace/mindspore_dataset/MNIST_Data/", "train"), 32) + + network = LeNet5(10) + net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") + net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) + model = Model(network, net_loss, net_opt) + + print("============== Starting Training ==============") + model.train(epoch=10, train_dataset=ds_train, callbacks=[LossMonitor()], dataset_sink_mode=True, sink_size=1000) +``` + +`batch_size`为32的情况下,数据集的大小为1875,当`sink_size`设置为1000时,表示每个`epoch`下沉1000个batch的数据,下沉次数为`epoch`=10,下沉的总数据量为:`epoch`*`sink_size`=10000。 + +输出如下: +```python +epoch: 1 step: 1000, loss is 0.5399815 +epoch: 2 step: 1000, loss is 0.033433747 +epoch: 3 step: 1000, loss is 0.054761313 +epoch: 4 step: 1000, loss is 0.007882872 +epoch: 5 step: 1000, loss is 0.00658499 +epoch: 6 step: 1000, loss is 0.0413095 +epoch: 7 step: 1000, loss is 0.13373856 +epoch: 8 step: 1000, loss is 0.015793817 +epoch: 9 step: 1000, loss is 0.00017951085 +epoch: 10 step: 1000, loss is 0.01490275 +``` + +> `dataset_sink_mode`为False时,`sink_size`参数设置无效。 \ No newline at end of file diff --git a/api/source_zh_cn/programming_guide/type.md b/api/source_zh_cn/programming_guide/type.md deleted file mode 100644 index 3ccdb560386cb1a9fa71fd8dc6e724f2ca135662..0000000000000000000000000000000000000000 --- a/api/source_zh_cn/programming_guide/type.md +++ /dev/null @@ -1,54 +0,0 @@ -# 数据类型 - - - -- [数据类型](#数据类型) - - [概述](#概述) - - [操作接口](#操作接口) - - - - - - -## 概述 - 
-MindSpore张量支持不同的数据类型,有`int8`、`int16`、`int32`、`int64`、`uint8`、`uint16`、`uint32`、`uint64`、 -`float16`、`float32`、`float64`、`bool_`, 与NumPy的数据类型一一对应,Python里的`int`数会被转换为定义的int64进行运算, -Python里的`float`数会被转换为定义的`float32`进行运算。 - -## 操作接口 -- `dtype_to_nptype` - - 可通过该接口将MindSpore的数据类型转换为NumPy对应的数据类型。 - -- `dtype_to_pytype` - - 可通过该接口将MindSpore的数据类型转换为Python对应的内置数据类型。 - - -- `pytype_to_dtype` - - 可通过该接口将Python内置的数据类型转换为MindSpore对应的数据类型。 - -示例如下: - -``` -from mindspore import dtype as mstype - -np_type = mstype.dtype_to_nptype(mstype.int32) -ms_type = mstype.pytype_to_dtype(int) -py_type = mstype.dtype_to_pytype(mstype.float64) - -print(np_type) -print(ms_type) -print(py_type) -``` - -输出如下: - -``` - -Int64 - -``` diff --git a/api/source_zh_cn/programming_guide/user_defined.rst b/api/source_zh_cn/programming_guide/user_defined.rst new file mode 100644 index 0000000000000000000000000000000000000000..5fc891eb186746f7f138362cf5e38ae041542c19 --- /dev/null +++ b/api/source_zh_cn/programming_guide/user_defined.rst @@ -0,0 +1,9 @@ +自定义 +=========== + +.. toctree:: + :maxdepth: 1 + + 自定义TBE算子 + 自定义GPU算子 + 自定义CPU算子 \ No newline at end of file diff --git a/docs/source_en/_static/logo_source.png b/docs/source_en/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/docs/source_en/_static/logo_source.png and b/docs/source_en/_static/logo_source.png differ diff --git a/docs/source_en/constraints_on_network_construction.md b/docs/source_en/constraints_on_network_construction.md index 2da31582ec511af4c51e499d81e350b0b4d91797..1272231355aee6ce8f69d75ff5b0cd45e0071c62 100644 --- a/docs/source_en/constraints_on_network_construction.md +++ b/docs/source_en/constraints_on_network_construction.md @@ -5,23 +5,23 @@ - [Constraints on Network Construction Using Python](#constraints-on-network-construction-using-python) - - [Overview](#overview) - - [Syntax Constraints](#syntax-constraints) - - [Supported Python Data Types](#supported-python-data-types) - - [MindSpore Extended Data Type](#mindspore-extended-data-type) - - [Expression Types](#expression-types) - - [Statement Types](#statement-types) - - [System Functions/Class](#system-functionsclasses) - - [Function Parameters](#function-parameters) - - [Operators](#operators) - - [Index operation](#index-operation) - - [Unsupported Syntax](#unsupported-syntax) - - [Network Definition Constraints](#network-definition-constraints) - - [Instance Types on the Entire Network](#instance-types-on-the-entire-network) - - [Network Input Type](#network-input-type) - - [Network Graph Optimization](#network-graph-optimization) - - [Network Construction Components](#network-construction-components) - - [Other Constraints](#other-constraints) + - [Overview](#overview) + - [Syntax Constraints](#syntax-constraints) + - [Supported Python Data Types](#supported-python-data-types) + - [MindSpore Extended Data Type](#mindspore-extended-data-type) + - [Expression Types](#expression-types) + - [Statement Types](#statement-types) + - [System Functions/Classes](#system-functionsclasses) + - [Function Parameters](#function-parameters) + - [Operators](#operators) + - [Index operation](#index-operation) + - [Unsupported Syntax](#unsupported-syntax) + - [Network Definition Constraints](#network-definition-constraints) + - [Instance Types on the Entire Network](#instance-types-on-the-entire-network) + - [Network Input Type](#network-input-type) + - [Network Graph Optimization](#network-graph-optimization) + - [Network 
Construction Components](#network-construction-components) + - [Other Constraints](#other-constraints) @@ -226,40 +226,67 @@ Currently, the following syntax is not supported in network constructors: | Member function of a `Cell` instance | Member functions of other classes in the construct function of Cell can be called. | Function | Custom Python functions and system functions listed in the preceding content. | Dataclass instance | Class decorated with @dataclass. -| Primitive operator |[mindspore/ops/operations/*](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html). -| Composite operator |[mindspore/ops/composite/*](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.composite.html). +| Primitive operator |[mindspore/ops/operations/*](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html). +| Composite operator |[mindspore/ops/composite/*](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html). | Operator generated by constexpr |Uses the value generated by [@constexpr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.constexpr) to calculate operators. ### Other Constraints -Input parameters of the construct function on the entire network and parameters of functions modified by the ms_function decorator are generalized during the graph compilation. Therefore, they cannot be transferred to operators as constant input. Therefore, in graph mode, the parameter passed to the entry network can only be Tensor. As shown in the following example: -* The following is an example of incorrect input: - ```python - class ExpandDimsTest(Cell): - def __init__(self): - super(ExpandDimsTest, self).__init__() - self.expandDims = P.ExpandDims() - - def construct(self, input_x, input_axis): - return self.expandDims(input_x, input_axis) - expand_dim = ExpandDimsTest() - input_x = Tensor(np.random.randn(2,2,2,2).astype(np.float32)) - expand_dim(input_x, 0) - ``` - In the example, ExpandDimsTest is a single-operator network with two inputs: input_x and input_axis. The second input of the ExpandDims operator must be a constant. This is because input_axis is required when the output dimension of the ExpandDims operator is deduced during graph compilation. As the network parameter input, the value of input_axis is generalized into a variable and cannot be determined. As a result, the output dimension of the operator cannot be deduced, causing the graph compilation failure. Therefore, the input required by deduction in the graph compilation phase must be a constant. In APIs, the "constant input is needed" is marked for parameters that require constant input of these operators. +1. Input parameters of the `construct` function on the entire network and parameters of functions modified by the `ms_function` decorator are generalized during the graph compilation and cannot be passed to operators as constant input. Therefore, in graph mode, the parameter passed to the entry network can only be `Tensor`. 
As shown in the following example: + + * The following is an example of incorrect input: + ```python + class ExpandDimsTest(Cell): + def __init__(self): + super(ExpandDimsTest, self).__init__() + self.expandDims = P.ExpandDims() + + def construct(self, input_x, input_axis): + return self.expandDims(input_x, input_axis) + expand_dim = ExpandDimsTest() + input_x = Tensor(np.random.randn(2,2,2,2).astype(np.float32)) + expand_dim(input_x, 0) + ``` + In the example, `ExpandDimsTest` is a single-operator network with two inputs: `input_x` and `input_axis`. The second input of the `ExpandDims` operator must be a constant. This is because `input_axis` is required when the output dimension of the `ExpandDims` operator is deduced during graph compilation. As the network parameter input, the value of `input_axis` is generalized into a variable and cannot be determined. As a result, the output dimension of the operator cannot be deduced, causing the graph compilation failure. Therefore, the input required by deduction in the graph compilation phase must be a constant. In the API, the parameters of this type of operator that require constant input will be explained, marked `const input is needed`. + + * Directly enter the needed value or a member variable in a class for the constant input of the operator in the construct function. The following is an example of correct input: + ```python + class ExpandDimsTest(Cell): + def __init__(self, axis): + super(ExpandDimsTest, self).__init__() + self.expandDims = P.ExpandDims() + self.axis = axis + + def construct(self, input_x): + return self.expandDims(input_x, self.axis) + axis = 0 + expand_dim = ExpandDimsTest(axis) + input_x = Tensor(np.random.randn(2,2,2,2).astype(np.float32)) + expand_dim(input_x) + ``` + +2. It is not allowed to modify `non-Parameter` type data members of the network. Examples are as follows: -* Directly enter the needed value or a member variable in a class for the constant input of the operator in the construct function. The following is an example of correct input: - ```python - class ExpandDimsTest(Cell): - def __init__(self, axis): - super(ExpandDimsTest, self).__init__() - self.expandDims = P.ExpandDims() - self.axis = axis - - def construct(self, input_x): - return self.expandDims(input_x, self.axis) - axis = 0 - expand_dim = ExpandDimsTest(axis) - input_x = Tensor(np.random.randn(2,2,2,2).astype(np.float32)) - expand_dim(input_x) ``` + class Net(Cell): + def __init__(self): + super(Net, self).__init__() + self.num = 2 + self.par = Parameter(Tensor(np.ones((2, 3, 4))), name="par") + + def construct(self, x, y): + return x + y + ``` + In the network defined above, `self.num` is not a `Parameter` and cannot be modified, but `self.par` is a `Parameter` and can be modified. + +3. When an undefined class member is used in the `construct` function, it will be treated as `None` instead of throwing `AttributeError` like the Python interpreter. Examples are as follows: + + ``` + class Net(Cell): + def __init__(self): + super(Net, self).__init__() + + def construct(self, x): + return x + self.y + ``` + In the network defined above, the undefined class member `self.y` is used in `construct`, and `self.y` will be treated as `None`. 
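+
+As a complement to constraint 2 above, a `Parameter` member can still be updated inside `construct` by going through an explicit operator rather than a Python assignment. The following is a minimal sketch; it assumes the `Assign` operator in `mindspore.ops.operations`, and the `CounterNet` class itself is a hypothetical illustration:
+
+```python
+import numpy as np
+
+from mindspore.common.parameter import Parameter
+from mindspore.common.tensor import Tensor
+from mindspore.nn import Cell
+from mindspore.ops import operations as P
+
+class CounterNet(Cell):
+    def __init__(self):
+        super(CounterNet, self).__init__()
+        # a Parameter member may be updated; a plain Python member such as an int may not
+        self.count = Parameter(Tensor(np.zeros((1,)).astype(np.float32)), name="count")
+        self.assign = P.Assign()
+
+    def construct(self, x):
+        # update the Parameter through Assign instead of rebinding self.count
+        updated = self.assign(self.count, self.count + 1.0)
+        return x + updated
+```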
\ No newline at end of file diff --git a/docs/source_en/design.rst b/docs/source_en/design.rst index 359add5edcdd0d373da5eb99037c88cf5bfd99e7..4367b213d1d0cdb56c671a7dc23085e79043490f 100644 --- a/docs/source_en/design.rst +++ b/docs/source_en/design.rst @@ -8,4 +8,6 @@ Design design/mindspore/ir design/mindinsight/training_visual_design design/mindinsight/graph_visual_design - design/mindinsight/tensor_visual_design \ No newline at end of file + design/mindinsight/tensor_visual_design + design/mindarmour/differential_privacy_design + design/mindarmour/fuzzer_design diff --git a/docs/source_en/design/mindarmour/differential_privacy_design.md b/docs/source_en/design/mindarmour/differential_privacy_design.md new file mode 100644 index 0000000000000000000000000000000000000000..71f4b5e5cf5d9730d5030e968e7cf28b92907b0b --- /dev/null +++ b/docs/source_en/design/mindarmour/differential_privacy_design.md @@ -0,0 +1,71 @@ +# Differential Privacy + +`Ascend` `Model Development` `Model Optimization` `Framework Development` `Enterprise` `Expert` `Contributor` + + + +- [Differential Privacy](#differential-privacy) + - [Overall Design](#overall-design) + - [DP Optimizer](#dp-optimizer) + - [DP Mechanisms](#dp-mechanisms) + - [Monitor](#monitor) + - [Code Implementation](#code-implementation) + - [References](#references) + + + + + +## Overall Design + +The Differential-Privacy module of MindArmour implements the differential privacy training capability. Model training consists of building training dataset, computing loss, computing gradient, and updating model parameters. Currently, the differential privacy training of MindArmour focuses on the gradient computing process and uses the corresponding algorithm to clip and add noise to the gradient. In this way, user data privacy is protected. + +![dp_arch](./images/dp_arch.png) + +
Figure 1 Overall design of differential privacy
+ +Figure 1 shows an overall design of differential privacy training, and mainly including differential privacy noise mechanisms (DP mechanisms), a differential privacy optimizer (DP optimizer), and a privacy monitor. + + +### DP Optimizer + +DP optimizer inherits capabilities of the MindSpore optimizer and uses the DP mechanisms to scramble and protect gradients. Currently, MindArmour provides three types of DP optimizers: constant Gaussian optimizer, adaptive Gaussian optimizer, and adaptive clipping optimizer. Each type of DP optimizer adds differential privacy protection capabilities to common optimizers such as SGD and Momentum from different perspectives. + +* Constant Gaussian optimizer is a DP optimizer for non-adaptive Gaussian noise. The advantage is that the differential privacy budget ϵ can be strictly controlled. The disadvantage is that in the model training process, the noise amount added in each step is fixed. If the number of training steps is too large, the noise in the later phase of training makes the model convergence difficult, or even causes the performance to deteriorate greatly and the model availability to be poor. +* Adaptive Gaussian optimizer adaptively adjusts the standard deviation to adjust the Gaussian distribution noise. In the initial phase of model training, a large amount of noise is added. As the model gradually converges, the noise amount gradually decreases, and the impact of the noise on the model availability is reduced. A disadvantage of the adaptive Gaussian noise is that a differential privacy budget cannot be strictly controlled. +* Adaptive clipping optimizer is a DP optimizer that adaptively adjusts a clipping granularity. Gradient clipping is an important operation in differential privacy training. The adaptive clipping optimizer can control a ratio of gradient clipping to fluctuate within a given range and control the gradient clipping granularity during training steps. + +### DP Mechanisms + +The noise mechanism is a basis for building a differential privacy training capability. Different noise mechanisms meet requirements of different DP optimizers, including multiple mechanisms such as constant Gaussian distribution noise, adaptive Gaussian distribution noise, adaptive clipping Gaussian distribution noise, and Laplace distribution noise. + +### Monitor + +Monitor provides callback functions such as Rényi differential privacy (RDP) and zero-concentrated differential privacy (ZCDP) to monitor the differential privacy budget of the model. + +* ZCDP[2] + + ZCDP is a loose differential privacy definition. It uses the Rényi divergence to measure the distribution difference of random functions on adjacent datasets. + +* RDP[3] + + RDP is a more general differential privacy definition based on the Rényi divergence. It uses the Rényi divergence to measure the distribution difference between two adjacent datasets. + + +Compared with traditional differential privacy, ZCDP and RDP provide stricter privacy budget upper bound guarantee. + + +## Code Implementation + +* [mechanisms.py](https://gitee.com/mindspore/mindarmour/blob/master/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py): implements the noise generation mechanism required by differential privacy training, including simple Gaussian noise, adaptive Gaussian noise, and adaptive clipping Gaussian noise. 
+* [optimizer.py](https://gitee.com/mindspore/mindarmour/blob/master/mindarmour/privacy/diff_privacy/optimizer/optimizer.py): implements the fundamental logic of using the noise generation mechanism to add noise during backward propagation.
+* [monitor.py](https://gitee.com/mindspore/mindarmour/blob/master/mindarmour/privacy/diff_privacy/monitor/monitor.py): implements the callback function for computing the differential privacy budget. During model training, the current differential privacy budget is returned.
+* [model.py](https://gitee.com/mindspore/mindarmour/blob/master/mindarmour/privacy/diff_privacy/train/model.py): implements the logic of computing the loss and gradient as well as the gradient truncation logic of differential privacy training, which is the entry for users to use the differential privacy training capability.
+
+## References
+
+[1] Dwork, Cynthia, and Jing Lei. "Differential privacy and robust statistics." *Proceedings of the forty-first annual ACM symposium on Theory of computing*. 2009.
+
+[2] Lee, Jaewoo, and Daniel Kifer. "Concentrated differentially private gradient descent with adaptive per-iteration privacy budget." *Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining*. 2018.
+
+[3] Mironov, Ilya. "Rényi differential privacy." *2017 IEEE 30th Computer Security Foundations Symposium (CSF)*. IEEE, 2017.
diff --git a/docs/source_en/design/mindarmour/fuzzer_design.md b/docs/source_en/design/mindarmour/fuzzer_design.md
new file mode 100644
index 0000000000000000000000000000000000000000..2a41c2342eb3ed7fe13804890f7d97f491e2f20e
--- /dev/null
+++ b/docs/source_en/design/mindarmour/fuzzer_design.md
@@ -0,0 +1,74 @@
+# AI Model Security Test
+
+`Linux` `Ascend` `GPU` `CPU` `Data Preparation` `Model Development` `Model Training` `Model Optimization` `Enterprise` `Expert`
+
+
+
+- [AI Model Security Test](#ai-model-security-test)
+    - [Background](#background)
+    - [Fuzz Testing Design](#fuzz-testing-design)
+    - [Fuzz Testing Process](#fuzz-testing-process)
+    - [Code Implementation](#code-implementation)
+    - [References](#references)
+
+
+
+
+
+## Background
+
+Different from [fuzzing security tests for traditional programs](https://zhuanlan.zhihu.com/p/43432370), MindArmour provides the AI model security test module fuzz_testing for deep neural networks. Based on neural network features, the concept of neuron coverage rate [1] is introduced to guide fuzz testing: samples are generated in the direction of increasing neuron coverage rate, so that more neurons are activated by the inputs and the distribution range of neuron values becomes wider. This fully tests the DNN and explores the output results and error behavior of different types of models.
+
+## Fuzz Testing Design
+
+The following figure shows the security test design of the AI model.
+
+![fuzz_architecture](./images/fuzz_architecture.png)
+
+At the user interface layer, users need to provide the original dataset `DataSet`, the tested model `Model`, and the Fuzzer parameter `Fuzzer configuration`. After fuzzing the model and data, the Fuzzer module returns the security report `Security Report`.
+
+The fuzz testing architecture consists of three modules:
+
+1. Natural Threat/Adversarial Example Generator:
+
+   Randomly select a mutation method to mutate seed data and generate multiple variants. Mutation policies supporting multiple samples include:
+
+   - Image affine transformation methods: Translate, Rotate, Scale, and Shear.
+   - Methods based on image pixel value changes: Contrast, Brightness, Blur, and Noise.
+   - Methods for generating adversarial examples based on white-box and black-box attacks: FGSM, PGD, and MDIIM.
+
+2. Fuzzer Module:
+
+   Perform fuzz testing on the mutated data to observe the change of the neuron coverage rate. If the generated data increases the neuron coverage rate, add the data to the mutated seed queue for the next round of data mutation. Currently, the following neuron coverage metrics are supported: KMNC, NBC, and SNAC [2].
+
+3. Evaluation:
+
+   Evaluate the fuzz testing effect, the quality of the generated data, and the strength of the mutation methods. Five metrics of three types are supported, including the general evaluation metric (accuracy), neuron coverage rate metrics (kmnc, nbc, and snac), and the adversarial attack evaluation metric (attack_success_rate).
+
+## Fuzz Testing Process
+
+![fuzz_process](./images/fuzz_process.png)
+
+The fuzz testing process is as follows:
+
+1. Select seed A from the seed queue according to the policy.
+2. Randomly select a mutation policy to mutate seed A and generate multiple variants A1, A2, ...
+3. Use the target model to predict the variants. If the semantics of a variant are consistent with the seed, the variant enters the Fuzzed Tests set.
+4. If the prediction is correct, use the neuron coverage metric for analysis.
+5. If a variant increases the coverage rate, place the variant in the seed queue for the next round of mutation.
+
+Through multiple rounds of mutations, you can obtain a series of variant data in the Fuzzed Tests, perform further analysis, and provide security reports from multiple perspectives. You can use them to deeply analyze defects of the neural network model and enhance the model to improve its universality and robustness. A minimal sketch of this coverage-guided loop is given after the references below.
+
+## Code Implementation
+
+1. [fuzzing.py](https://gitee.com/mindspore/mindarmour/blob/master/mindarmour/fuzz_testing/fuzzing.py): overall fuzz testing process.
+2. [model_coverage_metrics.py](https://gitee.com/mindspore/mindarmour/blob/master/mindarmour/fuzz_testing/model_coverage_metrics.py): neuron coverage rate metrics, including KMNC, NBC, and SNAC.
+3. [image_transform.py](https://gitee.com/mindspore/mindarmour/blob/master/mindarmour/fuzz_testing/image_transform.py): image mutation methods, including methods based on image pixel value changes and affine transformation methods.
+4. [adversarial attacks](https://gitee.com/mindspore/mindarmour/tree/master/mindarmour/adv_robustness/attacks): methods for generating adversarial examples based on white-box and black-box attacks.
+
+## References
+
+[1] Pei K, Cao Y, Yang J, et al. Deepxplore: Automated whitebox testing of deep learning systems[C]//Proceedings of the 26th Symposium on Operating Systems Principles. ACM, 2017: 1-18.
+
+[2] Ma L, Juefei-Xu F, Zhang F, et al. Deepgauge: Multi-granularity testing criteria for deep learning systems[C]//Proceedings of the 33rd ACM/IEEE International Conference on Automated Software Engineering. ACM, 2018: 120-131.
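+
+The following NumPy-only sketch illustrates the coverage-guided loop described in the Fuzz Testing Process section. The one-layer model, the mutation method, and the coverage metric are toy stand-ins rather than MindArmour APIs, and the semantic and prediction checks of steps 3 and 4 are omitted for brevity:
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+W = rng.normal(size=(8, 4))  # toy one-layer "model" with 4 ReLU neurons
+covered = set()              # indices of neurons activated so far
+
+def coverage_gain(x):
+    """Return how many previously uncovered neurons this input activates."""
+    active = {i for i, a in enumerate(np.maximum(W.T @ x, 0.0)) if a > 0}
+    gain = len(active - covered)
+    covered.update(active)
+    return gain
+
+def mutate(seed):
+    # stand-in for the natural-perturbation and adversarial mutation strategies
+    return [seed + rng.normal(scale=0.1, size=seed.shape) for _ in range(3)]
+
+seed_queue = [rng.normal(size=8) for _ in range(4)]
+fuzzed_tests = []
+for _ in range(20):                                    # fuzzing rounds
+    seed = seed_queue[rng.integers(len(seed_queue))]   # 1. select a seed
+    for variant in mutate(seed):                       # 2. mutate it into variants
+        fuzzed_tests.append(variant)                   # 3./4. checks omitted in this toy
+        if coverage_gain(variant) > 0:                 # 5. re-queue coverage-raising variants
+            seed_queue.append(variant)
+
+print(len(fuzzed_tests), "fuzzed tests;", len(covered), "of", W.shape[1], "neurons covered")
+```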
\ No newline at end of file diff --git a/docs/source_en/design/mindarmour/images/dp_arch.png b/docs/source_en/design/mindarmour/images/dp_arch.png new file mode 100644 index 0000000000000000000000000000000000000000..c903e4e2acece6c6de882852dc3570126b6fcb05 Binary files /dev/null and b/docs/source_en/design/mindarmour/images/dp_arch.png differ diff --git a/docs/source_en/design/mindarmour/images/fuzz_architecture.png b/docs/source_en/design/mindarmour/images/fuzz_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..d4e8b89bd9a9f4844c59790f5b2114d1d477f927 Binary files /dev/null and b/docs/source_en/design/mindarmour/images/fuzz_architecture.png differ diff --git a/docs/source_en/design/mindarmour/images/fuzz_process.png b/docs/source_en/design/mindarmour/images/fuzz_process.png new file mode 100644 index 0000000000000000000000000000000000000000..2e04347f7cfb0819562578a6be1e91b5cc7ce9d5 Binary files /dev/null and b/docs/source_en/design/mindarmour/images/fuzz_process.png differ diff --git a/docs/source_en/design/mindinsight/images/analyser_class_profiler.png b/docs/source_en/design/mindinsight/images/analyser_class_profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..3f785786eb8652e8d1cfc09795e48895da80eef9 Binary files /dev/null and b/docs/source_en/design/mindinsight/images/analyser_class_profiler.png differ diff --git a/docs/source_en/design/mindinsight/images/context_profiler.png b/docs/source_en/design/mindinsight/images/context_profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..f11782ebfe473ddfaec9736055c9012a5129a26f Binary files /dev/null and b/docs/source_en/design/mindinsight/images/context_profiler.png differ diff --git a/docs/source_en/design/mindinsight/images/graph_visual_main.png b/docs/source_en/design/mindinsight/images/graph_visual_main.png index 55ca7d7183c818a15b69a3a6ee2c4ef29655460c..0bc13636b5c84952978469c652c38500e6d34f43 100644 Binary files a/docs/source_en/design/mindinsight/images/graph_visual_main.png and b/docs/source_en/design/mindinsight/images/graph_visual_main.png differ diff --git a/docs/source_en/design/mindinsight/images/graph_visual_right_side.png b/docs/source_en/design/mindinsight/images/graph_visual_right_side.png index 90e8d868b5ff9d68ae14d55d8f3ff188db412556..e138bcfbbfda77ff3468442a3e5e169dcd7fed03 100644 Binary files a/docs/source_en/design/mindinsight/images/graph_visual_right_side.png and b/docs/source_en/design/mindinsight/images/graph_visual_right_side.png differ diff --git a/docs/source_en/design/mindinsight/images/module_profiler.png b/docs/source_en/design/mindinsight/images/module_profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..f30582b53e046a37e5d97450b148d4e665ba174d Binary files /dev/null and b/docs/source_en/design/mindinsight/images/module_profiler.png differ diff --git a/docs/source_en/design/mindinsight/images/parser_module_profiler.png b/docs/source_en/design/mindinsight/images/parser_module_profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..8ef3c927013517e341fbe44c7f96f0be05536b80 Binary files /dev/null and b/docs/source_en/design/mindinsight/images/parser_module_profiler.png differ diff --git a/docs/source_en/design/mindinsight/images/proposer_class_profiler.png b/docs/source_en/design/mindinsight/images/proposer_class_profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..3e2d4363e92821b05cafc330573c981a1ab99bbf Binary files /dev/null and 
b/docs/source_en/design/mindinsight/images/proposer_class_profiler.png differ diff --git a/docs/source_en/design/mindinsight/images/proposer_module_profiler.png b/docs/source_en/design/mindinsight/images/proposer_module_profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..909dd42c89715d49a11c35764d84aab231b91fb4 Binary files /dev/null and b/docs/source_en/design/mindinsight/images/proposer_module_profiler.png differ diff --git a/docs/source_en/design/mindinsight/images/tensor_table.png b/docs/source_en/design/mindinsight/images/tensor_table.png index 725bd9f8481826d682b593c2224a766854e9b4f8..f2d1ad90b3930f71fa4014d94ae52df909bea434 100644 Binary files a/docs/source_en/design/mindinsight/images/tensor_table.png and b/docs/source_en/design/mindinsight/images/tensor_table.png differ diff --git a/docs/source_en/design/mindinsight/images/time_order_profiler.png b/docs/source_en/design/mindinsight/images/time_order_profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..35eef99934ce9d743ebe0294e18ff0b5ea40abab Binary files /dev/null and b/docs/source_en/design/mindinsight/images/time_order_profiler.png differ diff --git a/docs/source_en/design/mindinsight/profiler_design.md b/docs/source_en/design/mindinsight/profiler_design.md new file mode 100644 index 0000000000000000000000000000000000000000..e18497237388c37ddada8552fa01844026926fa6 --- /dev/null +++ b/docs/source_en/design/mindinsight/profiler_design.md @@ -0,0 +1,175 @@ +# Profiler Design Document + +`Ascend` `GPU` `Model Development` `Model Optimization` `Framework Development` `Intermediate` `Expert` `Contributor` + + + +- [Profiler Design Document](#profiler-design-document) + - [Background](#background) + - [Profiler Architecture Design](#profiler-architecture-design) + - [Context](#context) + - [Module Structure](#module-structure) + - [Internal Module Interaction](#internal-module-interaction) + - [Sub-Module Design](#sub-module-design) + - [ProfilerAPI and Controller](#profilerapi-and-controller) + - [Description](#description) + - [Design](#design) + - [Parser](#parser) + - [Description](#description-1) + - [Design](#design-1) + - [Analyser](#analyser) + - [Description](#description-2) + - [Design](#design-2) + - [Proposer](#proposer) + - [Description](#description-3) + - [Design](#design-3) + + + + + +## Background + +To support model development and performance debugging in MindSpore, an easy-to-use profiling tool is required to intuitively display the performance information of each dimension of a network model, provide users with easy-to-use and abundant profiling functions, and help users quickly locate network performance faults. + +## Profiler Architecture Design +The Profiler architecture design is introduced from three aspects: the overall context in which Profiler interacts with other components; the internal structure of Profiler, including its modules and layers; and the calling relationships between the modules. + +### Context + +Profiler is a part of the MindSpore debugging and optimization tool. The following figure shows the tool context. + +![context_profiler.png](./images/context_profiler.png) + +Figure 1 Context relationship + +As shown in the preceding figure, the interaction between the Profiler and other components is as follows: + +1. In the training script, MindSpore Profiler is called to send the command to the MindSpore ada communication module to start performance data collection.
Finally, ada generates the original performance data. + +2. In the user script, MindSpore Profiler parses the original data and generates the intermediate data results in the specified folder. + +3. MindInsight Profiler reads the intermediate data and provides the visualized Profiler function for users. +### Module Structure + +Modules are classified into the following layers: + +![module_profiler.png](./images/module_profiler.png) + +Figure 2 Relationships between modules at different layers + + +Module functions are as follows: +1. ProfilerAPI is the calling entry provided to user code, including the performance collection startup API and the analysis API. +2. Controller is a module at a layer lower than that of ProfilerAPI. It is called by the startup API of ProfilerAPI to start or stop the performance collection function. The original data is written to a fixed position by ada. +3. Parser is a module for parsing original performance data, which is collected on the device and cannot be directly understood by users. Parser parses, combines, and converts the data to generate intermediate results that can be understood by users and analyzed by upper layers. +4. Analyser obtains the intermediate results parsed by the lower-layer Parser, encapsulates, filters, and sorts the intermediate results, and returns the information to the upper-layer ProfilerAPI and RESTful. +5. RESTful calls the common API provided by the backend Analyser to obtain the target data and connects to the frontend. + +### Internal Module Interaction +Users can interact with the internal modules through either the API or RESTful. The following uses the API as an example: + +![time_order_profiler.png](./images/time_order_profiler.png) + +Figure 3 Module interaction + +The interaction process of each module is as follows: + +1. ProfilerAPI calls the control function of the lower-layer Controller to control the lower-layer collection module to collect performance information. Currently, the collection module (ada) receives commands in resident process mode and independently collects performance information. + +2. After the training is complete, users call the analysis API of ProfilerAPI. + +3. The analysis API of ProfilerAPI uses the Parser module to parse performance data, generates intermediate results, calls the Analyser module to analyze the results, and returns the information to users. + +## Sub-Module Design +### ProfilerAPI and Controller + +#### Description +ProfilerAPI provides an entry API in the training script for users to start performance collection and analyze performance data. +ProfilerAPI delivers commands through Controller to control the startup of ada. + +#### Design +ProfilerAPI belongs to the API layer of the upper-layer application and is integrated into the training script. Its function is divided into two parts: + +- Before training, call the bottom-layer Controller API to deliver a command to start a profiling task. + +- After training, call the bottom-layer Controller API to deliver commands to stop the profiling task, and call the Analyser and Parser APIs to parse data files and generate result data such as operator performance statistics and training trace statistics. + + +Controller provides an API for the upper layer, calls the API of the lower-layer performance collection module, and delivers commands for starting and stopping performance collection.
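+
+A hypothetical usage sketch of this two-phase design follows. The module path and constructor arguments are assumptions that vary across MindSpore versions; the point is the shape of the flow: construct the Profiler before training (start collection through Controller), then call the analysis API after training.
+
+```python
+from mindspore.profiler import Profiler  # module path is version-dependent (assumption)
+
+# Before training: ProfilerAPI delivers the start command through Controller.
+profiler = Profiler(output_path="./profiler_data")
+
+run_training()  # placeholder for the user's training job; ada collects raw data meanwhile
+
+# After training: stop collection, then parse the raw data and analyse the results.
+profiler.analyse()
+```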
+ +The generated original performance data includes: + +- `hwts.log.data.45.dev.profiler_default_tag` file: stores operator execution information, including the start and end of a task and the stream ID. +- `DATA_PREPROCESS.dev.AICPU` file: specifies the AI CPU operator execution time at each stage. +- `Framework.host.task_desc_info` file: stores the mapping between operator IDs and operator names, and the input and output information of each operator. +- `training_trace.46.dev.profiler_default_tag` file: stores the start and end time of each step and the time of the step interval, forward and backward propagation, and step tail. + +### Parser +#### Description +Parser is a module for parsing original performance data, which is collected on the device and cannot be directly understood by users. Parser parses, combines, and converts the data to generate intermediate results that can be understood by users and analyzed by upper layers. +#### Design +![parser_module_profiler.png](./images/parser_module_profiler.png) + +Figure 4 Parser module + +As shown in the preceding figure, there are HWTS Parser, AI CPU Parser, Framework Parser, and Training Trace Parser modules. Each module parses one type of original data to obtain an intermediate file that can be read by users. + +- HWTS Parser: parses the `hwts.log.data.45.dev.profiler_default_tag` file to obtain the task-based statistics of the device, such as the start and end of each task and the stream ID, which are used to compute the operator execution time. +- AI CPU Parser: parses the `DATA_PREPROCESS.dev.AICPU` file to obtain the AI CPU operator execution time at each stage. +- Framework Parser: parses the `Framework.host.task_desc_info` file to obtain the mapping between AI Core operators and tasks, and key operator information. +- Training Trace Parser: parses the `training_trace.46.dev.profiler_default_tag` file to analyze the time at each training stage. + +### Analyser + +#### Description +Analyser is used to filter, sort, query, and page the intermediate results generated at the parsing stage. + +#### Design + +This module parses the intermediate files generated by Parser, provides a general API for upper-layer data analysis, and returns the analyzed data to the upper layer for display. The various intermediate files have common points that can be abstracted. Therefore, the following figure shows the design of the Analyser class. + +![analyser_class_profiler.png](./images/analyser_class_profiler.png) + +Figure 5 Analyser class + +As shown in the preceding figure, multiple Analysers are implemented for the different contents to be queried. Filter, sorting, and pagination conditions can be defined for each Analyser. Each Analyser knows which intermediate files are required to merge, filter, and sort data. Analyser is associated with Parser only through the intermediate files generated by Parser, and no functions are called directly. In this way, Analyser and Parser are decoupled. + +Currently, there are two Analysers for operator information: + +- AicoreTypeAnalyser: filters the average information of each operator type. +- AicoreDetailAnalyser: filters the detailed average information of each operator. + +To hide the internal implementation of Analyser and facilitate calling, the simple factory pattern is used to obtain the specified Analyser through AnalyserFactory.
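+
+The simple factory described above can be sketched as follows. Apart from the `AnalyserFactory`, `AicoreTypeAnalyser`, and `AicoreDetailAnalyser` names taken from the text, the class layout, method names, and lookup keys are illustrative assumptions rather than the actual MindInsight code.
+
+```python
+class BaseAnalyser:
+    """Common merge/filter/sort/pagination logic over intermediate files."""
+    def __init__(self, profiling_dir):
+        self._profiling_dir = profiling_dir
+
+    def query(self, filter_condition=None, sort_condition=None):
+        raise NotImplementedError
+
+class AicoreTypeAnalyser(BaseAnalyser):
+    def query(self, filter_condition=None, sort_condition=None):
+        return []  # would load and filter the per-operator-type intermediate file
+
+class AicoreDetailAnalyser(BaseAnalyser):
+    def query(self, filter_condition=None, sort_condition=None):
+        return []  # would load and filter the per-operator intermediate file
+
+class AnalyserFactory:
+    """Simple factory: callers never reference the concrete classes."""
+    _analysers = {
+        "aicore_type": AicoreTypeAnalyser,
+        "aicore_detail": AicoreDetailAnalyser,
+    }
+
+    @classmethod
+    def instance(cls, analyser_type, profiling_dir):
+        return cls._analysers[analyser_type](profiling_dir)
+
+analyser = AnalyserFactory.instance("aicore_type", "./profiler_data")
+```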
+ + +### Proposer +#### Description +Proposer is the Profiler performance optimization suggestion module. Proposer calls the Analyser module to obtain performance data, analyzes the performance data based on optimization rules, and displays optimization suggestions for users through the UI and API. + +#### Design + +Modules are classified into the following layers: + +![proposer_module_profiler.png](./images/proposer_module_profiler.png) + +Figure 6 Proposer module + +As shown in the preceding figure: + +- Proposer provides an API for the upper-layer API and RESTful to call to obtain optimization suggestions. +- Proposer calls the Analyser API to obtain performance data and derives optimization suggestions based on optimization rules. +- Proposer calls the Analyser factory to obtain the Analyser object. + +You can call the query API of the Analyser object to obtain information, including the top N AICore, AICoreType, and AICpu operators sorted by time and the time information of each training trace stage. + +The following figure shows the module class design: + +![proposer_class_profiler.png](./images/proposer_class_profiler.png) + +Figure 7 Proposer class + +As shown in the preceding figure: + +- Proposers of various types inherit the abstract class Proposer and implement the analyze method. +- The API and CLI call the ProposerFactory to obtain the Proposer and call the Proposer.analyze function to obtain the optimization suggestions of each type of Proposer. \ No newline at end of file diff --git a/docs/source_en/design/mindinsight/tensor_visual_design.md b/docs/source_en/design/mindinsight/tensor_visual_design.md index ce3839d5b9affa269ccf802cf10a697412a82b78..f142f425963bb10ede2b84144bcaa6e4bcb6403d 100644 --- a/docs/source_en/design/mindinsight/tensor_visual_design.md +++ b/docs/source_en/design/mindinsight/tensor_visual_design.md @@ -44,7 +44,7 @@ Figure 1: Table view Figure 1 displays tensors recorded by a user in a form of a table. The following functions are included: -- The input boxes under the table display the tensor data of the current dimension. The colon (:) indicates all values of the current dimension. You can enter the corresponding index in the box (the meaning is the same as that of the Python index, and negative values are supported) or use `:` to query tensor data in a specific dimension. +- The input boxes under the table display the tensor data of the current dimension. The colon (`:`) indicates an index range in the current dimension, whose meaning is basically the same as that of a Python index. If no specific index is specified, it indicates all the values of the current dimension, and `2:5` indicates the values at indexes 2 to 5 (excluding 5). You can enter a specific index in the box or use an index range containing `:` to query tensor data in a specific dimension, as in the short example after this list. - Drag the thumb of the linear slider below the table to query the tensor data of a specific step.
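+
+As referenced above, the index-range semantics follow ordinary Python/NumPy slicing applied to each dimension:
+
+```python
+import numpy as np
+
+t = np.arange(24).reshape(6, 4)  # a small 2-D tensor
+
+print(t[:, 1])    # ':' selects all values of the first dimension
+print(t[2:5, 1])  # '2:5' selects indexes 2, 3, and 4 (5 excluded)
+```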
![tensor_histogram.png](./images/tensor_histogram.png) diff --git a/docs/source_en/design/mindspore/distributed_training_design.md b/docs/source_en/design/mindspore/distributed_training_design.md new file mode 100644 index 0000000000000000000000000000000000000000..14c13e4f3e90e4ee08a8acd14d95f9f7e604220f --- /dev/null +++ b/docs/source_en/design/mindspore/distributed_training_design.md @@ -0,0 +1,144 @@ +# Distributed Training Design + +`Linux` `Ascend` `GPU` `Model Development` `Model Optimization` `Framework Development` `Intermediate` `Expert` `Contributor` + + + +- [Distributed Training Design](#distributed-training-design) + - [Background](#background) + - [Concepts](#concepts) + - [Collective Communication](#collective-communication) + - [Synchronization Mode](#synchronization-mode) + - [Data Parallelism](#data-parallelism) + - [Principle of Data Parallelism](#principle-of-data-parallelism) + - [Data Parallel Code](#data-parallel-code) + - [Automatic Parallelism](#automatic-parallelism) + - [Principle of Automatic Parallelism](#principle-of-automatic-parallelism) + - [Automatic Parallel Code](#automatic-parallel-code) + + + + + +## Background + +With the rapid development of deep learning, the size of datasets and the number of parameters are growing exponentially to improve the accuracy and generalization capability of neural networks. Parallel distributed training has become a development trend to resolve the performance bottleneck of ultra-large scale networks. MindSpore supports the mainstream distributed training paradigms and develops an automatic hybrid parallel solution. The following describes the design principles of several parallel training modes and provides guidance for users to perform custom development. + + +## Concepts + +### Collective Communication + +Collective communication is defined as communication that involves a group of processes. All processes in the group send and receive data after meeting certain conditions. MindSpore implements data transmission during parallel training through collective communication. On Ascend chips, MindSpore depends on the Huawei Collective Communication Library (`HCCL`) to implement the task. On GPU, MindSpore depends on the NVIDIA Collective Communication Library (`NCCL`) to implement the task. + +### Synchronization Mode + +In synchronous mode, all devices start training at the same time and update parameter values synchronously after the backward propagation algorithm is executed. Currently, MindSpore uses the synchronous training mode. + +## Data Parallelism + +This section describes how the data parallel mode `ParallelMode.DATA_PARALLEL` works in MindSpore. + +### Principle of Data Parallelism + +![Data Parallel Description](./images/data_parallel.png) + +1. Environment dependencies + + Each time before parallel training starts, the `mindspore.communication.init` API is called to initialize communication resources, and the global communication group `WORLD_COMM_GROUP` is automatically created. + +2. Data distribution + + The key of data parallelism is to split the dataset along the sample dimension and deliver the split subsets to different devices. Each dataset loading API provided by the `mindspore.dataset` module has the `num_shards` and `shard_id` parameters. The parameters are used to split a dataset into multiple shards, perform cyclic sampling, and collect data of the `batch` size on each device. When the data volume is insufficient, the sampling restarts from the beginning.
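+
+   A minimal sketch of these two steps, assuming an Ascend device cluster and a local CIFAR-10 directory (`./cifar10` is a placeholder path):
+
+   ```python
+   import mindspore.dataset as ds
+   from mindspore.communication.management import init, get_rank, get_group_size
+
+   init()  # initialize communication resources; creates WORLD_COMM_GROUP
+
+   rank_id = get_rank()          # index of this device in the cluster
+   rank_size = get_group_size()  # total number of devices
+
+   # Each device loads a disjoint shard of the samples and batches it locally.
+   data = ds.Cifar10Dataset("./cifar10", num_shards=rank_size, shard_id=rank_id)
+   data = data.batch(32, drop_remainder=True)
+   ```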
+3. Network structure + + The scripting method of a data parallel network is the same as that of a standalone network. This is because, although the models on each device are executed independently during the forward and backward propagation processes, the same network structure is maintained. To ensure synchronous training between devices, the initial values of the corresponding network parameters must be the same. You are advised to set the same random number seed on each device by using `numpy.random.seed`, so that every device initializes the same model parameter values. + +4. Gradient aggregation + + Theoretically, the training effect of a data parallel network should be the same as that of the standalone network. To ensure the consistency of the calculation logic, the `AllReduce` operator is inserted after gradient calculation to implement the gradient aggregation operation between devices. You can enable `mean` to average the summed gradient values, or regard `mean` as a hyperparameter: enabling `mean` is equivalent to dividing the learning rate by the number of devices. + +5. Parameter update + + Because the gradient aggregation operation is introduced, the models on each device perform the parameter update with the same gradient values. Therefore, MindSpore implements a synchronous data parallel training mode. Theoretically, the models trained on each device are the same. However, if a reduce operation over samples is involved in the network, the network outputs may differ. This is determined by the sharding attribute of data parallelism. + +### Data Parallel Code + +1. Collective communication + + - [management.py](https://gitee.com/mindspore/mindspore/blob/master/mindspore/communication/management.py): This file covers the `helper` function APIs commonly used during the collective communication process, for example, the APIs for obtaining the cluster size and the local device ID. When collective communication is executed on the Ascend chip, the framework loads the `libhccl.so` library file in the environment and uses it to call the communication APIs from the Python layer to the underlying layer. + - [comm_ops.py](https://gitee.com/mindspore/mindspore/blob/master/mindspore/ops/operations/comm_ops.py): MindSpore encapsulates the supported collective communication operations as operators and stores them in this file. The operators include `AllReduce`, `AllGather`, `ReduceScatter`, and `Broadcast`. `PrimitiveWithInfer` defines the attributes required by the operators, as well as the `shape` and `dtype` inference methods from the input to the output during graph composition. + +2. Gradient aggregation + + - [grad_reducer.py](https://gitee.com/mindspore/mindspore/blob/master/mindspore/nn/wrap/grad_reducer.py): This file implements the gradient aggregation process. After the input parameter `grads` is expanded by using `HyperMap`, the `AllReduce` operator is inserted, using the global communication group. You can also perform custom development by referring to this file based on your network requirements. In MindSpore, standalone and distributed execution share one set of network encapsulation APIs. In the `Cell`, `ParallelMode` is used to determine whether to perform gradient aggregation. For details about the network encapsulation APIs, see the `TrainOneStepCell` code implementation.
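+
+Putting the data parallel pieces together, here is a hedged end-to-end sketch. The import paths and the `gradients_mean` argument (named `mirror_mean` in earlier releases) vary across MindSpore versions, `build_network()` is a placeholder for any `Cell`-based network, and `data` is the sharded dataset from the sketch above.
+
+```python
+from mindspore import context, nn
+from mindspore.communication.management import init
+from mindspore.context import ParallelMode  # location varies by version (assumption)
+from mindspore.train.model import Model     # import path varies by version (assumption)
+
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+init()
+context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
+                                  gradients_mean=True)  # average aggregated gradients
+
+net = build_network()  # placeholder
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
+opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
+
+# Model wraps TrainOneStepCell; under DATA_PARALLEL, DistributedGradReducer
+# inserts AllReduce on the gradients automatically.
+model = Model(net, loss_fn=loss, optimizer=opt)
+model.train(epoch=10, train_dataset=data)
+```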
+ + +## Automatic Parallelism + +As a key feature of MindSpore, automatic parallelism is used to implement hybrid parallel training that combines data parallelism and model parallelism automatically. It aims to help users express the parallel algorithm logic using standalone scripts, reduce the difficulty of distributed training, improve the algorithm R&D efficiency, and maintain the high performance of training. This section describes how the automatic parallel mode `ParallelMode.AUTO_PARALLEL` and the semi-automatic parallel mode `ParallelMode.SEMI_AUTO_PARALLEL` work in MindSpore. + +### Principle of Automatic Parallelism + +![Automatic Parallel Description](./images/auto_parallel.png) + +1. Distributed operator and tensor layout + + As shown in the preceding figure, the automatic parallel process traverses the standalone forward ANF graph and performs shard modeling on tensors at the granularity of the distributed operator, indicating how the input and output tensors of an operator are distributed to the devices of the cluster, that is, the tensor layout. Users do not need to know which device runs which slice of the model; the framework automatically schedules and allocates the model slices. + + To obtain the tensor layout model, each operator has a shard strategy, which indicates the shard status of each input of the operator in the corresponding dimensions. Generally, a tensor can be sharded in any dimension as long as the number of shards is `2^N` and the even distribution principle is met. The following figure shows an example of the three-dimensional `BatchMatmul` operation. The parallel strategy consists of two tuples, indicating the sharding of `input` and `weight`, respectively. Elements in a tuple correspond to the tensor dimensions one by one: `2^N` indicates the shard unit, and `1` indicates that the dimension is not sharded. To express a data parallel shard strategy, that is, only the data in the `batch` dimension of `input` is sharded and the other dimensions are not, you can use `strategy=((2^N, 1, 1),(1, 1, 1))`. To express a model parallel shard strategy, that is, only the model in a non-`batch` dimension of `weight` is sharded, for example, the `channel` dimension, you can use `strategy=((1, 1, 1),(1, 1, 2^N))`. To express a hybrid parallel shard strategy, one option is `strategy=((2^N, 1, 1),(1, 1, 2^N))`. + + ![Operator Sharding Definition](./images/operator_split.png) + + Based on the shard strategy of an operator, the framework automatically derives the distribution model of the input and output tensors of the operator. This distribution model consists of `device_matrix`, `tensor_shape`, and `tensor_map`, which indicate the device matrix shape, the tensor shape, and the mapping between devices and tensor dimensions, respectively. Based on the tensor layout model, the distributed operator determines whether to insert extra computation and communication operations in the graph to ensure that the operator computing logic is correct.
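+
+   For illustration, the three strategies above can be written with the operator-level `shard` API (named `set_strategy` in some earlier MindSpore versions, so treat the exact method name as version-dependent); here `N=1`, so each sharded dimension is split into two slices:
+
+   ```python
+   import mindspore.nn as nn
+   import mindspore.ops.operations as P
+
+   class Net(nn.Cell):
+       def __init__(self):
+           super(Net, self).__init__()
+           # Data parallel: shard only the batch dimension of `input`.
+           self.bmm = P.BatchMatMul().shard(((2, 1, 1), (1, 1, 1)))
+           # Model parallel would be:  .shard(((1, 1, 1), (1, 1, 2)))
+           # Hybrid parallel would be: .shard(((2, 1, 1), (1, 1, 2)))
+
+       def construct(self, x, w):
+           return self.bmm(x, w)
+   ```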
+ +2. Tensor Redistribution + + When the output tensor model of an operator is inconsistent with the input tensor model of the next operator, computation and communication operations need to be introduced to implement the change between tensor layouts. The automatic parallel process introduces the tensor redistribution algorithm, which can be used to derive the communication conversion operations between arbitrary tensor layouts. The following three examples represent a parallel computing process of the formula `Z=(X×W)×V`, that is, two `MatMul` operations on two-dimensional matrices, and show how conversion is performed between different parallel modes (a script-level sketch of this chain appears after this list). + + In example 1, the output of the first data parallel matrix multiplication is sharded in the row direction, and the input of the second model parallel matrix multiplication requires full tensors. The framework automatically inserts the `AllGather` operator to implement redistribution. + + ![Tensor Redistribution](./images/tensor_redistribution1.png) + + In example 2, the output of the first model parallel matrix multiplication is sharded in the column direction, and the input of the second model parallel matrix multiplication is sharded in the row direction. The framework automatically inserts a communication operator equivalent to the `AlltoAll` operation in collective communication to implement redistribution. + + ![Tensor Redistribution](./images/tensor_redistribution2.png) + + In example 3, the output shard mode of the first hybrid parallel matrix multiplication is the same as the input shard mode of the second hybrid parallel matrix multiplication, so redistribution does not need to be introduced. However, because the related dimensions of the two inputs are sharded in the second matrix multiplication, the `AllReduce` operator needs to be inserted to ensure the operation correctness. + + ![Tensor Redistribution](./images/tensor_redistribution3.png) + + In general, this distributed representation breaks the boundary between data parallelism and model parallelism, making it easy to implement hybrid parallelism. From the perspective of scripts, users only need to construct a standalone network to express the parallel algorithm logic. The framework automatically shards the entire graph. + +3. Efficient parallel strategy search algorithm + + The `SEMI_AUTO_PARALLEL` semi-automatic parallel mode indicates that you manually configure the parallel strategies of operators when you are familiar with the operator sharding representation. This mode is helpful for manual optimization, but requires some debugging effort: you need to master the parallel principles and obtain a high-performance parallel solution based on the network structure and cluster topology. To further help users accelerate the parallel network training process, the automatic parallel mode `AUTO_PARALLEL` introduces automatic search of the parallel strategy on the basis of the semi-automatic parallel mode. Automatic parallelism builds cost models based on the hardware platform, and calculates the computation cost, memory cost, and communication cost of a certain amount of data and specific operators under different parallel strategies. Then, by using the dynamic programming algorithm or the recursive programming algorithm and taking the memory upper limit of a single device as a constraint, a parallel strategy with optimal performance is efficiently searched out. + + Strategy search replaces manual model sharding and provides a high-performance sharding solution within a short period of time, greatly reducing the threshold for parallel training. + + +4. Convenient distributed automatic differentiation + + In addition to forward network communication, the traditional manual model sharding needs to consider backward parallel computing. MindSpore encapsulates communication operations into operators and automatically generates the backward propagation of communication operators based on the original automatic differentiation operations of the framework. Therefore, even during distributed training, users only need to pay attention to the forward propagation of the network to implement actual automatic parallel training.
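+
+As referenced in the redistribution examples above, the `Z=(X×W)×V` chain can be expressed under the semi-automatic mode roughly as follows. This sketch assumes the `shard` API and a 4-device cluster; only the strategies are written by the user, and the framework inserts the redistribution operators itself.
+
+```python
+import mindspore.nn as nn
+import mindspore.ops.operations as P
+from mindspore import context
+
+context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4)
+
+class TwoMatMul(nn.Cell):
+    def __init__(self):
+        super(TwoMatMul, self).__init__()
+        # First product: data parallel, output sharded by rows (example 1).
+        self.matmul1 = P.MatMul().shard(((4, 1), (1, 1)))
+        # Second product: model parallel, requires a full left-hand tensor;
+        # the framework inserts AllGather between the two layouts.
+        self.matmul2 = P.MatMul().shard(((1, 1), (1, 4)))
+
+    def construct(self, x, w, v):
+        return self.matmul2(self.matmul1(x, w), v)
+```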
+ +### Automatic Parallel Code + +1. Tensor layout model + - [tensor_layout](https://gitee.com/mindspore/mindspore/tree/master/mindspore/ccsrc/frontend/parallel/tensor_layout): This directory contains the definitions and implementation of functions related to the tensor distribution model. `tensor_layout.h` declares the member variables `tensor_map_origin_`, `tensor_shape_`, and `device_arrangement_` required by a tensor distribution model. In `tensor_redistribution.h`, the related methods for implementing the `from_origin_` and `to_origin_` transformation between tensor distributions are declared. The deduced redistribution operations are stored in `operator_list_` and returned. In addition, the communication cost `comm_cost_`, memory cost `memory_cost_`, and computation cost `computation_cost_` required for redistribution are calculated. + +2. Distributed operators + - [ops_info](https://gitee.com/mindspore/mindspore/tree/master/mindspore/ccsrc/frontend/parallel/ops_info): This directory contains the implementation of distributed operators. In `operator_info.h`, the base class `OperatorInfo` of the distributed operator implementation is defined. A distributed operator to be developed shall inherit the base class and explicitly implement the related virtual functions. The `InferTensorInfo`, `InferTensorMap`, and `InferDevMatrixShape` functions define the algorithms for deriving the input and output tensor distribution models of the operator. The `InferForwardCommunication` and `InferMirrorOps` functions define the extra computation and communication operations to be inserted for operator sharding. The `CheckStrategy` and `GenerateStrategies` functions define the parallel strategy validation and generation for the operator. The `SetCostUnderStrategy` function generates the parallel cost `operator_cost_` of the distributed operator under a given parallel strategy. + +3. Strategy search algorithm + - [auto_parallel](https://gitee.com/mindspore/mindspore/tree/master/mindspore/ccsrc/frontend/parallel/auto_parallel): The shard strategy search algorithm is implemented in this directory. `graph_costmodel.h` defines the graph composition information: each node indicates an operator `OperatorInfo`, and the directed edges, defined in `edge_costmodel.h`, indicate the input and output relationships of operators and the redistribution cost. `operator_costmodel.h` defines the cost model of each operator, including the computation cost, communication cost, and memory cost. `dp_algorithm_costmodel.h` describes the main process of the dynamic programming algorithm, which consists of a series of graph operations. `costmodel.h` defines the data structures of the cost and the graph operations. + +4. Device management + - [device_manager.h](https://gitee.com/mindspore/mindspore/blob/master/mindspore/ccsrc/frontend/parallel/device_manager.h): This file is used to create and manage cluster device communication groups. The device matrix model is defined by `device_matrix.h`, and the communication domain is managed by `group_manager.h`. + +5.
Entire graph sharding + - [step_auto_parallel.h](https://gitee.com/mindspore/mindspore/blob/master/mindspore/ccsrc/frontend/parallel/step_auto_parallel.h) and [step_parallel.h](https://gitee.com/mindspore/mindspore/blob/master/mindspore/ccsrc/frontend/parallel/step_parallel.h): These two files contain the core implementation of the automatic parallel process. `step_auto_parallel.h` calls the strategy search process and generates the `OperatorInfo` of the distributed operators. Then, `step_parallel.h` performs operator sharding and tensor redistribution to reconstruct the standalone computing graph in distributed mode. + + +6. Backward propagation of communication operators + - [grad_comm_ops.py](https://gitee.com/mindspore/mindspore/blob/master/mindspore/ops/_grad/grad_comm_ops.py): This file defines the backward propagation of communication operators, such as `AllReduce` and `AllGather`. diff --git a/docs/source_en/design/mindspore/images/auto_parallel.png b/docs/source_en/design/mindspore/images/auto_parallel.png new file mode 100644 index 0000000000000000000000000000000000000000..800b3b2536c739dcc48a1e46b5f65fc327e4ce8d Binary files /dev/null and b/docs/source_en/design/mindspore/images/auto_parallel.png differ diff --git a/docs/source_en/design/mindspore/images/data_parallel.png b/docs/source_en/design/mindspore/images/data_parallel.png new file mode 100644 index 0000000000000000000000000000000000000000..a92c82aa64615b398e83b9bc2cf0aa2c5db9f904 Binary files /dev/null and b/docs/source_en/design/mindspore/images/data_parallel.png differ diff --git a/docs/source_en/design/mindspore/images/operator_split.png b/docs/source_en/design/mindspore/images/operator_split.png new file mode 100644 index 0000000000000000000000000000000000000000..4063170990c6816884361f195db5851cfbdf932e Binary files /dev/null and b/docs/source_en/design/mindspore/images/operator_split.png differ diff --git a/docs/source_zh_cn/design/mindspore/images/tensor_redistribution.png b/docs/source_en/design/mindspore/images/tensor_redistribution.png similarity index 100% rename from docs/source_zh_cn/design/mindspore/images/tensor_redistribution.png rename to docs/source_en/design/mindspore/images/tensor_redistribution.png diff --git a/docs/source_en/design/mindspore/images/tensor_redistribution1.png b/docs/source_en/design/mindspore/images/tensor_redistribution1.png new file mode 100644 index 0000000000000000000000000000000000000000..ed4d79416a0a07f8d75e738aa544d214834ae778 Binary files /dev/null and b/docs/source_en/design/mindspore/images/tensor_redistribution1.png differ diff --git a/docs/source_en/design/mindspore/images/tensor_redistribution2.png b/docs/source_en/design/mindspore/images/tensor_redistribution2.png new file mode 100644 index 0000000000000000000000000000000000000000..114f984c66ae578722dbcdbb59ab03c44dbcb097 Binary files /dev/null and b/docs/source_en/design/mindspore/images/tensor_redistribution2.png differ diff --git a/docs/source_en/design/mindspore/images/tensor_redistribution3.png b/docs/source_en/design/mindspore/images/tensor_redistribution3.png new file mode 100644 index 0000000000000000000000000000000000000000..dd66c9120615f50f2b3f60cfe139954cb4adf307 Binary files /dev/null and b/docs/source_en/design/mindspore/images/tensor_redistribution3.png differ diff --git a/docs/source_en/design/mindspore/ir.md b/docs/source_en/design/mindspore/ir.md index 4837ba94baccb0f15638d6bb744ec13f9035bb1b..98743518453e919a3b70d280ef5e72f1f34b9a25 100644 ---
a/docs/source_en/design/mindspore/ir.md +++ b/docs/source_en/design/mindspore/ir.md @@ -1,7 +1,7 @@ # MindSpore IR (MindIR) -`Framework Development` `Intermediate` `Expert` `Contributor` +`Linux` `Windows` `Framework Development` `Intermediate` `Expert` `Contributor` diff --git a/docs/source_en/glossary.md b/docs/source_en/glossary.md index ae1fb21e9168f00bd574fdef787b2a7b3a86f831..3f08ac2a4124b14bf6551de670ec44f8eddaffcf 100644 --- a/docs/source_en/glossary.md +++ b/docs/source_en/glossary.md @@ -32,9 +32,10 @@ | LSTM | Long short-term memory, an artificial recurrent neural network (RNN) architecture used for processing and predicting an important event with a long interval and delay in a time sequence. | | Manifest | A data format file. Huawei ModelArt adopts this format. For details, see . | | ME | Mind Expression, MindSpore frontend, which is used to compile tasks from user source code to computational graphs, control execution during training, maintain contexts (in non-sink mode), and dynamically generate graphs (in PyNative mode). | -| MindArmour | MindSpore security component, which is used for AI adversarial example management, AI model attack defense and enhancement, and AI model robustness evaluation. | +| MindArmour | The security module of MindSpore, which improves the confidentiality, integrity, and availability of models through technical means such as differential privacy and adversarial attack and defense. MindArmour prevents attackers from maliciously modifying a model or cracking its internal components to steal its parameters. | | MindData | MindSpore data framework, which provides data loading, enhancement, dataset management, and visualization. | | MindInsight | MindSpore visualization component, which visualizes information such as scalars, images, computational graphs, and model hyperparameters. | +| MindRecord | A data format defined by MindSpore, and the module for reading, writing, searching, and converting datasets in this format. | | MindSpore | Huawei-led open-source deep learning framework. | | MindSpore Lite | A lightweight deep neural network inference engine that provides the inference function for models trained by MindSpore on the device side. | | MNIST database | Modified National Institute of Standards and Technology database, a large handwritten digit database, which is usually used to train various image processing systems. | @@ -43,5 +44,5 @@ | ResNet-50 | Residual Neural Network 50, a residual neural network proposed by Kaiming He et al. from Microsoft Research. | | Schema | Data set structure definition file, which defines the fields contained in a dataset and the field types. | | Summary | An operator that monitors the values of tensors on the network. It is a peripheral operation in the graph and does not affect the data flow. | -| TBE | Tensor Boost Engine, an operator development tool that is extended based on the Tensor Virtual Machine (TVM) framework. | +| TBE | Tensor Boost Engine, an NPU operator development tool developed by Huawei based on the TVM (Tensor Virtual Machine) framework. It provides a set of Python APIs for developing custom operators. | | TFRecord | Data format defined by TensorFlow.
| diff --git a/docs/source_en/network_list.md b/docs/source_en/network_list.md index 897111be5078687a3c4b4671c0c9f05904226128..fcad7fc7f16e7d5edc291cb3c801fe403e8e1bef 100644 --- a/docs/source_en/network_list.md +++ b/docs/source_en/network_list.md @@ -6,7 +6,6 @@ - [Network List](#network-list) - [Model Zoo](#model-zoo) - - [Pre-trained Models](#pre-trained-models) @@ -14,47 +13,33 @@ ## Model Zoo -| Domain | Sub Domain | Network | Ascend | GPU | CPU -|:------ |:------| :----------- |:------ |:------ |:----- -|Computer Vision (CV) | Image Classification | [AlexNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/alexnet/src/alexnet.py) | Supported | Supported | Doing -| Computer Vision (CV) | Image Classification | [GoogleNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/googlenet/src/googlenet.py) | Supported | Doing | Doing -| Computer Vision (CV) | Image Classification | [LeNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/lenet/src/lenet.py) | Supported | Supported | Supported -| Computer Vision (CV) | Image Classification | [ResNet-50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported | Supported | Doing -|Computer Vision (CV) | Image Classification | [ResNet-101](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported |Doing | Doing -|Computer Vision (CV) | Image Classification | [SE-ResNet50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported |Doing | Doing -|Computer Vision (CV) | Image Classification | [ResNext50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnext50/src/image_classification.py) | Supported | Supported | Doing -| Computer Vision (CV) | Image Classification | [VGG16](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/vgg16/src/vgg.py) | Supported | Doing | Doing -| Computer Vision (CV) | Image Classification | [InceptionV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/inceptionv3/src/inception_v3.py) | Supported | Doing | Doing -| Computer Vision (CV) | Mobile Image Classification
Image Classification
Semantic Tegmentation | [MobileNetV2](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py) | Supported | Supported | Doing -| Computer Vision (CV) | Mobile Image Classification
Image Classification
Semantic Tegmentation | [MobileNetV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py) | Doing | Supported | Doing -|Computer Vision (CV) | Targets Detection | [SSD](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/ssd/src/ssd.py) | Supported |Doing | Doing -| Computer Vision (CV) | Targets Detection | [YoloV3-ResNet18](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py) | Supported | Doing | Doing -| Computer Vision (CV) | Targets Detection | [YoloV3-DarkNet53](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_darknet53/src/yolo.py) | Supported | Doing | Doing -| Computer Vision (CV) | Targets Detection | [FasterRCNN](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/faster_rcnn/src/FasterRcnn/faster_rcnn_r50.py) | Supported | Doing | Doing -| Computer Vision (CV) | Semantic Segmentation | [DeeplabV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/deeplabv3/src/deeplabv3.py) | Supported | Doing | Doing -| Computer Vision(CV) | Targets Detection | [WarpCTC](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/warpctc/src/warpctc.py) | Doing | Supported | Doing -| Natural Language Processing (NLP) | Natural Language Understanding | [BERT](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/bert/src/bert_model.py) | Supported | Doing | Doing -| Natural Language Processing (NLP) | Natural Language Understanding | [Transformer](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/transformer/src/transformer_model.py) | Supported | Doing | Doing -| Natural Language Processing (NLP) | Natural Language Understanding | [SentimentNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/lstm/src/lstm.py) | Doing | Supported | Supported -| Natural Language Processing (NLP) | Natural Language Understanding | [MASS](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py) | Supported | Doing | Doing -| Natural Language Processing (NLP) | Natural Language Understanding | [TinyBert](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/tinybert/src/tinybert_model.py) | Supported | Supported | Doing -| Recommender | Recommender System, CTR prediction | [DeepFM](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/recommend/deepfm/src/deepfm.py) | Supported | Supported | Doing -| Recommender | Recommender System, Search ranking | [Wide&Deep](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py) | Supported | Supported | Doing -| Graph Neural Networks(GNN)| Text Classification | [GCN](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/gnn/gcn/src/gcn.py) | Supported | Doing | Doing -| Graph Neural Networks(GNN)| Text Classification | [GAT](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/gnn/gat/src/gat.py) | Supported | Doing | Doing +| Domain | Sub Domain | Network | Ascend(Graph) | Ascend(PyNative) | GPU(Graph) | GPU(PyNative) | CPU(Graph) +|:------ |:------| :----------- |:------ |:------ |:------ |:------ |:----- +|Computer Vision (CV) | Image Classification | [AlexNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/alexnet/src/alexnet.py) | Supported | Supported | Supported | Supported | Doing +| 
Computer Vision (CV) | Image Classification | [GoogleNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/googlenet/src/googlenet.py) | Supported | Supported | Supported | Supported | Doing +| Computer Vision (CV) | Image Classification | [LeNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/lenet/src/lenet.py) | Supported | Supported | Supported | Supported | Supported +| Computer Vision (CV) | Image Classification | [ResNet-50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported | Supported | Supported | Supported | Doing +|Computer Vision (CV) | Image Classification | [ResNet-101](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported | Supported | Supported | Supported | Doing +|Computer Vision (CV) | Image Classification | [SE-ResNet50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported | Doing |Doing | Doing | Doing +|Computer Vision (CV) | Image Classification | [ResNext50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnext50/src/image_classification.py) | Supported | Supported | Supported | Supported | Doing +| Computer Vision (CV) | Image Classification | [VGG16](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/vgg16/src/vgg.py) | Supported | Supported | Supported | Supported | Doing +| Computer Vision (CV) | Image Classification | [InceptionV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/inceptionv3/src/inception_v3.py) | Supported | Supported | Supported | Supported | Doing +| Computer Vision (CV) | Mobile Image Classification
Image Classification
Semantic Segmentation | [MobileNetV2](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py) | Supported | Supported | Supported | Supported | Doing +| Computer Vision (CV) | Mobile Image Classification
Image Classification
Semantic Segmentation | [MobileNetV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py) | Doing | Doing | Supported | Supported | Doing +|Computer Vision (CV) | Object Detection | [SSD](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/ssd/src/ssd.py) | Supported | Supported |Doing | Doing | Doing +| Computer Vision (CV) | Object Detection | [YoloV3-ResNet18](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py) | Supported | Doing | Doing | Doing | Doing +| Computer Vision (CV) | Object Detection | [YoloV3-DarkNet53](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_darknet53/src/yolo.py) | Supported | Doing | Doing | Doing | Doing +| Computer Vision (CV) | Object Detection | [FasterRCNN](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/faster_rcnn/src/FasterRcnn/faster_rcnn_r50.py) | Supported | Doing | Doing | Doing | Doing +| Computer Vision(CV) | Object Detection | [WarpCTC](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/warpctc/src/warpctc.py) | Doing | Doing | Supported | Supported | Doing +| Computer Vision (CV) | Semantic Segmentation | [DeeplabV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/deeplabv3/src/nets/deeplab_v3/deeplab_v3.py) | Supported | Supported | Doing | Doing | Doing +| Natural Language Processing (NLP) | Natural Language Understanding | [BERT](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/bert/src/bert_model.py) | Supported | Supported | Supported | Supported | Doing +| Natural Language Processing (NLP) | Natural Language Understanding | [Transformer](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/transformer/src/transformer_model.py) | Supported | Doing | Doing | Doing | Doing +| Natural Language Processing (NLP) | Natural Language Understanding | [SentimentNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/lstm/src/lstm.py) | Doing | Doing | Supported | Supported | Supported +| Natural Language Processing (NLP) | Natural Language Understanding | [MASS](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py) | Supported | Supported | Doing | Doing | Doing +| Natural Language Processing (NLP) | Natural Language Understanding | [TinyBert](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/tinybert/src/tinybert_model.py) | Supported | Supported | Supported | Doing | Doing +| Recommender | Recommender System, CTR prediction | [DeepFM](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/recommend/deepfm/src/deepfm.py) | Supported | Supported | Supported | Doing | Doing +| Recommender | Recommender System, Search ranking | [Wide&Deep](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py) | Supported | Supported | Supported | Doing | Doing +| Graph Neural Networks(GNN)| Text Classification | [GCN](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/gnn/gcn/src/gcn.py) | Supported | Doing | Doing | Doing | Doing +| Graph Neural Networks(GNN)| Text Classification | [GAT](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/gnn/gat/src/gat.py) | Supported | Doing | Doing | Doing | Doing > You can also use [MindWizard
Tool](https://gitee.com/mindspore/mindinsight/tree/master/mindinsight/wizard/) to quickly generate classic network scripts. - -## Pre-trained Models -*It refers to the released MindSpore version. The hardware platforms that support model training are CPU, GPU and Ascend. As shown in the table below, ✓ indicates that the pre-trained model run on the selected platform. - -| Domain | Sub Domain| Network | Dataset | CPU | GPU | Ascend | 0.5.0-beta* -|:------ |:------ | :------- |:------ |:------ |:------ |:----- |:----- -|Computer Vision (CV) | Image Classification| [AlexNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/alexnet/src/alexnet.py) | CIFAR-10| | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/cv/alexnet/alexnet_ascend_0.5.0_cifar10_official_classification_20200716.tar.gz) -|Computer Vision (CV) | Image Classification| [LeNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/lenet/src/lenet.py)| MNIST | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/cv/lenet/lenet_ascend_0.5.0_mnist_official_classification_20200716.tar.gz) -|Computer Vision (CV) | Image Classification| [VGG16](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/vgg16/src/vgg.py)| CIFAR-10 | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/cv/vgg/vgg16_ascend_0.5.0_cifar10_official_classification_20200715.tar.gz) -|Computer Vision (CV) | Image Classification| [ResNet-50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | CIFAR-10| | | ✓ |[Download](http://download.mindspore.cn/model_zoo/official/cv/resnet/resnet50_v1.5_ascend_0.3.0_cifar10_official_classification_20200718.tar.gz) -|Computer Vision (CV) | Targets Detection| [YoloV3-DarkNet53](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_darknet53/src/yolo.py) | COCO 2014| | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/cv/yolo/yolov3_darknet53_ascend_0.5.0_coco2014_official_object_detection_20200717.tar.gz) -| Natural Language Processing (NLP) | Natural Language Understanding| [BERT_Base](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/bert/src/bert_model.py) | zhwiki | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/nlp/bert/bert_base_ascend_0.5.0_cn-wiki_official_nlp_20200720.tar.gz) -| Natural Language Processing (NLP) | Natural Language Understanding| [BERT_NEZHA](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/bert/src/bert_model.py)| zhwiki| | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/nlp/bert/bert_nezha_ascend_0.5.0_cn-wiki_official_nlp_20200720.tar.gz) -| Natural Language Processing (NLP) | Natural Language Understanding| [Transformer](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/transformer/src/transformer_model.py)| WMT English-German| | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/nlp/transformer/transformer_ascend_0.5.0_wmtende_official_machine_translation_20200713.tar.gz) diff --git a/docs/source_en/operator_list.md b/docs/source_en/operator_list.md index 672de46b5ab7e69e5c8743b03fa3cfd323d899d7..470d6a650efb713e6ac6867ec884d2f2906b7422 100644 --- a/docs/source_en/operator_list.md +++ b/docs/source_en/operator_list.md @@ -37,7 +37,7 @@ | [mindspore.nn.Flatten](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Flatten) |Supported | Supported | Supported 
|layer/basic
 | [mindspore.nn.Dense](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Dense) |Supported | Supported | Supported |layer/basic
 | [mindspore.nn.ClipByNorm](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ClipByNorm) |Supported | Supported | Doing |layer/basic
-| [mindspore.nn.Norm](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Norm) |Doing | Supported | Doing |layer/basic
+| [mindspore.nn.Norm](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Norm) |Supported | Supported | Doing |layer/basic
 | [mindspore.nn.OneHot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.OneHot) | Supported | Supported | Supported |layer/basic
 | [mindspore.nn.Range](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Range) | Supported | Doing | Doing |layer/basic
 | [mindspore.nn.SequentialCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SequentialCell) |Supported | Supported | Doing |layer/container
@@ -65,11 +65,21 @@
 | [mindspore.nn.AvgPool2d](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.AvgPool2d) | Supported | Supported | Doing |layer/pooling
 | [mindspore.nn.DenseBnAct](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DenseBnAct) |Supported | Doing | Doing |layer/quant
 | [mindspore.nn.Conv2dBnAct](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dBnAct) | Supported | Supported | Doing |layer/quant
+| [mindspore.nn.FakeQuantWithMinMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.FakeQuantWithMinMax) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.Conv2dBnFoldQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dBnFoldQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.Conv2dBnWithoutFoldQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dBnWithoutFoldQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.Conv2dQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.DenseQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DenseQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.ActQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ActQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.LeakyReLUQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.LeakyReLUQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.HSwishQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.HSwishQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.HSigmoidQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.HSigmoidQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.TensorAddQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.TensorAddQuant) | Supported | Supported | Supported |layer/quant
+| [mindspore.nn.MulQuant](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MulQuant) | Supported | Supported | Supported |layer/quant
 | [mindspore.nn.L1Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.L1Loss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.MSELoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MSELoss) | Supported |Doing | Doing |loss/loss
 | [mindspore.nn.SmoothL1Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SmoothL1Loss) |Supported |Doing | Doing |loss/loss
 | [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Supported |loss/loss
-| [mindspore.nn.SoftmaxCrossEntropyExpand](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyExpand) | Supported |Supported | Doing |loss/loss
 | [mindspore.nn.CosineEmbeddingLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.CosineEmbeddingLoss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.ProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ProximalAdagrad) | Supported | Doing | Doing |optim/ProximalAdagrad
 | [mindspore.nn.LazyAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.LazyAdam) | Supported | Doing | Doing |optim/lazyadam
@@ -84,300 +94,305 @@
 | [mindspore.nn.WithLossCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.WithLossCell) | Supported | Supported | Doing |wrap/cell_wrapper
 | [mindspore.nn.WithGradCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.WithGradCell) | Supported | Supported | Doing |wrap/cell_wrapper
 | [mindspore.nn.TrainOneStepCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.TrainOneStepCell) | Supported | Supported | Doing |wrap/cell_wrapper
-| [mindspore.nn.DataWrapper](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DataWrapper) |Doing | Supported | Doing |wrap/cell_wrapper
 | [mindspore.nn.GetNextSingleOp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.GetNextSingleOp) |Doing | Supported | Doing |wrap/cell_wrapper
 | [mindspore.nn.WithEvalCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.WithEvalCell) | Supported | Supported | Doing |wrap/cell_wrapper
 | [mindspore.nn.ParameterUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ParameterUpdate) | Supported |Doing | Doing |wrap/cell_wrapper
 | [mindspore.nn.DistributedGradReducer](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DistributedGradReducer) | Supported |Doing | Doing |wrap/grad_reducer
-| [mindspore.nn.DynamicLossScaleUpdateCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DynamicLossScaleUpdateCell) | Doing |Doing | Doing |wrap/loss_scale
-| [mindspore.nn.FixedLossScaleUpdateCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.FixedLossScaleUpdateCell) | Doing |Doing | Doing |wrap/loss_scale
-| [mindspore.nn.TrainOneStepWithLossScaleCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.TrainOneStepWithLossScaleCell) | Doing |Doing | Doing |wrap/loss_scale
+| [mindspore.nn.DynamicLossScaleUpdateCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DynamicLossScaleUpdateCell) | Supported |Supported | Doing |wrap/loss_scale
+| [mindspore.nn.FixedLossScaleUpdateCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.FixedLossScaleUpdateCell) | Supported |Supported | Doing |wrap/loss_scale
+| [mindspore.nn.TrainOneStepWithLossScaleCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.TrainOneStepWithLossScaleCell) | Supported |Supported | Doing |wrap/loss_scale
 | [mindspore.nn.Cell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Cell) | Supported | Supported | Supported |cell
+| [mindspore.nn.EmbeddingLookup](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.EmbeddingLookup) |Supported | Supported | Supported |layer/embedding
+| [mindspore.nn.Pad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Pad) |Supported | Supported | Doing |layer/basic
 ## mindspore.ops.operations
 | Operation | Ascend | GPU | CPU |Operator Type
 | :----------- |:------ |:------ |:-----|:---
-| [mindspore.ops.operations.Flatten](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Flatten) | Supported | Supported |Supported | nn_ops
-| [mindspore.ops.operations.Softmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Softmax) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.Acosh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Acosh) | Doing | Doing | Doing | nn_ops
-| [mindspore.ops.operations.FloorMod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorMod) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.Elu](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Elu) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.MirrorPad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MirrorPad) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.Unpack](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Unpack) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.Pack](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pack) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.L2Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.L2Loss) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.CTCLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CTCLoss) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.RNNTLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RNNTLoss) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.LogSoftmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogSoftmax) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.Softplus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Softplus) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.ReLU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReLU) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.ReLU6](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReLU6) | Supported | Supported |Supported | nn_ops
-| [mindspore.ops.operations.HSwish](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.HSwish) | Doing | Supported |Doing | nn_ops
-| [mindspore.ops.operations.HSigmoid](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.HSigmoid) | Doing | Supported |Doing | nn_ops
-| [mindspore.ops.operations.Sigmoid](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sigmoid) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.Tanh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tanh) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.BatchNorm](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchNorm) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.LRN](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LRN) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.Conv2D](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Conv2D) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.DepthwiseConv2dNative](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DepthwiseConv2dNative) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.DepthwiseConv2dNativeBackpropInput](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DepthwiseConv2dNativeBackpropInput) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.DepthwiseConv2dNativeiBackpropFilter](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DepthwiseConv2dNativeBackpropFilter) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.MaxPoolWithArgmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MaxPoolWithArgmax) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.MaxPool](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MaxPool) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.AvgPool](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AvgPool) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.Conv2DBackpropInput](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Conv2DBackpropInput) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.BiasAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BiasAdd) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.TopK](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TopK) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits) | Doing | Supported | Supported | nn_ops
-| [mindspore.ops.operations.ApplyMomentum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyMomentum) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.ApplyAddSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAddSign) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.ApplyPowerSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyPowerSign) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.ApplyGradientDescent](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyGradientDescent) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.ApplyProximalGradientDescent](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyProximalGradientDescent) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.ApplyRMSProp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyRMSProp) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.ApplyCenteredRMSProp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyCenteredRMSProp) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.SparseApplyAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyAdagrad) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.SparseApplyAdagradV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyAdagradV2) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.SparseApplyProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyProximalAdagrad) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.FusedSparseProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseProximalAdagrad) | Doing | Doing | Supported | nn_ops
-| [mindspore.ops.operations.ApplyProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyProximalAdagrad) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.FusedSparseLazyAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseLazyAdam) | Doing | Doing | Supported | nn_ops
-| [mindspore.ops.operations.FusedSparseAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseAdam) | Doing | Doing | Supported | nn_ops
-| [mindspore.ops.operations.SmoothL1Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SmoothL1Loss) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.SGD](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SGD) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.LayerNorm](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LayerNorm) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.L2Normalize](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.L2Normalize) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.DropoutGenMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DropoutGenMask) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.DropoutDoMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DropoutDoMask) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.ResizeBilinear](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ResizeBilinear) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.OneHot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.OneHot) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.Gelu](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Gelu) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.GetNext](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GetNext) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.PReLU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.PReLU) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.LSTM](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LSTM) | Doing | Supported | Supported | nn_ops
-| [mindspore.ops.operations.BasicLSTMCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BasicLSTMCell) | Doing | Doing | Doing | nn_ops
-| [mindspore.ops.operations.SigmoidCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SigmoidCrossEntropyWithLogits) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.Pad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pad) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.ROIAlign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ROIAlign) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.Adam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Adam) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.BinaryCrossEntropy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BinaryCrossEntropy) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.KLDivLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.KLDivLoss) | Doing | Supported | Doing | nn_ops
-| [mindspore.ops.operations.LARSUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LARSUpdate) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.Softsign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Softsign) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.TensorAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorAdd) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.AssignAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignAdd) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.AssignSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignSub) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.ReduceMean](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMean) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.ReduceSum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceSum) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.ReduceAll](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceAll) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.ReduceMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMax) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.ReduceMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMin) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.ReduceProd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceProd) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.CumProd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CumProd) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.MatMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MatMul) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.BatchMatMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchMatMul) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.CumSum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CumSum) | Supported | Supported| Doing | math_ops
-| [mindspore.ops.operations.AddN](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AddN) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.Neg](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Neg) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Sub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sub) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.Mul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mul) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.Square](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Square) | Supported | Supported | Supported | math_ops
-| [mindspore.ops.operations.SquareSumAll](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SquareSumAll) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Rsqrt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Rsqrt) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Sqrt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sqrt) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Reciprocal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Reciprocal) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Pow](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pow) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Exp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Exp) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Log](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Log) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Log1p](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Log1p) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Minimum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Minimum) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Maximum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Maximum) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.RealDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RealDiv) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Div](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Div) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.DivNoNan](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DivNoNan) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.FloorDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorDiv) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Floor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Floor) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Equal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Equal) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.EqualCount](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.EqualCount) | Doing | Supported | Supported | math_ops
-| [mindspore.ops.operations.NotEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NotEqual) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Greater](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Greater) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.GreaterEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GreaterEqual) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Less](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Less) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Atan2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Atan2) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.LessEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LessEqual) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.LogicalNot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalNot) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.LogicalAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalAnd) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.LogicalOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalOr) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.BitwiseAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseAnd) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.BitwiseOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseOr) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Ceil](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Ceil) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Inv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Inv) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Invert](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Invert) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.BitwiseXor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseXor) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.NPUAllocFloatStatus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NPUAllocFloatStatus) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.NPUGetFloatStatus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NPUGetFloatStatus) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.NPUClearFloatStatus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NPUClearFloatStatus) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.FloatStatus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloatStatus) | Doing | Supported | Doing | math_ops
-| [mindspore.ops.operations.Cos](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cos) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Cosh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cosh) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.ACos](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ACos) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.BesselI0e](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BesselI0e) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.BesselI1e](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BesselI1e) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.TruncateDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncateDiv) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.TruncateMod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncateMod) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Tan](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tan) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Asin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Asin) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Asinh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Asinh) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Erf](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Erf) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Erfc](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Erfc) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Sin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sin) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Sinh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sinh) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Expm1](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Expm1) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.NMSWithMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NMSWithMask) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Abs](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Abs) | Supported | Supported | Doing | math_ops
-| [mindspore.ops.operations.Sign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sign) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Round](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Round) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.ApproximateEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApproximateEqual) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.InplaceAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InplaceAdd) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.InplaceSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InplaceSub) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Mod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mod) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.ExpandDims](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ExpandDims) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.DType](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DType) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.SameTypeShape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SameTypeShape) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Cast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cast) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.IsSubClass](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.IsSubClass) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.IsInstance](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.IsInstance) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Reshape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Reshape) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Shape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Shape) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Squeeze](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Squeeze) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.Transpose](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Transpose) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.GatherV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherV2) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Split](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Split) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.Rank](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Rank) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.TruncatedNormal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncatedNormal) | Doing | Doing | Doing | array_ops
-| [mindspore.ops.operations.Size](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Size) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Fill](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Fill) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.OnesLike](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.OnesLike) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.ZerosLike](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ZerosLike) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.TupleToArray](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TupleToArray) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.ScalarToArray](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScalarToArray) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.ScalarToTensor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScalarToTensor) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.InvertPermutation](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InvertPermutation) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Argmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Argmax) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Argmin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Argmin) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ArgMaxWithValue](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ArgMaxWithValue) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.ArgMinWithValue](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ArgMinWithValue) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.Tile](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tile) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.UnsortedSegmentSum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UnsortedSegmentSum) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.UnsortedSegmentMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UnsortedSegmentMin) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.UnsortedSegmentProd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UnsortedSegmentProd) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.Concat](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Concat) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.ParallelConcat](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ParallelConcat) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.Slice](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Slice) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Select](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Select) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.StridedSlice](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.StridedSlice) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.Diag](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Diag) | Doing | Doing | Doing | array_ops
-| [mindspore.ops.operations.DiagPart](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DiagPart) | Doing | Doing | Doing | array_ops
-| [mindspore.ops.operations.Eye](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Eye) | Supported | Supported | Supported | array_ops
-| [mindspore.ops.operations.ScatterNd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNd) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.ResizeNearestNeighbor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ResizeNearestNeighbor) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.GatherNd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherNd) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.ApplyFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyFtrl) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.SparseApplyFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyFtrl) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.FusedSparseFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseFtrl) | Doing | Doing | Supported | array_ops
-| [mindspore.ops.operations.SparseApplyFtrlV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyFtrlV2) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterNdUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdUpdate) | Supported | Doing | Supported | array_ops
-| [mindspore.ops.operations.ScatterUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterUpdate) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMul) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterDiv) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.SpaceToDepth](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SpaceToDepth) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.DepthToSpace](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DepthToSpace) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.SpaceToBatch](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SpaceToBatch) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.SpaceToBatchND](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SpaceToBatchND) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.BatchToSpace](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchToSpace) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.BatchToSpaceND](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchToSpaceND) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.IsFinite](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.IsFinite) | Supported | Supported | Doing | array_ops
-| [mindspore.ops.operations.InplaceUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InplaceUpdate) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterSub) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMax) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMin) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterNdAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdAdd) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterNdSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdSub) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ScatterNonAliasingAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNonAliasingAdd) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.Rint](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Rint) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ReverseV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReverseV2) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.ReduceOp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceOp) | Supported | Supported | Doing | comm_ops
-| [mindspore.ops.operations.AllReduce](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AllReduce) | Supported | Supported | Doing | comm_ops
-| [mindspore.ops.operations.AllGather](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AllGather) | Supported | Supported | Doing | comm_ops
-| [mindspore.ops.operations.ReduceScatter](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceScatter) | Doing | Supported | Doing | comm_ops
-| [mindspore.ops.operations.Broadcast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Broadcast) | Supported | Doing | Doing | comm_ops
-| [mindspore.ops.operations.ControlDepend](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ControlDepend) | Supported | Supported | Supported | control_ops
-| [mindspore.ops.operations.GeSwitch](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GeSwitch) | Doing | Doing | Doing | control_ops
-| [mindspore.ops.operations.Merge](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Merge) | Doing | Doing | Doing | control_ops
-| [mindspore.ops.operations.ScalarSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScalarSummary) | Supported | Supported | Supported | debug_ops
-| [mindspore.ops.operations.ImageSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ImageSummary) | Supported | Supported | Supported | debug_ops
-| [mindspore.ops.operations.TensorSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorSummary) | Supported | Supported | Supported | debug_ops
-| [mindspore.ops.operations.HistogramSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.HistogramSummary) | Supported | Supported | Supported | debug_ops
-| [mindspore.ops.operations.InsertGradientOf](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InsertGradientOf) | Supported | Supported | Supported | debug_ops
-| [mindspore.ops.operations.Print](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Print) | Supported | Doing | Doing | debug_ops
-| [mindspore.ops.operations.Assign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Assign) | Supported | Supported | Doing | other_ops
-| [mindspore.ops.operations.BoundingBoxEncode](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BoundingBoxEncode) | Supported | Supported | Doing | other_ops
-| [mindspore.ops.operations.BoundingBoxDecode](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BoundingBoxDecode) | Supported | Supported | Doing | other_ops
-| [mindspore.ops.operations.PopulationCount](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.PopulationCount) | Supported | Doing | Doing | other_ops
-| [mindspore.ops.operations.CheckValid](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CheckValid) | Supported | Supported | Doing | other_ops
-| [mindspore.ops.operations.IOU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.IOU) | Supported | Supported | Doing | other_ops
-| [mindspore.ops.operations.MakeRefKey](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MakeRefKey) | Supported | Supported | Supported | other_ops
-| [mindspore.ops.operations.InTopK](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InTopK) | Supported | Doing | Doing | other_ops
-| [mindspore.ops.operations.StandardNormal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.StandardNormal) | Supported | Supported | Doing | random_ops
-| [mindspore.ops.operations.Gamma](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Gamma) | Supported | Doing | Doing | random_ops
-| [mindspore.ops.operations.Poisson](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Poisson) | Supported | Doing | Doing | random_ops
-| [mindspore.ops.operations.UniformInt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UniformInt) | Supported | Supported | Doing | random_ops
-| [mindspore.ops.operations.UniformReal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UniformReal) | Supported | Supported | Doing | random_ops
-| [mindspore.ops.operations.RandomChoiceWithMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RandomChoiceWithMask) | Doing| Supported | Doing | random_ops
-| [mindspore.ops.operations.RandomCategorical](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RandomCategorical) | Supported| Doing | Doing | random_ops
-| [mindspore.ops.operations.ScalarCast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScalarCast) | Supported | Supported | Supported | inner_ops
-| [mindspore.ops.operations.ReverseSequence](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReverseSequence) | Supported | Doing | Doing | array_ops
-| [mindspore.ops.operations.CropAndResize](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CropAndResize) | Supported | Doing | Doing | image_ops
-| [mindspore.ops.operations.SquaredDifference](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SquaredDifference) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Xdivy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Xdivy) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.Xlogy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Xlogy) | Supported | Doing | Doing | math_ops
-| [mindspore.ops.operations.HistogramFixedWidth](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.HistogramFixedWidth) | Supported | Doing | Doing | math_ops
+| [mindspore.ops.Flatten](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Flatten) | Supported | Supported |Supported | nn_ops
+| [mindspore.ops.Softmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Softmax) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.Acosh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Acosh) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.FloorMod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorMod) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.Elu](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Elu) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.MirrorPad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MirrorPad) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.Unpack](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Unpack) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.Pack](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pack) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.L2Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.L2Loss) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.CTCLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CTCLoss) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.RNNTLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RNNTLoss) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.LogSoftmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogSoftmax) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.Softplus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Softplus) | Supported | Doing |Doing | nn_ops
+| [mindspore.ops.ReLU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReLU) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.ReLU6](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReLU6) | Supported | Supported |Supported | nn_ops
+| [mindspore.ops.HSwish](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.HSwish) | Doing | Supported |Doing | nn_ops
+| [mindspore.ops.HSigmoid](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.HSigmoid) | Doing | Supported |Doing | nn_ops
+| [mindspore.ops.Sigmoid](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sigmoid) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.Tanh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tanh) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.BatchNorm](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchNorm) | Supported | Doing |Doing | nn_ops
+| [mindspore.ops.LRN](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LRN) | Supported | Doing |Doing | nn_ops
+| [mindspore.ops.Conv2D](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Conv2D) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.DepthwiseConv2dNative](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DepthwiseConv2dNative) | Supported | Doing |Doing | nn_ops
+| [mindspore.ops.DepthwiseConv2dNativeBackpropInput](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DepthwiseConv2dNativeBackpropInput) | Supported | Doing |Doing | nn_ops
+| [mindspore.ops.DepthwiseConv2dNativeBackpropFilter](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DepthwiseConv2dNativeBackpropFilter) | Supported | Doing |Doing | nn_ops
+| [mindspore.ops.MaxPoolWithArgmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MaxPoolWithArgmax) | Supported | Doing |Doing | nn_ops
+| [mindspore.ops.MaxPool](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MaxPool) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.AvgPool](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AvgPool) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.Conv2DBackpropInput](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Conv2DBackpropInput) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.BiasAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BiasAdd) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.TopK](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TopK) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.SparseSoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseSoftmaxCrossEntropyWithLogits) | Doing | Supported | Supported | nn_ops
+| [mindspore.ops.ApplyMomentum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyMomentum) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.ApplyAddSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAddSign) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.ApplyPowerSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyPowerSign) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.ApplyGradientDescent](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyGradientDescent) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.ApplyProximalGradientDescent](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyProximalGradientDescent) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.ApplyRMSProp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyRMSProp) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.ApplyCenteredRMSProp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyCenteredRMSProp) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.SparseApplyAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyAdagrad) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.SparseApplyAdagradV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyAdagradV2) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.SparseApplyProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyProximalAdagrad) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.FusedSparseProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseProximalAdagrad) | Doing | Doing | Supported | nn_ops
+| [mindspore.ops.ApplyProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyProximalAdagrad) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.FusedSparseLazyAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseLazyAdam) | Doing | Doing | Supported | nn_ops
+| [mindspore.ops.FusedSparseAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseAdam) | Doing | Doing | Supported | nn_ops
+| [mindspore.ops.SmoothL1Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SmoothL1Loss) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.SGD](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SGD) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.LayerNorm](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LayerNorm) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.L2Normalize](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.L2Normalize) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.DropoutGenMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DropoutGenMask) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.DropoutDoMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DropoutDoMask) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.ResizeBilinear](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ResizeBilinear) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.OneHot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.OneHot) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.Gelu](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Gelu) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.GetNext](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GetNext) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.PReLU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.PReLU) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.LSTM](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LSTM) | Doing | Supported | Supported | nn_ops
+| [mindspore.ops.BasicLSTMCell](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BasicLSTMCell) | Doing | Doing | Doing | nn_ops
+| [mindspore.ops.SigmoidCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SigmoidCrossEntropyWithLogits) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.Pad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pad) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.ROIAlign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ROIAlign) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.Adam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Adam) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.BinaryCrossEntropy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BinaryCrossEntropy) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.KLDivLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.KLDivLoss) | Doing | Supported | Doing | nn_ops
+| [mindspore.ops.LARSUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LARSUpdate) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.Softsign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Softsign) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.TensorAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorAdd) | Supported | Supported | Supported | math_ops
+| [mindspore.ops.AssignAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignAdd) | Supported | Supported | Supported | math_ops
+| [mindspore.ops.AssignSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignSub) | Supported | Doing | Doing | math_ops
+| [mindspore.ops.ReduceMean](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMean) | Supported | Supported | Supported | math_ops
+| [mindspore.ops.ReduceSum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceSum) | Supported | Supported | Supported | math_ops
+| 
[mindspore.ops.ReduceAll](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceAll) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ReduceMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMax) | Supported | Supported | Supported | math_ops +| [mindspore.ops.ReduceMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMin) | Supported | Supported | Doing | math_ops +| [mindspore.ops.ReduceProd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceProd) | Supported | Doing | Doing | math_ops +| [mindspore.ops.CumProd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CumProd) | Supported | Doing | Doing | math_ops +| [mindspore.ops.MatMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MatMul) | Supported | Supported | Supported | math_ops +| [mindspore.ops.BatchMatMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchMatMul) | Supported | Supported | Doing | math_ops +| [mindspore.ops.CumSum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CumSum) | Supported | Supported| Doing | math_ops +| [mindspore.ops.AddN](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AddN) | Supported | Supported | Supported | math_ops +| [mindspore.ops.Neg](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Neg) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Sub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sub) | Supported | Supported | Supported | math_ops +| [mindspore.ops.Mul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mul) | Supported | Supported | Supported | math_ops +| [mindspore.ops.Square](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Square) | Supported | Supported | Supported | math_ops +| [mindspore.ops.SquareSumAll](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SquareSumAll) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Rsqrt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Rsqrt) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Sqrt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sqrt) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Reciprocal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Reciprocal) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Pow](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pow) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Exp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Exp) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Log](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Log) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Log1p](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Log1p) | Supported | Doing | 
Doing | math_ops +| [mindspore.ops.Minimum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Minimum) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Maximum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Maximum) | Supported | Supported | Doing | math_ops +| [mindspore.ops.RealDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RealDiv) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Div](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Div) | Supported | Supported | Doing | math_ops +| [mindspore.ops.DivNoNan](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DivNoNan) | Supported | Doing | Doing | math_ops +| [mindspore.ops.FloorDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorDiv) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Floor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Floor) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Equal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Equal) | Supported | Supported | Doing | math_ops +| [mindspore.ops.EqualCount](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.EqualCount) | Doing | Supported | Supported | math_ops +| [mindspore.ops.NotEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NotEqual) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Greater](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Greater) | Supported | Supported | Doing | math_ops +| [mindspore.ops.GreaterEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GreaterEqual) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Less](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Less) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Atan2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Atan2) | Supported | Doing | Doing | math_ops +| [mindspore.ops.LessEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LessEqual) | Supported | Supported | Doing | math_ops +| [mindspore.ops.LogicalNot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalNot) | Supported | Supported | Doing | math_ops +| [mindspore.ops.LogicalAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalAnd) | Supported | Supported | Doing | math_ops +| [mindspore.ops.LogicalOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalOr) | Supported | Supported | Doing | math_ops +| [mindspore.ops.BitwiseAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseAnd) | Supported | Doing | Doing | math_ops +| [mindspore.ops.BitwiseOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseOr) | Supported | Doing | Doing | math_ops +| 
[mindspore.ops.Ceil](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Ceil) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Inv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Inv) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Invert](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Invert) | Supported | Doing | Doing | math_ops +| [mindspore.ops.BitwiseXor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseXor) | Supported | Doing | Doing | math_ops +| [mindspore.ops.NPUAllocFloatStatus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NPUAllocFloatStatus) | Supported | Doing | Doing | math_ops +| [mindspore.ops.NPUGetFloatStatus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NPUGetFloatStatus) | Supported | Doing | Doing | math_ops +| [mindspore.ops.NPUClearFloatStatus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NPUClearFloatStatus) | Supported | Doing | Doing | math_ops +| [mindspore.ops.FloatStatus](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloatStatus) | Doing | Supported | Doing | math_ops +| [mindspore.ops.Cos](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cos) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Cosh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cosh) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ACos](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ACos) | Supported | Doing | Doing | math_ops +| [mindspore.ops.BesselI0e](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BesselI0e) | Supported | Doing | Doing | math_ops +| [mindspore.ops.BesselI1e](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BesselI1e) | Supported | Doing | Doing | math_ops +| [mindspore.ops.TruncateDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncateDiv) | Supported | Doing | Doing | math_ops +| [mindspore.ops.TruncateMod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncateMod) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Tan](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tan) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Asin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Asin) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Asinh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Asinh) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Erf](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Erf) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Erfc](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Erfc) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Sin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sin) | Supported | Doing | Doing | math_ops 
+| [mindspore.ops.Sinh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sinh) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Expm1](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Expm1) | Supported | Doing | Doing | math_ops +| [mindspore.ops.NMSWithMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NMSWithMask) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Abs](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Abs) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Sign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sign) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Round](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Round) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ApproximateEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApproximateEqual) | Supported | Doing | Doing | math_ops +| [mindspore.ops.InplaceAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InplaceAdd) | Supported | Doing | Doing | math_ops +| [mindspore.ops.InplaceSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InplaceSub) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Mod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mod) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ExpandDims](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ExpandDims) | Supported | Supported | Supported | array_ops +| [mindspore.ops.DType](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DType) | Supported | Supported | Supported | array_ops +| [mindspore.ops.SameTypeShape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SameTypeShape) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Cast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cast) | Supported | Supported | Doing | array_ops +| [mindspore.ops.IsSubClass](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.IsSubClass) | Supported | Supported | Supported | array_ops +| [mindspore.ops.IsInstance](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.IsInstance) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Reshape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Reshape) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Shape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Shape) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Squeeze](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Squeeze) | Supported | Supported | Doing | array_ops +| [mindspore.ops.Transpose](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Transpose) | Supported | Supported | Supported | array_ops +| 
[mindspore.ops.GatherV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherV2) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Split](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Split) | Supported | Supported | Doing | array_ops +| [mindspore.ops.Rank](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Rank) | Supported | Supported | Supported | array_ops +| [mindspore.ops.TruncatedNormal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncatedNormal) | Doing | Doing | Doing | array_ops +| [mindspore.ops.Size](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Size) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Fill](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Fill) | Supported | Supported | Supported | array_ops +| [mindspore.ops.OnesLike](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.OnesLike) | Supported | Supported | Doing | array_ops +| [mindspore.ops.ZerosLike](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ZerosLike) | Supported | Supported | Doing | array_ops +| [mindspore.ops.TupleToArray](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TupleToArray) | Supported | Supported | Supported | array_ops +| [mindspore.ops.ScalarToArray](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScalarToArray) | Supported | Supported | Supported | array_ops +| [mindspore.ops.ScalarToTensor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScalarToTensor) | Supported | Supported | Supported | array_ops +| [mindspore.ops.InvertPermutation](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InvertPermutation) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Argmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Argmax) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Argmin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Argmin) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ArgMaxWithValue](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ArgMaxWithValue) | Supported | Supported | Doing | array_ops +| [mindspore.ops.ArgMinWithValue](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ArgMinWithValue) | Supported | Doing | Doing | array_ops +| [mindspore.ops.Tile](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tile) | Supported | Supported | Doing | array_ops +| [mindspore.ops.UnsortedSegmentSum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UnsortedSegmentSum) | Supported | Supported | Doing | array_ops +| [mindspore.ops.UnsortedSegmentMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UnsortedSegmentMin) | Supported | Doing | Doing | array_ops +| 
[mindspore.ops.UnsortedSegmentProd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UnsortedSegmentProd) | Supported | Doing | Doing | array_ops +| [mindspore.ops.Concat](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Concat) | Supported | Supported | Supported | array_ops +| [mindspore.ops.ParallelConcat](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ParallelConcat) | Supported | Doing | Doing | array_ops +| [mindspore.ops.Slice](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Slice) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Select](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Select) | Supported | Supported | Doing | array_ops +| [mindspore.ops.StridedSlice](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.StridedSlice) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Diag](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Diag) | Doing | Doing | Doing | array_ops +| [mindspore.ops.DiagPart](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DiagPart) | Doing | Doing | Doing | array_ops +| [mindspore.ops.Eye](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Eye) | Supported | Supported | Supported | array_ops +| [mindspore.ops.ScatterNd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNd) | Supported | Supported | Doing | array_ops +| [mindspore.ops.ResizeNearestNeighbor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ResizeNearestNeighbor) | Supported | Supported | Doing | array_ops +| [mindspore.ops.GatherNd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherNd) | Supported | Supported | Doing | array_ops +| [mindspore.ops.ApplyFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyFtrl) | Supported | Supported | Doing | array_ops +| [mindspore.ops.SparseApplyFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyFtrl) | Supported | Doing | Doing | array_ops +| [mindspore.ops.FusedSparseFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseFtrl) | Doing | Doing | Supported | array_ops +| [mindspore.ops.SparseApplyFtrlV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyFtrlV2) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterNdUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdUpdate) | Supported | Doing | Supported | array_ops +| [mindspore.ops.ScatterUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterUpdate) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMul) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterDiv) | Supported 
| Doing | Doing | array_ops +| [mindspore.ops.SpaceToDepth](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SpaceToDepth) | Supported | Doing | Doing | array_ops +| [mindspore.ops.DepthToSpace](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DepthToSpace) | Supported | Doing | Doing | array_ops +| [mindspore.ops.SpaceToBatch](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SpaceToBatch) | Supported | Doing | Doing | array_ops +| [mindspore.ops.SpaceToBatchND](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SpaceToBatchND) | Supported | Doing | Doing | array_ops +| [mindspore.ops.BatchToSpace](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchToSpace) | Supported | Doing | Doing | array_ops +| [mindspore.ops.BatchToSpaceND](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchToSpaceND) | Supported | Doing | Doing | array_ops +| [mindspore.ops.IsFinite](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.IsFinite) | Supported | Supported | Doing | array_ops +| [mindspore.ops.InplaceUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InplaceUpdate) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterSub) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMax) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMin) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterNdAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdAdd) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterNdSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdSub) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterNonAliasingAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNonAliasingAdd) | Supported | Doing | Doing | array_ops +| [mindspore.ops.Rint](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Rint) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ReverseV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReverseV2) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ReduceOp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceOp) | Supported | Supported | Doing | comm_ops +| [mindspore.ops.AllReduce](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AllReduce) | Supported | Supported | Doing | comm_ops +| [mindspore.ops.AllGather](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AllGather) | Supported | Supported | Doing | comm_ops +| [mindspore.ops.ReduceScatter](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceScatter) | 
Doing | Supported | Doing | comm_ops +| [mindspore.ops.Broadcast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Broadcast) | Supported | Doing | Doing | comm_ops +| [mindspore.ops.ControlDepend](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ControlDepend) | Supported | Supported | Supported | control_ops +| [mindspore.ops.GeSwitch](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GeSwitch) | Doing | Doing | Doing | control_ops +| [mindspore.ops.Merge](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Merge) | Doing | Doing | Doing | control_ops +| [mindspore.ops.ScalarSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScalarSummary) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.ImageSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ImageSummary) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.TensorSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorSummary) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.HistogramSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.HistogramSummary) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.InsertGradientOf](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InsertGradientOf) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.Print](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Print) | Supported | Doing | Doing | debug_ops +| [mindspore.ops.Assign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Assign) | Supported | Supported | Doing | other_ops +| [mindspore.ops.BoundingBoxEncode](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BoundingBoxEncode) | Supported | Supported | Doing | other_ops +| [mindspore.ops.BoundingBoxDecode](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BoundingBoxDecode) | Supported | Supported | Doing | other_ops +| [mindspore.ops.PopulationCount](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.PopulationCount) | Supported | Doing | Doing | other_ops +| [mindspore.ops.CheckValid](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CheckValid) | Supported | Supported | Doing | other_ops +| [mindspore.ops.IOU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.IOU) | Supported | Supported | Doing | other_ops +| [mindspore.ops.MakeRefKey](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MakeRefKey) | Supported | Supported | Supported | other_ops +| [mindspore.ops.InTopK](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InTopK) | Supported | Doing | Doing | other_ops +| [mindspore.ops.StandardNormal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.StandardNormal) | Supported | Supported | Doing | random_ops +| 
[mindspore.ops.Gamma](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Gamma) | Supported | Doing | Doing | random_ops +| [mindspore.ops.Poisson](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Poisson) | Supported | Doing | Doing | random_ops +| [mindspore.ops.UniformInt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UniformInt) | Supported | Supported | Doing | random_ops +| [mindspore.ops.UniformReal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UniformReal) | Supported | Supported | Doing | random_ops +| [mindspore.ops.RandomChoiceWithMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RandomChoiceWithMask) | Doing| Supported | Doing | random_ops +| [mindspore.ops.RandomCategorical](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RandomCategorical) | Supported| Doing | Doing | random_ops +| [mindspore.ops.ScalarCast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScalarCast) | Supported | Supported | Supported | inner_ops +| [mindspore.ops.ReverseSequence](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReverseSequence) | Supported | Doing | Doing | array_ops +| [mindspore.ops.CropAndResize](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CropAndResize) | Supported | Doing | Doing | image_ops +| [mindspore.ops.SquaredDifference](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SquaredDifference) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Xdivy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Xdivy) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Xlogy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Xlogy) | Supported | Doing | Doing | math_ops +| [mindspore.ops.HistogramFixedWidth](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.HistogramFixedWidth) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Eps](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Eps) | Supported | Supported | Doing | math_ops +| [mindspore.ops.ReLUV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReLUV2) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.BNTrainingReduce](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BNTrainingReduce) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.BNTrainingUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BNTrainingUpdate) | Supported | Doing | Doing | nn_ops ## mindspore.ops.functional | Operation | functional Operation | :----------- | :----------- -| [mindspore.ops.operations.Pack](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pack) | pack -| [mindspore.ops.operations.TensorAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorAdd) | tensor_add -| 
[mindspore.ops.operations.AssignSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignSub) | assign_sub -| [mindspore.ops.operations.AddN](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AddN) | addn -| [mindspore.ops.operations.Square](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Square) | square -| [mindspore.ops.operations.Sqrt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sqrt) | sqrt -| [mindspore.ops.operations.Equal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Equal) | equal -| [mindspore.ops.operations.NotEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NotEqual) | not_equal -| [mindspore.ops.operations.LogicalNot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalNot) | logical_not -| [mindspore.ops.operations.LogicalAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalAnd) | logical_and -| [mindspore.ops.operations.LogicalOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalOr) | logical_or -| [mindspore.ops.operations.ExpandDims](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ExpandDims) | expand_dims -| [mindspore.ops.operations.DType](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DType) | dtype -| [mindspore.ops.operations.Cast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cast) | cast -| [mindspore.ops.operations.Reshape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Reshape) | reshape -| [mindspore.ops.operations.Shape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Shape) | shape -| [mindspore.ops.operations.GatherV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherV2) | gather -| [mindspore.ops.operations.Rank](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Rank) | rank -| [mindspore.ops.operations.Size](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Size) | size -| [mindspore.ops.operations.Fill](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Fill) | fill -| [mindspore.ops.operations.OnesLike](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.OnesLike) | ones_like -| [mindspore.ops.operations.Tile](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tile) | tile -| 
[mindspore.ops.operations.Select](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Select) | select -| [mindspore.ops.operations.ScatterNd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNd) | scatter_nd -| [mindspore.ops.operations.GatherNd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherNd) | gather_nd -| [mindspore.ops.operations.ControlDepend](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ControlDepend) | control_depend -| [mindspore.ops.operations.Print](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Print) | print -| [mindspore.ops.operations.Assign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Assign) | assign -| [mindspore.ops.operations.Pow](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pow) | tensor_pow +| [mindspore.ops.Pack](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pack) | pack +| [mindspore.ops.TensorAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorAdd) | tensor_add +| [mindspore.ops.AssignSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignSub) | assign_sub +| [mindspore.ops.AddN](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AddN) | addn +| [mindspore.ops.Square](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Square) | square +| [mindspore.ops.Sqrt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sqrt) | sqrt +| [mindspore.ops.Equal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Equal) | equal +| [mindspore.ops.NotEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NotEqual) | not_equal +| [mindspore.ops.LogicalNot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalNot) | logical_not +| [mindspore.ops.LogicalAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalAnd) | logical_and +| [mindspore.ops.LogicalOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalOr) | logical_or +| [mindspore.ops.ExpandDims](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ExpandDims) | expand_dims +| [mindspore.ops.DType](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DType) | dtype +| [mindspore.ops.Cast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cast) | cast +| [mindspore.ops.Reshape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Reshape) | reshape +| [mindspore.ops.Shape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Shape) | shape +| 
[mindspore.ops.GatherV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherV2) | gather +| [mindspore.ops.Rank](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Rank) | rank +| [mindspore.ops.Size](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Size) | size +| [mindspore.ops.Fill](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Fill) | fill +| [mindspore.ops.OnesLike](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.OnesLike) | ones_like +| [mindspore.ops.Tile](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tile) | tile +| [mindspore.ops.Select](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Select) | select +| [mindspore.ops.ScatterNd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNd) | scatter_nd +| [mindspore.ops.GatherNd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherNd) | gather_nd +| [mindspore.ops.ControlDepend](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ControlDepend) | control_depend +| [mindspore.ops.Print](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Print) | print +| [mindspore.ops.Assign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Assign) | assign +| [mindspore.ops.Pow](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pow) | tensor_pow > At present, functional covers only some operators without attributes; support will be extended in the future. The sketch below contrasts the two calling styles. @@ -385,62 +400,61 @@ | op name | constraints | :----------- | :----------- -| [mindspore.ops.operations.ACos](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ACos) | None -| [mindspore.ops.operations.Cos](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cos) | None -| [mindspore.ops.operations.LogicalNot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalNot) | None -| [mindspore.ops.operations.Log](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Log) | None -| [mindspore.ops.operations.Exp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Exp) | None -| [mindspore.ops.operations.LogSoftmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogSoftmax) | The logits can't be split into the dimension of axis, otherwise it's inconsistent with the single machine in the mathematical logic. -| [mindspore.ops.operations.Softmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Softmax) | The logits can't be split into the dimension of axis, otherwise it's inconsistent with the single machine in the mathematical logic.
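The functional mapping above is purely a difference in calling convention: a primitive is an operator class that is instantiated and then called, while its functional alias is a ready-made callable. A minimal sketch, assuming a MindSpore build in which both `mindspore.ops.TensorAdd` and `mindspore.ops.functional.tensor_add` (both listed in the table above) are importable:

```python
import numpy as np
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.ops import functional as F

x = Tensor(np.array([1.0, 2.0], np.float32))
y = Tensor(np.array([3.0, 4.0], np.float32))

# Primitive style: instantiate the operator class, then call the instance.
add = ops.TensorAdd()
print(add(x, y))           # [4. 6.]

# Functional style: a module-level callable, no instantiation step.
print(F.tensor_add(x, y))  # [4. 6.]
```

Because a functional alias is a plain callable, it cannot carry operator attributes, which is why the note above limits the mapping to attribute-free operators.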
-| [mindspore.ops.operations.Tanh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tanh) | None -| [mindspore.ops.operations.Gelu](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Gelu) | None -| [mindspore.ops.operations.ReLU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReLU) | None -| [mindspore.ops.operations.Sqrt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sqrt) | None -| [mindspore.ops.operations.Cast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cast) | None -| [mindspore.ops.operations.Neg](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Neg) | None -| [mindspore.ops.operations.ExpandDims](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ExpandDims) | None -| [mindspore.ops.operations.Squeeze](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Squeeze) | None -| [mindspore.ops.operations.Square](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Square) | None -| [mindspore.ops.operations.Sigmoid](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sigmoid) | None -| [mindspore.ops.operations.Dropout](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Dropout) | Repeated calculation is not supported. 
-| [mindspore.ops.operations.Div](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Div) | None -| [mindspore.ops.operations.TensorAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorAdd) | None -| [mindspore.ops.operations.RealDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RealDiv) | None -| [mindspore.ops.operations.Mul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mul) | None -| [mindspore.ops.operations.Sub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sub) | None -| [mindspore.ops.operations.Pow](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pow) | None -| [mindspore.ops.operations.FloorDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorDiv) | None -| [mindspore.ops.operations.Greater](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Greater) | None -| [mindspore.ops.operations.AssignSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignSub) | None -| [mindspore.ops.operations.SigmoidCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SigmoidCrossEntropyWithLogits) | None -| [mindspore.ops.operations.Equal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Equal) | None -| [mindspore.ops.operations.NotEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NotEqual) | None -| [mindspore.ops.operations.Maximum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Maximum) | None -| [mindspore.ops.operations.Minimum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Minimum) | None -| [mindspore.ops.operations.BiasAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BiasAdd) | None -| [mindspore.ops.operations.Concat](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Concat) | The input_x can't be split into the dimension of axis, otherwise it's inconsistent with the single machine in the mathematical logic. -| [mindspore.ops.operations.DropoutGenMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DropoutGenMask) | Need to be used in conjunction with `DropoutDoMask`. -| [mindspore.ops.operations.DropoutDoMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DropoutDoMask) | Need to be used in conjunction with `DropoutGenMask`; configuring shard strategy is not supported.
-| [mindspore.ops.operations.GatherV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherV2) | Only support 1-dim and 2-dim parameters and the last dimension of the input_params should be 32-byte aligned; Scalar input_indices is not supported; Repeated calculation is not supported when the parameters are split in the dimension of the axis; Split input_indices and input_params at the same time is not supported. -| [mindspore.ops.operations.SparseGatherV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseGatherV2) | The same as GatherV2. -| [mindspore.ops.operations.EmbeddingLookup](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.EmbeddingLookup) | The same as GatherV2. -| [mindspore.ops.operations.L2Normalize](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.L2Normalize) | The input_x can't be split into the dimension of axis, otherwise it's inconsistent with the single machine in the mathematical logic. -| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | The last dimension of logits and labels can't be split; Only supports using output[0]. -| [mindspore.ops.operations.MatMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MatMul) | `transpose_a=True` is not supported. -| [mindspore.ops.operations.BatchMatMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchMatMul) | `transpose_a=True` is not supported. -| [mindspore.ops.operations.PReLU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.PReLU) | The shard strategy in channel dimension of input_x should be consistent with weight. -| [mindspore.ops.operations.OneHot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.OneHot) | Only support 1-dim indices. -| [mindspore.ops.operations.ReduceSum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceSum) | None -| [mindspore.ops.operations.ReduceMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMax) | None -| [mindspore.ops.operations.ReduceMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMin) | None -| [mindspore.ops.operations.ArgMinWithValue](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ArgMinWithValue) | The output index can't be used as the input of other operators. -| [mindspore.ops.operations.ArgMaxWithValue](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ArgMaxWithValue) | The output index can't be used as the input of other operators.
-| [mindspore.ops.operations.ReduceMean](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMean) | None
-| [mindspore.ops.operations.Reshape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Reshape) | Configuring shard strategy is not supported.
-| [mindspore.ops.operations.StridedSlice](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.StridedSlice) | Only support mask with all 0 values; The dimension needs to be split should be all extracted; Split is not supported when the strides of dimension is 1.
-| [mindspore.ops.operations.Tile](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tile) | Only support configuring shard strategy for multiples.
-| [mindspore.ops.operations.Transpose](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Transpose) | None
-| [mindspore.ops.operations.Diag](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Diag) | Configuring shard strategy is not supported.
+| [mindspore.ops.ACos](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ACos) | None
+| [mindspore.ops.Cos](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cos) | None
+| [mindspore.ops.LogicalNot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalNot) | None
+| [mindspore.ops.Log](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Log) | None
+| [mindspore.ops.Exp](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Exp) | None
+| [mindspore.ops.LogSoftmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogSoftmax) | The logits can't be split along the axis dimension; otherwise, the result is mathematically inconsistent with the single-machine result.
+| [mindspore.ops.Softmax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Softmax) | The logits can't be split along the axis dimension; otherwise, the result is mathematically inconsistent with the single-machine result.
+| [mindspore.ops.Tanh](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tanh) | None +| [mindspore.ops.Gelu](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Gelu) | None +| [mindspore.ops.ReLU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReLU) | None +| [mindspore.ops.Sqrt](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sqrt) | None +| [mindspore.ops.Cast](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cast) | None +| [mindspore.ops.Neg](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Neg) | None +| [mindspore.ops.ExpandDims](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ExpandDims) | None +| [mindspore.ops.Squeeze](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Squeeze) | None +| [mindspore.ops.Square](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Square) | None +| [mindspore.ops.Sigmoid](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sigmoid) | None +| [mindspore.ops.Dropout](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Dropout) | Repeated calculation is not supported. +| [mindspore.ops.Div](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Div) | None +| [mindspore.ops.TensorAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorAdd) | None +| [mindspore.ops.RealDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RealDiv) | None +| [mindspore.ops.Mul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mul) | None +| [mindspore.ops.Sub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sub) | None +| [mindspore.ops.Pow](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pow) | None +| [mindspore.ops.FloorDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorDiv) | None +| [mindspore.ops.Greater](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Greater) | None +| [mindspore.ops.AssignSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignSub) | None +| [mindspore.ops.SigmoidCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SigmoidCrossEntropyWithLogits) | None +| [mindspore.ops.Equal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Equal) | None +| [mindspore.ops.NotEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NotEqual) | None +| [mindspore.ops.Maximum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Maximum) | None +| [mindspore.ops.Minimum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Minimum) | None +| [mindspore.ops.BiasAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BiasAdd) | 
None
+| [mindspore.ops.Concat](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Concat) | The input_x can't be split along the axis dimension; otherwise, the result is mathematically inconsistent with the single-machine result.
+| [mindspore.ops.DropoutGenMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DropoutGenMask) | Needs to be used in conjunction with `DropoutDoMask`.
+| [mindspore.ops.DropoutDoMask](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DropoutDoMask) | Needs to be used in conjunction with `DropoutGenMask`; configuring a shard strategy is not supported.
+| [mindspore.ops.GatherV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherV2) | Only 1-dim and 2-dim parameters are supported, and the last dimension of input_params should be 32-byte aligned; scalar input_indices is not supported; repeated calculation is not supported when the parameters are split along the axis dimension; splitting input_indices and input_params at the same time is not supported.
+| [mindspore.ops.SparseGatherV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseGatherV2) | The same as GatherV2.
+| [mindspore.ops.EmbeddingLookup](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.EmbeddingLookup) | The same as GatherV2.
+| [mindspore.ops.L2Normalize](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.L2Normalize) | The input_x can't be split along the axis dimension; otherwise, the result is mathematically inconsistent with the single-machine result.
+| [mindspore.ops.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SoftmaxCrossEntropyWithLogits) | The last dimension of logits and labels can't be split; only using output[0] is supported.
+| [mindspore.ops.MatMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MatMul) | `transpose_a=True` is not supported.
+| [mindspore.ops.BatchMatMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchMatMul) | `transpose_a=True` is not supported.
+| [mindspore.ops.PReLU](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.PReLU) | When the shape of weight is not [1], the shard strategy in the channel dimension of input_x should be consistent with that of weight.
+| [mindspore.ops.OneHot](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.OneHot) | Only 1-dim indices are supported. The strategy for the output and for the first and second inputs must be configured.
+| [mindspore.ops.ReduceSum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceSum) | None
+| [mindspore.ops.ReduceMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMax) | When input_x is split along the axis dimension, the distributed result may be inconsistent with that on a single machine.
+| [mindspore.ops.ReduceMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMin) | When input_x is split along the axis dimension, the distributed result may be inconsistent with that on a single machine.
+| [mindspore.ops.ArgMinWithValue](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ArgMinWithValue) | The output index can't be used as the input of other operators. When input_x is split along the axis dimension, the distributed result may be inconsistent with that on a single machine.
+| [mindspore.ops.ArgMaxWithValue](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ArgMaxWithValue) | The output index can't be used as the input of other operators. When input_x is split along the axis dimension, the distributed result may be inconsistent with that on a single machine.
+| [mindspore.ops.ReduceMean](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMean) | None
+| [mindspore.ops.Reshape](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Reshape) | Configuring a shard strategy is not supported.
+| [mindspore.ops.StridedSlice](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.StridedSlice) | Only masks with all 0 values are supported; the dimensions to be split must be fully extracted; split is not supported when the stride of a dimension is 1.
+| [mindspore.ops.Tile](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tile) | Only configuring the shard strategy for multiples is supported.
+| [mindspore.ops.Transpose](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Transpose) | None

> Repeated calculation means that the devices are not fully used. For example, if a cluster has 8 devices available for distributed training but the shard strategy splits the input into only 4 slices, each slice is computed on two devices, and repeated calculation occurs.
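To make the note concrete, the following sketch (a hypothetical single-operator cell, not taken from the patch above) shows a strategy that under-splits on an 8-device cluster. `set_strategy` is assumed to be the strategy-attachment call in the MindSpore line this patch targets; later releases rename it to `shard`.

```python
import mindspore.nn as nn
from mindspore.ops import operations as P

class MatMulNet(nn.Cell):
    """Hypothetical cell whose shard strategy under-uses the cluster."""
    def __init__(self):
        super(MatMulNet, self).__init__()
        # input_x split 2x2 and weight split 2x1 yield 2*2*1 = 4 model slices;
        # on 8 devices each slice is then computed twice, which is exactly
        # the "repeated calculation" described in the note above.
        self.matmul = P.MatMul().set_strategy(((2, 2), (2, 1)))

    def construct(self, x, w):
        return self.matmul(x, w)
```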
> @@ -470,66 +484,66 @@ when the Tensor of int8 and uint8 data types are operated, they are converted to | op name | :----------- -| [mindspore.ops.operations.Assign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Assign) -| [mindspore.ops.operations.AssignSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignSub) -| [mindspore.ops.operations.ApplyMomentum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyMomentum) -| [mindspore.ops.operations.FusedSparseAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseAdam) -| [mindspore.ops.operations.FusedSparseLazyAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseLazyAdam) -| [mindspore.ops.operations.FusedSparseFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseFtrl) -| [mindspore.ops.operations.FusedSparseProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseProximalAdagrad) -| [mindspore.ops.operations.ApplyAdaMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAdaMax) -| [mindspore.ops.operations.ApplyAdadelta](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAdadelta) -| [mindspore.ops.operations.ApplyAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAdagrad) -| [mindspore.ops.operations.ApplyAdagradV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAdagradV2) -| [mindspore.ops.operations.SparseApplyAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyAdagrad) -| [mindspore.ops.operations.SparseApplyAdagradV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyAdagradV2) -| [mindspore.ops.operations.ApplyProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyProximalAdagrad) -| [mindspore.ops.operations.SparseApplyProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyProximalAdagrad) -| [mindspore.ops.operations.ApplyAddSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAddSign) -| [mindspore.ops.operations.ApplyPowerSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyPowerSign) -| [mindspore.ops.operations.ApplyGradientDescent](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyGradientDescent) -| 
[mindspore.ops.operations.ApplyProximalGradientDescent](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyProximalGradientDescent) -| [mindspore.ops.operations.SparseApplyFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyFtrl) -| [mindspore.ops.operations.SparseApplyFtrlV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyFtrlV2) -| [mindspore.ops.operations.BitwiseAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseAnd) -| [mindspore.ops.operations.BitwiseOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseOr) -| [mindspore.ops.operations.BitwiseXor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseXor) -| [mindspore.ops.operations.TensorAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorAdd) -| [mindspore.ops.operations.Sub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sub) -| [mindspore.ops.operations.Mul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mul) -| [mindspore.ops.operations.Pow](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pow) -| [mindspore.ops.operations.Minimum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Minimum) -| [mindspore.ops.operations.Maximum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Maximum) -| [mindspore.ops.operations.RealDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RealDiv) -| [mindspore.ops.operations.Div](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Div) -| [mindspore.ops.operations.DivNoNan](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DivNoNan) -| [mindspore.ops.operations.FloorDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorDiv) -| [mindspore.ops.operations.TruncateDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncateDiv) -| [mindspore.ops.operations.TruncateMod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncateMod) -| [mindspore.ops.operations.Mod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mod) -| [mindspore.ops.operations.FloorMod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorMod) -| [mindspore.ops.operations.Atan2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Atan2) -| 
[mindspore.ops.operations.SquaredDifference](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SquaredDifference) -| [mindspore.ops.operations.Xdivy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Xdivy) -| [mindspore.ops.operations.Xlogy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Xlogy) -| [mindspore.ops.operations.Equal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Equal) -| [mindspore.ops.operations.ApproximateEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApproximateEqual) -| [mindspore.ops.operations.NotEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NotEqual) -| [mindspore.ops.operations.Greater](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Greater) -| [mindspore.ops.operations.GreaterEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GreaterEqual) -| [mindspore.ops.operations.Less](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Less) -| [mindspore.ops.operations.LessEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LessEqual) -| [mindspore.ops.operations.LogicalAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalAnd) -| [mindspore.ops.operations.LogicalOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalOr) -| [mindspore.ops.operations.ScatterNdUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdUpdate) -| [mindspore.ops.operations.ScatterNdAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdAdd) -| [mindspore.ops.operations.ScatterNdSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdSub) -| [mindspore.ops.operations.ScatterNonAliasingAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNonAliasingAdd) -| [mindspore.ops.operations.ScatterUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterUpdate) -| [mindspore.ops.operations.ScatterMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMax) -| [mindspore.ops.operations.ScatterMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMin) -| [mindspore.ops.operations.ScatterAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterAdd) -| [mindspore.ops.operations.ScatterSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterSub) -| 
[mindspore.ops.operations.ScatterMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMul) -| [mindspore.ops.operations.ScatterDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterDiv) +| [mindspore.ops.Assign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Assign) +| [mindspore.ops.AssignSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignSub) +| [mindspore.ops.ApplyMomentum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyMomentum) +| [mindspore.ops.FusedSparseAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseAdam) +| [mindspore.ops.FusedSparseLazyAdam](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseLazyAdam) +| [mindspore.ops.FusedSparseFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseFtrl) +| [mindspore.ops.FusedSparseProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseProximalAdagrad) +| [mindspore.ops.ApplyAdaMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAdaMax) +| [mindspore.ops.ApplyAdadelta](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAdadelta) +| [mindspore.ops.ApplyAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAdagrad) +| [mindspore.ops.ApplyAdagradV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAdagradV2) +| [mindspore.ops.SparseApplyAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyAdagrad) +| [mindspore.ops.SparseApplyAdagradV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyAdagradV2) +| [mindspore.ops.ApplyProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyProximalAdagrad) +| [mindspore.ops.SparseApplyProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyProximalAdagrad) +| [mindspore.ops.ApplyAddSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAddSign) +| [mindspore.ops.ApplyPowerSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyPowerSign) +| [mindspore.ops.ApplyGradientDescent](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyGradientDescent) +| [mindspore.ops.ApplyProximalGradientDescent](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyProximalGradientDescent) +| [mindspore.ops.SparseApplyFtrl](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyFtrl) +| [mindspore.ops.SparseApplyFtrlV2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyFtrlV2) +| 
[mindspore.ops.BitwiseAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseAnd) +| [mindspore.ops.BitwiseOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseOr) +| [mindspore.ops.BitwiseXor](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseXor) +| [mindspore.ops.TensorAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorAdd) +| [mindspore.ops.Sub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sub) +| [mindspore.ops.Mul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mul) +| [mindspore.ops.Pow](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pow) +| [mindspore.ops.Minimum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Minimum) +| [mindspore.ops.Maximum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Maximum) +| [mindspore.ops.RealDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RealDiv) +| [mindspore.ops.Div](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Div) +| [mindspore.ops.DivNoNan](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DivNoNan) +| [mindspore.ops.FloorDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorDiv) +| [mindspore.ops.TruncateDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncateDiv) +| [mindspore.ops.TruncateMod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncateMod) +| [mindspore.ops.Mod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mod) +| [mindspore.ops.FloorMod](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorMod) +| [mindspore.ops.Atan2](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Atan2) +| [mindspore.ops.SquaredDifference](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SquaredDifference) +| [mindspore.ops.Xdivy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Xdivy) +| [mindspore.ops.Xlogy](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Xlogy) +| [mindspore.ops.Equal](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Equal) +| [mindspore.ops.ApproximateEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApproximateEqual) +| [mindspore.ops.NotEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NotEqual) +| [mindspore.ops.Greater](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Greater) +| [mindspore.ops.GreaterEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GreaterEqual) +| [mindspore.ops.Less](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Less) +| 
[mindspore.ops.LessEqual](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LessEqual)
+| [mindspore.ops.LogicalAnd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalAnd)
+| [mindspore.ops.LogicalOr](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalOr)
+| [mindspore.ops.ScatterNdUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdUpdate)
+| [mindspore.ops.ScatterNdAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdAdd)
+| [mindspore.ops.ScatterNdSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdSub)
+| [mindspore.ops.ScatterNonAliasingAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNonAliasingAdd)
+| [mindspore.ops.ScatterUpdate](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterUpdate)
+| [mindspore.ops.ScatterMax](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMax)
+| [mindspore.ops.ScatterMin](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMin)
+| [mindspore.ops.ScatterAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterAdd)
+| [mindspore.ops.ScatterSub](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterSub)
+| [mindspore.ops.ScatterMul](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMul)
+| [mindspore.ops.ScatterDiv](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterDiv)
diff --git a/docs/source_zh_cn/_static/logo_source.png b/docs/source_zh_cn/_static/logo_source.png
index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644
Binary files a/docs/source_zh_cn/_static/logo_source.png and b/docs/source_zh_cn/_static/logo_source.png differ
diff --git a/docs/source_zh_cn/constraints_on_network_construction.md b/docs/source_zh_cn/constraints_on_network_construction.md
index 8b352b2625d65ebdb811e75a84caddc9a71f0b78..75deef1f664fdbe0a8befc405955d66d04935113 100644
--- a/docs/source_zh_cn/constraints_on_network_construction.md
+++ b/docs/source_zh_cn/constraints_on_network_construction.md
@@ -225,40 +225,67 @@ tuple also supports slicing, but the slice type can't be Tensor; supports
 | Member functions of a `Cell` instance | Other member functions of the class can be called in Cell's construct.
 | Functions | Custom Python functions and the system functions listed earlier.
 | dataclass instances | Classes decorated with @dataclass.
-| Primitive operators |[mindspore/ops/operations/*](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html)
-| Composite operators |[mindspore/ops/composite/*](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.composite.html)
+| Primitive operators |[mindspore/ops/operations/*](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html)
+| Composite operators |[mindspore/ops/composite/*](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html)
 | constexpr-generated operators | Value-computing operators generated with [@constexpr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.constexpr).

### Other Constraints

-The parameters input to the whole network's construct function and the parameters of functions decorated with the ms_function decorator are generalized during graph compilation and cannot be passed to operators as constant inputs. Therefore, in graph mode, the parameters of the entry network are restricted to Tensor, as in the following example:
-* 
An incorrect example:
-  ```python
-  class ExpandDimsTest(Cell):
+1. The parameters input to the whole network's `construct` function and the parameters of functions decorated with the `ms_function` decorator are generalized during graph compilation and cannot be passed to operators as constant inputs. Therefore, in graph mode, the parameters of the entry network are restricted to `Tensor`, as in the following example:
+
+    * An incorrect example:
+      ```python
+      class ExpandDimsTest(Cell):
+          def __init__(self):
+              super(ExpandDimsTest, self).__init__()
+              self.expandDims = P.ExpandDims()
+
+          def construct(self, input_x, input_axis):
+              return self.expandDims(input_x, input_axis)
+      expand_dim = ExpandDimsTest()
+      input_x = Tensor(np.random.randn(2,2,2,2).astype(np.float32))
+      expand_dim(input_x, 0)
+      ```
+      In this example, `ExpandDimsTest` is a single-operator network with two inputs, `input_x` and `input_axis`. The second input of the `ExpandDims` operator must be a constant, because it is needed to infer the output dimension of `ExpandDims` during graph compilation, while `input_axis`, passed in as a network parameter, is generalized into a variable whose value cannot be determined, so the output dimension cannot be inferred and graph compilation fails. Therefore, any input that value inference depends on at the graph-compilation stage should be a constant input. In the API, the parameters of such operators that require a constant input are documented with the note "constant input is needed".
+
+    * The correct way is to fill the operator's constant input, inside the construct function, with the required value directly or with a class member variable, as follows:
+      ```python
+      class ExpandDimsTest(Cell):
+          def __init__(self, axis):
+              super(ExpandDimsTest, self).__init__()
+              self.expandDims = P.ExpandDims()
+              self.axis = axis
+
+          def construct(self, input_x):
+              return self.expandDims(input_x, self.axis)
+      axis = 0
+      expand_dim = ExpandDimsTest(axis)
+      input_x = Tensor(np.random.randn(2,2,2,2).astype(np.float32))
+      expand_dim(input_x)
+      ```
+
+2. Modifying non-`Parameter` data members of the network is not allowed. For example:
+
+    ```python
+    class Net(Cell):
        def __init__(self):
-            super(ExpandDimsTest, self).__init__()
-            self.expandDims = P.ExpandDims()
-
-        def construct(self, input_x, input_axis):
-            return self.expandDims(input_x, input_axis)
-    expand_dim = ExpandDimsTest()
-    input_x = Tensor(np.random.randn(2,2,2,2).astype(np.float32))
-    expand_dim(input_x, 0)
+            super(Net, self).__init__()
+            self.num = 2
+            self.par = Parameter(Tensor(np.ones((2, 3, 4))), name="par")
+
+        def construct(self, x, y):
+            return x + y
    ```
-  In this example, `ExpandDimsTest` is a single-operator network with two inputs, `input_x` and `input_axis`. The second input of the `ExpandDims` operator must be a constant, because it is needed to infer the output dimension of `ExpandDims` during graph compilation, while `input_axis`, passed in as a network parameter, is generalized into a variable whose value cannot be determined, so the output dimension cannot be inferred and graph compilation fails. Therefore, any input that value inference depends on at the graph-compilation stage should be a constant input. In the API, the parameters of such operators that require a constant input are documented with the note "constant input is needed".
+    In the network defined above, `self.num` is not a `Parameter` and must not be modified, while `self.par` is a `Parameter` and can be modified.

-* The correct way is to fill the operator's constant input, inside the construct function, with the required value directly or with a class member variable, as follows:
-  ```python
-  class ExpandDimsTest(Cell):
-      def __init__(self, axis):
-          super(ExpandDimsTest, self).__init__()
-          self.expandDims = P.ExpandDims()
-          self.axis = axis
-
-      def construct(self, input_x):
-          return self.expandDims(input_x, self.axis)
-  axis = 0
-  expand_dim = ExpandDimsTest(axis)
-  input_x = Tensor(np.random.randn(2,2,2,2).astype(np.float32))
-  expand_dim(input_x)
-  ```
+3. When an undefined class member is used in the `construct` function, it is handled as `None` rather than raising `AttributeError` as the Python interpreter would. For example:
+    ```python
+    class Net(Cell):
+        def __init__(self):
+            super(Net, self).__init__()
+
+        def construct(self, x):
+            return x + self.y
+    ```
+    In the network defined above, `construct` uses the undefined class member `self.y`, which is then handled as `None`.
+
diff --git a/docs/source_zh_cn/design/mindarmour/fuzzer_design.md b/docs/source_zh_cn/design/mindarmour/fuzzer_design.md
index 81a730c30a9ff3804fea82b355544b9894bfa8c1..129496aaa2b874ca4acab1ca69663b5312248500 100644
--- a/docs/source_zh_cn/design/mindarmour/fuzzer_design.md
+++ b/docs/source_zh_cn/design/mindarmour/fuzzer_design.md
@@ -6,8 +6,8 @@
 - [AI Model Security Testing](#ai模型安全测试)
 - [Background](#背景)
-  - [Fuzzer Design Diagram](#Fuzzer设计图)
-  - [Fuzzer Process](#Fuzzer流程)
+  - [Fuzz Testing Design Diagram](#fuzz-testing设计图)
+  - [Fuzz Testing Process](#Fuzz-testing流程)
 - [Code Implementation](#代码实现)
 - [References](#参考文献)
@@ -17,9 +17,9 @@
## Background
-Different from [fuzz security testing of traditional programs](https://zhuanlan.zhihu.com/p/43432370), MindArmour provides the AI model security testing module Fuzzer for deep neural networks. Following the characteristics of neural networks, it introduces the concept of neuron coverage [1] to guide the fuzzing, steering it to generate samples in the direction of increasing neuron coverage, so that inputs activate more neurons with a wider distribution of neuron values, in order to test the DNN thoroughly and explore different kinds of model outputs and erroneous model behaviors.
+Different from [fuzz security testing of traditional programs](https://zhuanlan.zhihu.com/p/43432370), MindArmour provides the AI model security testing module fuzz_testing for deep neural networks. Following the characteristics of neural networks, it introduces the concept of neuron coverage [1] to guide the fuzzing, steering it to generate samples in the direction of increasing neuron coverage, so that inputs activate more neurons with a wider distribution of neuron values, in order to test the DNN thoroughly and explore different kinds of model outputs and erroneous model behaviors.
-## Fuzzer Design Diagram
+## Fuzz Testing Design Diagram
@@ -27,7 +27,7 @@ The AI model security testing design diagram is as follows.
 At the user-interface layer, the user needs to provide the original dataset `DataSet`, the model under test `Model`, and the fuzzing configuration `Fuzzer configuration`. After fuzz testing the model and data, the Fuzzer module returns a security report `Security Report`.
-The Fuzzer architecture mainly consists of three modules:
+The Fuzz Testing architecture mainly consists of three modules:
 1. Natural Threat/Adversarial Example Generator (data mutation module):
@@ -43,17 +43,17 @@ The Fuzzer architecture mainly consists of three modules:
 3. Evaluation (evaluation module):
-   Evaluates the effect of the Fuzzer, the quality of the generated data, and the strength of the mutation methods. Five metrics in three categories are supported, including the general metric accuracy, the neuron-coverage metrics kmnc, nbc and snac, and the adversarial-attack metric attack_success_rate.
+   Evaluates the effect of fuzz testing, the quality of the generated data, and the strength of the mutation methods. Five metrics in three categories are supported, including the general metric accuracy, the neuron-coverage metrics kmnc, nbc and snac, and the adversarial-attack metric attack_success_rate.
-## Fuzzer Process
+## Fuzz Testing Process
 ![fuzz_process](./images/fuzz_process.png)
-The specific Fuzzer process is as follows:
+The specific Fuzz Testing process is as follows:
 1. Select a seed A from the seed queue according to the strategy.
 2. Randomly select mutation strategies and mutate seed A to generate multiple variants A1, A2, ...
-3. Predict the variants A1, A2, ... with the target model; if a variant makes the target model predict incorrectly, the variant enters Failed tests.
+3. Predict the variants A1, A2, ... with the target model; if a variant's semantics are consistent with the seed, it enters Fuzzed Tests.
 4. If the target model's prediction for a variant is correct, analyze it with the neuron-coverage metrics.
 5. If a variant increases the coverage, put it into the seed queue for the next round of mutation.
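As a companion to constraints 1 and 2, the sketch below contrasts a read-only plain member with a `Parameter` that may be rewritten through the `Assign` operator listed earlier in this patch. `UpdateNet` is hypothetical, and the in-construct `Assign` usage is an assumption about the supported side-effect pattern, not code from the patched document:

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Parameter, Tensor
from mindspore.ops import operations as P

class UpdateNet(nn.Cell):
    """Hypothetical cell: only the Parameter member is updated in construct."""
    def __init__(self):
        super(UpdateNet, self).__init__()
        self.num = 2  # plain Python member: must stay read-only in construct
        self.par = Parameter(Tensor(np.ones((2, 3, 4)).astype(np.float32)), name="par")
        self.assign = P.Assign()  # Assign appears in the operator list above

    def construct(self, x):
        # Allowed: a Parameter may be rewritten through an operator.
        self.assign(self.par, x)
        # Not allowed (constraint 2): `self.num = 3` would fail compilation.
        return self.par
```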
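The five steps form a conventional coverage-guided loop. The sketch below is purely schematic: `predict` and `coverage_gain` are caller-supplied placeholders standing in for the target model and the neuron-coverage metrics, not MindArmour APIs, and the Gaussian jitter is a toy mutation:

```python
import random

def fuzz_loop(predict, coverage_gain, seeds, rounds=10):
    """Schematic of steps 1-5; samples here are plain numbers for brevity."""
    seed_queue = list(seeds)                    # (sample, label) pairs
    fuzzed_tests, failed_tests = [], []
    for _ in range(rounds):
        sample, label = random.choice(seed_queue)                     # step 1
        variants = [sample + random.gauss(0, 0.1) for _ in range(4)]  # step 2
        for v in variants:
            fuzzed_tests.append((v, label))     # step 3: semantics assumed kept
            if predict(v) != label:
                failed_tests.append((v, label))
            elif coverage_gain(v):              # step 4: coverage analysis
                seed_queue.append((v, label))   # step 5: re-queue the variant
    return fuzzed_tests, failed_tests
```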
diff --git a/docs/source_zh_cn/design/mindinsight/images/graph_visual_main.png b/docs/source_zh_cn/design/mindinsight/images/graph_visual_main.png
index 55ca7d7183c818a15b69a3a6ee2c4ef29655460c..0bc13636b5c84952978469c652c38500e6d34f43 100644
Binary files a/docs/source_zh_cn/design/mindinsight/images/graph_visual_main.png and b/docs/source_zh_cn/design/mindinsight/images/graph_visual_main.png differ
diff --git a/docs/source_zh_cn/design/mindinsight/images/graph_visual_right_side.png b/docs/source_zh_cn/design/mindinsight/images/graph_visual_right_side.png
index ea9515857e23d9a55ad56a88a4a21d232734ffb5..1cfab2911877ed6a51097f0e7bac880479143e26 100644
Binary files a/docs/source_zh_cn/design/mindinsight/images/graph_visual_right_side.png and b/docs/source_zh_cn/design/mindinsight/images/graph_visual_right_side.png differ
diff --git a/docs/source_zh_cn/design/mindinsight/images/tensor_table.png b/docs/source_zh_cn/design/mindinsight/images/tensor_table.png
index 725bd9f8481826d682b593c2224a766854e9b4f8..d04dffae59fd6f9e49aede94bae93f8b8621fcb0 100644
Binary files a/docs/source_zh_cn/design/mindinsight/images/tensor_table.png and b/docs/source_zh_cn/design/mindinsight/images/tensor_table.png differ
diff --git a/docs/source_zh_cn/design/mindinsight/tensor_visual_design.md b/docs/source_zh_cn/design/mindinsight/tensor_visual_design.md
index e3af486df552ed81c7f4e4b6fab8bf680c0b2687..e70db7a709cad527a0f67fed78f2b8d75d075250 100644
--- a/docs/source_zh_cn/design/mindinsight/tensor_visual_design.md
+++ b/docs/source_zh_cn/design/mindinsight/tensor_visual_design.md
@@ -44,7 +44,7 @@ Tensor visualization supports displaying 1-N dimensional tensors as tables or histograms; for 0
 Figure 1 displays the tensors recorded by the user as a table, which includes the following functions:
-- The white box in the table shows the dimension whose tensor data is currently displayed, where the colon `:` means all values of the current dimension; an index (with the same meaning as in Python, negative values supported) or `:` can be entered in the box to query the tensor data of a specific dimension.
+- The white box in the table shows the dimension whose tensor data is currently displayed, where the colon `:` denotes an index range of the current dimension, basically with the same meaning as a Python index: giving no specific index means all values of the current dimension, and `2:5` means the values at indices 2 to 5 (excluding 5); an index or an index range containing `:` can be entered in the box to query the tensor data of a specific dimension.
 - Drag the hollow circle below the table to query the tensor data of a specific step.
 ![tensor_histogram.png](./images/tensor_histogram.png)
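The input box described above follows ordinary Python slicing semantics, which the following small NumPy illustration confirms:

```python
import numpy as np

t = np.arange(8)   # a 1-D tensor: [0 1 2 3 4 5 6 7]
print(t[:])        # ':'   -> all values of the dimension
print(t[2:5])      # '2:5' -> indices 2 to 5, excluding 5: [2 3 4]
print(t[-1])       # negative index -> last element: 7
```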
diff --git a/docs/source_zh_cn/design/mindspore/distributed_training_design.md b/docs/source_zh_cn/design/mindspore/distributed_training_design.md
index 3a73eb8ff66d49500707f959e32d87fb926bf85c..1273d4bfb43a7b7f0e56746cb206d7a24bb34692 100644
--- a/docs/source_zh_cn/design/mindspore/distributed_training_design.md
+++ b/docs/source_zh_cn/design/mindspore/distributed_training_design.md
@@ -47,19 +47,19 @@
 Before each parallel training run starts, the `mindspore.communication.init` interface is called to initialize communication resources, and the global communication group `WORLD_COMM_GROUP` is created automatically.
-2. 数据分发
+2. 数据分发(Data distribution)
 The core of data parallelism is splitting the dataset along the sample dimension and distributing the shards to different cards. Every dataset-loading interface provided by the `mindspore.dataset` module has the two parameters `num_shards` and `shard_id`, which split the dataset into multiple shards and sample it cyclically, collecting `batch`-sized data onto each card; when the data runs short, sampling restarts from the beginning.
 3. 网络构图
-   The way a data-parallel network is written is no different from a single-machine network, because during forward and backward propagation the models on the cards execute independently while keeping the same network structure. The only point that needs special attention is that, to keep training synchronized across the cards, the corresponding network parameters must be initialized to identical values; it is recommended to set the same random seed on every card via `numpy.random.seed` to achieve model broadcast.
+   The way a data-parallel network is written is no different from a single-machine network, because during forward and backward propagation (Forward Propagation & Backward Propagation) the models on the cards execute independently while keeping the same network structure. The only point that needs special attention is that, to keep training synchronized across the cards, the corresponding network parameters must be initialized to identical values; it is recommended to set the same random seed on every card via `numpy.random.seed` to achieve model broadcast.
-4. 梯度聚合
+4. 梯度聚合(Gradient aggregation)
 Data parallelism should in theory achieve the same training effect as a single machine. To keep the computational logic consistent, an `AllReduce` operator is inserted after gradient computation to aggregate the gradients across the cards. A `mean` switch is provided so that the user can choose whether to average the summed gradient values; it can also be treated as a hyperparameter, and turning the switch on is equivalent to scaling the learning rate down.
-5. 参数更新
+5. 参数更新(Parameter update)
 Because gradient aggregation is introduced, the models on all cards enter the parameter-update step with the same gradient values, so MindSpore implements a synchronous data-parallel training scheme. In theory the model trained on each card is ultimately identical, although if the network contains reduction-type operations along the sample dimension, the network output may differ slightly, which is determined by the splitting nature of data parallelism.
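A minimal sketch of the set-up side of these steps follows. The dataset path is a placeholder, the exact import locations vary across releases, and `mirror_mean` is assumed to be the `mean` switch mentioned in step 4 (renamed `gradients_mean` in later versions):

```python
import numpy as np
import mindspore.dataset as ds
from mindspore import context
# Older builds expose these under mindspore.communication.management.
from mindspore.communication import init, get_rank, get_group_size

context.set_context(mode=context.GRAPH_MODE)
init()                        # step 1: creates WORLD_COMM_GROUP
rank_id = get_rank()          # this card's position in the group
rank_size = get_group_size()  # total number of cards

# Step 4's AllReduce insertion is triggered by the parallel mode below.
context.set_auto_parallel_context(parallel_mode="data_parallel",
                                  mirror_mean=True)

# Step 2: shard the dataset along the sample dimension.
dataset = ds.MindDataset("/path/to/data.mindrecord",  # placeholder path
                         num_shards=rank_size, shard_id=rank_id)

# Step 3: identical parameter initialization on every card.
np.random.seed(0)
```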
diff --git a/docs/source_zh_cn/design/mindspore/images/data_parallel.png b/docs/source_zh_cn/design/mindspore/images/data_parallel.png
index a926948143fbdfbe323fe661672c0aad824459a0..a92c82aa64615b398e83b9bc2cf0aa2c5db9f904 100644
Binary files a/docs/source_zh_cn/design/mindspore/images/data_parallel.png and b/docs/source_zh_cn/design/mindspore/images/data_parallel.png differ
diff --git a/docs/source_zh_cn/design/mindspore/images/tensor_redistribution1.png b/docs/source_zh_cn/design/mindspore/images/tensor_redistribution1.png
index 2220231387851241c5fa8d514aff00c0f4e3cc49..ed4d79416a0a07f8d75e738aa544d214834ae778 100644
Binary files a/docs/source_zh_cn/design/mindspore/images/tensor_redistribution1.png and b/docs/source_zh_cn/design/mindspore/images/tensor_redistribution1.png differ
diff --git a/docs/source_zh_cn/design/mindspore/images/tensor_redistribution2.png b/docs/source_zh_cn/design/mindspore/images/tensor_redistribution2.png
index 1261cdc28b2f8c3a6f0ccba9adb96e9d0fb5bcfa..114f984c66ae578722dbcdbb59ab03c44dbcb097 100644
Binary files a/docs/source_zh_cn/design/mindspore/images/tensor_redistribution2.png and b/docs/source_zh_cn/design/mindspore/images/tensor_redistribution2.png differ
diff --git a/docs/source_zh_cn/design/mindspore/images/tensor_redistribution3.png b/docs/source_zh_cn/design/mindspore/images/tensor_redistribution3.png
index 70eafae423d9836480a801b6519f85d892bbf19c..dd66c9120615f50f2b3f60cfe139954cb4adf307 100644
Binary files a/docs/source_zh_cn/design/mindspore/images/tensor_redistribution3.png and b/docs/source_zh_cn/design/mindspore/images/tensor_redistribution3.png differ
diff --git a/docs/source_zh_cn/design/mindspore/ir.md b/docs/source_zh_cn/design/mindspore/ir.md
index 77bc45014d5301fa6f76a9505ce38a491c09b9b4..362544c7f113919404560d8b52b838d632eda6e5 100644
--- a/docs/source_zh_cn/design/mindspore/ir.md
+++ b/docs/source_zh_cn/design/mindspore/ir.md
@@ -1,6 +1,6 @@
# MindSpore IR(MindIR)
-`Linux` `Framework Development` `Intermediate` `Advanced` `Contributor`
+`Linux` `Windows` `Framework Development` `Intermediate` `Advanced` `Contributor`
diff --git a/docs/source_zh_cn/glossary.md b/docs/source_zh_cn/glossary.md
index 647c9076f97496a863d4f9ba88e06df4f2beb908..c5721652b0e701aa8e989eea346e307b2c58fd72 100644
--- a/docs/source_zh_cn/glossary.md
+++ b/docs/source_zh_cn/glossary.md
@@ -32,9 +32,10 @@
 | LSTM | Long short-term memory. The corresponding network is a recurrent neural network suited to processing and predicting important events with very long intervals and delays in a time series. |
 | Manifest | A data format file adopted by Huawei ModelArts; see its documentation for a detailed description. |
 | ME | Mind Expression, the MindSpore front end, which mainly compiles user source code into computational graphs, controls execution and maintains context during training (in non-sink mode), and provides the dynamic graph (PyNative mode). |
-| MindArmour | The MindSpore security component, used for AI adversarial-example management, AI model attack prevention and enhancement, and AI model robustness evaluation. |
+| MindArmour | The MindSpore security module, which uses techniques such as differential privacy and adversarial attack and defense to improve model confidentiality, integrity and availability, preventing attackers from maliciously modifying the model, cracking its internal components, or stealing its parameters. |
 | MindData | The MindSpore data framework, which provides data loading, augmentation, dataset management and visualization. |
 | MindInsight | The MindSpore visualization component, which visualizes scalars, images, computational graphs, model hyperparameters and other information. |
+| MindRecord | A data format defined by MindSpore, and a module that reads, writes, searches and converts datasets in the MindSpore format. |
 | MindSpore | A deep learning framework open-sourced under Huawei's leadership. |
 | MindSpore Lite | A lightweight deep neural network inference engine that enables on-device inference of models trained with MindSpore. |
 | MNIST database | Modified National Institute of Standards and Technology database, a large database of handwritten digits commonly used to train various image processing systems. |
@@ -43,5 +44,5 @@
 | ResNet-50 | Residual Neural Network 50, a residual neural network proposed by Kaiming He and three other researchers from Microsoft Research. |
 | Schema | A dataset structure definition file that defines which fields a dataset contains and the type of each field. |
 | Summary | An operator that monitors the tensor values in a network; it is a "peripheral" operation in the graph and does not affect the data flow itself. |
-| TBE | Tensor Boost Engine, an operator development tool extended from the TVM (Tensor Virtual Machine) framework. |
+| TBE | Tensor Boost Engine, Huawei's self-developed NPU operator development tool, which extends the TVM (Tensor Virtual Machine) framework and provides a set of Python APIs for carrying out development activities and developing custom operators. |
 | TFRecord | The data format defined by TensorFlow. |
diff --git a/docs/source_zh_cn/network_list.md b/docs/source_zh_cn/network_list.md
index 351a5223c2083d20655f4eac118991dd08da7400..7364a1223b2bec9004bda1fcd75eae22c615475d 100644
--- a/docs/source_zh_cn/network_list.md
+++ b/docs/source_zh_cn/network_list.md
@@ -6,7 +6,6 @@
 - [Supported Networks](#网络支持)
 - [Model Zoo](#model-zoo)
-  - [Pre-trained Models](#预训练模型)
@@ -14,47 +13,33 @@
## Model Zoo
-| Domain | Sub-domain | Network | Ascend | GPU | CPU
-|:---- |:------- |:---- |:---- |:---- |:----
-|Computer Vision (CV) | Image Classification | [AlexNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/alexnet/src/alexnet.py) | Supported | Supported | Doing
-| Computer Vision (CV) | Image Classification | [GoogleNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/googlenet/src/googlenet.py) | Supported | Doing | Doing
-| Computer Vision (CV) | Image Classification | [LeNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/lenet/src/lenet.py) | Supported | Supported | Supported
-| Computer Vision (CV) | Image Classification | [ResNet-50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported | Supported | Doing
-|Computer Vision (CV) | Image Classification | [ResNet-101](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported |Doing | Doing
-|Computer Vision (CV) | Image Classification | [SE-ResNet50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported |Doing | Doing
-|Computer Vision (CV) | Image Classification | [ResNext50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnext50/src/image_classification.py) | Supported | Supported | Doing
-| Computer Vision (CV) | Image Classification | [VGG16](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/vgg16/src/vgg.py) | Supported | Doing | Doing
-| Computer Vision (CV) | Image Classification | [InceptionV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/inceptionv3/src/inception_v3.py) | Supported | Doing | Doing
-| Computer Vision (CV) | Mobile Image Classification&#13;Object Detection&#13;Semantic Segmentation | [MobileNetV2](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py) | Supported | Supported | Doing
-| Computer Vision (CV) | Mobile Image Classification&#13;Object Detection&#13;Semantic Segmentation | [MobileNetV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py) | Doing | Supported | Doing
-|Computer Vision (CV) | Object Detection | [SSD](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/ssd/src/ssd.py) | Supported |Doing | Doing
-| Computer Vision (CV) | Object Detection | [YoloV3-ResNet18](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py) | Supported | Doing | Doing
-| Computer Vision (CV) | Object Detection | [YoloV3-DarkNet53](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_darknet53/src/yolo.py) | Supported | Doing | Doing
-| Computer Vision (CV) | Object Detection | [FasterRCNN](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/faster_rcnn/src/FasterRcnn/faster_rcnn_r50.py) | Supported | Doing | Doing
-| Computer Vision (CV) | Semantic Segmentation | [DeeplabV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/deeplabv3/src/deeplabv3.py) | Supported | Doing | Doing
-| Computer Vision (CV) | Object Detection | [WarpCTC](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/warpctc/src/warpctc.py) | Doing | Supported | Doing
-| Natural Language Processing (NLP) | Natural Language Understanding | [BERT](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/bert/src/bert_model.py) | Supported | Doing | Doing
-| Natural Language Processing (NLP) | Natural Language Understanding | [Transformer](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/transformer/src/transformer_model.py) | Supported | Doing | Doing
-| Natural Language Processing (NLP) | Natural Language Understanding | [SentimentNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/lstm/src/lstm.py) | Doing | Supported | Supported
-| Natural Language Processing (NLP) | Natural Language Understanding | [MASS](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py) | Supported | Doing | Doing
-| Natural Language Processing (NLP) | Natural Language Understanding | [TinyBert](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/tinybert/src/tinybert_model.py) | Supported | Supported | Doing
-| Recommender | Recommender System, CTR Prediction | [DeepFM](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/recommend/deepfm/src/deepfm.py) | Supported | Supported | Doing
-| Recommender | Recommender System, Search, Ranking | [Wide&Deep](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py) | Supported | Supported | Doing
-| Graph Neural Networks (GNN) | Text Classification | [GCN](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/gnn/gcn/src/gcn.py) | Supported | Doing | Doing
-| Graph Neural Networks (GNN) | Text Classification | [GAT](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/gnn/gat/src/gat.py) | Supported | Doing | Doing
+| Domain | Sub-domain | Network | Ascend(Graph) | Ascend(PyNative) | GPU(Graph) | GPU(PyNative) | CPU(Graph)
+|:---- |:------- |:---- |:---- |:---- |:---- |:---- |:----
+|Computer Vision (CV) | Image Classification | [AlexNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/alexnet/src/alexnet.py) | Supported | Supported | Supported | Supported | Doing
+| Computer Vision (CV) | Image Classification | 
[GoogleNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/googlenet/src/googlenet.py) | Supported | Supported | Supported | Supported | Doing
+| Computer Vision (CV) | Image Classification | [LeNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/lenet/src/lenet.py) | Supported | Supported | Supported | Supported | Supported
+| Computer Vision (CV) | Image Classification | [ResNet-50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported | Supported | Supported | Supported | Doing
+|Computer Vision (CV) | Image Classification | [ResNet-101](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported | Supported |Supported | Supported | Doing
+|Computer Vision (CV) | Image Classification | [SE-ResNet50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) | Supported | Doing |Doing | Doing | Doing
+|Computer Vision (CV) | Image Classification | [ResNext50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnext50/src/image_classification.py) | Supported | Supported | Supported | Supported | Doing
+| Computer Vision (CV) | Image Classification | [VGG16](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/vgg16/src/vgg.py) | Supported | Supported | Supported | Supported | Doing
+| Computer Vision (CV) | Image Classification | [InceptionV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/inceptionv3/src/inception_v3.py) | Supported | Supported | Supported | Supported | Doing
+| Computer Vision (CV) | Mobile Image Classification&#13;Object Detection&#13;Semantic Segmentation | [MobileNetV2](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py) | Supported | Supported | Supported | Supported | Doing
+| Computer Vision (CV) | Mobile Image Classification&#13;Object Detection&#13;Semantic Segmentation | [MobileNetV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py) | Doing | Doing | Supported | Supported | Doing
+|Computer Vision (CV) | Object Detection | [SSD](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/ssd/src/ssd.py) | Supported | Supported |Doing | Doing | Doing
+| Computer Vision (CV) | Object Detection | [YoloV3-ResNet18](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py) | Supported | Doing | Doing | Doing | Doing
+| Computer Vision (CV) | Object Detection | [YoloV3-DarkNet53](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/yolov3_darknet53/src/yolo.py) | Supported | Doing | Doing | Doing | Doing
+| Computer Vision (CV) | Object Detection | [FasterRCNN](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/faster_rcnn/src/FasterRcnn/faster_rcnn_r50.py) | Supported | Doing | Doing | Doing | Doing
+| Computer Vision (CV) | Object Detection | [WarpCTC](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/warpctc/src/warpctc.py) | Doing | Doing | Supported | Supported | Doing
+| Computer Vision (CV) | Semantic Segmentation | [DeeplabV3](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/deeplabv3/src/nets/deeplab_v3/deeplab_v3.py) | Supported | Supported | Doing | Doing | Doing
+| Natural Language Processing (NLP) | Natural Language Understanding | [BERT](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/bert/src/bert_model.py) | Supported | Supported | Supported | Supported | Doing
+| Natural Language Processing (NLP) | Natural Language Understanding | [Transformer](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/transformer/src/transformer_model.py) | Supported | Doing | Doing | Doing | Doing
+| Natural Language Processing (NLP) | Natural Language Understanding | [SentimentNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/lstm/src/lstm.py) | Doing | Doing | Supported | Supported | Supported
+| Natural Language Processing (NLP) | Natural Language Understanding | [MASS](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py) | Supported | Supported | Doing | Doing | Doing
+| Natural Language Processing (NLP) | Natural Language Understanding | [TinyBert](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/tinybert/src/tinybert_model.py) | Supported | Supported | Supported | Doing | Doing
+| Recommender | Recommender System, CTR Prediction | [DeepFM](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/recommend/deepfm/src/deepfm.py) | Supported | Supported | Supported | Doing | Doing
+| Recommender | Recommender System, Search, Ranking | [Wide&Deep](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py) | Supported | Supported | Supported | Doing | Doing
+| Graph Neural Networks (GNN) | Text Classification | [GCN](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/gnn/gcn/src/gcn.py) | Supported | Doing | Doing | Doing | Doing
+| Graph Neural Networks (GNN) | Text Classification | [GAT](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/gnn/gat/src/gat.py) | Supported | Doing | Doing | Doing | Doing

> You can also use the [MindWizard tool](https://gitee.com/mindspore/mindinsight/tree/master/mindinsight/wizard/) to quickly generate scripts for classic networks.
-
-## Pre-trained Models
-* indicates the released MindSpore version number. The hardware platforms that support network training are CPU, GPU and Ascend; ✓ in the table below means the model was trained on the selected hardware platform.

-| Domain | Sub-domain | Network | Dataset | CPU | GPU | Ascend | 0.5.0-beta*
-|:---- |:----- |:---- |:---- |:---- |:---- |:---- |:------
-|Computer Vision (CV) | Image Classification | [AlexNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/alexnet/src/alexnet.py) | CIFAR-10 | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/cv/alexnet/alexnet_ascend_0.5.0_cifar10_official_classification_20200716.tar.gz)
-|Computer Vision (CV) | Image Classification | [LeNet](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/lenet/src/lenet.py)| MNIST | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/cv/lenet/lenet_ascend_0.5.0_mnist_official_classification_20200716.tar.gz)
-|Computer Vision (CV) | Image Classification | [VGG16](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/vgg16/src/vgg.py)|CIFAR-10 | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/cv/vgg/vgg16_ascend_0.5.0_cifar10_official_classification_20200715.tar.gz)
-|Computer Vision (CV) | Image Classification | [ResNet-50](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py) |CIFAR-10 | | | ✓ |[Download](http://download.mindspore.cn/model_zoo/official/cv/resnet/resnet50_v1.5_ascend_0.3.0_cifar10_official_classification_20200718.tar.gz)
-|Computer Vision (CV) | Object Detection | [YoloV3-DarkNet53](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/cv/yolov3_darknet53) |COCO 2014 | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/cv/yolo/yolov3_darknet53_ascend_0.5.0_coco2014_official_object_detection_20200717.tar.gz)
-| Natural Language Processing (NLP) | Natural Language Understanding | [BERT_Base](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/bert/src/bert_model.py) | zhwiki | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/nlp/bert/bert_base_ascend_0.5.0_cn-wiki_official_nlp_20200720.tar.gz)
-| Natural Language Processing (NLP) | Natural Language Understanding | [BERT_NEZHA](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/bert/src/bert_model.py)| zhwiki | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/nlp/bert/bert_nezha_ascend_0.5.0_cn-wiki_official_nlp_20200720.tar.gz)
-| Natural Language Processing (NLP) | Natural Language Understanding | [Transformer](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/nlp/transformer/src/transformer_model.py)|WMT English-German | | | ✓ | [Download](http://download.mindspore.cn/model_zoo/official/nlp/transformer/transformer_ascend_0.5.0_wmtende_official_machine_translation_20200713.tar.gz)
diff --git a/docs/source_zh_cn/operator_list.md b/docs/source_zh_cn/operator_list.md
index 016d4b5f8025ca4ffe97b5ff6a836b8c265c1f90..1bac1146d559988a5847e84b6909917586ce45f5 100644
--- a/docs/source_zh_cn/operator_list.md
+++ b/docs/source_zh_cn/operator_list.md
@@ -37,7 +37,7 @@
 | [mindspore.nn.Flatten](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Flatten) |Supported | Supported | Supported |layer/basic
 | [mindspore.nn.Dense](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Dense) |Supported | Supported | Supported |layer/basic
 | [mindspore.nn.ClipByNorm](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ClipByNorm) |Supported | Supported | Doing |layer/basic
-| 
[mindspore.nn.Norm](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Norm) |Doing | Supported | Doing |layer/basic +| [mindspore.nn.Norm](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Norm) |Supported | Supported | Doing |layer/basic | [mindspore.nn.OneHot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.OneHot) | Supported | Supported | Supported |layer/basic | [mindspore.nn.Range](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Range) | Supported | Doing | Doing |layer/basic | [mindspore.nn.SequentialCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SequentialCell) |Supported | Supported | Doing |layer/container @@ -63,13 +63,23 @@ | [mindspore.nn.LinSpace](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.LinSpace) | Supported | Doing | Doing | layer/normalization | [mindspore.nn.MaxPool2d](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MaxPool2d) | Supported | Supported | Supported |layer/pooling | [mindspore.nn.AvgPool2d](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.AvgPool2d) | Supported | Supported | Doing |layer/pooling -| [mindspore.nn.DenseBnAct](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DenseBnAct) |Supported | Doing | Doing |layer/quant -| [mindspore.nn.Conv2dBnAct](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dBnAct) | Supported | Supported | Doing |layer/quant +| [mindspore.nn.DenseBnAct](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DenseBnAct) |Supported | Supported | Supported |layer/quant +| [mindspore.nn.Conv2dBnAct](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dBnAct) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.FakeQuantWithMinMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.FakeQuantWithMinMax) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.Conv2dBnFoldQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dBnFoldQuant) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.Conv2dBnWithoutFoldQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dBnWithoutFoldQuant) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.Conv2dQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Conv2dQuant) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.DenseQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DenseQuant) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.ActQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ActQuant) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.LeakyReLUQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.LeakyReLUQuant) | Supported | Supported | Supported |layer/quant +| 
[mindspore.nn.HSwishQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.HSwishQuant) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.HSigmoidQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.HSigmoidQuant) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.TensorAddQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.TensorAddQuant) | Supported | Supported | Supported |layer/quant +| [mindspore.nn.MulQuant](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MulQuant) | Supported | Supported | Supported |layer/quant | [mindspore.nn.L1Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.L1Loss) |Supported |Supported | Doing |loss/loss | [mindspore.nn.MSELoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MSELoss) | Supported |Doing | Doing |loss/loss | [mindspore.nn.SmoothL1Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SmoothL1Loss) | Supported |Doing | Doing |loss/loss | [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Supported |loss/loss -| [mindspore.nn.SoftmaxCrossEntropyExpand](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyExpand) | Supported |Supported | Doing |loss/loss | [mindspore.nn.CosineEmbeddingLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.CosineEmbeddingLoss) |Supported |Supported | Doing |loss/loss | [mindspore.nn.ProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ProximalAdagrad) | Supported |Doing | Doing |optim/ProximalAdagrad | [mindspore.nn.LazyAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.LazyAdam) | Supported |Doing | Doing |optim/lazyadam @@ -84,300 +94,305 @@ | [mindspore.nn.WithLossCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.WithLossCell) | Supported | Supported | Doing |wrap/cell_wrapper | [mindspore.nn.WithGradCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.WithGradCell) | Supported | Supported | Doing |wrap/cell_wrapper | [mindspore.nn.TrainOneStepCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.TrainOneStepCell) | Supported | Supported | Doing |wrap/cell_wrapper -| [mindspore.nn.DataWrapper](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DataWrapper) |Doing | Supported | Doing |wrap/cell_wrapper | [mindspore.nn.GetNextSingleOp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.GetNextSingleOp) |Doing | Supported | Doing |wrap/cell_wrapper | [mindspore.nn.WithEvalCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.WithEvalCell) | Supported | Supported | Doing |wrap/cell_wrapper | [mindspore.nn.ParameterUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ParameterUpdate) | Supported 
|Doing | Doing |wrap/cell_wrapper
| [mindspore.nn.DistributedGradReducer](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DistributedGradReducer) | Supported |Doing | Doing |wrap/grad_reducer
-| [mindspore.nn.DynamicLossScaleUpdateCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DynamicLossScaleUpdateCell) | Doing |Doing | Doing |wrap/loss_scale
-| [mindspore.nn.FixedLossScaleUpdateCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.FixedLossScaleUpdateCell) | Doing |Doing | Doing |wrap/loss_scale
-| [mindspore.nn.TrainOneStepWithLossScaleCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.TrainOneStepWithLossScaleCell) | Doing |Doing | Doing |wrap/loss_scale
+| [mindspore.nn.DynamicLossScaleUpdateCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.DynamicLossScaleUpdateCell) | Supported |Supported | Doing |wrap/loss_scale
+| [mindspore.nn.FixedLossScaleUpdateCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.FixedLossScaleUpdateCell) | Supported |Supported | Doing |wrap/loss_scale
+| [mindspore.nn.TrainOneStepWithLossScaleCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.TrainOneStepWithLossScaleCell) | Supported |Supported | Doing |wrap/loss_scale
| [mindspore.nn.Cell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Cell) | Supported | Supported | Supported |cell
+| [mindspore.nn.EmbeddingLookup](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.EmbeddingLookup) |Supported | Supported | Supported |layer/embedding
+| [mindspore.nn.Pad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.Pad) |Supported | Supported | Doing |layer/basic
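The three `wrap/loss_scale` cells enabled above on Ascend and GPU are typically wired together as in the following minimal sketch; the toy `Dense` network, loss and `Momentum` optimizer are illustrative stand-ins, assuming the `mindspore.nn` API of this release:

```python
import mindspore.nn as nn

# Toy network, loss and optimizer standing in for a real model.
net = nn.Dense(4, 2)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
net_with_loss = nn.WithLossCell(net, loss_fn)
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)

# DynamicLossScaleUpdateCell adjusts the loss scale whenever overflow is detected;
# TrainOneStepWithLossScaleCell runs one training step under that scale and
# skips the parameter update on overflow.
update_cell = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12,
                                            scale_factor=2,
                                            scale_window=1000)
train_net = nn.TrainOneStepWithLossScaleCell(net_with_loss, opt, update_cell)
train_net.set_train()
```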
## mindspore.ops.operations

| Operation | Ascend | GPU | CPU | Operator Category
| :----------- |:------ |:------ |:-----|:---
-| [mindspore.ops.operations.Flatten](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Flatten) | Supported | Supported |Supported | nn_ops
-| [mindspore.ops.operations.Softmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Softmax) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.Acosh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Acosh) | Doing | Doing | Doing | nn_ops
-| [mindspore.ops.operations.FloorMod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorMod) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.Elu](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Elu) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.MirrorPad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MirrorPad) | Supported | Supported | Doing | nn_ops
-| [mindspore.ops.operations.Unpack](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Unpack) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.Pack](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pack) | Supported| Doing | Doing | nn_ops
-| [mindspore.ops.operations.L2Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.L2Loss) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.CTCLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CTCLoss) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.RNNTLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RNNTLoss) | Supported | Doing | Doing | nn_ops
-| [mindspore.ops.operations.LogSoftmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogSoftmax) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.Softplus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Softplus) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.ReLU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReLU) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.ReLU6](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReLU6) | Supported | Supported |Supported | nn_ops
-| [mindspore.ops.operations.HSwish](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.HSwish) | Doing | Supported |Doing | nn_ops
-| [mindspore.ops.operations.HSigmoid](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.HSigmoid) | Doing | Supported |Doing | nn_ops
-| [mindspore.ops.operations.Sigmoid](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sigmoid) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.Tanh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tanh) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.BatchNorm](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchNorm) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.LRN](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LRN) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.Conv2D](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Conv2D) | Supported | Supported | Supported | nn_ops
-| [mindspore.ops.operations.DepthwiseConv2dNative](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DepthwiseConv2dNative) | Supported | Doing |Doing | nn_ops
-| [mindspore.ops.operations.DepthwiseConv2dNativeBackpropInput](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DepthwiseConv2dNativeBackpropInput) | Supported | Doing |Doing | nn_ops
-| 
[mindspore.ops.operations.DepthwiseConv2dNativeiBackpropFilter](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DepthwiseConv2dNativeBackpropFilter) | Supported | Doing |Doing | nn_ops -| [mindspore.ops.operations.MaxPoolWithArgmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MaxPoolWithArgmax) | Supported | Doing |Doing | nn_ops -| [mindspore.ops.operations.MaxPool](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MaxPool) | Supported | Supported | Supported | nn_ops -| [mindspore.ops.operations.AvgPool](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AvgPool) | Supported | Supported |Doing | nn_ops -| [mindspore.ops.operations.Conv2DBackpropInput](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Conv2DBackpropInput) | Supported | Supported |Doing | nn_ops -| [mindspore.ops.operations.BiasAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BiasAdd) | Supported | Supported | Supported | nn_ops -| [mindspore.ops.operations.TopK](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TopK) | Supported | Supported |Doing | nn_ops -| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Doing | nn_ops -| [mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits) | Doing | Supported | Supported | nn_ops -| [mindspore.ops.operations.ApplyMomentum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyMomentum) | Supported | Supported | Supported | nn_ops -| [mindspore.ops.operations.ApplyAddSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAddSign) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.ApplyPowerSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyPowerSign) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.ApplyGradientDescent](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyGradientDescent) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.ApplyProximalGradientDescent](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyProximalGradientDescent) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.ApplyRMSProp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyRMSProp) | Supported | Supported | Doing | nn_ops -| 
[mindspore.ops.operations.ApplyCenteredRMSProp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyCenteredRMSProp) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.SparseApplyAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyAdagrad) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.SparseApplyAdagradV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyAdagradV2) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.SparseApplyProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyProximalAdagrad) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.FusedSparseProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseProximalAdagrad) | Doing | Doing | Supported | nn_ops -| [mindspore.ops.operations.ApplyProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyProximalAdagrad) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.FusedSparseLazyAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseLazyAdam) | Doing | Doing | Supported | nn_ops -| [mindspore.ops.operations.FusedSparseAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseAdam) | Doing | Doing | Supported | nn_ops -| [mindspore.ops.operations.SmoothL1Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SmoothL1Loss) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.SGD](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SGD) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.LayerNorm](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LayerNorm) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.L2Normalize](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.L2Normalize) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.DropoutGenMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DropoutGenMask) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.DropoutDoMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DropoutDoMask) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.ResizeBilinear](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ResizeBilinear) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.OneHot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.OneHot) | Supported | Supported | Supported | nn_ops -| 
[mindspore.ops.operations.Gelu](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Gelu) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.GetNext](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GetNext) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.PReLU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.PReLU) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.LSTM](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LSTM) | Doing | Supported | Supported | nn_ops -| [mindspore.ops.operations.BasicLSTMCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BasicLSTMCell) | Doing | Doing | Doing | nn_ops -| [mindspore.ops.operations.SigmoidCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SigmoidCrossEntropyWithLogits) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.Pad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pad) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.ROIAlign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ROIAlign) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.Adam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Adam) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.BinaryCrossEntropy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BinaryCrossEntropy) | Supported | Supported | Doing | nn_ops -| [mindspore.ops.operations.KLDivLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.KLDivLoss) | Doing | Supported | Doing | nn_ops -| [mindspore.ops.operations.LARSUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LARSUpdate) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.Softsign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Softsign) | Supported | Doing | Doing | nn_ops -| [mindspore.ops.operations.TensorAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorAdd) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.AssignAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignAdd) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.AssignSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignSub) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.ReduceMean](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMean) | Supported | Supported | 
Supported | math_ops -| [mindspore.ops.operations.ReduceSum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceSum) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.ReduceAll](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceAll) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.ReduceMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMax) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.ReduceMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMin) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.ReduceProd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceProd) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.CumProd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CumProd) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.MatMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MatMul) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.BatchMatMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchMatMul) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.CumSum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CumSum) | Supported | Supported| Doing | math_ops -| [mindspore.ops.operations.AddN](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AddN) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.Neg](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Neg) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Sub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sub) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.Mul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mul) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.Square](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Square) | Supported | Supported | Supported | math_ops -| [mindspore.ops.operations.SquareSumAll](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SquareSumAll) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Rsqrt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Rsqrt) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Sqrt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sqrt) | Supported | Doing | Doing | math_ops 
-| [mindspore.ops.operations.Reciprocal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Reciprocal) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Pow](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pow) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Exp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Exp) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Log](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Log) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Log1p](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Log1p) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Minimum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Minimum) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Maximum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Maximum) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.RealDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RealDiv) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Div](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Div) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.DivNoNan](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DivNoNan) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.FloorDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorDiv) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Floor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Floor) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Equal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Equal) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.EqualCount](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.EqualCount) | Doing | Supported | Supported | math_ops -| [mindspore.ops.operations.NotEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NotEqual) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Greater](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Greater) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.GreaterEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GreaterEqual) | Supported | Supported | Doing | math_ops -| 
[mindspore.ops.operations.Less](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Less) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Atan2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Atan2) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.LessEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LessEqual) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.LogicalNot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalNot) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.LogicalAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalAnd) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.LogicalOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalOr) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.BitwiseAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseAnd) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.BitwiseOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseOr) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.BitwiseXor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseXor) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Ceil](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Ceil) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Inv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Inv) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Invert](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Invert) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.NPUAllocFloatStatus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NPUAllocFloatStatus) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.NPUGetFloatStatus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NPUGetFloatStatus) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.NPUClearFloatStatus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NPUClearFloatStatus) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.FloatStatus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloatStatus) | Doing | Supported | Doing | math_ops -| [mindspore.ops.operations.Cos](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cos) | Supported | Doing | Doing | 
math_ops -| [mindspore.ops.operations.Cosh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cosh) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.ACos](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ACos) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.BesselI0e](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BesselI0e) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.BesselI1e](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BesselI1e) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.TruncateDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncateDiv) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.TruncateMod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncateMod) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Tan](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tan) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Asin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Asin) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Asinh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Asinh) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Erf](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Erf) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Erfc](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Erfc) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Sin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sin) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Sinh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sinh) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Expm1](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Expm1) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.NMSWithMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NMSWithMask) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Abs](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Abs) | Supported | Supported | Doing | math_ops -| [mindspore.ops.operations.Sign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sign) | Supported | Doing | Doing | math_ops -| 
[mindspore.ops.operations.Round](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Round) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.ApproximateEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApproximateEqual) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.InplaceAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InplaceAdd) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.InplaceSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InplaceSub) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Mod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mod) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.ExpandDims](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ExpandDims) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.DType](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DType) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.SameTypeShape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SameTypeShape) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Cast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cast) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.IsSubClass](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.IsSubClass) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.IsInstance](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.IsInstance) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Reshape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Reshape) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Shape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Shape) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Squeeze](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Squeeze) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.Transpose](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Transpose) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.GatherV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherV2) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Split](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Split) | 
Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.Rank](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Rank) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.TruncatedNormal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncatedNormal) | Doing | Doing | Doing | array_ops -| [mindspore.ops.operations.Size](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Size) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Fill](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Fill) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.OnesLike](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.OnesLike) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.ZerosLike](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ZerosLike) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.TupleToArray](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TupleToArray) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.ScalarToArray](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScalarToArray) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.ScalarToTensor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScalarToTensor) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.InvertPermutation](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InvertPermutation) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Argmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Argmax) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Argmin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Argmin) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ArgMaxWithValue](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ArgMaxWithValue) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.ArgMinWithValue](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ArgMinWithValue) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.Tile](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tile) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.UnsortedSegmentSum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UnsortedSegmentSum) | Supported | Supported | Doing | array_ops -| 
[mindspore.ops.operations.UnsortedSegmentMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UnsortedSegmentMin) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.UnsortedSegmentProd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UnsortedSegmentProd) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.Concat](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Concat) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.ParallelConcat](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ParallelConcat) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.Slice](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Slice) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Select](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Select) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.StridedSlice](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.StridedSlice) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.Diag](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Diag) | Doing | Doing | Doing | array_ops -| [mindspore.ops.operations.DiagPart](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DiagPart) | Doing | Doing | Doing | array_ops -| [mindspore.ops.operations.Eye](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Eye) | Supported | Supported | Supported | array_ops -| [mindspore.ops.operations.ScatterNd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNd) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.ResizeNearestNeighbor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ResizeNearestNeighbor) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.GatherNd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherNd) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.ApplyFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyFtrl) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.SparseApplyFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyFtrl) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.FusedSparseFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseFtrl) | Doing | Doing | Supported | array_ops -| 
[mindspore.ops.operations.SparseApplyFtrlV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyFtrlV2) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ScatterNdUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdUpdate) | Supported | Doing | Supported | array_ops -| [mindspore.ops.operations.ScatterUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterUpdate) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ScatterMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMul) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ScatterDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterDiv) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.SpaceToDepth](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SpaceToDepth) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.DepthToSpace](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DepthToSpace) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.SpaceToBatch](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SpaceToBatch) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.SpaceToBatchND](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SpaceToBatchND) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.BatchToSpace](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchToSpace) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.BatchToSpaceND](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchToSpaceND) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.IsFinite](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.IsFinite) | Supported | Supported | Doing | array_ops -| [mindspore.ops.operations.InplaceUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InplaceUpdate) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ScatterSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterSub) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ScatterMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMax) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ScatterMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMin) | Supported | Doing | Doing | array_ops -| 
[mindspore.ops.operations.ScatterNdAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdAdd) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ScatterNdSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdSub) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ScatterNonAliasingAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNonAliasingAdd) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.Rint](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Rint) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ReverseV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReverseV2) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.ReduceOp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceOp) | Supported | Supported | Doing | comm_ops -| [mindspore.ops.operations.AllReduce](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AllReduce) | Supported | Supported | Doing | comm_ops -| [mindspore.ops.operations.AllGather](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AllGather) | Supported | Supported | Doing | comm_ops -| [mindspore.ops.operations.ReduceScatter](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceScatter) | Doing | Supported | Doing | comm_ops -| [mindspore.ops.operations.Broadcast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Broadcast) | Supported | Doing | Doing | comm_ops -| [mindspore.ops.operations.ControlDepend](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ControlDepend) | Supported | Supported | Supported | control_ops -| [mindspore.ops.operations.GeSwitch](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GeSwitch) | Doing | Doing | Doing | control_ops -| [mindspore.ops.operations.Merge](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Merge) | Doing | Doing | Doing | control_ops -| [mindspore.ops.operations.ScalarSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScalarSummary) | Supported | Supported | Supported | debug_ops -| [mindspore.ops.operations.ImageSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ImageSummary) | Supported | Supported | Supported | debug_ops -| [mindspore.ops.operations.TensorSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorSummary) | Supported | Supported | Supported | debug_ops -| 
[mindspore.ops.operations.HistogramSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.HistogramSummary) | Supported | Supported | Supported | debug_ops -| [mindspore.ops.operations.InsertGradientOf](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InsertGradientOf) | Supported | Supported | Supported | debug_ops -| [mindspore.ops.operations.Print](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Print) | Supported | Doing | Doing | debug_ops -| [mindspore.ops.operations.Assign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Assign) | Supported | Supported | Doing | other_ops -| [mindspore.ops.operations.BoundingBoxEncode](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BoundingBoxEncode) | Supported | Supported | Doing | other_ops -| [mindspore.ops.operations.BoundingBoxDecode](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BoundingBoxDecode) | Supported | Supported | Doing | other_ops -| [mindspore.ops.operations.PopulationCount](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.PopulationCount) | Supported | Doing | Doing | other_ops -| [mindspore.ops.operations.CheckValid](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CheckValid) | Supported | Supported | Doing | other_ops -| [mindspore.ops.operations.IOU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.IOU) | Supported | Supported | Doing | other_ops -| [mindspore.ops.operations.MakeRefKey](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MakeRefKey) | Supported | Supported | Supported | other_ops -| [mindspore.ops.operations.InTopK](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.InTopK) | Supported | Doing | Doing | other_ops -| [mindspore.ops.operations.StandardNormal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.StandardNormal) | Supported | Supported | Doing | random_ops -| [mindspore.ops.operations.Gamma](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Gamma) | Supported | Doing | Doing | random_ops -| [mindspore.ops.operations.Poisson](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Poisson) | Supported | Doing | Doing | random_ops -| [mindspore.ops.operations.UniformInt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UniformInt) | Supported | Supported | Doing | random_ops -| [mindspore.ops.operations.UniformReal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.UniformReal) | Supported | Supported | Doing | random_ops -| 
[mindspore.ops.operations.RandomChoiceWithMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RandomChoiceWithMask) | Doing| Supported | Doing | random_ops -| [mindspore.ops.operations.RandomCategorical](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RandomCategorical) | Supported| Doing | Doing | random_ops -| [mindspore.ops.operations.ScalarCast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScalarCast) | Supported | Supported | Supported | inner_ops -| [mindspore.ops.operations.ReverseSequence](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReverseSequence) | Supported | Doing | Doing | array_ops -| [mindspore.ops.operations.CropAndResize](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.CropAndResize) | Supported | Doing | Doing | image_ops -| [mindspore.ops.operations.SquaredDifference](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SquaredDifference) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Xdivy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Xdivy) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.Xlogy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Xlogy) | Supported | Doing | Doing | math_ops -| [mindspore.ops.operations.HistogramFixedWidth](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.HistogramFixedWidth) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Flatten](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Flatten) | Supported | Supported |Supported | nn_ops +| [mindspore.ops.Softmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Softmax) | Supported | Supported | Supported | nn_ops +| [mindspore.ops.Acosh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Acosh) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.FloorMod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorMod) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.Elu](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Elu) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.MirrorPad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MirrorPad) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.Unpack](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Unpack) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.Pack](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pack) | Supported| Doing | Doing | nn_ops +| [mindspore.ops.L2Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.L2Loss) | Supported | Doing | Doing | nn_ops +| 
[mindspore.ops.CTCLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CTCLoss) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.RNNTLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RNNTLoss) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.LogSoftmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogSoftmax) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.Softplus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Softplus) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.ReLU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReLU) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.ReLU6](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReLU6) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.HSwish](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.HSwish) | Doing | Supported | Doing | nn_ops
+| [mindspore.ops.HSigmoid](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.HSigmoid) | Doing | Supported | Doing | nn_ops
+| [mindspore.ops.Sigmoid](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sigmoid) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.Tanh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tanh) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.BatchNorm](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchNorm) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.LRN](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LRN) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.Conv2D](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Conv2D) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.DepthwiseConv2dNative](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DepthwiseConv2dNative) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.DepthwiseConv2dNativeBackpropInput](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DepthwiseConv2dNativeBackpropInput) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.DepthwiseConv2dNativeBackpropFilter](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DepthwiseConv2dNativeBackpropFilter) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.MaxPoolWithArgmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MaxPoolWithArgmax) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.MaxPool](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MaxPool) | Supported | Supported | Supported | nn_ops
+| [mindspore.ops.AvgPool](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AvgPool) | Supported | Supported | Doing | nn_ops
+| [mindspore.ops.Conv2DBackpropInput](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Conv2DBackpropInput) | 
Supported | Supported |Doing | nn_ops +| [mindspore.ops.BiasAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BiasAdd) | Supported | Supported | Supported | nn_ops +| [mindspore.ops.TopK](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TopK) | Supported | Supported |Doing | nn_ops +| [mindspore.ops.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Doing | nn_ops +| [mindspore.ops.SparseSoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseSoftmaxCrossEntropyWithLogits) | Doing | Supported | Supported | nn_ops +| [mindspore.ops.ApplyMomentum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyMomentum) | Supported | Supported | Supported | nn_ops +| [mindspore.ops.ApplyAddSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAddSign) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.ApplyPowerSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyPowerSign) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.ApplyGradientDescent](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyGradientDescent) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.ApplyProximalGradientDescent](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyProximalGradientDescent) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.ApplyRMSProp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyRMSProp) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.ApplyCenteredRMSProp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyCenteredRMSProp) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.SparseApplyAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyAdagrad) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.SparseApplyAdagradV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyAdagradV2) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.SparseApplyProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyProximalAdagrad) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.FusedSparseProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseProximalAdagrad) | Doing | Doing | Supported | nn_ops +| [mindspore.ops.ApplyProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyProximalAdagrad) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.FusedSparseLazyAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseLazyAdam) | Doing | Doing | Supported | nn_ops +| [mindspore.ops.FusedSparseAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseAdam) | Doing | Doing | Supported | nn_ops +| 
[mindspore.ops.SmoothL1Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SmoothL1Loss) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.SGD](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SGD) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.LayerNorm](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LayerNorm) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.L2Normalize](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.L2Normalize) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.DropoutGenMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DropoutGenMask) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.DropoutDoMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DropoutDoMask) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.ResizeBilinear](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ResizeBilinear) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.OneHot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.OneHot) | Supported | Supported | Supported | nn_ops +| [mindspore.ops.Gelu](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Gelu) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.GetNext](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GetNext) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.PReLU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.PReLU) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.LSTM](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LSTM) | Doing | Supported | Supported | nn_ops +| [mindspore.ops.BasicLSTMCell](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BasicLSTMCell) | Doing | Doing | Doing | nn_ops +| [mindspore.ops.SigmoidCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SigmoidCrossEntropyWithLogits) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.Pad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pad) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.ROIAlign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ROIAlign) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.Adam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Adam) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.BinaryCrossEntropy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BinaryCrossEntropy) | Supported | Supported | Doing | nn_ops +| [mindspore.ops.KLDivLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.KLDivLoss) | Doing | Supported | Doing | nn_ops +| [mindspore.ops.LARSUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LARSUpdate) | Supported | Doing | Doing | nn_ops +| 
[mindspore.ops.Softsign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Softsign) | Supported | Doing | Doing | nn_ops +| [mindspore.ops.TensorAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorAdd) | Supported | Supported | Supported | math_ops +| [mindspore.ops.AssignAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignAdd) | Supported | Supported | Supported | math_ops +| [mindspore.ops.AssignSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignSub) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ReduceMean](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMean) | Supported | Supported | Supported | math_ops +| [mindspore.ops.ReduceSum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceSum) | Supported | Supported | Supported | math_ops +| [mindspore.ops.ReduceAll](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceAll) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ReduceMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMax) | Supported | Supported | Supported | math_ops +| [mindspore.ops.ReduceMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMin) | Supported | Supported | Doing | math_ops +| [mindspore.ops.ReduceProd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceProd) | Supported | Doing | Doing | math_ops +| [mindspore.ops.CumProd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CumProd) | Supported | Doing | Doing | math_ops +| [mindspore.ops.MatMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MatMul) | Supported | Supported | Supported | math_ops +| [mindspore.ops.BatchMatMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchMatMul) | Supported | Supported | Doing | math_ops +| [mindspore.ops.CumSum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CumSum) | Supported | Supported| Doing | math_ops +| [mindspore.ops.AddN](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AddN) | Supported | Supported | Supported | math_ops +| [mindspore.ops.Neg](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Neg) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Sub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sub) | Supported | Supported | Supported | math_ops +| [mindspore.ops.Mul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mul) | Supported | Supported | Supported | math_ops +| [mindspore.ops.Square](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Square) | Supported | Supported | Supported | math_ops +| [mindspore.ops.SquareSumAll](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SquareSumAll) | Supported | Doing | Doing | math_ops +| 
[mindspore.ops.Rsqrt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Rsqrt) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Sqrt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sqrt) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Reciprocal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Reciprocal) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Pow](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pow) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Exp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Exp) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Log](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Log) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Log1p](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Log1p) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Minimum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Minimum) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Maximum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Maximum) | Supported | Supported | Doing | math_ops +| [mindspore.ops.RealDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RealDiv) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Div](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Div) | Supported | Supported | Doing | math_ops +| [mindspore.ops.DivNoNan](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DivNoNan) | Supported | Doing | Doing | math_ops +| [mindspore.ops.FloorDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorDiv) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Floor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Floor) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Equal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Equal) | Supported | Supported | Doing | math_ops +| [mindspore.ops.EqualCount](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.EqualCount) | Doing | Supported | Supported | math_ops +| [mindspore.ops.NotEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NotEqual) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Greater](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Greater) | Supported | Supported | Doing | math_ops +| [mindspore.ops.GreaterEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GreaterEqual) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Less](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Less) | Supported | Supported | Doing | math_ops +| 
[mindspore.ops.Atan2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Atan2) | Supported | Doing | Doing | math_ops +| [mindspore.ops.LessEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LessEqual) | Supported | Supported | Doing | math_ops +| [mindspore.ops.LogicalNot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalNot) | Supported | Supported | Doing | math_ops +| [mindspore.ops.LogicalAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalAnd) | Supported | Supported | Doing | math_ops +| [mindspore.ops.LogicalOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalOr) | Supported | Supported | Doing | math_ops +| [mindspore.ops.BitwiseAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseAnd) | Supported | Doing | Doing | math_ops +| [mindspore.ops.BitwiseOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseOr) | Supported | Doing | Doing | math_ops +| [mindspore.ops.BitwiseXor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseXor) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Ceil](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Ceil) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Inv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Inv) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Invert](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Invert) | Supported | Doing | Doing | math_ops +| [mindspore.ops.NPUAllocFloatStatus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NPUAllocFloatStatus) | Supported | Doing | Doing | math_ops +| [mindspore.ops.NPUGetFloatStatus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NPUGetFloatStatus) | Supported | Doing | Doing | math_ops +| [mindspore.ops.NPUClearFloatStatus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NPUClearFloatStatus) | Supported | Doing | Doing | math_ops +| [mindspore.ops.FloatStatus](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloatStatus) | Doing | Supported | Doing | math_ops +| [mindspore.ops.Cos](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cos) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Cosh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cosh) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ACos](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ACos) | Supported | Doing | Doing | math_ops +| [mindspore.ops.BesselI0e](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BesselI0e) | Supported | Doing | Doing | math_ops +| [mindspore.ops.BesselI1e](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BesselI1e) | Supported | Doing | Doing | math_ops +| 
[mindspore.ops.TruncateDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncateDiv) | Supported | Doing | Doing | math_ops +| [mindspore.ops.TruncateMod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncateMod) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Tan](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tan) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Asin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Asin) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Asinh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Asinh) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Erf](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Erf) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Erfc](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Erfc) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Sin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sin) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Sinh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sinh) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Expm1](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Expm1) | Supported | Doing | Doing | math_ops +| [mindspore.ops.NMSWithMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NMSWithMask) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Abs](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Abs) | Supported | Supported | Doing | math_ops +| [mindspore.ops.Sign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sign) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Round](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Round) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ApproximateEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApproximateEqual) | Supported | Doing | Doing | math_ops +| [mindspore.ops.InplaceAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InplaceAdd) | Supported | Doing | Doing | math_ops +| [mindspore.ops.InplaceSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InplaceSub) | Supported | Doing | Doing | math_ops +| [mindspore.ops.Mod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mod) | Supported | Doing | Doing | math_ops +| [mindspore.ops.ExpandDims](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ExpandDims) | Supported | Supported | Supported | array_ops +| [mindspore.ops.DType](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DType) | Supported | Supported | Supported | array_ops +| 
[mindspore.ops.SameTypeShape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SameTypeShape) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Cast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cast) | Supported | Supported | Doing | array_ops +| [mindspore.ops.IsSubClass](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.IsSubClass) | Supported | Supported | Supported | array_ops +| [mindspore.ops.IsInstance](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.IsInstance) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Reshape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Reshape) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Shape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Shape) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Squeeze](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Squeeze) | Supported | Supported | Doing | array_ops +| [mindspore.ops.Transpose](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Transpose) | Supported | Supported | Supported | array_ops +| [mindspore.ops.GatherV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherV2) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Split](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Split) | Supported | Supported | Doing | array_ops +| [mindspore.ops.Rank](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Rank) | Supported | Supported | Supported | array_ops +| [mindspore.ops.TruncatedNormal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncatedNormal) | Doing | Doing | Doing | array_ops +| [mindspore.ops.Size](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Size) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Fill](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Fill) | Supported | Supported | Supported | array_ops +| [mindspore.ops.OnesLike](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.OnesLike) | Supported | Supported | Doing | array_ops +| [mindspore.ops.ZerosLike](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ZerosLike) | Supported | Supported | Doing | array_ops +| [mindspore.ops.TupleToArray](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TupleToArray) | Supported | Supported | Supported | array_ops +| [mindspore.ops.ScalarToArray](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScalarToArray) | Supported | Supported | Supported | array_ops +| [mindspore.ops.ScalarToTensor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScalarToTensor) | Supported | Supported | Supported | array_ops +| 
[mindspore.ops.InvertPermutation](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InvertPermutation) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Argmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Argmax) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Argmin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Argmin) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ArgMaxWithValue](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ArgMaxWithValue) | Supported | Supported | Doing | array_ops +| [mindspore.ops.ArgMinWithValue](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ArgMinWithValue) | Supported | Doing | Doing | array_ops +| [mindspore.ops.Tile](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tile) | Supported | Supported | Doing | array_ops +| [mindspore.ops.UnsortedSegmentSum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UnsortedSegmentSum) | Supported | Supported | Doing | array_ops +| [mindspore.ops.UnsortedSegmentMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UnsortedSegmentMin) | Supported | Doing | Doing | array_ops +| [mindspore.ops.UnsortedSegmentProd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UnsortedSegmentProd) | Supported | Doing | Doing | array_ops +| [mindspore.ops.Concat](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Concat) | Supported | Supported | Supported | array_ops +| [mindspore.ops.ParallelConcat](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ParallelConcat) | Supported | Doing | Doing | array_ops +| [mindspore.ops.Slice](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Slice) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Select](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Select) | Supported | Supported | Doing | array_ops +| [mindspore.ops.StridedSlice](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.StridedSlice) | Supported | Supported | Supported | array_ops +| [mindspore.ops.Diag](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Diag) | Doing | Doing | Doing | array_ops +| [mindspore.ops.DiagPart](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DiagPart) | Doing | Doing | Doing | array_ops +| [mindspore.ops.Eye](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Eye) | Supported | Supported | Supported | array_ops +| [mindspore.ops.ScatterNd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNd) | Supported | Supported | Doing | array_ops +| [mindspore.ops.ResizeNearestNeighbor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ResizeNearestNeighbor) | Supported | Supported | Doing | array_ops +| 
[mindspore.ops.GatherNd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherNd) | Supported | Supported | Doing | array_ops +| [mindspore.ops.ApplyFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyFtrl) | Supported | Supported | Doing | array_ops +| [mindspore.ops.SparseApplyFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyFtrl) | Supported | Doing | Doing | array_ops +| [mindspore.ops.FusedSparseFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseFtrl) | Doing | Doing | Supported | array_ops +| [mindspore.ops.SparseApplyFtrlV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyFtrlV2) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterNdUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdUpdate) | Supported | Doing | Supported | array_ops +| [mindspore.ops.ScatterUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterUpdate) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMul) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterDiv) | Supported | Doing | Doing | array_ops +| [mindspore.ops.SpaceToDepth](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SpaceToDepth) | Supported | Doing | Doing | array_ops +| [mindspore.ops.DepthToSpace](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DepthToSpace) | Supported | Doing | Doing | array_ops +| [mindspore.ops.SpaceToBatch](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SpaceToBatch) | Supported | Doing | Doing | array_ops +| [mindspore.ops.SpaceToBatchND](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SpaceToBatchND) | Supported | Doing | Doing | array_ops +| [mindspore.ops.BatchToSpace](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchToSpace) | Supported | Doing | Doing | array_ops +| [mindspore.ops.BatchToSpaceND](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchToSpaceND) | Supported | Doing | Doing | array_ops +| [mindspore.ops.IsFinite](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.IsFinite) | Supported | Supported | Doing | array_ops +| [mindspore.ops.InplaceUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InplaceUpdate) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterSub) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMax) | Supported | Doing | Doing | array_ops +| 
[mindspore.ops.ScatterMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMin) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterNdAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdAdd) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterNdSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdSub) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ScatterNonAliasingAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNonAliasingAdd) | Supported | Doing | Doing | array_ops +| [mindspore.ops.Rint](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Rint) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ReverseV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReverseV2) | Supported | Doing | Doing | array_ops +| [mindspore.ops.ReduceOp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceOp) | Supported | Supported | Doing | comm_ops +| [mindspore.ops.AllReduce](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AllReduce) | Supported | Supported | Doing | comm_ops +| [mindspore.ops.AllGather](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AllGather) | Supported | Supported | Doing | comm_ops +| [mindspore.ops.ReduceScatter](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceScatter) | Doing | Supported | Doing | comm_ops +| [mindspore.ops.Broadcast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Broadcast) | Supported | Doing | Doing | comm_ops +| [mindspore.ops.ControlDepend](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ControlDepend) | Supported | Supported | Supported | control_ops +| [mindspore.ops.GeSwitch](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GeSwitch) | Doing | Doing | Doing | control_ops +| [mindspore.ops.Merge](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Merge) | Doing | Doing | Doing | control_ops +| [mindspore.ops.ScalarSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScalarSummary) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.ImageSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ImageSummary) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.TensorSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorSummary) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.HistogramSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.HistogramSummary) | Supported | Supported | Supported | debug_ops +| [mindspore.ops.InsertGradientOf](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InsertGradientOf) | Supported | Supported | Supported | debug_ops +| 
[mindspore.ops.Print](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Print) | Supported | Doing | Doing | debug_ops +| [mindspore.ops.Assign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Assign) | Supported | Supported | Doing | other_ops +| [mindspore.ops.BoundingBoxEncode](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BoundingBoxEncode) | Supported | Supported | Doing | other_ops +| [mindspore.ops.BoundingBoxDecode](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BoundingBoxDecode) | Supported | Supported | Doing | other_ops +| [mindspore.ops.PopulationCount](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.PopulationCount) | Supported | Doing | Doing | other_ops +| [mindspore.ops.CheckValid](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CheckValid) | Supported | Supported | Doing | other_ops +| [mindspore.ops.IOU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.IOU) | Supported | Supported | Doing | other_ops +| [mindspore.ops.MakeRefKey](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MakeRefKey) | Supported | Supported | Supported | other_ops +| [mindspore.ops.InTopK](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.InTopK) | Supported | Doing | Doing | other_ops +| [mindspore.ops.StandardNormal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.StandardNormal) | Supported | Supported | Doing | random_ops +| [mindspore.ops.Gamma](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Gamma) | Supported | Doing | Doing | random_ops +| [mindspore.ops.Poisson](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Poisson) | Supported | Doing | Doing | random_ops +| [mindspore.ops.UniformInt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UniformInt) | Supported | Supported | Doing | random_ops +| [mindspore.ops.UniformReal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.UniformReal) | Supported | Supported | Doing | random_ops +| [mindspore.ops.RandomChoiceWithMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RandomChoiceWithMask) | Doing| Supported | Doing | random_ops +| [mindspore.ops.RandomCategorical](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RandomCategorical) | Supported| Doing | Doing | random_ops +| [mindspore.ops.ScalarCast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScalarCast) | Supported | Supported | Supported | inner_ops +| [mindspore.ops.ReverseSequence](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReverseSequence) | Supported | Doing | Doing | array_ops +| [mindspore.ops.CropAndResize](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.CropAndResize) | Supported | Doing | Doing | image_ops +| 
[mindspore.ops.SquaredDifference](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SquaredDifference) | Supported | Doing | Doing | math_ops
+| [mindspore.ops.Xdivy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Xdivy) | Supported | Doing | Doing | math_ops
+| [mindspore.ops.Xlogy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Xlogy) | Supported | Doing | Doing | math_ops
+| [mindspore.ops.HistogramFixedWidth](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.HistogramFixedWidth) | Supported | Doing | Doing | math_ops
+| [mindspore.ops.Eps](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Eps) | Supported | Supported | Doing | math_ops
+| [mindspore.ops.ReLUV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReLUV2) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.BNTrainingReduce](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BNTrainingReduce) | Supported | Doing | Doing | nn_ops
+| [mindspore.ops.BNTrainingUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BNTrainingUpdate) | Supported | Doing | Doing | nn_ops
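To show how an entry in the support table above maps to runnable code, here is a minimal sketch that executes one of the operators marked Supported on every backend (ReLU). It is illustrative only: the `device_target` value is an assumption and must be a backend for which the operator is marked Supported, and the import style assumes a MindSpore version in which the primitives are exposed directly under `mindspore.ops`, as the renaming in this diff documents.

```python
import numpy as np
from mindspore import Tensor, context, ops

# Run in PyNative mode so the primitive executes eagerly; "CPU" is an
# assumed backend on which ReLU is marked Supported in the table above.
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")

relu = ops.ReLU()  # instantiate the primitive once
x = Tensor(np.array([[-1.0, 0.5], [2.0, -3.0]], np.float32))
print(relu(x))     # negative entries are clamped to 0.0
```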
## mindspore.ops.functional

| Operation | Corresponding functional operator
| :----------- | :-----------
-| [mindspore.ops.operations.Pack](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pack) | pack
-| [mindspore.ops.operations.TensorAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorAdd) | tensor_add
-| [mindspore.ops.operations.AssignSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignSub) | assign_sub
-| [mindspore.ops.operations.AddN](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AddN) | addn
-| [mindspore.ops.operations.Square](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Square) | square
-| [mindspore.ops.operations.Sqrt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sqrt) | sqrt
-| [mindspore.ops.operations.Equal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Equal) | equal
-| [mindspore.ops.operations.NotEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NotEqual) | not_equal
-| [mindspore.ops.operations.LogicalNot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalNot) | logical_not
-| [mindspore.ops.operations.LogicalAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalAnd) | logical_and
-| [mindspore.ops.operations.LogicalOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalOr) | logical_or
-| [mindspore.ops.operations.ExpandDims](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ExpandDims) | expand_dims
-| [mindspore.ops.operations.DType](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DType) | dtype
-| [mindspore.ops.operations.Cast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cast) | cast
-| [mindspore.ops.operations.Reshape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Reshape) | reshape
-| [mindspore.ops.operations.Shape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Shape) | shape
-| [mindspore.ops.operations.GatherV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherV2) | gather
-| [mindspore.ops.operations.Rank](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Rank) | rank
-| [mindspore.ops.operations.Size](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Size) | size
-| [mindspore.ops.operations.Fill](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Fill) | fill
-| [mindspore.ops.operations.OnesLike](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.OnesLike) | ones_like
-| [mindspore.ops.operations.Tile](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tile) | tile
-| [mindspore.ops.operations.Select](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Select) | select
-| [mindspore.ops.operations.ScatterNd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNd) | scatter_nd
-| [mindspore.ops.operations.GatherNd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherNd) | gather_nd
-| [mindspore.ops.operations.ControlDepend](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ControlDepend) | control_depend
-| [mindspore.ops.operations.Print](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Print) | print
-| [mindspore.ops.operations.Assign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Assign) | assign
-| [mindspore.ops.operations.Pow](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pow) | tensor_pow
+| [mindspore.ops.Pack](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pack) | pack
+| [mindspore.ops.TensorAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorAdd) | tensor_add
+| [mindspore.ops.AssignSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignSub) | assign_sub
+| 
[mindspore.ops.AddN](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AddN) | addn +| [mindspore.ops.Square](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Square) | square +| [mindspore.ops.Sqrt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sqrt) | sqrt +| [mindspore.ops.Equal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Equal) | equal +| [mindspore.ops.NotEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NotEqual) | not_equal +| [mindspore.ops.LogicalNot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalNot) | logical_not +| [mindspore.ops.LogicalAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalAnd) | logical_and +| [mindspore.ops.LogicalOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalOr) | logical_or +| [mindspore.ops.ExpandDims](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ExpandDims) | expand_dims +| [mindspore.ops.DType](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DType) | dtype +| [mindspore.ops.Cast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cast) | cast +| [mindspore.ops.Reshape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Reshape) | reshape +| [mindspore.ops.Shape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Shape) | shape +| [mindspore.ops.GatherV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherV2) | gather +| [mindspore.ops.Rank](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Rank) | rank +| [mindspore.ops.Size](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Size) | size +| [mindspore.ops.Fill](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Fill) | fill +| [mindspore.ops.OnesLike](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.OnesLike) | ones_like +| [mindspore.ops.Tile](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tile) | tile +| [mindspore.ops.Select](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Select) | select +| [mindspore.ops.ScatterNd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNd) | scatter_nd +| [mindspore.ops.GatherNd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherNd) | gather_nd +| [mindspore.ops.ControlDepend](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ControlDepend) | control_depend +| [mindspore.ops.Print](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Print) | print +| [mindspore.ops.Assign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Assign) | assign +| 
[mindspore.ops.Pow](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pow) | tensor_pow

> Currently, functional covers only some of the operators that have no attributes; the remaining ones will be added later.
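As a concrete illustration of the mapping above, the sketch below computes the same elementwise sum both ways, using the `TensorAdd`/`tensor_add` pairing taken from the table. It assumes a MindSpore version in which the classic `mindspore.ops.operations` and `mindspore.ops.functional` import paths are still available.

```python
import numpy as np
from mindspore import Tensor, context
from mindspore.ops import operations as P
from mindspore.ops import functional as F

context.set_context(mode=context.PYNATIVE_MODE)

x = Tensor(np.array([1.0, 2.0, 3.0], np.float32))
y = Tensor(np.array([4.0, 5.0, 6.0], np.float32))

# Primitive style: construct the operator object first, then call it.
add = P.TensorAdd()
print(add(x, y))

# Functional style: tensor_add is a pre-instantiated, attribute-free
# version of the same primitive, so it can be called directly.
print(F.tensor_add(x, y))
```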
@@ -385,62 +400,61 @@

| Operation | Constraint
| :----------- | :-----------
-| [mindspore.ops.operations.ACos](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ACos) | None
-| [mindspore.ops.operations.Cos](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cos) | None
-| [mindspore.ops.operations.LogicalNot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalNot) | None
-| [mindspore.ops.operations.Log](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Log) | None
-| [mindspore.ops.operations.Exp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Exp) | None
-| [mindspore.ops.operations.LogSoftmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogSoftmax) | The dimension of the input (logits) corresponding to the axis cannot be split; after splitting, the result is no longer mathematically equivalent to the single-machine version
-| [mindspore.ops.operations.Softmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Softmax) | The dimension of the input (logits) corresponding to the axis cannot be split; after splitting, the result is no longer mathematically equivalent to the single-machine version
-| [mindspore.ops.operations.Tanh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tanh) | None
-| [mindspore.ops.operations.Gelu](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Gelu) | None
-| [mindspore.ops.operations.ReLU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReLU) | None
-| [mindspore.ops.operations.Sqrt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sqrt) | None
-| [mindspore.ops.operations.Cast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Cast) | None
-| [mindspore.ops.operations.Neg](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Neg) | None
-| [mindspore.ops.operations.ExpandDims](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ExpandDims) | None
-| [mindspore.ops.operations.Squeeze](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Squeeze) | None
-| [mindspore.ops.operations.Square](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Square) | None
-| [mindspore.ops.operations.Sigmoid](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sigmoid) | None
-| [mindspore.ops.operations.Dropout](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Dropout) | Repeated calculation is not supported
-| [mindspore.ops.operations.Div](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Div) | None
-| [mindspore.ops.operations.TensorAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorAdd) | None
-| [mindspore.ops.operations.RealDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RealDiv) | None
-| [mindspore.ops.operations.Mul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mul) | None
-| [mindspore.ops.operations.Sub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sub) | None
-| [mindspore.ops.operations.Pow](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pow) | None
-| [mindspore.ops.operations.FloorDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorDiv) | None
-| [mindspore.ops.operations.Greater](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Greater) | None
-| [mindspore.ops.operations.AssignSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignSub) | None
-| [mindspore.ops.operations.SigmoidCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SigmoidCrossEntropyWithLogits) | None
-| [mindspore.ops.operations.Equal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Equal) | None
-| [mindspore.ops.operations.NotEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NotEqual) | None
-| [mindspore.ops.operations.Maximum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Maximum) | None
-| [mindspore.ops.operations.Minimum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Minimum) | None
-| [mindspore.ops.operations.BiasAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BiasAdd) | None
-| [mindspore.ops.operations.Concat](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Concat) | The dimension of the input (input_x) corresponding to the axis cannot be split; after splitting, the result is no longer mathematically equivalent to the single-machine version
-| [mindspore.ops.operations.DropoutGenMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DropoutGenMask) | Must be used together with `DropoutDoMask`
-| [mindspore.ops.operations.DropoutDoMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DropoutDoMask) | Must be used together with `DropoutGenMask`; configuring the sharding strategy is not supported
-| [mindspore.ops.operations.GatherV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GatherV2) | Only 1D and 2D input_params are supported, and the last dimension of input_params must be 32-byte aligned (for performance reasons); scalar input_indices is not supported; repeated calculation is not supported when the parameter is split along the axis dimension; splitting input_indices and input_params at the same time is not supported
-| [mindspore.ops.operations.SparseGatherV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseGatherV2) | Same as GatherV2
-| [mindspore.ops.operations.EmbeddingLookup](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.EmbeddingLookup) | Same as GatherV2
-| [mindspore.ops.operations.L2Normalize](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.L2Normalize) | The dimension of the input (input_x) corresponding to the axis cannot be split; after splitting, the result is no longer mathematically equivalent to the single-machine version
-| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | The last dimension of the inputs (logits, labels) cannot be split; the operator has two outputs, and only element [0] of the forward loss may be taken
-| [mindspore.ops.operations.MatMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.MatMul) | `transpose_a=True` is not supported
-| [mindspore.ops.operations.BatchMatMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BatchMatMul) | `transpose_a=True` is not supported
-| [mindspore.ops.operations.PReLU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.PReLU) | The channel dimension of the input (input_x) must be split in the same way as weight
-| [mindspore.ops.operations.OneHot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.OneHot) | Only a 1D tensor is supported for the input (indices)
-| [mindspore.ops.operations.ReduceSum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceSum) | None
-| [mindspore.ops.operations.ReduceMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMax) | None
-| [mindspore.ops.operations.ReduceMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMin) | None
-| [mindspore.ops.operations.ArgMinWithValue](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ArgMinWithValue) | The first output (index) cannot be used as the input of other operators
-| [mindspore.ops.operations.ArgMaxWithValue](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ArgMaxWithValue) | The first output (index) cannot be used as the input of other operators
-| [mindspore.ops.operations.ReduceMean](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ReduceMean) | None
-| [mindspore.ops.operations.Reshape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Reshape) | Configuring the sharding strategy is not supported
-| [mindspore.ops.operations.StridedSlice](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.StridedSlice) | Only masks whose value is all 0 are supported; the dimensions to be split must all be fully extracted; splitting is not supported for dimensions whose corresponding strides value is not 1
-| [mindspore.ops.operations.Tile](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Tile) | Only configuring the sharding strategy for multiples is supported
-| [mindspore.ops.operations.Transpose](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Transpose) | None
-| [mindspore.ops.operations.Diag](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Diag) | Configuring the sharding strategy is not supported
+| [mindspore.ops.ACos](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ACos) | None
+| [mindspore.ops.Cos](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cos) | None
+| [mindspore.ops.LogicalNot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalNot) | None
+| [mindspore.ops.Log](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Log) | None
+| [mindspore.ops.Exp](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Exp) | None
+| [mindspore.ops.LogSoftmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogSoftmax) | The dimension of the input (logits) corresponding to the axis cannot be split; after splitting, the result is no longer mathematically equivalent to the single-machine version
+| [mindspore.ops.Softmax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Softmax) | The dimension of the input (logits) corresponding to the axis cannot be split; after splitting, the result is no longer mathematically equivalent to the single-machine version
+| [mindspore.ops.Tanh](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tanh) | None
+| [mindspore.ops.Gelu](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Gelu) | None
+| [mindspore.ops.ReLU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReLU) | None
+| [mindspore.ops.Sqrt](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sqrt) | None
+| [mindspore.ops.Cast](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Cast) | None
+| [mindspore.ops.Neg](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Neg) | None
+| [mindspore.ops.ExpandDims](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ExpandDims) | None
+| [mindspore.ops.Squeeze](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Squeeze) | None
+| [mindspore.ops.Square](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Square) | None
+| [mindspore.ops.Sigmoid](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sigmoid) | None
+| [mindspore.ops.Dropout](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Dropout) | Repeated calculation is not supported
+| [mindspore.ops.Div](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Div) | None
+| [mindspore.ops.TensorAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorAdd) | None
+| [mindspore.ops.RealDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RealDiv) | None
+| [mindspore.ops.Mul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mul) | None
+| [mindspore.ops.Sub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sub) | None
+| [mindspore.ops.Pow](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pow) | None
+| [mindspore.ops.FloorDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorDiv) | None
+| [mindspore.ops.Greater](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Greater) | None
+| [mindspore.ops.AssignSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignSub) | None
+| [mindspore.ops.SigmoidCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SigmoidCrossEntropyWithLogits) | None
+| [mindspore.ops.Equal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Equal) | None
+| [mindspore.ops.NotEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NotEqual) | None
+| [mindspore.ops.Maximum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Maximum) | None
+| [mindspore.ops.Minimum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Minimum) | None
+| [mindspore.ops.BiasAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BiasAdd) | None
+| [mindspore.ops.Concat](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Concat) | The dimension of the input (input_x) corresponding to the axis cannot be split; after splitting, the result is no longer mathematically equivalent to the single-machine version
+| [mindspore.ops.DropoutGenMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DropoutGenMask) | Must be used together with `DropoutDoMask`
+| [mindspore.ops.DropoutDoMask](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DropoutDoMask) | Must be used together with `DropoutGenMask`; configuring the sharding strategy is not supported
+| [mindspore.ops.GatherV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GatherV2) | Only 1D and 2D input_params are supported, and the last dimension of input_params must be 32-byte aligned (for performance reasons); scalar input_indices is not supported; repeated calculation is not supported when the parameter is split along the axis dimension; splitting input_indices and input_params at the same time is not supported
+| [mindspore.ops.SparseGatherV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseGatherV2) | Same as GatherV2
+| [mindspore.ops.EmbeddingLookup](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.EmbeddingLookup) | Same as GatherV2
+| [mindspore.ops.L2Normalize](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.L2Normalize) | The dimension of the input (input_x) corresponding to the axis cannot be split; after splitting, the result is no longer mathematically equivalent to the single-machine version
+| [mindspore.ops.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SoftmaxCrossEntropyWithLogits) | The last dimension of the inputs (logits, labels) cannot be split; the operator has two outputs, and only element [0] of the forward loss may be taken
+| [mindspore.ops.MatMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.MatMul) | `transpose_a=True` is not supported
+| [mindspore.ops.BatchMatMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BatchMatMul) | `transpose_a=True` is not supported
+| [mindspore.ops.PReLU](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.PReLU) | When the shape of weight is not [1], the channel dimension of the input (input_x) must be split in the same way as weight
+| [mindspore.ops.OneHot](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.OneHot) | Only a 1D tensor is supported for the input (indices); the sharding strategy must configure the strategy of the output and of the first and second inputs
+| [mindspore.ops.ReduceSum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceSum) | None
+| [mindspore.ops.ReduceMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMax) | When the input is split along the axis dimension, the distributed result may be inconsistent with the single-machine version
+| [mindspore.ops.ReduceMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMin) | When the input is split along the axis dimension, the distributed result may be inconsistent with the single-machine version
+| [mindspore.ops.ArgMinWithValue](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ArgMinWithValue) | The first output (index) cannot be used as the input of other operators; when the input is split along the axis dimension, the distributed result may be inconsistent with the single-machine version
+| [mindspore.ops.ArgMaxWithValue](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ArgMaxWithValue) | The first output (index) cannot be used as the input of other operators; when the input is split along the axis dimension, the distributed result may be inconsistent with the single-machine version
+| [mindspore.ops.ReduceMean](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ReduceMean) | None
+| [mindspore.ops.Reshape](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Reshape) | Configuring the sharding strategy is not supported
+| [mindspore.ops.StridedSlice](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.StridedSlice) | Only masks whose value is all 0 are supported; the dimensions to be split must all be fully extracted; splitting is not supported for dimensions whose corresponding strides value is not 1
+| [mindspore.ops.Tile](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Tile) | Only configuring the sharding strategy for multiples is supported
+| [mindspore.ops.Transpose](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Transpose) | None

> Repeated calculation means that the machines are not fully used. For example, a cluster runs distributed training on 8 cards, but the sharding strategy splits the input into only 4 slices; in this case repeated calculation occurs.
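To relate the constraints above to actual configuration, here is a minimal sketch of attaching a sharding strategy to `MatMul` under semi-auto parallel. The 8-card setup and the (2, 4)/(4, 1) strategy are illustrative assumptions, and depending on the release the method is `shard` or the older `set_strategy`:

```python
# Minimal sketch: a MatMul cell with an explicit sharding strategy.
# Illustrative 8-device setup; the strategy values are an assumption.
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import context

context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8)

class MatMulNet(nn.Cell):
    def __init__(self):
        super(MatMulNet, self).__init__()
        # transpose_a=True is not supported in parallel, per the table above.
        self.matmul = ops.MatMul().shard(((2, 4), (4, 1)))

    def construct(self, x, w):
        return self.matmul(x, w)
```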
@@ -468,66 +482,66 @@

| Operator
| :-----------
-| [mindspore.ops.operations.Assign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Assign)
-| [mindspore.ops.operations.AssignSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.AssignSub)
-| [mindspore.ops.operations.ApplyMomentum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyMomentum)
-| [mindspore.ops.operations.FusedSparseAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseAdam)
-| [mindspore.ops.operations.FusedSparseLazyAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseLazyAdam)
-| [mindspore.ops.operations.FusedSparseFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseFtrl)
-| [mindspore.ops.operations.FusedSparseProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FusedSparseProximalAdagrad)
-| [mindspore.ops.operations.ApplyAdaMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAdaMax)
-| [mindspore.ops.operations.ApplyAdadelta](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAdadelta)
-| [mindspore.ops.operations.ApplyAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAdagrad)
-| [mindspore.ops.operations.ApplyAdagradV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAdagradV2)
-| [mindspore.ops.operations.SparseApplyAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyAdagrad)
-| [mindspore.ops.operations.SparseApplyAdagradV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyAdagradV2)
-| [mindspore.ops.operations.ApplyProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyProximalAdagrad)
-| [mindspore.ops.operations.SparseApplyProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyProximalAdagrad)
-| [mindspore.ops.operations.ApplyAddSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAddSign)
-| [mindspore.ops.operations.ApplyPowerSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyPowerSign)
-| [mindspore.ops.operations.ApplyGradientDescent](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyGradientDescent)
-| [mindspore.ops.operations.ApplyProximalGradientDescent](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyProximalGradientDescent)
-| [mindspore.ops.operations.SparseApplyFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyFtrl)
-| [mindspore.ops.operations.SparseApplyFtrlV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseApplyFtrlV2)
-| [mindspore.ops.operations.BitwiseAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseAnd)
-| [mindspore.ops.operations.BitwiseOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseOr)
-| [mindspore.ops.operations.BitwiseXor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BitwiseXor)
-| [mindspore.ops.operations.TensorAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TensorAdd)
-| [mindspore.ops.operations.Sub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Sub)
-| [mindspore.ops.operations.Mul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mul)
-| [mindspore.ops.operations.Pow](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Pow)
-| [mindspore.ops.operations.Minimum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Minimum)
-| [mindspore.ops.operations.Maximum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Maximum)
-| 
[mindspore.ops.operations.RealDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.RealDiv) -| [mindspore.ops.operations.Div](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Div) -| [mindspore.ops.operations.DivNoNan](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.DivNoNan) -| [mindspore.ops.operations.FloorDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorDiv) -| [mindspore.ops.operations.TruncateDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncateDiv) -| [mindspore.ops.operations.TruncateMod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TruncateMod) -| [mindspore.ops.operations.Mod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Mod) -| [mindspore.ops.operations.FloorMod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.FloorMod) -| [mindspore.ops.operations.Atan2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Atan2) -| [mindspore.ops.operations.SquaredDifference](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SquaredDifference) -| [mindspore.ops.operations.Xdivy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Xdivy) -| [mindspore.ops.operations.Xlogy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Xlogy) -| [mindspore.ops.operations.Equal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Equal) -| [mindspore.ops.operations.ApproximateEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApproximateEqual) -| [mindspore.ops.operations.NotEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.NotEqual) -| [mindspore.ops.operations.Greater](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Greater) -| [mindspore.ops.operations.GreaterEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.GreaterEqual) -| [mindspore.ops.operations.Less](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Less) -| [mindspore.ops.operations.LessEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LessEqual) -| [mindspore.ops.operations.LogicalAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalAnd) -| [mindspore.ops.operations.LogicalOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.LogicalOr) -| 
[mindspore.ops.operations.ScatterNdUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdUpdate) -| [mindspore.ops.operations.ScatterNdAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdAdd) -| [mindspore.ops.operations.ScatterNdSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNdSub) -| [mindspore.ops.operations.ScatterNonAliasingAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterNonAliasingAdd) -| [mindspore.ops.operations.ScatterUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterUpdate) -| [mindspore.ops.operations.ScatterMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMax) -| [mindspore.ops.operations.ScatterMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMin) -| [mindspore.ops.operations.ScatterAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterAdd) -| [mindspore.ops.operations.ScatterSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterSub) -| [mindspore.ops.operations.ScatterMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterMul) -| [mindspore.ops.operations.ScatterDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ScatterDiv) +| [mindspore.ops.Assign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Assign) +| [mindspore.ops.AssignSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.AssignSub) +| [mindspore.ops.ApplyMomentum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyMomentum) +| [mindspore.ops.FusedSparseAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseAdam) +| [mindspore.ops.FusedSparseLazyAdam](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseLazyAdam) +| [mindspore.ops.FusedSparseFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseFtrl) +| [mindspore.ops.FusedSparseProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FusedSparseProximalAdagrad) +| [mindspore.ops.ApplyAdaMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAdaMax) +| [mindspore.ops.ApplyAdadelta](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAdadelta) +| [mindspore.ops.ApplyAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAdagrad) +| [mindspore.ops.ApplyAdagradV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAdagradV2) +| 
[mindspore.ops.SparseApplyAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyAdagrad) +| [mindspore.ops.SparseApplyAdagradV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyAdagradV2) +| [mindspore.ops.ApplyProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyProximalAdagrad) +| [mindspore.ops.SparseApplyProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyProximalAdagrad) +| [mindspore.ops.ApplyAddSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyAddSign) +| [mindspore.ops.ApplyPowerSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyPowerSign) +| [mindspore.ops.ApplyGradientDescent](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyGradientDescent) +| [mindspore.ops.ApplyProximalGradientDescent](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApplyProximalGradientDescent) +| [mindspore.ops.SparseApplyFtrl](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyFtrl) +| [mindspore.ops.SparseApplyFtrlV2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SparseApplyFtrlV2) +| [mindspore.ops.BitwiseAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseAnd) +| [mindspore.ops.BitwiseOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseOr) +| [mindspore.ops.BitwiseXor](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.BitwiseXor) +| [mindspore.ops.TensorAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TensorAdd) +| [mindspore.ops.Sub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Sub) +| [mindspore.ops.Mul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mul) +| [mindspore.ops.Pow](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Pow) +| [mindspore.ops.Minimum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Minimum) +| [mindspore.ops.Maximum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Maximum) +| [mindspore.ops.RealDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.RealDiv) +| [mindspore.ops.Div](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Div) +| [mindspore.ops.DivNoNan](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.DivNoNan) +| [mindspore.ops.FloorDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorDiv) +| [mindspore.ops.TruncateDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncateDiv) +| [mindspore.ops.TruncateMod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.TruncateMod) +| 
[mindspore.ops.Mod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Mod) +| [mindspore.ops.FloorMod](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.FloorMod) +| [mindspore.ops.Atan2](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Atan2) +| [mindspore.ops.SquaredDifference](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.SquaredDifference) +| [mindspore.ops.Xdivy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Xdivy) +| [mindspore.ops.Xlogy](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Xlogy) +| [mindspore.ops.Equal](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Equal) +| [mindspore.ops.ApproximateEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ApproximateEqual) +| [mindspore.ops.NotEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.NotEqual) +| [mindspore.ops.Greater](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Greater) +| [mindspore.ops.GreaterEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.GreaterEqual) +| [mindspore.ops.Less](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.Less) +| [mindspore.ops.LessEqual](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LessEqual) +| [mindspore.ops.LogicalAnd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalAnd) +| [mindspore.ops.LogicalOr](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.LogicalOr) +| [mindspore.ops.ScatterNdUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdUpdate) +| [mindspore.ops.ScatterNdAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdAdd) +| [mindspore.ops.ScatterNdSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNdSub) +| [mindspore.ops.ScatterNonAliasingAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterNonAliasingAdd) +| [mindspore.ops.ScatterUpdate](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterUpdate) +| [mindspore.ops.ScatterMax](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMax) +| [mindspore.ops.ScatterMin](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMin) +| [mindspore.ops.ScatterAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterAdd) +| [mindspore.ops.ScatterSub](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterSub) +| [mindspore.ops.ScatterMul](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterMul) +| 
[mindspore.ops.ScatterDiv](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html#mindspore.ops.ScatterDiv) diff --git a/lite/docs/source_en/_static/logo_source.png b/lite/docs/source_en/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/lite/docs/source_en/_static/logo_source.png and b/lite/docs/source_en/_static/logo_source.png differ diff --git a/lite/docs/source_en/apicc/dataset.md b/lite/docs/source_en/apicc/dataset.md index 984ffc15eaa2fc44ed5e17c87f89b561083a5eae..6b2313ffb61a19ef45f18309b87f6a68b528e335 100644 --- a/lite/docs/source_en/apicc/dataset.md +++ b/lite/docs/source_en/apicc/dataset.md @@ -6,6 +6,8 @@ ## Functions of image_process.h +### ResizeBilinear + ``` bool ResizeBilinear(LiteMat &src, LiteMat &dst, int dst_w, int dst_h) ``` @@ -22,6 +24,8 @@ Resize image by bilinear algorithm, currently the data type only supports uint8, Return True or False. +### InitFromPixel + ``` bool InitFromPixel(const unsigned char *data, LPixelType pixel_type, LDataType data_type, int w, int h, LiteMat &m) ``` @@ -40,6 +44,8 @@ Initialize LiteMat from pixel, currently the conversion supports rbgaTorgb and r Return True or False. +### ConvertTo + ``` bool ConvertTo(LiteMat &src, LiteMat &dst, double scale = 1.0) ``` @@ -56,6 +62,8 @@ Convert the data type, currently it supports converting the data type from uint8 Return True or False. +### Crop + ``` bool Crop(LiteMat &src, LiteMat &dst, int x, int y, int w, int h) ``` @@ -74,8 +82,10 @@ Crop image, the channel supports is 3 and 1. Return True or False. +### SubStractMeanNormalize + ``` -bool SubStractMeanNormalize(LiteMat &src, LiteMat &dst, const float *mean, float *norm) +bool SubStractMeanNormalize(const LiteMat &src, LiteMat &dst, const std::vector &mean, const std::vector &std); ``` Normalize image, currently the supports data type is float. @@ -85,16 +95,18 @@ Normalize image, currently the supports data type is float. - `src`: Input image data. - `dst`: Output image data. - `mean`: Mean of the data set. - - `norm`: Norm of the data set. + - `std`: Norm of the data set. - Returns Return True or False. +### Pad + ``` -bool Padd(LiteMat &src, LiteMat &dst, const int top, const int bottom, const int left, const int right, const PaddBorderType pad_type, uint8_t fill_r, uint8_t fill_g, uint8_t fill_b) +bool Pad(const LiteMat &src, LiteMat &dst, int top, int bottom, int left, int right, PaddBorderType pad_type, uint8_t fill_b_or_gray, uint8_t fill_g, uint8_t fill_r) ``` -Padd image, the channel supports is 3 and 1. +Pad image, the channel supports is 3 and 1. - Parameters @@ -105,13 +117,15 @@ Padd image, the channel supports is 3 and 1. - `left`: The length of left. - `right`: The length of right. - `pad_type`: The type of pad. - - `fill_r`: R. + - `fill_b_or_gray`: B or GRAY. - `fill_g`: G. - - `fill_b`: B. + - `fill_r`: R. - Returns Return True or False. +### Affine + ``` void Affine(LiteMat &src, LiteMat &out_img, double M[6], std::vector dsize, UINT8_C1 borderValue) ``` @@ -140,6 +154,8 @@ Apply affine transformation for 3 channel image. - `dsize`: The size of the output image. - `borderValue`: The pixel value is used for filing after the image is captured. +### GetDefaultBoxes + ``` std::vector> GetDefaultBoxes(BoxesConfig config) ``` @@ -154,6 +170,8 @@ Get default anchor boxes for Faster R-CNN, SSD, YOLO etc. Return the default boxes. 
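Taken together, the functions above form a typical pre-processing chain. A minimal sketch of chaining them follows; the header paths and the `LPixelType`/`LDataType` enum spellings are assumptions based on this page's signatures, not verified against the sources:

```cpp
// Minimal sketch: pixel buffer -> resize -> float conversion with the API above.
// Header paths and enum spellings are assumptions based on this page.
#include "lite_cv/lite_mat.h"
#include "lite_cv/image_process.h"

using namespace mindspore::dataset;

bool Preprocess(const unsigned char *pixels, int w, int h, LiteMat &out) {
  LiteMat rgb, resized;
  // RGBA byte buffer -> 3-channel uint8 LiteMat
  if (!InitFromPixel(pixels, LPixelType::RGBA2RGB, LDataType::UINT8, w, h, rgb)) return false;
  // Bilinear resize; per the notes above, only uint8 data is supported here
  if (!ResizeBilinear(rgb, resized, 224, 224)) return false;
  // uint8 -> float, scaled into [0, 1]
  return ConvertTo(resized, out, 1.0 / 255.0);
}
```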
+### ConvertBoxes + ``` void ConvertBoxes(std::vector> &boxes, std::vector> &default_boxes, BoxesConfig config) ``` @@ -166,6 +184,8 @@ Convert the prediction boxes to the actual boxes with (y, x, h, w). - `default_boxes`: Default box. - `config`: Objects of BoxesConfig structure. +### ApplyNms + ``` std::vector ApplyNms(std::vector> &all_boxes, std::vector &all_scores, float thres, int max_boxes) ``` @@ -190,6 +210,7 @@ Class that represents a lite Mat of a Image. **Constructors & Destructors** +### LiteMat ``` LiteMat() @@ -211,6 +232,7 @@ Destructor of MindSpore dataset LiteMat. **Public Member Functions** +### Init ``` void Init(int width, LDataType data_type = LDataType::UINT8) @@ -222,6 +244,8 @@ void Init(int width, int height, int channel, LDataType data_type = LDataType::U The function to initialize the channel, width and height of the image, but the parameters are different. +### IsEmpty + ``` bool IsEmpty() const ``` @@ -232,6 +256,8 @@ A function to determine whether the object is empty. Return True or False. +### Release + ``` void Release() ``` @@ -240,6 +266,8 @@ A function to release memory. **Private Member Functions** +### AlignMalloc + ``` void *AlignMalloc(unsigned int size) ``` @@ -254,6 +282,8 @@ Apply for memory alignment. Return the size of a pointer. +### AlignFree + ``` void AlignFree(void *ptr) ``` @@ -270,6 +300,8 @@ Initialize the value of elem_size_ by data_type. - `data_type`: Type of data. +### addRef + ``` int addRef(int *p, int value) ``` diff --git a/lite/docs/source_en/apicc/errorcode_and_metatype.md b/lite/docs/source_en/apicc/errorcode_and_metatype.md index df566213408154cd2034eb2932a5f6d1380f89f3..45b4877a858d82df61c1dffa8dc734edddd300a5 100644 --- a/lite/docs/source_en/apicc/errorcode_and_metatype.md +++ b/lite/docs/source_en/apicc/errorcode_and_metatype.md @@ -13,6 +13,7 @@ Description of error code and meta type supported in MindSpore Lite. | RET_NO_CHANGE | -4 | No change. | | RET_SUCCESS_EXIT | -5 | No error but exit. | | RET_MEMORY_FAILED | -6 | Fail to create memory. | +| RET_NOT_SUPPORT | -7 | Fail to support. | | RET_OUT_OF_TENSOR_RANGE | -101 | Failed to check range. | | RET_INPUT_TENSOR_ERROR | -102 | Failed to check input tensor. | | RET_REENTRANT_ERROR | -103 | Exist executor running. | @@ -24,6 +25,8 @@ Description of error code and meta type supported in MindSpore Lite. | RET_FORMAT_ERR | -401 | Failed to check the tensor format. | | RET_INFER_ERR | -501 | Failed to infer shape. | | RET_INFER_INVALID | -502 | Invalid infer shape before runtime. | +| RET_INPUT_PARAM_INVALID | -601 | Invalid input param by user. | +| RET_INPUT_PARAM_LACK | -602 | Lack input param by user. | ## MetaType An **enum** type. diff --git a/lite/docs/source_en/apicc/lite.md b/lite/docs/source_en/apicc/lite.md index 93bc93edf0d709c8d227723f921ea39f9a39f3b0..1dbe44a3f99d3b35f2c6a501523ac75d90702ec4 100644 --- a/lite/docs/source_en/apicc/lite.md +++ b/lite/docs/source_en/apicc/lite.md @@ -23,23 +23,6 @@ Context() Constructor of MindSpore Lite Context using default value for parameters. -``` -Context(int thread_num, std::shared_ptr allocator, DeviceContext device_ctx) -``` -Constructor of MindSpore Lite Context using input value for parameters. - -- Parameters - - - `thread_num`: Define the work thread number during the runtime. - - - `allocator`: Define the allocator for malloc. - - - `device_ctx`: Define device information during the runtime. - -- Returns - - The instance of MindSpore Lite Context. 
-
```
~Context()
```
Destructor of MindSpore Lite Context.

@@ -52,10 +35,12 @@ float16_priority
```
A **bool** value. Defaults to **false**. Whether to prefer float16 inference.

+> Enabling float16 inference may lower inference precision, because some variables may exceed the range of float16 during forwarding.
+
```
-device_ctx_{DT_CPU}
+device_type
```
-A [**DeviceContext**](https://www.mindspore.cn/lite/docs/en/master/apicc/lite.html#devicecontext) struct defined at the bottom of the text. Using to specify the device.
+A [**DeviceType**](https://www.mindspore.cn/lite/docs/en/master/apicc/lite.html#devicetype) **enum** type. Defaults to **DT_CPU**. Used to specify the device.

```
thread_num_
```
@@ -153,16 +138,6 @@ GPU device type.
DT_NPU = 0
```
NPU device type, not supported yet.
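For orientation, a minimal sketch of how the reworked `Context` fits into session creation. The `Model`/`LiteSession` calls follow the MindSpore Lite C++ API of this release as the editor understands it; treat the exact signatures as assumptions, and `model_buf`/`size` as inputs supplied by the caller:

```cpp
// Minimal sketch: configure Context, then build and run a session.
// model_buf/size are assumed to hold a .ms model read by the caller.
#include "include/context.h"
#include "include/lite_session.h"
#include "include/model.h"

int RunModel(const char *model_buf, size_t size) {
  mindspore::lite::Context ctx;
  ctx.device_type = mindspore::lite::DT_CPU;  // DeviceType enum, defaults to DT_CPU
  ctx.thread_num_ = 2;
  ctx.float16_priority = false;               // see the precision note above

  auto *model = mindspore::lite::Model::Import(model_buf, size);
  auto *session = mindspore::session::LiteSession::CreateSession(&ctx);
  if (model == nullptr || session == nullptr) return -1;
  if (session->CompileGraph(model) != mindspore::lite::RET_OK) return -1;
  // Fill session->GetInputs() with input data here, then:
  return session->RunGraph();
}
```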
-## DeviceContext
-
-A **struct**. DeviceContext defined for holding DeviceType.
-
-**Attributes**
-```
-type
-```
-A [**DeviceType**](https://www.mindspore.cn/lite/docs/en/master/apicc/lite.html#devicetype) variable. The device type.
-
## Version

```
diff --git a/lite/docs/source_en/apicc/tensor.md b/lite/docs/source_en/apicc/tensor.md
index 014929ba12ea2d636478ea7515562559bd9af087..c721fd22d5d8fe14c3da625aa6539431a224c2d1 100644
--- a/lite/docs/source_en/apicc/tensor.md
+++ b/lite/docs/source_en/apicc/tensor.md
@@ -36,19 +36,6 @@ Get data type of the MindSpore Lite MSTensor.

MindSpore Lite TypeId of the MindSpore Lite MSTensor.

-```
-virtual TypeId set_data_type(TypeId data_type)
-```
-Set data type for the MindSpore Lite MSTensor.
-
-- Parameters
-
- - `data_type`: Define MindSpore Lite TypeId to be set in the MindSpore Lite MSTensor.
-
-- Returns
-
- MindSpore Lite TypeId of the MindSpore Lite MSTensor after set.
-
```
virtual std::vector shape() const
```
@@ -59,19 +46,6 @@ Get shape of the MindSpore Lite MSTensor.

A vector of int as the shape of the MindSpore Lite MSTensor.

-```
-virtual size_t set_shape(const std::vector &shape)
-```
-Set shape for the MindSpore Lite MSTensor.
-
-- Parameters
-
- - `shape`: Define a vector of int as shape to be set into the MindSpore Lite MSTensor.
-
-- Returns
-
- Size of shape of the MindSpore Lite MSTensor after set.
-
```
virtual int DimensionSize(size_t index) const
```
@@ -96,16 +70,6 @@ Get number of element in MSTensor.

Number of element in MSTensor.

-```
-virtual std::size_t hash() const
-```
-
-Get hash of the MindSpore Lite MSTensor.
-
-- Returns
-
- Hash of the MindSpore Lite MSTensor.
-
```
virtual size_t Size() const
```
@@ -129,23 +93,3 @@ Get the pointer of data in MSTensor.

- Returns

 The pointer points to data in MSTensor.
-
-**Static Public Member Functions**
-
-```
-static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape)
-```
-
-Static method to create a MSTensor pointer.
-
-> Note: TypeId is defined in [mindspore/mindspore/core/ir/dtype/type_id.h](https://gitee.com/mindspore/mindspore/blob/master/mindspore/core/ir/dtype/type_id.h). Only number types in TypeId enum are suitable for MSTensor.
-
-- Parameters
-
- - `data_type`: Define the data type of tensor to be created.
-
- - `shape`: Define the shape of tensor to be created.
-
-- Returns
-
- The pointer of MSTensor.
\ No newline at end of file
diff --git a/lite/docs/source_en/image_classification.md b/lite/docs/source_en/image_classification.md
new file mode 100644
index 0000000000000000000000000000000000000000..61e2321e45598f9cd38154dfa3f10838285cc8f5
--- /dev/null
+++ b/lite/docs/source_en/image_classification.md
@@ -0,0 +1,32 @@
+# Image classification
+
+
+
+## Image classification introduction
+
+Image classification is to identify what an image represents and to predict the list of objects and their probabilities. For example, the following table shows the classification results after model inference.
+
+![image_classification](images/image_classification_result.png)
+
+| Category | Probability |
+| ---------- | ----------- |
+| plant | 0.9359 |
+| flower | 0.8641 |
+| tree | 0.8584 |
+| houseplant | 0.7867 |
+
+For an image classification application implemented with MindSpore Lite, see the [example](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/lite/image_classification).
+
+## Image classification model list
+
+The following table shows the data of some image classification models using MindSpore Lite inference.
+
+> The performance in the table below was tested on the Mate 30.
+
+| Model name | Size(Mb) | Top1 | Top5 | F1 | CPU 4 thread delay (ms) |
+|-----------------------| :----------: | :----------: | :----------: | :----------: | :-----------: |
+| [MobileNetV2](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2.ms) | 11.5 | - | - | 65.5% | 14.595 |
+| [Inceptionv3](https://download.mindspore.cn/model_zoo/official/lite/inceptionv3_lite/inceptionv3.ms) | 90.9 | 78.62% | 94.08% | - | 92.086 |
+| [Shufflenetv2](https://download.mindspore.cn/model_zoo/official/lite/shufflenetv2_lite/shufflenetv2.ms) | 8.8 | 67.74% | 87.62% | - | 8.303 |
+| [GoogleNet](https://download.mindspore.cn/model_zoo/official/lite/googlenet_lite/googlenet.ms) | 25.3 | 72.2% | 90.06% | - | 23.257 |
+| [ResNext50](https://download.mindspore.cn/model_zoo/official/lite/resnext50_lite/resnext50.ms) | 95.8 | 73.1% | 91.21% | - | 138.164 |
diff --git a/lite/docs/source_en/images/image_classification_result.png b/lite/docs/source_en/images/image_classification_result.png
new file mode 100644
index 0000000000000000000000000000000000000000..a7cc49f582440e31b6b5b14dbba5131bfed2a4b4
Binary files /dev/null and b/lite/docs/source_en/images/image_classification_result.png differ
diff --git a/lite/docs/source_en/images/object_detection.png b/lite/docs/source_en/images/object_detection.png
new file mode 100644
index 0000000000000000000000000000000000000000..ad5425c86393a9367701166796df42c9e4702988
Binary files /dev/null and b/lite/docs/source_en/images/object_detection.png differ
diff --git a/lite/docs/source_en/index.rst b/lite/docs/source_en/index.rst
index abecfe957e16896bca6efeb5a1cb376835251fa6..10e8c04337755b302a99f74116e0afc3b938c7fc 100644
--- a/lite/docs/source_en/index.rst
+++ b/lite/docs/source_en/index.rst
@@ -12,5 +12,7 @@ MindSpore Lite Documentation
   architecture
   apicc/apicc
+   image_classification
+   object_detection
   operator_list
   glossary
diff --git a/lite/docs/source_en/object_detection.md b/lite/docs/source_en/object_detection.md
new file mode 100644
index 0000000000000000000000000000000000000000..4f2085c5d045ee3654140e34158a098502ce9733
--- /dev/null
+++ b/lite/docs/source_en/object_detection.md
@@ -0,0 +1,26 @@
+# Object detection
+
+
+
+## Object detection introduction
+
+Object detection can identify the object in an image and its position in the image. For the following figure, the output of the object detection model is shown in the table below: a rectangular box marks the position of the object in the figure, annotated with the probability of the object category. The four numbers in the coordinate are Xmin, Ymin, Xmax and Ymax; the probability is the probability of the detected object.
+
+![object_detection](images/object_detection.png)
+
+| Category | Probability | Coordinate |
+| -------- | ----------- | ---------------- |
+| mouse | 0.78 | [10, 25, 35, 43] |
+
+For an object detection application implemented with MindSpore Lite, see the [example](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/lite/object_detection).
+
+## Object detection model list
+
+The following table shows the data of some object detection models using MindSpore Lite inference.
+
+> The performance in the table below was tested on the Mate 30.
+
+| Model name | Size | mAP(IoU=0.50:0.95) | CPU 4 thread delay (ms) |
+|-----------------------| :----------: | :----------: | :-----------: |
+| [MobileNetv2-SSD](https://download.mindspore.cn/model_zoo/official/lite/ssd_mobilenetv2_lite/ssd.ms) | 16.7 | 0.22 | 25.4 |
+
diff --git a/lite/docs/source_en/operator_list.md b/lite/docs/source_en/operator_list.md
index 6038b5c95690dd4b30378a7101d828ef9d0cda90..3fbeb8544df8ddeb93636b1e364e0f22be0d36a3 100644
--- a/lite/docs/source_en/operator_list.md
+++ b/lite/docs/source_en/operator_list.md
@@ -5,107 +5,111 @@
> √ The checked items are the operators supported by MindSpore Lite.

| Operation | CPU<br/>FP16 | CPU<br/>FP32 | CPU<br/>Int8 | CPU<br/>UInt8 | GPU<br/>FP16 | GPU<br/>FP32 | Tensorflow<br/>Lite op supported | Caffe<br/>Lite op supported | Onnx<br/>Lite op supported |
-|-----------------------|----------|----------|-----------|----------|----------|------------------|----------|----------|----------|
-| Abs | | √ | √ | √ | | | Abs | | Abs |
-| Add | √ | √ | √ | √ | | √ | Add | | Add |
-| AddN | | √ | | | | | AddN | | |
-| Argmax | | √ | √ | √ | | | Argmax | ArgMax | ArgMax |
-| Argmin | | √ | √ | √ | | | Argmin | | |
-| AvgPool | √ | √ | √ | √ | | √ | MeanPooling| Pooling | AveragePool |
-| BatchNorm | √ | √ | √ | √ | | √ | | BatchNorm | BatchNormalization |
-| BatchToSpace | | √ | √ | √ | | | BatchToSpace, BatchToSpaceND | | |
-| BiasAdd | | √ | √ | √ | | √ | | | BiasAdd |
+|-----------------------|----------|----------|----------|-----------|----------|----------|------------|----------------|--------------------|
+| Abs | | √ | √ | √ | √ | √ | Abs | | Abs |
+| Add | √ | √ | √ | √ | √ | √ | Add | | Add |
+| AddN | | √ | | | | | AddN | | |
+| Argmax | | √ | √ | √ | | | Argmax | ArgMax | ArgMax |
+| Argmin | | √ | √ | √ | | | Argmin | | |
+| AvgPool | √ | √ | √ | √ | √ | √ | MeanPooling| Pooling | AveragePool |
+| BatchNorm | √ | √ | √ | √ | √ | √ | | BatchNorm | BatchNormalization |
+| BatchToSpace | | √ | √ | √ | | | BatchToSpace | | |
+| BatchToSpaceND | | √ | √ | | | | BatchToSpaceND | | |
+| BiasAdd | | √ | √ | √ | √ | √ | | | BiasAdd |
| Broadcast | | √ | | | | | BroadcastTo | | Expand |
-| Cast | √ | √ | | √ | | | Cast, DEQUANTIZE* | | Cast |
-| Ceil | | √ | √ | √ | | | Ceil | | Ceil |
+| Cast | √ | √ | √ | √ | √ | √ | Cast, QUANTIZE, DEQUANTIZE | | Cast |
+| Ceil | | √ | √ | √ | √ | √ | Ceil | | Ceil |
| Concat | √ | √ | √ | √ | √ | √ | Concat | Concat | Concat |
| Conv2d | √ | √ | √ | √ | √ | √ | Conv2D | Convolution | Conv |
| Conv2dTranspose | √ | √ | √ | √ | √ | √ | DeConv2D | Deconvolution | ConvTranspose |
-| Cos | | √ | √ | √ | | | Cos | | Cos |
+| Cos | | √ | √ | √ | √ | √ | Cos | | Cos |
| Crop | | √ | √ | √ | | | | Crop | |
| DeDepthwiseConv2D | | √ | √ | √ | | | | Deconvolution| ConvTranspose |
| DepthToSpace | | √ | √ | √ | | | DepthToSpace| | DepthToSpace |
| DepthwiseConv2dNative | √ | √ | √ | √ | √ | √ | DepthwiseConv2D | Convolution | Convolution |
-| Div | √ | √ | √ | √ | | √ | Div, RealDiv | | Div |
+| DetectionPostProcess | | √ | | | | | DetectionPostProcess | | |
+| Div | √ | √ | √ | √ | √ | √ | Div, RealDiv | | Div |
| Eltwise | √ | √ | | | | | | Eltwise | |
-| Elu | | √ | | | | | Elu | | Elu |
+| Elu | | √ | | | | | Elu | | Elu |
| Equal | √ | √ | √ | √ | | | Equal | | Equal |
-| Exp | | √ | | | | | Exp | | Exp |
-| ExpandDims | | √ | | | | | | | |
+| Exp | | √ | | | √ | √ | Exp | Exp | Exp |
+| ExpandDims | | √ | | | | | ExpandDims | | |
| Fill | | √ | | | | | Fill | | |
| Flatten | | √ | | | | | | Flatten | |
-| Floor | | √ | √ | √ | | | flOOR | | Floor |
+| Floor | | √ | √ | √ | √ | √ | Floor | | Floor |
| FloorDiv | √ | √ | | | | | FloorDiv | | |
| FloorMod | √ | √ | | | | | FloorMod | | |
-| FullConnection | | √ | √ | √ | | | FullyConnected | InnerProduct | |
+| FullConnection | √ | √ | √ | √ | √ | √ | FullyConnected | InnerProduct | |
| GatherNd | | √ | √ | √ | | | GatherND | | |
| GatherV2 | | √ | √ | √ | | | Gather | | Gather |
| Greater | √ | √ | √ | √ | | | Greater | | Greater |
| GreaterEqual | √ | √ | √ | √ | | | GreaterEqual| | |
| Hswish | √ | √ | √ | √ | | | HardSwish | | |
-| LeakyReLU | √ | √ | | | | √ | LeakyRelu | | LeakyRelu |
+| L2Norm | | √ | | | | | L2_NORMALIZATION | | |
+| LeakyReLU | √ | √ | | | √ | √ | LeakyRelu | | LeakyRelu |
| Less | √ | √ | √ | √ | | | Less | | Less |
| 
LessEqual | √ | √ | √ | √ | | | LessEqual | | | -| LRN | | √ | | | | | LocalResponseNorm | | Lrn | -| Log | | √ | √ | √ | | | Log | | Log | +| LRN | | √ | | | | | LocalResponseNorm | | Lrn, LRN | +| Log | | √ | √ | √ | √ | √ | Log | | Log | | LogicalAnd | √ | √ | | | | | LogicalAnd | | | -| LogicalNot | | √ | √ | √ | | | LogicalNot | | | +| LogicalNot | | √ | √ | √ | √ | √ | LogicalNot | | | | LogicalOr | √ | √ | | | | | LogicalOr | | | | LSTM | | √ | | | | | | | | | MatMul | | √ | √ | √ | √ | √ | | | MatMul | | Maximum | √ | √ | | | | | Maximum | | Max | -| MaxPool | √ | √ | √ | √ | | √ | MaxPooling | Pooling | MaxPool | +| MaxPool | √ | √ | √ | √ | √ | √ | MaxPooling | Pooling | MaxPool | | Minimum | √ | √ | | | | | Minimum | | Min | -| Mul | √ | √ | √ | √ | | √ | Mul | | Mul | +| Mul | √ | √ | √ | √ | √ | √ | Mul | | Mul | +| Neg | | √ | | | | | Neg | | Neg | | NotEqual | √ | √ | √ | √ | | | NotEqual | | | | OneHot | | √ | | | | | OneHot | | | -| Pad | | √ | √ | √ | | | Pad | | Pad | -| Pow | | √ | √ | √ | | | Pow | Power | Power | -| PReLU | | √ | | | | √ | | PReLU | | +| Pad | √ | √ | √ | √ | | | Pad, MirrorPad | | Pad | +| Pow | | √ | √ | √ | | | Pow | Power | Power | +| PReLU | | √ | | | √ | √ | | PReLU | | | Range | | √ | | | | | Range | | | | Rank | | √ | | | | | Rank | | | +| ReduceASum | | √ | | | | | | Reduction | | | ReduceMax | √ | √ | √ | √ | | | ReduceMax | | ReduceMax | -| ReduceMean | √ | √ | √ | √ | | | Mean | | ReduceMean | +| ReduceMean | √ | √ | √ | √ | | | Mean | Reduction | ReduceMean | | ReduceMin | √ | √ | √ | √ | | | ReduceMin | | ReduceMin | | ReduceProd | √ | √ | √ | √ | | | ReduceProd | | | -| ReduceSum | √ | √ | √ | √ | | | Sum | | ReduceSum | -| ReduceSumSquare | √ | √ | √ | √ | | | | | | -| ReLU | √ | √ | √ | √ | | √ | Relu | ReLU | Relu | -| ReLU6 | √ | √ | √ | √ | | √ | Relu6 | ReLU6 | Clip* | -| Reshape | √ | √ | √ | √ | | √ | Reshape | Reshape | Reshape,Flatten | +| ReduceSum | √ | √ | √ | √ | | | Sum | Reduction | ReduceSum | +| ReduceSumSquare | √ | √ | √ | √ | | | | Reduction | | +| ReLU | √ | √ | √ | √ | √ | √ | Relu | ReLU | Relu | +| ReLU6 | √ | √ | √ | √ | √ | √ | Relu6 | ReLU6 | Clip* | +| Reshape | √ | √ | √ | √ | √ | √ | Reshape | Reshape | Reshape,Flatten | | Resize | | √ | √ | √ | | | ResizeBilinear, NearestNeighbor | Interp | | | Reverse | | √ | | | | | reverse | | | | ReverseSequence | | √ | | | | | ReverseSequence | | | -| Round | | √ | √ | √ | | | Round | | | -| Rsqrt | | √ | √ | √ | | | Rsqrt | | | -| Scale | | √ | | | | | | Scale | | +| Round | | √ | √ | √ | √ | √ | Round | | | +| Rsqrt | | √ | √ | √ | √ | √ | Rsqrt | | | +| Scale | | √ | | | √ | √ | | Scale | | | ScatterNd | | √ | | | | | ScatterNd | | | -| Shape | | √ | | | | | Shape | | Shape | -| Sigmoid | √ | √ | √ | √ | | √ | Logistic | Sigmoid | Sigmoid | -| Sin | | √ | √ | √ | | | Sin | | Sin | -| Slice | | √ | √ | √ | √ | √ | Slice | | Slice | -| Softmax | √ | √ | √ | √ | | √ | Softmax | Softmax | Softmax | -| SpaceToBatch | | √ | | | | | | | | -| SpaceToBatchND | | √ | | | | | SpaceToBatchND | | | +| Shape | | √ | | | | | Shape | | Shape | +| Sigmoid | √ | √ | √ | √ | √ | √ | Logistic | Sigmoid | Sigmoid | +| Sin | | √ | √ | √ | √ | √ | Sin | | Sin | +| Slice | | √ | √ | √ | √ | √ | Slice | Slice | Slice | +| Softmax | √ | √ | √ | √ | √ | √ | Softmax | Softmax | Softmax | +| SpaceToBatch | | √ | √ | | | | SpaceToBatch | | | +| SpaceToBatchND | | √ | √ | | | | SpaceToBatchND | | | | SpaceToDepth | | √ | | | | | SpaceToDepth | | SpaceToDepth | | SparseToDense | | √ | | | | | 
SpareToDense | | | | Split | √ | √ | √ | √ | | | Split, SplitV | | | -| Sqrt | | √ | √ | √ | | | Sqrt | | Sqrt | -| Square | | √ | √ | √ | | | Square | | | -| SquaredDifference | | √ | | | | | SquaredDifference | | | +| Sqrt | | √ | √ | √ | √ | √ | Sqrt | | Sqrt | +| Square | | √ | √ | √ | √ | √ | Square | | | +| SquaredDifference | | √ | | | | | SquaredDifference | | | | Squeeze | | √ | √ | √ | | | Squeeze | | Squeeze | | StridedSlice | | √ | √ | √ | | | StridedSlice| | | | Stack | | √ | | | | | Stack | | | -| Sub | √ | √ | √ | √ | | √ | Sub | | Sub | -| Tanh | √ | √ | | | | | Tanh | TanH | | -| Tile | | √ | | | | | Tile | | Tile | +| Sub | √ | √ | √ | √ | √ | √ | Sub | | Sub | +| Tanh | √ | √ | | | √ | √ | Tanh | TanH | | +| Tile | | √ | | | | | Tile | Tile | Tile | | TopK | | √ | √ | √ | | | TopKV2 | | | -| Transpose | √ | √ | | | | √ | Transpose | Permute | Transpose | +| Transpose | √ | √ | | | √ | √ | Transpose | Permute | Transpose | | Unique | | √ | | | | | Unique | | | | Unsqueeze | | √ | √ | √ | | | | | Unsqueeze | | Unstack | | √ | | | | | Unstack | | | | Where | | √ | | | | | Where | | | -| ZerosLike | | √ | | | | | ZerosLike | | | +| ZerosLike | | √ | | | | | ZerosLike | | | * Clip: only support convert clip(0, 6) to Relu6. -* DEQUANTIZE: only support to convert fp16 to fp32. diff --git a/lite/docs/source_zh_cn/_static/logo_source.png b/lite/docs/source_zh_cn/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/lite/docs/source_zh_cn/_static/logo_source.png and b/lite/docs/source_zh_cn/_static/logo_source.png differ diff --git a/lite/docs/source_zh_cn/apicc/dataset.md b/lite/docs/source_zh_cn/apicc/dataset.md index 379d3e11632327b3075c0f8a56d53c852cdeae80..2e9926063c23ce292b84127c2145517102b5e282 100644 --- a/lite/docs/source_zh_cn/apicc/dataset.md +++ b/lite/docs/source_zh_cn/apicc/dataset.md @@ -6,6 +6,8 @@ ## image_process.h文件的函数 +### ResizeBilinear + ``` bool ResizeBilinear(LiteMat &src, LiteMat &dst, int dst_w, int dst_h) ``` @@ -22,6 +24,8 @@ bool ResizeBilinear(LiteMat &src, LiteMat &dst, int dst_w, int dst_h) 返回True或者False。 +### InitFromPixel + ``` bool InitFromPixel(const unsigned char *data, LPixelType pixel_type, LDataType data_type, int w, int h, LiteMat &m) ``` @@ -40,6 +44,8 @@ bool InitFromPixel(const unsigned char *data, LPixelType pixel_type, LDataType d 返回True或者False。 +### ConvertTo + ``` bool ConvertTo(LiteMat &src, LiteMat &dst, double scale = 1.0) ``` @@ -56,6 +62,8 @@ bool ConvertTo(LiteMat &src, LiteMat &dst, double scale = 1.0) 返回True或者False。 +### Crop + ``` bool Crop(LiteMat &src, LiteMat &dst, int x, int y, int w, int h) ``` @@ -74,8 +82,10 @@ bool Crop(LiteMat &src, LiteMat &dst, int x, int y, int w, int h) 返回True或者False。 +### SubStractMeanNormalize + ``` -bool SubStractMeanNormalize(LiteMat &src, LiteMat &dst, const float *mean, float *norm) +bool SubStractMeanNormalize(const LiteMat &src, LiteMat &dst, const std::vector &mean, const std::vector &std); ``` 规一化图像,当前支持的数据类型为float。 @@ -85,13 +95,15 @@ bool SubStractMeanNormalize(LiteMat &src, LiteMat &dst, const float *mean, float - `src`: 输入的图片数据。 - `dst`: 输出图像数据。 - `mean`: 数据集的均值。 - - `norm`: 数据集的方差。 + - `std`: 数据集的方差。 - 返回值 返回True或者False。 +### Pad + ``` -bool Padd(LiteMat &src, LiteMat &dst, const int top, const int bottom, const int left, const int right, const PaddBorderType pad_type, uint8_t fill_r, uint8_t fill_g, uint8_t fill_b) +bool Pad(const LiteMat &src, LiteMat &dst, int top, int bottom, int left, int right, 
PaddBorderType pad_type, uint8_t fill_b_or_gray, uint8_t fill_g, uint8_t fill_r) ``` 填充图像,通道支持为3和1。 @@ -105,13 +117,15 @@ bool Padd(LiteMat &src, LiteMat &dst, const int top, const int bottom, const int - `left`: 图片左边长度。 - `right`: 图片右边长度。 - `pad_type`: padding的类型。 - - `fill_r`: R. + - `fill_b_or_gray`: B或者GRAY. - `fill_g`: G. - - `fill_b`: B. + - `fill_r`: R. - 返回值 返回True或者False。 +### Affine + ``` void Affine(LiteMat &src, LiteMat &out_img, double M[6], std::vector dsize, UINT8_C1 borderValue) ``` @@ -140,6 +154,8 @@ void Affine(LiteMat &src, LiteMat &out_img, double M[6], std::vector dsi - `dsize`: 输出图像的大小。 - `borderValue`: 采图之后用于填充的像素值。 +### GetDefaultBoxes + ``` std::vector> GetDefaultBoxes(BoxesConfig config) ``` @@ -154,6 +170,8 @@ std::vector> GetDefaultBoxes(BoxesConfig config) 返回默认框。 +### ConvertBoxes + ``` void ConvertBoxes(std::vector> &boxes, std::vector> &default_boxes, BoxesConfig config) ``` @@ -166,6 +184,8 @@ void ConvertBoxes(std::vector> &boxes, std::vector ApplyNms(std::vector> &all_boxes, std::vector &all_scores, float thres, int max_boxes) ``` @@ -190,6 +210,7 @@ LiteMat是一个处理图像的类。 **构造函数和析构函数** +### LiteMat ``` LiteMat() @@ -211,6 +232,7 @@ MindSpore dataset LiteMat的析构函数。 **公有成员函数** +### Init ``` void Init(int width, LDataType data_type = LDataType::UINT8) @@ -222,6 +244,8 @@ void Init(int width, int height, int channel, LDataType data_type = LDataType::U 该函数用于初始化图像的通道,宽度和高度,参数不同。 +### IsEmpty + ``` bool IsEmpty() const ``` @@ -232,6 +256,8 @@ bool IsEmpty() const 返回True或者False。 +### Release + ``` void Release() ``` @@ -240,6 +266,8 @@ void Release() **私有成员函数** +### AlignMalloc + ``` void *AlignMalloc(unsigned int size) ``` @@ -254,12 +282,17 @@ void *AlignMalloc(unsigned int size) 返回指针的大小。 +### AlignFree + ``` void AlignFree(void *ptr) ``` 释放指针内存大小的方法。 + +### InitElemSize + ``` void InitElemSize(LDataType data_type) ``` diff --git a/lite/docs/source_zh_cn/apicc/errorcode_and_metatype.md b/lite/docs/source_zh_cn/apicc/errorcode_and_metatype.md index 4195eaedcfa2cda8e0470d3db06950e35e2050d8..59f0d81ea4a3a254c7b37e9895c89de1d0357b3d 100644 --- a/lite/docs/source_zh_cn/apicc/errorcode_and_metatype.md +++ b/lite/docs/source_zh_cn/apicc/errorcode_and_metatype.md @@ -13,6 +13,7 @@ | RET_NO_CHANGE | -4 | 无改变。 | | RET_SUCCESS_EXIT | -5 | 无错误退出。 | | RET_MEMORY_FAILED | -6 | 创建内存失败。 | +| RET_NOT_SUPPORT | -7 | 尚未支持。 | | RET_OUT_OF_TENSOR_RANGE | -101 | 输出检查越界。 | | RET_INPUT_TENSOR_ERROR | -102 | 输入检查越界。 | | RET_REENTRANT_ERROR | -103 | 存在运行中的执行器。 | @@ -24,6 +25,8 @@ | RET_FORMAT_ERR | -401 | 张量格式检查失败。 | | RET_INFER_ERR | -501 | 维度推理失败。 | | RET_INFER_INVALID | -502 | 无效的维度推理。 | +| RET_INPUT_PARAM_INVALID | -601 | 无效的用户输入参数。 | +| RET_INPUT_PARAM_LACK | -602 | 缺少必要的输入参数。 | ## MetaType diff --git a/lite/docs/source_zh_cn/apicc/lite.md b/lite/docs/source_zh_cn/apicc/lite.md index 2673487a861f56db5c8b9f6bab8daac555cb7fed..839930645a3fed1484a6fc62945620ab22b8b313 100644 --- a/lite/docs/source_zh_cn/apicc/lite.md +++ b/lite/docs/source_zh_cn/apicc/lite.md @@ -21,31 +21,13 @@ Context类用于保存执行中的环境变量。 Context() ``` -用默认参数构造MindSpore Lite Context 对象。 - -``` -Context(int thread_num, std::shared_ptr allocator, DeviceContext device_ctx) -``` - -根据输入参数构造MindSpore Lite Context 对象。 - -- 参数 - - - `thread_num`: 定义了执行线程数。 - - - `allocator`: 定义了内存分配器。 - - - `device_ctx`: 定义了设备信息。 - -- 返回值 - - MindSpore Lite Context 指针。 +用默认参数构造MindSpore Lite Context对象。 ``` ~Context() ``` -MindSpore Lite Context 的析构函数。 +MindSpore Lite Context的析构函数。 **公有属性** @@ -53,19 +35,21 @@ MindSpore Lite Context 的析构函数。 
float16_priority ``` -**bool** 值,默认为**false**,用于使能float16 推理。 +**bool**值,默认为**false**,用于使能float16推理。 + +> 使能float16推理可能会导致模型推理精度下降,因为在模型推理的中间过程中,有些变量可能会超出float16的数值范围。 ``` -device_ctx_{DT_CPU} +device_type ``` -[**DeviceContext**](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/lite.html#devicecontext)结构体。用于设置设备信息。 +[**DeviceType**](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/lite.html#devicetype)枚举类型。默认为**DT_CPU**,用于设置设备信息。 ``` thread_num_ ``` -**int** 值,默认为**2**,设置线程数。 +**int**值,默认为**2**,设置线程数。 ``` allocator @@ -173,18 +157,6 @@ DT_NPU = 0 设备为NPU,暂不支持。 -## DeviceContext - -定义设备类型的结构体。 - -**属性** - -``` -type -``` - -[**DeviceType**](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/lite.html#devicetype) 变量。设备类型。 - ## Version ``` diff --git a/lite/docs/source_zh_cn/apicc/session.md b/lite/docs/source_zh_cn/apicc/session.md index 86556e1351e97bf4ad435e09db907fdca4e5fefd..e8203d44d6f872f816f211a8a97b80d5b922ff74 100644 --- a/lite/docs/source_zh_cn/apicc/session.md +++ b/lite/docs/source_zh_cn/apicc/session.md @@ -1,4 +1,4 @@ -# mindspore::session +# mindspore::session #include <[lite_session.h](https://gitee.com/mindspore/mindspore/blob/master/mindspore/lite/include/lite_session.h)> @@ -31,9 +31,9 @@ virtual void BindThread(bool if_bind) ``` virtual int CompileGraph(lite::Model *model) ``` -编译MindSpore Lite模型。 +编译MindSpore Lite模型。 -> 注意: CompileGraph必须在RunGraph方法之后调用。 +> 注意: CompileGraph必须在RunGraph方法之前调用。 - 参数 @@ -64,18 +64,18 @@ std::vector GetInputsByName(const std::string &node_name) c - 返回值 MindSpore Lite MSTensor向量。 - + ``` virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) ``` -运行带有回调函数的会话。 +运行带有回调函数的会话。 > 注意: RunGraph必须在CompileGraph方法之后调用。 - 参数 - - `before`: 一个[**KernelCallBack**](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/session.html#kernelcallback) 结构体。定义了运行每个节点之前调用的回调函数。 + - `before`: 一个[**KernelCallBack**](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/session.html#kernelcallback)结构体。定义了运行每个节点之前调用的回调函数。 - - `after`: 一个[**KernelCallBack**](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/session.html#kernelcallback) 结构体。定义了运行每个节点之后调用的回调函数。 + - `after`: 一个[**KernelCallBack**](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/session.html#kernelcallback)结构体。定义了运行每个节点之后调用的回调函数。 - 返回值 @@ -159,7 +159,7 @@ static LiteSession *CreateSession(lite::Context *context) using KernelCallBack = std::function inputs, std::vector outputs, const CallBackParam &opInfo)> ``` -一个函数包装器。KernelCallBack 定义了指向回调函数的指针。 +一个函数包装器。KernelCallBack定义了指向回调函数的指针。 ## CallBackParam @@ -174,4 +174,4 @@ name_callback_param ``` type_callback_param ``` -**string** 类型变量。节点类型参数。 \ No newline at end of file +**string** 类型变量。节点类型参数。 diff --git a/lite/docs/source_zh_cn/apicc/tensor.md b/lite/docs/source_zh_cn/apicc/tensor.md index e9eae1f0fd9a62aa59e7b578b09a455bab843f1d..32d918604f0fddeabcf064d649dc76cfe9f1baf1 100644 --- a/lite/docs/source_zh_cn/apicc/tensor.md +++ b/lite/docs/source_zh_cn/apicc/tensor.md @@ -8,6 +8,7 @@ MSTensor定义了MindSpore Lite中的张量。 **构造函数和析构函数** + ``` MSTensor() ``` @@ -15,7 +16,7 @@ MindSpore Lite MSTensor的构造函数。 - 返回值 - MindSpore Lite MSTensor 的实例。 + MindSpore Lite MSTensor的实例。 ``` virtual ~MSTensor() @@ -35,19 +36,6 @@ virtual TypeId data_type() const MindSpore Lite MSTensor类的MindSpore Lite TypeId。 -``` -virtual TypeId set_data_type(TypeId data_type) -``` -设置MindSpore Lite MSTensor的数据类型。 - -- 参数 - - - `data_type`: 定义了MindSpore Lite MSTensor所需设置的MindSpore Lite TypeId。 - -- 返回值 - - 设置后的MindSpore Lite 
MSTensor的MindSpore Lite TypeI。 - ``` virtual std::vector shape() const ``` @@ -57,23 +45,10 @@ virtual std::vector shape() const 一个包含MindSpore Lite MSTensor形状数值的整型向量。 -``` -virtual size_t set_shape(const std::vector &shape) -``` -设置MindSpore Lite MSTensor的形状. - -- 参数 - - - `shape`: 定义了一个整型向量,包含了所需设置的MindSpore Lite MSTensor形状数值。 - -- 返回值 - - 设置形状后的MindSpore Lite MSTensor的大小。 - ``` virtual int DimensionSize(size_t index) const ``` -Get size of the dimension of the MindSpore Lite MSTensor index by the parameter index. +通过参数索引获取MindSpore Lite MSTensor的维度的大小。 - 参数 @@ -92,15 +67,6 @@ virtual int ElementsNum() const MSTensor中的元素个数 -``` -virtual std::size_t hash() const -``` -获取MindSpore Lite MSTensor的哈希码。 - -- 返回值 - - MindSpore Lite MSTensor的哈希码。 - ``` virtual size_t Size() const ``` @@ -121,22 +87,3 @@ virtual void *MutableData() const - 返回值 指向MSTensor中的数据的指针。 - -**静态公有成员函数** - -``` -static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape) -``` -创建MSTensor指针的静态方法。 - -> 注意:TypeId在[mindspore/mindspore/core/ir/dtype/type_id\.h](https://gitee.com/mindspore/mindspore/blob/master/mindspore/core/ir/dtype/type_id.h)中定义。只有TypeId枚举中的数字类型可用于MSTensor。 - -- 参数 - - - `data_type`: 定义了所要创建的张量的数据类型。 - - - `shape`: 定义了所要创建的张量的形状。 - -- 返回值 - - 指向MSTensor的指针。 \ No newline at end of file diff --git a/lite/docs/source_zh_cn/image_classification.md b/lite/docs/source_zh_cn/image_classification.md new file mode 100644 index 0000000000000000000000000000000000000000..18a11ed4be0dd3d3903582518448c2e5781b795e --- /dev/null +++ b/lite/docs/source_zh_cn/image_classification.md @@ -0,0 +1,33 @@ +# 图像分类 + + + +## 图像分类介绍 + +图像分类模型可以预测图片中出现哪些物体,识别出图片中出现物体列表及其概率。 比如下图经过模型推理的分类结果为下表: + +![image_classification](images/image_classification_result.png) + +| 类别 | 概率 | +| ---------- | ------ | +| plant | 0.9359 | +| flower | 0.8641 | +| tree | 0.8584 | +| houseplant | 0.7867 | + +使用MindSpore Lite实现图像分类的[示例代码](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/lite/image_classification)。 + +## 图像分类模型列表 + +下表是使用MindSpore Lite推理的部分图像分类模型的数据。 + +> 下表的性能是在mate30手机上测试的。 + +| 模型名称 | 大小(Mb) | Top1 | Top5 | F1 | CPU 4线程时延(ms) | +|-----------------------| :----------: | :----------: | :----------: | :----------: | :-----------: | +| [MobileNetV2](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2.ms) | 11.5 | - | - | 65.5% | 14.595 | +| [Inceptionv3](https://download.mindspore.cn/model_zoo/official/lite/inceptionv3_lite/inceptionv3.ms) | 90.9 | 78.62% | 94.08% | - | 92.086 | +| [Shufflenetv2](https://download.mindspore.cn/model_zoo/official/lite/shufflenetv2_lite/shufflenetv2.ms) | 8.8 | 67.74% | 87.62% | - | 8.303 | +| [GoogleNet](https://download.mindspore.cn/model_zoo/official/lite/googlenet_lite/googlenet.ms) | 25.3 | 72.2% | 90.06% | - | 23.257 | +| [ResNext50](https://download.mindspore.cn/model_zoo/official/lite/resnext50_lite/resnext50.ms) | 95.8 | 73.1% | 91.21% | - | 138.164 | + diff --git a/lite/docs/source_zh_cn/images/image_classification_result.png b/lite/docs/source_zh_cn/images/image_classification_result.png new file mode 100644 index 0000000000000000000000000000000000000000..a7cc49f582440e31b6b5b14dbba5131bfed2a4b4 Binary files /dev/null and b/lite/docs/source_zh_cn/images/image_classification_result.png differ diff --git a/lite/docs/source_zh_cn/images/object_detection.png b/lite/docs/source_zh_cn/images/object_detection.png new file mode 100644 index 0000000000000000000000000000000000000000..ad5425c86393a9367701166796df42c9e4702988 Binary files 
/dev/null and b/lite/docs/source_zh_cn/images/object_detection.png differ diff --git a/lite/docs/source_zh_cn/index.rst b/lite/docs/source_zh_cn/index.rst index 20ecdbb72c0fe01cbc24c674bda6944504c792ff..53e1d51b2881d493dfd7f14db81fda7ab84d930e 100644 --- a/lite/docs/source_zh_cn/index.rst +++ b/lite/docs/source_zh_cn/index.rst @@ -1,4 +1,4 @@ -.. MindSpore documentation master file, created by +.. MindSpore documentation master file, created by sphinx-quickstart on Thu Aug 17 10:00:00 2020. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. @@ -12,5 +12,7 @@ MindSpore端侧文档 architecture apicc/apicc + image_classification + object_detection operator_list glossary diff --git a/lite/docs/source_zh_cn/object_detection.md b/lite/docs/source_zh_cn/object_detection.md new file mode 100644 index 0000000000000000000000000000000000000000..70fd2ac5ea87952d8bdfaf09ed75d1d6bede876a --- /dev/null +++ b/lite/docs/source_zh_cn/object_detection.md @@ -0,0 +1,26 @@ +# 对象检测 + + + +## 对象检测介绍 + +对象检测可以识别出图片中的对象和该对象在图片中的位置。 如:对下图使用对象检测模型推理的输出如下表所示,使用矩形框标识出图中对象的位置并且标注出对象类别的概率,其中坐标中的4个数字分别为Xmin、Ymin、Xmax、Ymax;概率表示反映被检测物体的可信程度。 + +![object_detection](images/object_detection.png) + +| 类别 | 概率 | 坐标 | +| ----- | ---- | ---------------- | +| mouse | 0.78 | [10, 25, 35, 43] | + +使用MindSpore Lite实现对象检测的[示例代码](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/lite/object_detection)。 + +## 对象检测模型列表 + +下表是使用MindSpore Lite推理的部分对象检测模型的数据。 + +> 下表的性能是在mate30手机上测试的。 + +| 模型名称 | 大小(Mb) | mAP(IoU=0.50:0.95) | CPU 4线程时延(ms) | +|-----------------------| :----------: | :----------: | :-----------: | +| [MobileNetv2-SSD](https://download.mindspore.cn/model_zoo/official/lite/ssd_mobilenetv2_lite/ssd.ms) | 16.7 | 0.22 | 25.4 | + diff --git a/lite/docs/source_zh_cn/operator_list.md b/lite/docs/source_zh_cn/operator_list.md index 3384d8baf91b1af92ff4758816790af7b6e241bc..7558d93078cfc81db8e3de8ea82457ceb5face25 100644 --- a/lite/docs/source_zh_cn/operator_list.md +++ b/lite/docs/source_zh_cn/operator_list.md @@ -5,107 +5,111 @@ > √勾选的项为MindSpore Lite所支持的算子。 | 操作名 | CPU
FP16 | CPU
FP32 | CPU
Int8 | CPU
UInt8 | GPU
FP16 | GPU
FP32 | 支持的Tensorflow
Lite op | 支持的Caffe
Lite op | 支持的Onnx
Lite op | -|-----------------------|----------|----------|----------|-----------|----------|-------------------|----------|----------|---------| -| Abs | | √ | √ | √ | | | Abs | | Abs | -| Add | √ | √ | √ | √ | | √ | Add | | Add | -| AddN | | √ | | | | | AddN | | | -| Argmax | | √ | √ | √ | | | Argmax | ArgMax | ArgMax | -| Argmin | | √ | √ | √ | | | Argmin | | | -| AvgPool | √ | √ | √ | √ | | √ | MeanPooling| Pooling | AveragePool | -| BatchNorm | √ | √ | √ | √ | | √ | | BatchNorm | BatchNormalization | -| BatchToSpace | | √ | √ | √ | | | BatchToSpace, BatchToSpaceND | | | -| BiasAdd | | √ | √ | √ | | √ | | | BiasAdd | +|-----------------------|----------|----------|----------|-----------|----------|----------|------------|----------------|--------------------| +| Abs | | √ | √ | √ | √ | √ | Abs | | Abs | +| Add | √ | √ | √ | √ | √ | √ | Add | | Add | +| AddN | | √ | | | | | AddN | | | +| Argmax | | √ | √ | √ | | | Argmax | ArgMax | ArgMax | +| Argmin | | √ | √ | √ | | | Argmin | | | +| AvgPool | √ | √ | √ | √ | √ | √ | MeanPooling| Pooling | AveragePool | +| BatchNorm | √ | √ | √ | √ | √ | √ | | BatchNorm | BatchNormalization | +| BatchToSpace | | √ | √ | √ | | | BatchToSpace | | | +| BatchToSpaceND | | √ | √ | | | | BatchToSpaceND | | | +| BiasAdd | | √ | √ | √ | √ | √ | | | BiasAdd | | Broadcast | | √ | | | | | BroadcastTo | | Expand | -| Cast | √ | √ | | √ | | | Cast, DEQUANTIZE* | | Cast | -| Ceil | | √ | √ | √ | | | Ceil | | Ceil | +| Cast | √ | √ | √ | √ | √ | √ | Cast, QUANTIZE, DEQUANTIZE | | Cast | +| Ceil | | √ | √ | √ | √ | √ | Ceil | | Ceil | | Concat | √ | √ | √ | √ | √ | √ | Concat | Concat | Concat | | Conv2d | √ | √ | √ | √ | √ | √ | Conv2D | Convolution | Conv | | Conv2dTranspose | √ | √ | √ | √ | √ | √ | DeConv2D | Deconvolution | ConvTranspose | -| Cos | | √ | √ | √ | | | Cos | | Cos | +| Cos | | √ | √ | √ | √ | √ | Cos | | Cos | | Crop | | √ | √ | √ | | | | Crop | | | DeDepthwiseConv2D | | √ | √ | √ | | | | Deconvolution| ConvTranspose | | DepthToSpace | | √ | √ | √ | | | DepthToSpace| | DepthToSpace | | DepthwiseConv2dNative | √ | √ | √ | √ | √ | √ | DepthwiseConv2D | Convolution | Convolution | -| Div | √ | √ | √ | √ | | √ | Div, RealDiv | | Div | +| DetectionPostProcess | | √ | | | | | DetectionPostProcess | | | +| Div | √ | √ | √ | √ | √ | √ | Div, RealDiv | | Div | | Eltwise | √ | √ | | | | | | Eltwise | | | Elu | | √ | | | | | Elu | | Elu | | Equal | √ | √ | √ | √ | | | Equal | | Equal | -| Exp | | √ | | | | | Exp | | Exp | -| ExpandDims | | √ | | | | | | | | +| Exp | | √ | | | √ | √ | Exp | Exp | Exp | +| ExpandDims | | √ | | | | | ExpandDims | | | | Fill | | √ | | | | | Fill | | | | Flatten | | √ | | | | | | Flatten | | -| Floor | | √ | √ | √ | | | flOOR | | Floor | +| Floor | | √ | √ | √ | √ | √ | flOOR | | Floor | | FloorDiv | √ | √ | | | | | FloorDiv | | | | FloorMod | √ | √ | | | | | FloorMod | | | -| FullConnection | | √ | √ | √ | | | FullyConnected | InnerProduct | | +| FullConnection | √ | √ | √ | √ | √ | √ | FullyConnected | InnerProduct | | | GatherNd | | √ | √ | √ | | | GatherND | | | | GatherV2 | | √ | √ | √ | | | Gather | | Gather | | Greater | √ | √ | √ | √ | | | Greater | | Greater | | GreaterEqual | √ | √ | √ | √ | | | GreaterEqual| | | | Hswish | √ | √ | √ | √ | | | HardSwish | | | -| LeakyReLU | √ | √ | | | | √ | LeakyRelu | | LeakyRelu | +| L2Norm | | √ | | | | | L2_NORMALIZATION | | | +| LeakyReLU | √ | √ | | | √ | √ | LeakyRelu | | LeakyRelu | | Less | √ | √ | √ | √ | | | Less | | Less | | LessEqual | √ | √ | √ | √ | | | LessEqual | | | -| 
LRN | | √ | | | | | LocalResponseNorm | | Lrn | -| Log | | √ | √ | √ | | | Log | | Log | +| LRN | | √ | | | | | LocalResponseNorm | | Lrn, LRN | +| Log | | √ | √ | √ | √ | √ | Log | | Log | | LogicalAnd | √ | √ | | | | | LogicalAnd | | | -| LogicalNot | | √ | √ | √ | | | LogicalNot | | | +| LogicalNot | | √ | √ | √ | √ | √ | LogicalNot | | | | LogicalOr | √ | √ | | | | | LogicalOr | | | | LSTM | | √ | | | | | | | | | MatMul | | √ | √ | √ | √ | √ | | | MatMul | | Maximum | √ | √ | | | | | Maximum | | Max | -| MaxPool | √ | √ | √ | √ | | √ | MaxPooling | Pooling | MaxPool | +| MaxPool | √ | √ | √ | √ | √ | √ | MaxPooling | Pooling | MaxPool | | Minimum | √ | √ | | | | | Minimum | | Min | -| Mul | √ | √ | √ | √ | | √ | Mul | | Mul | +| Mul | √ | √ | √ | √ | √ | √ | Mul | | Mul | +| Neg | | √ | | | | | Neg | | Neg | | NotEqual | √ | √ | √ | √ | | | NotEqual | | | | OneHot | | √ | | | | | OneHot | | | -| Pad | | √ | √ | √ | | | Pad | | Pad | -| Pow | | √ | √ | √ | | | Pow | Power | Power | -| PReLU | | √ | | | | √ | | PReLU | | +| Pad | √ | √ | √ | √ | | | Pad, MirrorPad | | Pad | +| Pow | | √ | √ | √ | | | Pow | Power | Power | +| PReLU | | √ | | | √ | √ | | PReLU | | | Range | | √ | | | | | Range | | | | Rank | | √ | | | | | Rank | | | +| ReduceASum | | √ | | | | | | Reduction | | | ReduceMax | √ | √ | √ | √ | | | ReduceMax | | ReduceMax | -| ReduceMean | √ | √ | √ | √ | | | Mean | | ReduceMean | +| ReduceMean | √ | √ | √ | √ | | | Mean | Reduction | ReduceMean | | ReduceMin | √ | √ | √ | √ | | | ReduceMin | | ReduceMin | | ReduceProd | √ | √ | √ | √ | | | ReduceProd | | | -| ReduceSum | √ | √ | √ | √ | | | Sum | | ReduceSum | -| ReduceSumSquare | √ | √ | √ | √ | | | | | | -| ReLU | √ | √ | √ | √ | | √ | Relu | ReLU | Relu | -| ReLU6 | √ | √ | √ | √ | | √ | Relu6 | ReLU6 | Clip* | -| Reshape | √ | √ | √ | √ | | √ | Reshape | Reshape | Reshape,Flatten | -| Resize | | √ | √ | √ | | | ResizeBilinear, NearestNeighbor | Interp | | +| ReduceSum | √ | √ | √ | √ | | | Sum | Reduction | ReduceSum | +| ReduceSumSquare | √ | √ | √ | √ | | | | Reduction | | +| ReLU | √ | √ | √ | √ | √ | √ | Relu | ReLU | Relu | +| ReLU6 | √ | √ | √ | √ | √ | √ | Relu6 | ReLU6 | Clip* | +| Reshape | √ | √ | √ | √ | √ | √ | Reshape | Reshape | Reshape,Flatten | +| Resize | | √ | √ | √ | | | ResizeBilinear, NearestNeighbor | Interp | | | Reverse | | √ | | | | | reverse | | | | ReverseSequence | | √ | | | | | ReverseSequence | | | -| Round | | √ | √ | √ | | | Round | | | -| Rsqrt | | √ | √ | √ | | | Rsqrt | | | -| Scale | | √ | | | | | | Scale | | +| Round | | √ | √ | √ | √ | √ | Round | | | +| Rsqrt | | √ | √ | √ | √ | √ | Rsqrt | | | +| Scale | | √ | | | √ | √ | | Scale | | | ScatterNd | | √ | | | | | ScatterNd | | | -| Shape | | √ | | | | | Shape | | Shape | -| Sigmoid | √ | √ | √ | √ | | √ | Logistic | Sigmoid | Sigmoid | -| Sin | | √ | √ | √ | | | Sin | | Sin | -| Slice | | √ | √ | √ | √ | √ | Slice | | Slice | -| Softmax | √ | √ | √ | √ | | √ | Softmax | Softmax | Softmax | -| SpaceToBatch | | √ | | | | | | | | -| SpaceToBatchND | | √ | | | | | SpaceToBatchND | | | +| Shape | | √ | | | | | Shape | | Shape | +| Sigmoid | √ | √ | √ | √ | √ | √ | Logistic | Sigmoid | Sigmoid | +| Sin | | √ | √ | √ | √ | √ | Sin | | Sin | +| Slice | | √ | √ | √ | √ | √ | Slice | Slice | Slice | +| Softmax | √ | √ | √ | √ | √ | √ | Softmax | Softmax | Softmax | +| SpaceToBatch | | √ | √ | | | | SpaceToBatch | | | +| SpaceToBatchND | | √ | √ | | | | SpaceToBatchND | | | | SpaceToDepth | | √ | | | | | SpaceToDepth | | SpaceToDepth | | 
SparseToDense | | √ | | | | | SpareToDense | | | | Split | √ | √ | √ | √ | | | Split, SplitV | | | -| Sqrt | | √ | √ | √ | | | Sqrt | | Sqrt | -| Square | | √ | √ | √ | | | Square | | | -| SquaredDifference | | √ | | | | | SquaredDifference | | | +| Sqrt | | √ | √ | √ | √ | √ | Sqrt | | Sqrt | +| Square | | √ | √ | √ | √ | √ | Square | | | +| SquaredDifference | | √ | | | | | SquaredDifference | | | | Squeeze | | √ | √ | √ | | | Squeeze | | Squeeze | | StridedSlice | | √ | √ | √ | | | StridedSlice| | | | Stack | | √ | | | | | Stack | | | -| Sub | √ | √ | √ | √ | | √ | Sub | | Sub | -| Tanh | √ | √ | | | | | Tanh | TanH | | -| Tile | | √ | | | | | Tile | | Tile | +| Sub | √ | √ | √ | √ | √ | √ | Sub | | Sub | +| Tanh | √ | √ | | | √ | √ | Tanh | TanH | | +| Tile | | √ | | | | | Tile | Tile | Tile | | TopK | | √ | √ | √ | | | TopKV2 | | | -| Transpose | √ | √ | | | | √ | Transpose | Permute | Transpose | +| Transpose | √ | √ | | | √ | √ | Transpose | Permute | Transpose | | Unique | | √ | | | | | Unique | | | | Unsqueeze | | √ | √ | √ | | | | | Unsqueeze | | Unstack | | √ | | | | | Unstack | | | | Where | | √ | | | | | Where | | | -| ZerosLike | | √ | | | | | ZerosLike | | | +| ZerosLike | | √ | | | | | ZerosLike | | | * Clip: 仅支持将clip(0, 6)转换为Relu6. -* DEQUANTIZE: 仅支持将fp16转换为fp32. diff --git a/lite/tutorials/source_en/_static/logo_source.png b/lite/tutorials/source_en/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/lite/tutorials/source_en/_static/logo_source.png and b/lite/tutorials/source_en/_static/logo_source.png differ diff --git a/lite/tutorials/source_en/build.md b/lite/tutorials/source_en/build.md index ef1282a257493900b1c43c9371d083058f2e04de..1a996d30b22c9f7f4d95ef9b259853a9d078eb34 100644 --- a/lite/tutorials/source_en/build.md +++ b/lite/tutorials/source_en/build.md @@ -10,11 +10,7 @@ - [Output Description](#output-description) - [Description of Converter's Directory Structure](#description-of-converters-directory-structure) - [Description of Runtime and Other tools' Directory Structure](#description-of-runtime-and-other-tools-directory-structure) - - [Windows Environment Compilation](#windows-environment-compilation) - - [Environment Requirements](#environment-requirements-1) - - [Compilation Options](#compilation-options-1) - - [Compilation Example](#compilation-example-1) - - [Output Description](#output-description-1) + - [Description of Imageprocess's Directory Structure](#description-of-imageprocesss-directory-structure) @@ -24,10 +20,11 @@ This chapter introduces how to quickly compile MindSpore Lite, which includes th | Module | Support Platform | Description | | --- | ---- | ---- | -| converter | Linux、Windows | Model Conversion Tool | +| converter | Linux | Model Conversion Tool | | runtime | Linux、Android | Model Inference Framework | | benchmark | Linux、Android | Benchmarking Tool | -| time_profiler | Linux、Android | Performance Analysis Tool | +| timeprofiler | Linux、Android | Performance Analysis Tool | +| imageprocess | Linux、Android | Image Processing Library | ## Linux Environment Compilation @@ -35,7 +32,7 @@ This chapter introduces how to quickly compile MindSpore Lite, which includes th - The compilation environment supports Linux x86_64 only. Ubuntu 18.04.02 LTS is recommended. 
-- Compilation dependencies of runtime、benchmark and time_profiler: +- Compilation dependencies of runtime、benchmark and timeprofiler: - [CMake](https://cmake.org/download/) >= 3.14.1 - [GCC](https://gcc.gnu.org/releases.html) >= 7.3.0 - [Android_NDK r20b](https://dl.google.com/android/repository/android-ndk-r20b-linux-x86_64.zip) @@ -53,6 +50,7 @@ This chapter introduces how to quickly compile MindSpore Lite, which includes th - [Libevent](https://libevent.org) >= 2.0 - [M4](https://www.gnu.org/software/m4/m4.html) >= 1.4.18 - [OpenSSL](https://www.openssl.org/) >= 1.1.1 + - [Python](https://www.python.org/) >= 3.7.5 > - To install and use `Android_NDK`, you need to configure environment variables. The command example is `export ANDROID_NDK={$NDK_PATH}/android-ndk-r20b`. > - In the `build.sh` script, run the `git clone` command to obtain the code in the third-party dependency library. Ensure that the network settings of Git are correct. @@ -69,6 +67,7 @@ MindSpore Lite provides a compilation script `build.sh` for one-click compilatio | -j[n] | Sets the number of threads used during compilation. Otherwise, the number of threads is set to 8 by default. | Integer | No | | -e | In the Arm architecture, select the backend operator and set the `gpu` parameter. The built-in GPU operator of the framework is compiled at the same time. | GPU | No | | -h | Displays the compilation help information. | None | No | +| -n | Specifies to compile the lightweight image processing module. | lite_cv | No | > When the `-I` parameter changes, such as `-I x86_64` is converted to `-I arm64`, adding `-i` for parameter compilation does not take effect. @@ -102,11 +101,17 @@ Then, run the following commands in the root directory of the source code to com bash build.sh -I arm64 -e gpu ``` +- Compile ARM64 with image preprocessing module: + ```bash + bash build.sh -I arm64 -n lite_cv + ``` + ### Output Description -After the compilation is complete, go to the `mindspore/output` directory of the source code to view the file generated after compilation. The file is divided into two parts. +After the compilation is complete, go to the `mindspore/output` directory of the source code to view the file generated after compilation. The file is divided into three parts. - `mindspore-lite-{version}-converter-{os}.tar.gz`:Contains model conversion tool. - `mindspore-lite-{version}-runtime-{os}-{device}.tar.gz`:Contains model inference framework、benchmarking tool and performance analysis tool. +- `mindspore-lite-{version}-minddata-{os}-{device}.tar.gz`:Contains image processing library ImageProcess. > version: version of the output, consistent with that of the MindSpore. 
> @@ -119,6 +124,7 @@ Execute the decompression command to obtain the compiled output: ```bash tar -xvf mindspore-lite-{version}-converter-{os}.tar.gz tar -xvf mindspore-lite-{version}-runtime-{os}-{device}.tar.gz +tar -xvf mindspore-lite-{version}-minddata-{os}-{device}.tar.gz ``` #### Description of Converter's Directory Structure @@ -147,7 +153,7 @@ The inference framework can be obtained under `-I x86_64`, `-I arm64` and `-I ar │ └── third_party # Header files and libraries of third party libraries │ ├── flatbuffers # Header files of FlatBuffers │ └── include # Header files of inference framework - │ └── time_profile # Model network layer time-consuming analysis tool + │ └── time_profiler # Model network layer time-consuming analysis tool ``` @@ -158,74 +164,45 @@ The inference framework can be obtained under `-I x86_64`, `-I arm64` and `-I ar │ └── benchmark # Benchmarking Tool │ └── lib # Inference framework dynamic library │ ├── libmindspore-lite.so # Dynamic library of infernece framework in MindSpore Lite - │ ├── liboptimize.so # Operator performance optimization library in MindSpore Lite + │ ├── libmindspore-lite-fp16.so # Operator performance optimization library support float16 in MindSpore Lite + │ ├── libmindspore-lite-optimize.so # Operator performance optimization library support dotprod instruction in MindSpore Lite │ └── third_party # Header files and libraries of third party libraries │ ├── flatbuffers # Header files of FlatBuffers │ └── include # Header files of inference framework - │ └── time_profile # Model network layer time-consuming analysis tool + │ └── time_profiler # Model network layer time-consuming analysis tool ``` - When the compilation option is `-I arm32`: ``` | - ├── mindspore-lite-{version}-runtime-arm64-cpu + ├── mindspore-lite-{version}-runtime-arm32-cpu │ └── benchmark # Benchmarking Tool │ └── lib # Inference framework dynamic library │ ├── libmindspore-lite.so # Dynamic library of infernece framework in MindSpore Lite │ └── third_party # Header files and libraries of third party libraries │ ├── flatbuffers # Header files of FlatBuffers │ └── include # Header files of inference framework - │ └── time_profile # Model network layer time-consuming analysis tool + │ └── time_profiler # Model network layer time-consuming analysis tool ``` -> 1. `liboptimize.so` only exists in the output package of runtime-arm64 and is only used on ARMv8.2 and CPUs that support fp16. -> 2. Compile ARM64 to get the inference framework output of arm64-cpu by default, if you add `-e gpu`, you will get the inference framework output of arm64-gpu, and the package name is `mindspore-lite-{version}-runtime-arm64-gpu.tar.gz`, compiling ARM32 is in the same way. -> 3. Before running the tools in the converter, benchmark or time_profile directory, you need to configure environment variables, and configure the path where the dynamic libraries of MindSpore Lite and Protobuf are located to the path where the system searches for dynamic libraries. Take the compiled under version 0.7.0-beta as an example: configure converter: `export LD_LIBRARY_PATH=./output/mindspore-lite-0.7.0-converter-ubuntu/third_party/protobuf/lib:./output/mindspore-lite-0.7.0-converter-ubuntu/third_party/flatbuffers/lib:${LD_LIBRARY_PATH}`; configure benchmark and timeprofiler: `export LD_LIBRARY_PATH= ./output/mindspore-lite-0.7.0-runtime-x86-cpu/lib:${LD_LIBRARY_PATH}`. - -## Windows Environment Compilation - -### Environment Requirements - -- The supported compilation environment is: Windows 10, 64-bit. 
- -- Compilation dependencies are: - - [CMake](https://cmake.org/download/) >= 3.14.1 - - [MinGW GCC](https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/7.3.0/threads-posix/seh/x86_64-7.3.0-release-posix-seh-rt_v5-rev0.7z/download) = 7.3.0 - - [Python](https://www.python.org/) >= 3.7.5 - -> The compilation script will execute `git clone` to obtain the code of the third-party dependent libraries. Please make sure that the git network settings are correct and available in advance. - -### Compilation Options - -The compilation options of MindSpore Lite are as follows: +> 1. `libmindspore-lite-optimize.so` only exists in the output package of runtime-arm64 and is only used on ARMv8.2 and CPUs that support dotprod instruction. +> 2. `libmindspore-lite-fp16.so` only exists in the output package of runtime-arm64 and is only used on ARMv8.2 and CPUs that support fp16. +> 3. Compile ARM64 to get the inference framework output of arm64-cpu by default, if you add `-e gpu`, you will get the inference framework output of arm64-gpu, and the package name is `mindspore-lite-{version}-runtime-arm64-gpu.tar.gz`, compiling ARM32 is in the same way. +> 4. Before running the tools in the converter, benchmark or time_profiler directory, you need to configure environment variables, and configure the path where the dynamic libraries of MindSpore Lite and Protobuf are located to the path where the system searches for dynamic libraries. Take the compiled under version 0.7.0-beta as an example: configure converter: `export LD_LIBRARY_PATH=./output/mindspore-lite-0.7.0-converter-ubuntu/third_party/protobuf/lib:./output/mindspore-lite-0.7.0-converter-ubuntu/third_party/flatbuffers/lib:${LD_LIBRARY_PATH}`; configure benchmark and timeprofiler: `export LD_LIBRARY_PATH= ./output/mindspore-lite-0.7.0-runtime-x86-cpu/lib:${LD_LIBRARY_PATH}`. -| Parameter | Parameter Description | Mandatory or Not | -| -------- | ----- | ---- | -| **lite** | **Set this parameter to compile the Mindspore Lite project.** | **Yes** | -| [n] | Set the number of threads used during compilation, otherwise the default is set to 6 threads. | No | +#### Description of Imageprocess's Directory Structure -### Compilation Example +The image processing library is only available under the `-I arm64 -n lite_cv` compilation option, and the content includes the following parts: -First, use the git tool to download the source code from the MindSpore code repository. -```bash -git clone https://gitee.com/mindspore/mindspore.git ``` - -Then, use the cmd tool to compile MindSpore Lite in the root directory of the source code and execute the following commands. - -- Compile the Windows version with the default number of threads (6 threads). - ```bash - call build.bat lite - ``` -- Compile the Windows version with the specified number of threads 8. - ```bash - call build.bat lite 8 - ``` - -### Output Description - -After the compilation is complete, enter the `mindspore/output/` directory, unzip the output file `mindspore-lite-{version}-converter-win-cpu.zip`, which contains the conversion tool executable file. - -> version: version of the output, consistent with that of the MindSpore. 
+| +├── mindspore-lite-{version}-minddata-{os}-{device} +│ └── include # Header files +│ ├── lite_cv # Image processing library header file +│ └── lib # Dynamic library +│ ├── libminddata-lite.so # Image processing dynamic library +│ └── third_party # Third-party library header files and libraries +│ ├── flatbuffers # Header files of FlatBuffers +``` diff --git a/lite/tutorials/source_en/index.rst b/lite/tutorials/source_en/index.rst index 26e9445ec1baace48e64a3418dd12fe1a1ec36a3..8a371e46b1c91f9bd423db181d9577fb251b5aa9 100644 --- a/lite/tutorials/source_en/index.rst +++ b/lite/tutorials/source_en/index.rst @@ -21,4 +21,5 @@ MindSpore Lite Tutorials build use/converter_tool use/evaluating_the_model + use/image_processing use/runtime diff --git a/lite/tutorials/source_en/quick_start/quick_start.md b/lite/tutorials/source_en/quick_start/quick_start.md index b0712f03d6a6b713fa0b63160f5be2714a3fc8a2..34d8d9aa7fedafd2b5bc3e581464cff1a7f66e87 100644 --- a/lite/tutorials/source_en/quick_start/quick_start.md +++ b/lite/tutorials/source_en/quick_start/quick_start.md @@ -43,7 +43,7 @@ After you retrain a model provided by MindSpore, export the model in the [.mindi Take the mobilenetv2 model as an example. Execute the following script to convert a model into a MindSpore Lite model for on-device inference. ```bash -./converter_lite --fmk=MS --modelFile=mobilenetv2.mindir --outputFile=mobilenetv2.ms +./converter_lite --fmk=MINDIR --modelFile=mobilenetv2.mindir --outputFile=mobilenetv2.ms ``` ## Deploying an Application The following section describes how to build and execute an on-device image classification task on MindSpore Lite. - Android Studio 3.2 or later (Android 4.0 or later is recommended.) - Native development kit (NDK) 21.3 -- CMake 3.10.2 +- [CMake](https://cmake.org/download) 3.10.2 - Android software development kit (SDK) 26 or later -- OpenCV 4.0.0 or later (included in the sample code) +- [JDK]( https://www.oracle.com/downloads/otn-pub/java/JDK/) 1.8 or later ### Building and Running 1. Load the sample source code to Android Studio and install the corresponding SDK. (After the SDK version is specified, Android Studio automatically installs the SDK.) ![start_sdk](../images/lite_quick_start_sdk.png) - (Optional) If an NDK version issue occurs during the installation, manually download the corresponding [NDK version](https://developer.android.com/ndk/downloads) (the version used in the sample code is 21.3). Specify the SDK location in `Android NDK location` of `Project Structure`. + (Optional) If an NDK version issue occurs during the installation, manually download the corresponding [NDK version](https://developer.android.com/ndk/downloads) (the version used in the sample code is 21.3). Specify the NDK location in `Android NDK location` of `Project Structure`. ![project_structure](../images/lite_quick_start_project_structure.png) 2. Connect to an Android device and run the image classification application. For details about how to connect the Android Studio to a device for debugging, see . + The mobile phone needs to have "USB debugging mode" turned on before Android Studio can recognize it. On Huawei phones, "USB debugging mode" is generally turned on in Settings > System and update > Developer options > USB debugging. + 3. Continue the installation on the Android device. After the installation is complete, you can view the content captured by a camera and the inference result.
![result](../images/lite_quick_start_app_result.png) @@ -95,31 +97,22 @@ This image classification sample program on the Android device includes a Java l ``` app -| -├── libs # library files that store MindSpore Lite dependencies -│ └── arm64-v8a -│ ├── libopencv_java4.so -│ └── libmindspore-lite.so │ -├── opencv # dependency files related to OpenCV -│ └── ... -| ├── src/main │ ├── assets # resource files -| | └── model.ms # model file +| | └── mobilenetv2.ms # model file │ | │ ├── cpp # main logic encapsulation classes for model loading and prediction -| | ├── include # header files related to MindSpore calling -| | | └── ... -│ | | +| | |── ... +| | ├── mindspore_lite_x.x.x-minddata-arm64-cpu` #MindSpore Lite version | | ├── MindSporeNetnative.cpp # JNI methods related to MindSpore calling │ | └── MindSporeNetnative.h # header file │ | │ ├── java # application code at the Java layer -│ │ └── com.huawei.himindsporedemo +│ │ └── com.mindspore.himindsporedemo │ │ ├── gallery.classify # implementation related to image processing and MindSpore JNI calling │ │ │ └── ... -│ │ └── obejctdetect # implementation related to camera enabling and drawing +│ │ └── widget # implementation related to camera enabling and drawing │ │ └── ... │ │ │ ├── res # resource files related to Android @@ -128,6 +121,7 @@ app ├── CMakeList.txt # CMake compilation entry file │ ├── build.gradle # Other Android configuration file +├── download.gradle # MindSpore version download └── ... ``` @@ -156,42 +150,40 @@ android{ Create a link to the `.so` library file in the `app/CMakeLists.txt` file: ``` -# Set MindSpore Lite Dependencies. -include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include/MindSpore) +# ============== Set MindSpore Dependencies. ============= +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/third_party/flatbuffers/include) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/include) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/include/ir/dtype) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/include/schema) + add_library(mindspore-lite SHARED IMPORTED ) -set_target_properties(mindspore-lite PROPERTIES - IMPORTED_LOCATION "${CMAKE_SOURCE_DIR}/libs/libmindspore-lite.so") +add_library(minddata-lite SHARED IMPORTED ) -# Set OpenCV Dependecies. -include_directories(${CMAKE_SOURCE_DIR}/opencv/sdk/native/jni/include) -add_library(lib-opencv SHARED IMPORTED ) -set_target_properties(lib-opencv PROPERTIES - IMPORTED_LOCATION "${CMAKE_SOURCE_DIR}/libs/libopencv_java4.so") +set_target_properties(mindspore-lite PROPERTIES IMPORTED_LOCATION + ${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/lib/libmindspore-lite.so) +set_target_properties(minddata-lite PROPERTIES IMPORTED_LOCATION + ${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/lib/libminddata-lite.so) +# --------------- MindSpore Lite set End. -------------------- # Link target library. target_link_libraries( ... - mindspore-lite - lib-opencv + # --- mindspore --- + minddata-lite + mindspore-lite ... ) ``` -In this example, the download.gradle File configuration auto download ` libmindspot-lite.so `and `libopencv_ Java4.so` library file, placed in the 'app / libs / arm64-v8a' directory. 
+In this example, the download.gradle file configuration automatically downloads the MindSpore Lite library files and places them in the `app/src/main/cpp/mindspore_lite_x.x.x-minddata-arm64-cpu` directory. Note: if the automatic download fails, please manually download the relevant library files and put them in the corresponding location. -libmindspore-lite.so [libmindspore-lite.so]( https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/libmindspore-lite.so) - -libmindspore-lite include [libmindspore-lite include]( https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/include.zip) - -libopencv_java4.so [libopencv_java4.so](https://download.mindspore.cn/model_zoo/official/lite/lib/opencv%204.4.0/libopencv_java4.so) - -libopencv include [libopencv include]( https://download.mindspore.cn/model_zoo/official/lite/lib/opencv%204.4.0/include.zip) - - +MindSpore Lite version [MindSpore Lite version](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz) ### Downloading and Deploying a Model File @@ -201,8 +193,6 @@ Note: if the automatic download fails, please manually download the relevant lib mobilenetv2.ms [mobilenetv2.ms]( https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2.ms) - - ### Compiling On-Device Inference Code Call MindSpore Lite C++ APIs at the JNI layer to implement on-device inference. The inference code process is as follows. For details about the complete code, s @@ -225,10 +215,8 @@ The inference code process is as follows. For details about the complete code, s *labelEnv = labelNet; // Create context. - lite::Context *context = new lite::Context; - - context->device_ctx_.type = lite::DT_CPU; - context->thread_num_ = numThread; //Specify the number of threads to run inference + mindspore::lite::Context *context = new mindspore::lite::Context; + context->thread_num_ = num_thread; // Create the mindspore session. labelNet->CreateSessionMS(modelBuffer, bufferLen, "device label", context); @@ -253,7 +241,7 @@ The inference code process is as follows. For details about the complete code, s ```cpp // Convert the Bitmap image passed in from the JAVA layer to Mat for OpenCV processing - BitmapToMat(env, srcBitmap, matImageSrc); + BitmapToMat(env, srcBitmap, matImageSrc); // Processing such as zooming the picture size. matImgPreprocessed = PreProcessImageData(matImageSrc); @@ -278,7 +266,38 @@ The inference code process is as follows. For details about the complete code, s delete[] (dataHWC); ``` -3. Perform inference on the input tensor based on the model, obtain the output tensor, and perform post-processing. +3. Preprocess the input data. + + ```cpp + bool PreProcessImageData(const LiteMat &lite_mat_bgr, LiteMat *lite_norm_mat_ptr) { + bool ret = false; + LiteMat lite_mat_resize; + LiteMat &lite_norm_mat_cut = *lite_norm_mat_ptr; + ret = ResizeBilinear(lite_mat_bgr, lite_mat_resize, 256, 256); + if (!ret) { + MS_PRINT("ResizeBilinear error"); + return false; + } + LiteMat lite_mat_convert_float; + ret = ConvertTo(lite_mat_resize, lite_mat_convert_float, 1.0 / 255.0); + if (!ret) { + MS_PRINT("ConvertTo error"); + return false; + } + LiteMat lite_mat_cut; + ret = Crop(lite_mat_convert_float, lite_mat_cut, 16, 16, 224, 224); + if (!ret) { + MS_PRINT("Crop error"); + return false; + } + float means[3] = {0.485, 0.456, 0.406}; + float vars[3] = {1.0 / 0.229, 1.0 / 0.224, 1.0 / 0.225}; + SubStractMeanNormalize(lite_mat_cut, lite_norm_mat_cut, means, vars); + return true; + } + ``` + +4.
Perform inference on the input tensor based on the model, obtain the output tensor, and perform post-processing. - Perform graph execution and on-device inference. @@ -289,7 +308,12 @@ The inference code process is as follows. For details about the complete code, s - Obtain the output data. ```cpp - auto msOutputs = mSession->GetOutputs(); + auto names = mSession->GetOutputTensorNames(); + std::unordered_map msOutputs; + for (const auto &name : names) { + auto temp_dat =mSession->GetOutputByTensorName(name); + msOutputs.insert(std::pair {name, temp_dat}); + } std::string retStr = ProcessRunnetResult(msOutputs, ret); ``` @@ -298,39 +322,34 @@ The inference code process is as follows. For details about the complete code, s std::string ProcessRunnetResult(std::unordered_map msOutputs, int runnetRet) { - // Get model output results. - std::unordered_map::iterator iter; - iter = msOutputs.begin(); - auto brach1_string = iter->first; - auto branch1_tensor = iter->second; + std::unordered_map::iterator iter; + iter = msOutputs.begin(); - int OUTPUTS_LEN = branch1_tensor->ElementsNum(); + // The mobilenetv2.ms model output just one branch. + auto outputTensor = iter->second; + int tensorNum = outputTensor->ElementsNum(); - float *temp_scores = static_cast(branch1_tensor->MutableData()); + // Get a pointer to the first score. + float *temp_scores = static_cast(outputTensor->MutableData()); - float scores[RET_CATEGORY_SUM]; - for (int i = 0; i < RET_CATEGORY_SUM; ++i) { - scores[i] = temp_scores[i]; + float scores[RET_CATEGORY_SUM]; + for (int i = 0; i < RET_CATEGORY_SUM; ++i) { + if (temp_scores[i] > 0.5) { + MS_PRINT("MindSpore scores[%d] : [%f]", i, temp_scores[i]); } + scores[i] = temp_scores[i]; + } - // Converted to text information that needs to be displayed in the APP. - std::string retStr = ""; - if (runnetRet == 0) { - for (int i = 0; i < RET_CATEGORY_SUM; ++i) { - if (scores[i] > 0.3){ - retStr += g_labels_name_map[i]; - retStr += ":"; - std::string score_str = std::to_string(scores[i]); - retStr += score_str; - retStr += ";"; - } - } - else { - MS_PRINT("MindSpore run net failed!"); - for (int i = 0; i < RET_CATEGORY_SUM; ++i) { - retStr += " :0.0;"; - } - } - return retStr; + // Score for each category. + // Converted to text information that needs to be displayed in the APP. + std::string categoryScore = ""; + for (int i = 0; i < RET_CATEGORY_SUM; ++i) { + categoryScore += labels_name_map[i]; + categoryScore += ":"; + std::string score_str = std::to_string(scores[i]); + categoryScore += score_str; + categoryScore += ";"; + } + return categoryScore; } ``` \ No newline at end of file diff --git a/lite/tutorials/source_en/use/benchmark_tool.md b/lite/tutorials/source_en/use/benchmark_tool.md index d6b3a09ae8554a462fd9a464120ee8cfc1f228f1..0dff3079895ef9bf83ff99a6e2d96f0b7c16e837 100644 --- a/lite/tutorials/source_en/use/benchmark_tool.md +++ b/lite/tutorials/source_en/use/benchmark_tool.md @@ -64,23 +64,16 @@ Mean bias of all nodes: 0% ``` -When the origin model's input or output data type is uint8, they needs to be reduced by 128 and converted to int8 type before it can be used as benchmark data to verify accuracy. And when the output data type is INT8, you need to specify calibDataType as INT8 in the parameter. 
- -```bash -./benchmark --modelPath=./models/test_benchmark_int8.ms --inDataPath=./input/test_benchmark_int8.bin --device=CPU --accuracyThreshold=3 --calibDataPath=./output/test_benchmark_int8.out --calibDataType=INT8 -``` - ## Parameter Description The command used for benchmark testing based on the compiled Benchmark tool is as follows: ```bash ./benchmark [--modelPath=] [--accuracyThreshold=] - [--calibDataPath=] [--cpuBindMode=] - [--device=] [--help] [--inDataPath=] - [--inDataType=] [--loopCount=] - [--numThreads=] [--omModelPath=] - [--resizeDims=] [--warmUpLoopCount=] + [--calibDataPath=] [--calibDataType=] + [--cpuBindMode=] [--device=] [--help] + [--inDataPath=] [--loopCount=] + [--numThreads=] [--warmUpLoopCount=] [--fp16Priority=] ``` @@ -91,7 +84,7 @@ The following describes the parameters in detail. | `--modelPath=` | Mandatory | Specifies the file path of the MindSpore Lite model for benchmark testing. | String | Null | - | | `--accuracyThreshold=` | Optional | Specifies the accuracy threshold. | Float | 0.5 | - | | `--calibDataPath=` | Optional | Specifies the file path of the benchmark data. The benchmark data, as the comparison output of the tested model, is output from the forward inference of the tested model under other deep learning frameworks using the same input. | String | Null | - | -| `--calibDataType=` | Optional | Specifies the calibration data type. | String | FLOAT | FLOAT or INT8 | +| `--calibDataType=` | Optional | Specifies the calibration data type. | String | FLOAT | UINT8, FLOAT or INT8 | | `--cpuBindMode=` | Optional | Specifies the type of the CPU core bound to the model inference program. | Integer | 1 | −1: medium core
1: large core
0: not bound | | `--device=` | Optional | Specifies the type of the device on which the model inference program runs. | String | CPU | CPU or GPU | | `--help` | Optional | Displays the help information about the `benchmark` command. | - | - | - | diff --git a/lite/tutorials/source_en/use/converter_tool.md b/lite/tutorials/source_en/use/converter_tool.md index 38cd115fb12a93031009cf9f2d12e1ab77045a46..21f89632b3dbd2cb18313f3b869a4119b89fa2d0 100644 --- a/lite/tutorials/source_en/use/converter_tool.md +++ b/lite/tutorials/source_en/use/converter_tool.md @@ -53,7 +53,7 @@ The following describes how to use the conversion command by using several commo The output is as follows: ``` - INFO [converter/converter.cc:190] Runconverter] CONVERTER RESULT: SUCCESS! + CONVERTER RESULT SUCCESS:0 ``` This indicates that the Caffe model is successfully converted into the MindSpore Lite model and the new file `lenet.ms` is generated. @@ -61,7 +61,7 @@ The following describes how to use the conversion command by using several commo - MindSpore model `model.mindir` ```bash - ./converter_lite --fmk=MS --modelFile=model.mindir --outputFile=model + ./converter_lite --fmk=MINDIR --modelFile=model.mindir --outputFile=model ``` - TensorFlow Lite model `model.tflite` ```bash ./converter_lite --fmk=TFLITE --modelFile=model.tflite --outputFile=model ``` @@ -79,16 +79,18 @@ The following describes how to use the conversion command by using several commo ./converter_lite --fmk=TFLITE --modelFile=model.tflite --outputFile=model --quantType=AwareTraining ``` - - TensorFlow Lite aware quantization model `model_quant.tflite` set the input and output data type to be int8 + - TensorFlow Lite aware quantization model `model_quant.tflite`, setting the input and output data type to float ```bash - ./converter_lite --fmk=TFLITE --modelFile=model.tflite --outputFile=model --quantType=AwareTraining --inputInferenceType=INT8 --inferenceType=INT8 + ./converter_lite --fmk=TFLITE --modelFile=model.tflite --outputFile=model --quantType=AwareTraining --inferenceType=FLOAT ``` In the preceding scenarios, the following information is displayed, indicating that the conversion is successful. In addition, the target file `model.ms` is obtained. ``` - INFO [converter/converter.cc:190] Runconverter] CONVERTER RESULT: SUCCESS! + CONVERTER RESULT SUCCESS:0 ``` +- If the conversion command fails to run, an [error code](https://www.mindspore.cn/lite/docs/en/master/apicc/errorcode_and_metatype.html) will be output. + ### Parameter Description MindSpore Lite model conversion tool provides multiple parameters. @@ -100,13 +102,12 @@ The following describes the parameters in detail. | Parameter | Mandatory or Not | Parameter Description | Value Range | Default Value | | -------- | ------- | ----- | --- | ---- | | `--help` | No | Prints all help information. | - | - | -| `--fmk=` | Yes | Original format of the input model. | MS, CAFFE, TFLITE, or ONNX | - | +| `--fmk=` | Yes | Original format of the input model. | MINDIR, CAFFE, TFLITE, or ONNX | - | | `--modelFile=` | Yes | Path of the input model. | - | - | | `--outputFile=` | Yes | Path of the output model. (If the path does not exist, a directory will be automatically created.) The suffix `.ms` can be automatically generated. | - | - | | `--weightFile=` | Yes (for Caffe models only) | Path of the weight file of the input model. | - | - | | `--quantType=` | No | Sets the quant type of the model. | PostTraining: quantization after training
AwareTraining: perceptual quantization | - | -|`--inputInferenceType=` | No(supported by aware quant models only) | Sets the input data type of the converted model. If the type is different from the origin model, the convert tool will insert data type convert op before the model to make sure the input data type is same as the input of origin model. | FLOAT or INT8 | FLOAT | -|`--inferenceType= `| No(supported by aware quant models only) | Sets the output data type of the converted model. If the type is different from the origin model, the convert tool will insert data type convert op before the model to make sure the output data type is same as the input of origin model. | FLOAT or INT8 | FLOAT | +|`--inferenceType=`| No (supported by aware quant models only) | Sets the input and output data type of the converted model. If the types are different from those of the original model, the convert tool will insert data type convert ops at the inputs and outputs of the model to make sure the data types are the same as in the original model. | UINT8, FLOAT or INT8 | FLOAT | |`--stdDev=`| No(supported by aware quant models only) | Sets the standard deviation of the input data. | (0,+∞) | 128 | |`--mean=`| No(supported by aware quant models only) | Sets the mean value of the input data. | [-128, 127] | -0.5 | @@ -119,9 +120,7 @@ The following describes the parameters in detail. To use the MindSpore Lite model conversion tool, the following environment preparations are required. -- Compile: The model conversion tool code is in the `mindspore/lite/tools/converter` directory of the MindSpore source code, refer to the [Environment Requirements](https://www.mindspore.cn/lite/tutorial/en/master/build.html#environment-requirements-1) and [Compilation Example](https://www.mindspore.cn/lite/tutorial/en/master/build.html#compilation-example-1) in the build document. - -- Run: Refer to [Output Description](https://www.mindspore.cn/lite/tutorial/en/master/build.html#output-description-1) in the deployment document to obtain the `converter` tool, and set the environment variable of MinGW(Add the bin directory of MinGW in the system variable Path). +- Get the toolkit: To obtain the `converter` tool, download the zip package of the Windows conversion tool and unzip it to a local directory. ### Parameter Description @@ -129,12 +128,7 @@ Reference description Linux environment model conversion tool [parameter descrip ### Example -First, use the cmd tool to enter the command to compile in the root directory of the source code, refer to `build.md`. -```bash -call build.bat lite -``` - -Then, set the log printing level to INFO. +Set the log printing level to INFO. ```bash set MSLOG=INFO ``` @@ -151,7 +145,7 @@ Several common examples are selected below to illustrate the use of conversion c The result is shown as: ``` - INFO [converter/converter.cc:190] Runconverter] CONVERTER RESULT: SUCCESS! + CONVERTER RESULT SUCCESS:0 ``` This means that the Caffe model has been successfully converted to the MindSpore Lite model and the new file `lenet.ms` has been obtained.
@@ -159,7 +153,7 @@ Several common examples are selected below to illustrate the use of conversion c - MindSpore model `model.mindir` ```bash - call converter_lite --fmk=MS --modelFile=model.mindir --outputFile=model + call converter_lite --fmk=MINDIR --modelFile=model.mindir --outputFile=model ``` - TensorFlow Lite model `model.tflite` @@ -179,5 +173,6 @@ Several common examples are selected below to illustrate the use of conversion c In the above cases, the following conversion success prompt is displayed, and the `model.ms` target file is obtained at the same time. ``` - INFO [converter/converter.cc:190] Runconverter] CONVERTER RESULT: SUCCESS! + CONVERTER RESULT SUCCESS:0 ``` +- If the conversion command fails, an [error code](https://www.mindspore.cn/lite/docs/en/master/apicc/errorcode_and_metatype.html) will be output. diff --git a/lite/tutorials/source_en/use/image_processing.md b/lite/tutorials/source_en/use/image_processing.md new file mode 100644 index 0000000000000000000000000000000000000000..cce6863a5b7258473fd3c4e9dc84f2c8bd4ccb8c --- /dev/null +++ b/lite/tutorials/source_en/use/image_processing.md @@ -0,0 +1,149 @@ +# Preprocess image data + + + +- [Preprocess image data](#preprocess-image-data) + - [Overview](#overview) + - [Import image preprocessing function library](#import-image-preprocessing-function-library) + - [Initialize the image](#initialize-the-image) + - [Usage example](#usage-example) + - [Optional image preprocessing operator](#optional-image-preprocessing-operator) + - [Resize image](#resize-image) + - [Usage example](#usage-example-1) + - [Convert the image data type](#convert-the-image-data-type) + - [Usage example](#usage-example-2) + - [Crop image data](#crop-image-data) + - [Usage example](#usage-example-3) + - [Normalize image data](#normalize-image-data) + - [Usage example](#usage-example-4) + + + +## Overview + +The main purpose of image preprocessing is to eliminate irrelevant information in the image, restore useful real information, enhance the detectability of related information and simplify data to the greatest extent, thereby improving the reliability of feature extraction, image segmentation, matching and recognition. Here, by creating a LiteMat object, the image data is processed before inference to meet the data format requirements for model inference. + +The process is as follows: + +## Import image preprocessing function library + +``` +#include "lite_cv/lite_mat.h" +#include "lite_cv/image_process.h" +``` + +## Initialize the image + +Here, the [InitFromPixel](https://www.mindspore.cn/lite/docs/en/master/apicc/dataset.html#initfrompixel) function in the `image_process.h` file is used to initialize the image. + +``` +bool InitFromPixel(const unsigned char *data, LPixelType pixel_type, LDataType data_type, int w, int h, LiteMat &m); +``` + +### Usage example + +``` +// Create the data object of the LiteMat object. +LiteMat lite_mat_bgr; + +// Initialize the lite_mat_bgr object. +// The image data pointer passed in by the user (The data in the Bitmap corresponding to the Android platform). +InitFromPixel(pixel_ptr, LPixelType::RGBA2GRAY, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); +``` + +## Optional image preprocessing operator + +The image processing operators here can be used in any combination according to the actual situation.
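+For example, a typical preprocessing pipeline chains the operators described in the following sections. The sketch below is illustrative only: it assumes the same `rgba_mat` source image used in this section's examples, and the crop offsets and normalization constants are placeholder values.
+
+```
+// Initialize a BGR image from the RGBA pixel data.
+LiteMat lite_mat_bgr;
+InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr);
+
+// Resize to 256x256 with the bilinear algorithm.
+LiteMat lite_mat_resize;
+ResizeBilinear(lite_mat_bgr, lite_mat_resize, 256, 256);
+
+// Convert the uint8 data to float, scaling it into [0, 1].
+LiteMat lite_mat_float;
+ConvertTo(lite_mat_resize, lite_mat_float, 1.0 / 255.0);
+
+// Crop a 224x224 region starting at (16, 16).
+LiteMat lite_mat_cut;
+Crop(lite_mat_float, lite_mat_cut, 16, 16, 224, 224);
+
+// Normalize the three channels with placeholder mean and variance values.
+float means[3] = {0.485, 0.456, 0.406};
+float norm[3] = {1.0 / 0.229, 1.0 / 0.224, 1.0 / 0.225};
+LiteMat lite_mat_norm;
+SubStractMeanNormalize(lite_mat_cut, lite_mat_norm, means, norm);
+```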
+ +### Resize image + +Here we use the [ResizeBilinear](https://www.mindspore.cn/lite/docs/en/master/apicc/dataset.html#resizebilinear) function in `image_process.h` to resize the image through a bilinear algorithm. Currently, the supported data type is uint8, and the supported channels are 3 and 1. + +``` +bool ResizeBilinear(const LiteMat &src, LiteMat &dst, int dst_w, int dst_h); +``` + +#### Usage example + +``` +// Initialize the image data. +LiteMat lite_mat_bgr; +InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); + +// Create a resize image data object. +LiteMat lite_mat_resize; + +// Resize the image. +ResizeBilinear(lite_mat_bgr, lite_mat_resize, 256, 256); +``` + +### Convert the image data type + +Here we use the [ConvertTo](https://www.mindspore.cn/lite/docs/en/master/apicc/dataset.html#convertto) function in `image_process.h` to convert the image data type. Currently, the supported conversion is to convert uint8 to float. + +``` +bool ConvertTo(const LiteMat &src, LiteMat &dst, double scale = 1.0); +``` + +#### Usage example + +``` +// Initialize the image data. +LiteMat lite_mat_bgr; +InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); + +// Create the converted data type object. +LiteMat lite_mat_convert_float; + +// Perform conversion type operations on the object. Currently, the supported conversion is to convert uint8 to float. +ConvertTo(lite_mat_bgr, lite_mat_convert_float); +``` + +### Crop image data + +Here we use the [Crop](https://www.mindspore.cn/lite/docs/en/master/apicc/dataset.html#crop) function in `image_process.h` to crop the image. Currently, channels 3 and 1 are supported. + +``` +bool Crop(const LiteMat &src, LiteMat &dst, int x, int y, int w, int h); +``` + +#### Usage example + +``` +// Initialize the image data. +LiteMat lite_mat_bgr; +InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); + +// Create the cropped object. +LiteMat lite_mat_cut; + +// The image is cropped by the values of x, y, w, h. +Crop(lite_mat_bgr, lite_mat_cut, 16, 16, 224, 224); +``` + +### Normalize image data + +Normalization is used to eliminate the influence of different scales among data indicators and make them comparable through standardization. Here, the [SubStractMeanNormalize](https://www.mindspore.cn/lite/docs/en/master/apicc/dataset.html#substractmeannormalize) function in `image_process.h` is used to normalize the image data. + +``` +bool SubStractMeanNormalize(const LiteMat &src, LiteMat &dst, float *mean, float *norm); +``` + +#### Usage example + +``` +// Initialize the image data. +LiteMat lite_mat_bgr; +InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); + +// The mean value of the image data. +// The variance of the image data. +float means[1] = {0.485}; +float norm[1] = {1.0 / 0.229}; + +// Create a normalized image object. +LiteMat lite_mat_bgr_norm; + +// The image data is normalized by the mean value and variance of the image data.
+SubStractMeanNormalize(lite_mat_bgr, lite_mat_bgr_norm, means, norm); +``` \ No newline at end of file diff --git a/lite/tutorials/source_en/use/runtime.md b/lite/tutorials/source_en/use/runtime.md index 748ef39812baddc070e870445719177ee72218b9..a50dbccf1efa6b98490dce9d31b8c7a3d8b31893 100644 --- a/lite/tutorials/source_en/use/runtime.md +++ b/lite/tutorials/source_en/use/runtime.md @@ -1,4 +1,4 @@ -# Use Runtime for Model Inference +# Use Runtime for Model Inference @@ -28,6 +28,10 @@ - [Example](#example-5) - [Obtaining Version String](#obtaining-version-string) - [Example](#example-6) + - [Session parallel launch](#session-parallel-launch) + - [Single Session parallel launch](#single-session-parallel-launch) + - [Multiple Session parallel launch](#multiple-session-parallel-launch) + - [Example](#example-7) @@ -50,7 +54,7 @@ Its components and their functions are described as follows: - `Operator`: operator prototype, including operator attributes and methods for inferring the shape, data type, and format. - `Kernel`: operator, which provides specific operator implementation and the operator forwarding function. - `Tensor`: tensor used by MindSpore Lite, which provides functions and APIs for tensor memory operations. - + ## Reading Models In MindSpore Lite, a model file is an `.ms` file converted using the model conversion tool. During model inference, the model needs to be loaded from the file system and parsed. Related operations are mainly implemented in the Model component. The Model component holds model data such as weight data and operator attributes. @@ -77,66 +81,16 @@ Contexts save some basic configuration parameters required by sessions to guide MindSpore Lite supports heterogeneous inference. The preferred backend for inference is specified by `device_ctx_` in `Context` and is CPU by default. During graph compilation, operator selection and scheduling are performed based on the preferred backend. -```cpp -/// \brief DeviceType defined for holding user's preferred backend. -typedef enum { - DT_CPU, /**< CPU device type */ - DT_GPU, /**< GPU device type */ - DT_NPU /**< NPU device type, not supported yet */ -} DeviceType; - -/// \brief DeviceContext defined for holding DeviceType. -typedef struct { - DeviceType type; /**< device type */ -} DeviceContext; - -DeviceContext device_ctx_{DT_CPU}; -``` - MindSpore Lite has a built-in thread pool shared by processes. During inference, `thread_num_` is used to specify the maximum number of threads in the thread pool. The default maximum number is 2. It is recommended that the maximum number be no more than 4. Otherwise, the performance may be affected. -```c++ -int thread_num_ = 2; /**< thread number config for thread pool */ -``` - MindSpore Lite supports dynamic memory allocation and release. If `allocator` is not specified, a default `allocator` is generated during inference. You can also use the `Context` method to allow multiple `Context` to share the memory allocator. If users create the `Context` by using `new`, it should be released by using `delete` once it's not required. Usually the `Context` is released after finishing the session creation. -```cpp -/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically. -/// -/// \note List public class and interface for reference. -class Allocator; - -/// \brief Context defined for holding environment variables during runtime. -class MS_API Context { - public: - /// \brief Constructor of MindSpore Lite Context using input value for parameters. 
- /// - /// \param[in] thread_num Define the work thread number during the runtime. - /// \param[in] allocator Define the allocator for malloc. - /// \param[in] device_ctx Define device information during the runtime. - Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx); - - public: - std::shared_ptr<Allocator> allocator = nullptr; -} -``` - ### Creating Sessions Use the `Context` created in the previous step to call the static `CreateSession` method of LiteSession to create `LiteSession`. The `LiteSession` instance returned by the function is a pointer, which is created by using `new`. If the pointer is not required, you need to release it by using `delete`. -```cpp -/// \brief Static method to create a LiteSession pointer. -/// -/// \param[in] context Define the context of session to be created. -/// -/// \return Pointer of MindSpore Lite LiteSession. -static LiteSession *CreateSession(lite::Context *context); -``` - ### Example The following sample code demonstrates how to create a `Context` and how to allow two `LiteSession` to share a memory pool. @@ -148,13 +102,16 @@ if (context == nullptr) { return RET_ERROR; } // The preferred backend is GPU, which means, if there is a GPU operator, it will run on the GPU first, otherwise it will run on the CPU. -context->device_ctx_.type = lite::DT_GPU; +context->device_type_ = lite::DT_GPU; // The medium core takes priority in thread and core binding methods. This parameter will work in the BindThread interface. For specific binding effect, see the "Run Graph" section. context->cpu_bind_mode_ = MID_CPU; -// Configure the number of worker threads in the thread pool to 2, including the main thread. +// Configure the number of worker threads in the thread pool to 2, including the main thread. context->thread_num_ = 2; // Allocators can be shared across multiple Contexts. -auto *context2 = new Context(context->thread_num_, context->allocator, context->device_ctx_); +auto *context2 = new Context(); +context2->thread_num_ = context->thread_num_; +context2->allocator = context->allocator; +context2->device_type_ = context->device_type_; context2->cpu_bind_mode_ = context->cpu_bind_mode_; // Use Context to create Session. auto session1 = session::LiteSession::CreateSession(context); @@ -167,7 +124,7 @@ if (session1 == nullptr) { // session1 and session2 can share one memory pool. auto session2 = session::LiteSession::CreateSession(context2); delete (context2); -if (session == nullptr) { +if (session2 == nullptr) { MS_LOG(ERROR) << "CreateSession failed while running %s", modelName.c_str(); return RET_ERROR; } @@ -179,6 +136,8 @@ if (session == nullptr) { When using MindSpore Lite for inference, after the session creation and graph compilation have been completed, if you need to resize the input shape, you can reset the shape of the input tensor, and then call the session's Resize() interface. +> Not all models support variable dimensions. For example, when a model contains a MatMul operator with one weight tensor input and one data tensor input, calling the variable dimension interface causes the shapes of the input tensor and the weight tensor to mismatch, and inference eventually fails. + ```cpp /// \brief Get input MindSpore Lite MSTensors of model. /// @@ -187,10 +146,11 @@ virtual std::vector<tensor::MSTensor *> GetInputs() const = 0; /// \brief Resize inputs shape. /// -/// \param[in] inputs Define the new inputs shape. +/// \param[in] inputs Define the model inputs. +/// \param[in] dims Define the new shapes of all inputs.
/// /// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h. -virtual int Resize(const std::vector<tensor::MSTensor *> &inputs) = 0; +virtual int Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) = 0; ``` ### Example @@ -201,9 +161,10 @@ The following code demonstrates how to resize the input of MindSpore Lite: // Assume we have created a LiteSession instance named session. auto inputs = session->GetInputs(); std::vector<int> resize_shape = {1, 128, 128, 3}; +std::vector<std::vector<int>> new_shapes; +new_shapes.push_back(resize_shape); // Assume the model has only one input, resize input shape to [1, 128, 128, 3] -inputs[0]->set_shape(resize_shape); -session->Resize(inputs); +session->Resize(inputs, new_shapes); ``` ### Compiling Graphs @@ -324,14 +285,6 @@ Note: After a MindSpore Lite session performs graph compilation, you can use `RunGraph` of `LiteSession` for model inference. ```cpp -/// \brief Run session with callback. -/// -/// \param[in] before Define a call_back_function to be called before running each node. -/// \param[in] after Define a call_back_function to be called after running each node. -/// -/// \note RunGraph should be called after CompileGraph. -/// -/// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h. virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0; ``` @@ -506,16 +459,16 @@ virtual void *MutableData() const = 0; ### Example -The following sample code shows how to obtain the output `MSTensor` from `LiteSession` using the `GetOutputMapByNode` method and print the first ten data or all data records of each output `MSTensor`. +The following sample code shows how to obtain the output `MSTensor` from `LiteSession` using the `GetOutputs` method and print the first ten data records, or all data records, of each output `MSTensor`. ```cpp // Assume we have created a LiteSession instance named session before. -auto output_map = session->GetOutputMapByNode(); +auto output_map = session->GetOutputs(); // Assume that the model has only one output node. auto out_node_iter = output_map.begin(); std::string name = out_node_iter->first; // Assume that the unique output node has only one output tensor. -auto out_tensor = out_node_iter->second.front(); +auto out_tensor = out_node_iter->second; if (out_tensor == nullptr) { std::cerr << "Output tensor is nullptr" << std::endl; return -1; @@ -530,7 +483,7 @@ if (out_data == nullptr) { std::cerr << "Data of out_tensor is nullptr" << std::endl; return -1; } -// Print the first 10 float data or all output data of the output tensor. +// Print the first 10 float data or all output data of the output tensor. std::cout << "Output data: "; for (size_t i = 0; i < 10 && i < out_tensor->ElementsNum(); i++) { std::cout << " " << out_data[i]; @@ -539,7 +492,7 @@ std::cout << std::endl; // The elements in outputs do not need to be freed by users, because outputs are managed by MindSpore Lite. ``` -Note that the vectors or map returned by the `GetOutputsByNodeName`, `GetOutputMapByNode`, `GetOutputByTensorName` and `GetOutputMapByTensor` methods do not need to be released by users. +Note that the vectors or map returned by the `GetOutputsByNodeName`, `GetOutputByTensorName` and `GetOutputs` methods do not need to be released by users. The following sample code shows how to obtain the output `MSTensor` from `LiteSession` using the `GetOutputsByNodeName` method.
@@ -555,28 +508,16 @@ if (out_tensor == nullptr) { } ``` -The following sample code shows how to obtain the output `MSTensor` from `LiteSession` using the `GetOutputMapByTensor` method. - -```cpp -// Assume we have created a LiteSession instance named session before. -auto output_map = session->GetOutputMapByTensor(); -// Assume that output node named output_node_name_0 has only one output tensor. -auto out_tensor = output_vec.front(); -if (out_tensor == nullptr) { - std::cerr << "Output tensor is nullptr" << std::endl; - return -1; -} -``` - The following sample code shows how to obtain the output `MSTensor` from `LiteSession` using the `GetOutputByTensorName` method. ```cpp +// Assume we have created a LiteSession instance named session. // We can use the GetOutputTensorNames method to get the names of all output tensors of the model, in order. -auto tensor_names = this->GetOutputTensorNames(); +auto tensor_names = session->GetOutputTensorNames(); // Assume we have created a LiteSession instance named session before. // Use output tensor name returned by GetOutputTensorNames as key for (auto tensor_name : tensor_names) { - auto out_tensor = this->GetOutputByTensorName(tensor_name); + auto out_tensor = session->GetOutputByTensorName(tensor_name); if (out_tensor == nullptr) { std::cerr << "Output tensor is nullptr" << std::endl; return -1; @@ -592,5 +533,114 @@ The following sample code shows how to obtain version string using `Version` met ```cpp #include "include/version.h" -std::string version = mindspore::lite::Version(); +std::string version = mindspore::lite::Version(); +``` + +## Session parallel launch +MindSpore Lite supports running multiple `LiteSession` inferences in parallel, but does not support multiple threads calling the `RunGraph` interface of a single `LiteSession` at the same time. + +### Single Session parallel launch + +MindSpore Lite does not support calling the inference interface of a single `LiteSession` from multiple threads in parallel; otherwise, the following error message is reported: +```cpp +ERROR [mindspore/lite/src/lite_session.cc:297] RunGraph] 10 Not support multi-threading +``` + +### Multiple Session parallel launch + +MindSpore Lite supports multiple `LiteSession` instances doing inference in parallel. The thread pool and memory pool of each `LiteSession` are independent.
+ +### Example + +The following code shows how to create multiple `LiteSession` instances and do inference in parallel: +```cpp +#include <iostream> +#include <thread> +#include "src/common/file_utils.h" +#include "include/model.h" +#include "include/version.h" +#include "include/context.h" +#include "include/lite_session.h" + +mindspore::session::LiteSession *GenerateSession(mindspore::lite::Model *model) { + if (model == nullptr) { + std::cerr << "Read model file failed while running" << std::endl; + return nullptr; + } + auto context = new (std::nothrow) mindspore::lite::Context; + if (context == nullptr) { + std::cerr << "New context failed while running" << std::endl; + return nullptr; + } + + auto session = mindspore::session::LiteSession::CreateSession(context); + delete (context); + if (session == nullptr) { + std::cerr << "CreateSession failed while running" << std::endl; + return nullptr; + } + auto ret = session->CompileGraph(model); + if (ret != mindspore::lite::RET_OK) { + std::cout << "CompileGraph failed while running" << std::endl; + delete (session); + return nullptr; + } + auto msInputs = session->GetInputs(); + for (auto msInput : msInputs) { + (void)msInput->MutableData(); + } + return session; +} + +int main(int argc, const char **argv) { + size_t size = 0; + char *graphBuf = mindspore::lite::ReadFile("test.ms", &size); + if (graphBuf == nullptr) { + std::cerr << "Read model file failed while running" << std::endl; + return -1; + } + auto model = mindspore::lite::Model::Import(graphBuf, size); + if (model == nullptr) { + std::cerr << "Import model file failed while running" << std::endl; + delete[](graphBuf); + return -1; + } + delete[](graphBuf); + auto session1 = GenerateSession(model); + if (session1 == nullptr) { + std::cerr << "Generate session 1 failed" << std::endl; + delete(model); + return -1; + } + auto session2 = GenerateSession(model); + if (session2 == nullptr) { + std::cerr << "Generate session 2 failed" << std::endl; + delete(model); + return -1; + } + + std::thread thread1([&](){ + auto status = session1->RunGraph(); + if (status != 0) { + std::cerr << "Inference error " << status << std::endl; + return; + } + std::cout << "Session1 inference success" << std::endl; + }); + + std::thread thread2([&](){ + auto status = session2->RunGraph(); + if (status != 0) { + std::cerr << "Inference error " << status << std::endl; + return; + } + std::cout << "Session2 inference success" << std::endl; + }); + + thread1.join(); + thread2.join(); + delete (session1); + delete (session2); + delete (model); + return 0; +} ``` diff --git a/lite/tutorials/source_en/use/timeprofiler_tool.md b/lite/tutorials/source_en/use/timeprofiler_tool.md index b0e3d35860448974da085d8230d58654bf46868e..1442ecc46d9b1606ee501e4b3b19ae7139eed88d 100644 --- a/lite/tutorials/source_en/use/timeprofiler_tool.md +++ b/lite/tutorials/source_en/use/timeprofiler_tool.md @@ -20,16 +20,16 @@ After model conversion and before inference, you can use the TimeProfiler tool t To use the TimeProfiler tool, you need to prepare the environment as follows: -- Compilation: Install build dependencies and perform build. The code of the TimeProfiler tool is stored in the `mindspore/lite/tools/time_profile` directory of the MindSpore source code. For details about the build operations, see the [Environment Requirements](https://www.mindspore.cn/lite/tutorial/en/master/build.html#environment-requirements) and [Compilation Example](https://www.mindspore.cn/lite/tutorial/en/master/build.html#compilation-example) in the build document.
+- Compilation: Install build dependencies and perform build. The code of the TimeProfiler tool is stored in the `mindspore/lite/tools/time_profiler` directory of the MindSpore source code. For details about the build operations, see the [Environment Requirements](https://www.mindspore.cn/lite/tutorial/en/master/build.html#environment-requirements) and [Compilation Example](https://www.mindspore.cn/lite/tutorial/en/master/build.html#compilation-example) in the build document. -- Run: Obtain the `timeprofile` tool and configure environment variables by referring to [Output Description](https://www.mindspore.cn/lite/tutorial/en/master/build.html#output-description) in the build document. +- Run: Obtain the `timeprofiler` tool and configure environment variables by referring to [Output Description](https://www.mindspore.cn/lite/tutorial/en/master/build.html#output-description) in the build document. ## Parameter Description The command used for analyzing the time consumption of forward inference at the network layer based on the compiled TimeProfiler tool is as follows: ```bash -./timeprofile --modelPath= [--help] [--loopCount=] [--numThreads=] [--cpuBindMode=] [--inDataPath=] [--fp16Priority=] +./timeprofiler --modelPath= [--help] [--loopCount=] [--numThreads=] [--cpuBindMode=] [--inDataPath=] [--fp16Priority=] ``` The following describes the parameters in detail. @@ -49,7 +49,7 @@ The following describes the parameters in detail. Take the `test_timeprofiler.ms` model as an example and set the number of model inference cycles to 10. The command for using TimeProfiler to analyze the time consumption at the network layer is as follows: ```bash -./timeprofile --modelPath=./models/test_timeprofiler.ms --loopCount=10 +./timeprofiler --modelPath=./models/test_timeprofiler.ms --loopCount=10 ``` After this command is executed, the TimeProfiler tool outputs the statistics on the running time of the model at the network layer. In this example, the command output is as follows: The statistics are displayed by `opName` and `optype`. `opName` indicates the operator name, `optype` indicates the operator type, and `avg` indicates the average running time of the operator per single run, `percent` indicates the ratio of the operator running time to the total operator running time, `calledTimess` indicates the number of times that the operator is run, and `opTotalTime` indicates the total time that the operator is run for a specified number of times. Finally, `total time` and `kernel cost` show the average time consumed by a single inference operation of the model and the sum of the average time consumed by all operators in the model inference, respectively.
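For reference, a fuller invocation that exercises the optional parameters might look as follows. The flag names come from the synopsis above; the values here are illustrative assumptions rather than recommended settings:

```bash
./timeprofiler --modelPath=./models/test_timeprofiler.ms --loopCount=10 --numThreads=2 --cpuBindMode=1 --inDataPath=./input/test_timeprofiler.bin --fp16Priority=false
```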
diff --git a/lite/tutorials/source_zh_cn/_static/logo_source.png b/lite/tutorials/source_zh_cn/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/lite/tutorials/source_zh_cn/_static/logo_source.png and b/lite/tutorials/source_zh_cn/_static/logo_source.png differ diff --git a/lite/tutorials/source_zh_cn/build.md b/lite/tutorials/source_zh_cn/build.md index a3e60383d37df133bbfc65f5b614311e45119032..71a6d6c612ed53473bafc8c3e26f9b50f3daf471 100644 --- a/lite/tutorials/source_zh_cn/build.md +++ b/lite/tutorials/source_zh_cn/build.md @@ -10,11 +10,7 @@ - [编译输出](#编译输出) - [模型转换工具converter目录结构说明](#模型转换工具converter目录结构说明) - [模型推理框架runtime及其他工具目录结构说明](#模型推理框架runtime及其他工具目录结构说明) - - [Windows环境编译](#windows环境编译) - - [环境要求](#环境要求-1) - - [编译选项](#编译选项-1) - - [编译示例](#编译示例-1) - - [编译输出](#编译输出-1) + - [图像处理库目录结构说明](#图像处理库目录结构说明) @@ -24,10 +20,11 @@ | 模块 | 支持平台 | 说明 | | --- | ---- | ---- | -| converter | Linux、Windows | 模型转换工具 | +| converter | Linux | 模型转换工具 | | runtime | Linux、Android | 模型推理框架 | | benchmark | Linux、Android | 基准测试工具 | -| time_profiler | Linux、Android | 性能分析工具 | +| timeprofiler | Linux、Android | 性能分析工具 | +| imageprocess | Linux、Android | 图像处理库 | ## Linux环境编译 @@ -35,7 +32,7 @@ - 系统环境:Linux x86_64,推荐使用Ubuntu 18.04.02LTS -- runtime、benchmark、time_profiler编译依赖 +- runtime、benchmark、timeprofiler编译依赖 - [CMake](https://cmake.org/download/) >= 3.14.1 - [GCC](https://gcc.gnu.org/releases.html) >= 7.3.0 - [Android_NDK](https://dl.google.com/android/repository/android-ndk-r20b-linux-x86_64.zip) >= r20 @@ -53,6 +50,7 @@ - [Libevent](https://libevent.org) >= 2.0 - [M4](https://www.gnu.org/software/m4/m4.html) >= 1.4.18 - [OpenSSL](https://www.openssl.org/) >= 1.1.1 + - [Python](https://www.python.org/) >= 3.7.5 > - 当安装完依赖项Android_NDK后,需配置环境变量:`export ANDROID_NDK={$NDK_PATH}/android-ndk-r20b`。 > - 编译脚本中会执行`git clone`获取第三方依赖库的代码,请提前确保git的网络设置正确可用。 @@ -69,6 +67,7 @@ MindSpore Lite提供编译脚本`build.sh`用于一键式编译,位于MindSpor | -j[n] | 设定编译时所用的线程数,否则默认设定为8线程 | Integer | 否 | | -e | 选择除CPU之外的其他内置算子类型,仅在ARM架构下适用,当前仅支持GPU | GPU | 否 | | -h | 显示编译帮助信息 | 无 | 否 | +| -n | 指定编译轻量级图片处理模块 | lite_cv | 否 | > 在`-I`参数变动时,如`-I x86_64`变为`-I arm64`,添加`-i`参数进行增量编译不生效。 @@ -102,11 +101,17 @@ git clone https://gitee.com/mindspore/mindspore.git bash build.sh -I arm64 -e gpu ``` +- 编译ARM64带图像预处理模块。 + ```bash + bash build.sh -I arm64 -n lite_cv + ``` + ### 编译输出 -编译完成后,进入`mindspore/output/`目录,可查看编译后生成的文件。文件分为两部分: +编译完成后,进入`mindspore/output/`目录,可查看编译后生成的文件。文件分为三部分: - `mindspore-lite-{version}-converter-{os}.tar.gz`:包含模型转换工具converter。 -- `mindspore-lite-{version}-runtime-{os}-{device}.tar.gz`:包含模型推理框架runtime、基准测试工具benchmark和性能分析工具time_profiler。 +- `mindspore-lite-{version}-runtime-{os}-{device}.tar.gz`:包含模型推理框架runtime、基准测试工具benchmark和性能分析工具timeprofiler。 +- `mindspore-lite-{version}-minddata-{os}-{device}.tar.gz`:包含图像处理库imageprocess。 > version:输出件版本号,与所编译的分支代码对应的版本一致。 > @@ -119,6 +124,7 @@ git clone https://gitee.com/mindspore/mindspore.git ```bash tar -xvf mindspore-lite-{version}-converter-{os}.tar.gz tar -xvf mindspore-lite-{version}-runtime-{os}-{device}.tar.gz +tar -xvf mindspore-lite-{version}-minddata-{os}-{device}.tar.gz ``` #### 模型转换工具converter目录结构说明 @@ -148,7 +154,7 @@ tar -xvf mindspore-lite-{version}-runtime-{os}-{device}.tar.gz │ └── third_party # 第三方库头文件和库 │ ├── flatbuffers # FlatBuffers头文件 │ └── include # 推理框架头文件 - │ └── time_profile # 模型网络层耗时分析工具 + │ └── time_profiler # 模型网络层耗时分析工具 ``` @@ -159,75 +165,45 @@ tar -xvf 
mindspore-lite-{version}-runtime-{os}-{device}.tar.gz │ └── benchmark # 基准测试工具 │ └── lib # 推理框架动态库 │ ├── libmindspore-lite.so # MindSpore Lite推理框架的动态库 - │ ├── liboptimize.so # MindSpore Lite算子性能优化库 + │ ├── libmindspore-lite-fp16.so # MindSpore Lite Float16算子性能优化库 + │ ├── libmindspore-lite-optimize.so # MindSpore Lite量化算子性能优化库 │ └── third_party # 第三方库头文件和库 │ ├── flatbuffers # FlatBuffers头文件 │ └── include # 推理框架头文件 - │ └── time_profile # 模型网络层耗时分析工具 + │ └── time_profiler # 模型网络层耗时分析工具 ``` - 当编译选项为`-I arm32`时: ``` | - ├── mindspore-lite-{version}-runtime-arm64-cpu + ├── mindspore-lite-{version}-runtime-arm32-cpu │ └── benchmark # 基准测试工具 │ └── lib # 推理框架动态库 │ ├── libmindspore-lite.so # MindSpore Lite推理框架的动态库 │ └── third_party # 第三方库头文件和库 │ ├── flatbuffers # FlatBuffers头文件 │ └── include # 推理框架头文件 - │ └── time_profile # 模型网络层耗时分析工具 + │ └── time_profiler # 模型网络层耗时分析工具 ``` -> 1. `liboptimize.so`仅在runtime-arm64的输出包中存在,仅在ARMv8.2和支持fp16特性的CPU上使用。 -> 2. 编译ARM64默认可获得arm64-cpu的推理框架输出件,若添加`-e gpu`则获得arm64-gpu的推理框架输出件,此时包名为`mindspore-lite-{version}-runtime-arm64-gpu.tar.gz`,编译ARM32同理。 -> 3. 运行converter、benchmark或time_profile目录下的工具前,都需配置环境变量,将MindSpore Lite和Protobuf的动态库所在的路径配置到系统搜索动态库的路径中。以0.7.0-beta版本下编译为例:配置converter:`export LD_LIBRARY_PATH=./output/mindspore-lite-0.7.0-converter-ubuntu/third_party/protobuf/lib:./output/mindspore-lite-0.7.0-converter-ubuntu/third_party/flatbuffers/lib:${LD_LIBRARY_PATH}`;配置benchmark和timeprofiler:`export LD_LIBRARY_PATH=./output/mindspore-lite-0.7.0-runtime-x86-cpu/lib:${LD_LIBRARY_PATH}`。 - -## Windows环境编译 - -### 环境要求 - -- 支持的编译环境为:Windows 10,64位。 - -- 编译依赖 - - [CMake](https://cmake.org/download/) >= 3.14.1 - - [MinGW GCC](https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/7.3.0/threads-posix/seh/x86_64-7.3.0-release-posix-seh-rt_v5-rev0.7z/download) = 7.3.0 - - [Python](https://www.python.org/) >= 3.7.5 - -> 编译脚本中会执行`git clone`获取第三方依赖库的代码,请提前确保git的网络设置正确可用。 - -### 编译选项 - -MindSpore Lite的编译选项如下。 - -| 参数 | 参数说明 | 是否必选 | -| -------- | ----- | ---- | -| **lite** | **设置该参数,则对Mindspore Lite工程进行编译** | **是** | -| [n] | 设定编译时所用的线程数,否则默认设定为6线程 | 否 | +> 1. `libmindspore-lite-optimize.so`仅在runtime-arm64的输出包中存在,仅在ARMv8.2及以上版本且支持dotprod指令的CPU上使用的性能优化库。 +> 2. `libmindspore-lite-fp16.so`仅在runtime-arm64的输出包中存在,仅在ARMv8.2及以上版本且支持fp16的CPU上使用的性能优化库。 +> 3. 编译ARM64默认可获得arm64-cpu的推理框架输出件,若添加`-e gpu`则获得arm64-gpu的推理框架输出件,此时包名为`mindspore-lite-{version}-runtime-arm64-gpu.tar.gz`,编译ARM32同理。 +> 4. 
运行converter、benchmark或time_profiler目录下的工具前,都需配置环境变量,将MindSpore Lite和Protobuf的动态库所在的路径配置到系统搜索动态库的路径中。以0.7.0-beta版本下编译为例:配置converter:`export LD_LIBRARY_PATH=./output/mindspore-lite-0.7.0-converter-ubuntu/third_party/protobuf/lib:./output/mindspore-lite-0.7.0-converter-ubuntu/third_party/flatbuffers/lib:${LD_LIBRARY_PATH}`;配置benchmark和timeprofiler:`export LD_LIBRARY_PATH=./output/mindspore-lite-0.7.0-runtime-x86-cpu/lib:${LD_LIBRARY_PATH}`。 -### 编译示例 +#### 图像处理库目录结构说明 -首先,使用git工具从MindSpore代码仓下载源码。 +图像处理库在`-I arm64 -n lite_cv`编译选项下获得,内容包括以下几部分: -```bash -git clone https://gitee.com/mindspore/mindspore.git ``` - -然后,使用cmd工具在源码根目录下,执行如下命令即可编译MindSpore Lite。 - -- 以默认线程数(6线程)编译Windows版本。 - ```bash - call build.bat lite - ``` -- 以指定线程数8编译Windows版本。 - ```bash - call build.bat lite 8 - ``` - -### 编译输出 - -编译完成之后,进入`mindspore/output/`目录,解压后即可获取输出件`mindspore-lite-{version}-converter-win-cpu.zip`,其中含有转换工具可执行文件。 - -> version:输出件版本号,与所编译的分支代码对应的版本一致。 +| +├── mindspore-lite-{version}-minddata-{os}-{device} +│ └── include # 头文件 +│ ├── lite_cv # 图像处理库头文件 +│ └── lib # 动态库 +│ ├── libminddata-lite.so # 图像处理动态库 +│ └── third_party # 第三方库头文件和库 +│ ├── flatbuffers # Flatbuffers的动态库 +``` diff --git a/lite/tutorials/source_zh_cn/index.rst b/lite/tutorials/source_zh_cn/index.rst index 3bfde552d2bec6205ba366d6d30c200bce0904d7..1f3de867e254412ed590e67d0ed725519cbb3b4e 100644 --- a/lite/tutorials/source_zh_cn/index.rst +++ b/lite/tutorials/source_zh_cn/index.rst @@ -21,4 +21,5 @@ MindSpore端侧教程 build use/converter_tool use/evaluating_the_model + use/image_processing use/runtime diff --git a/lite/tutorials/source_zh_cn/quick_start/quick_start.md b/lite/tutorials/source_zh_cn/quick_start/quick_start.md index ef76d900d3bbb15f9e2680656e356f7e9bf71b2a..046ea3cabfe9be898d821c9752c98363b918be37 100644 --- a/lite/tutorials/source_zh_cn/quick_start/quick_start.md +++ b/lite/tutorials/source_zh_cn/quick_start/quick_start.md @@ -42,7 +42,7 @@ MindSpore Model Zoo中图像分类模型可[在此下载](https://download.minds 以mobilenetv2模型为例,如下脚本将其转换为MindSpore Lite模型用于端侧推理。 ```bash -./converter_lite --fmk=MS --modelFile=mobilenetv2.mindir --outputFile=mobilenetv2.ms +./converter_lite --fmk=MINDIR --modelFile=mobilenetv2.mindir --outputFile=mobilenetv2.ms ``` ## 部署应用 @@ -53,9 +53,9 @@ MindSpore Model Zoo中图像分类模型可[在此下载](https://download.minds - Android Studio >= 3.2 (推荐4.0以上版本) - NDK 21.3 -- CMake 3.10.2 +- [CMake](https://cmake.org/download) 3.10.2 - Android SDK >= 26 -- OpenCV >= 4.0.0 (本示例代码已包含) +- [JDK]( https://www.oracle.com/downloads/otn-pub/java/JDK/) >= 1.8 ### 构建与运行 @@ -67,7 +67,7 @@ MindSpore Model Zoo中图像分类模型可[在此下载](https://download.minds ![start_sdk](../images/lite_quick_start_sdk.png) - (可选)若安装时出现NDK版本问题,可手动下载相应的[NDK版本](https://developer.android.com/ndk/downloads?hl=zh-cn)(本示例代码使用的NDK版本为21.3),并在`Project Structure`的`Android NDK location`设置中指定SDK的位置。 + (可选)若安装时出现NDK版本问题,可手动下载相应的[NDK版本](https://developer.android.com/ndk/downloads?hl=zh-cn)(本示例代码使用的NDK版本为21.3),并在`Project Structure`的`Android NDK location`设置中指定NDK的位置。 ![project_structure](../images/lite_quick_start_project_structure.png) @@ -79,10 +79,14 @@ MindSpore Model Zoo中图像分类模型可[在此下载](https://download.minds Android Studio连接设备调试操作,可参考。 + 手机需开启“USB调试模式”,Android Studio才能识别到手机。 华为手机一般在`设置->系统和更新->开发人员选项->USB调试`中打开“USB调试模式”。 + 3. 
在Android设备上,点击“继续安装”,安装完即可查看到设备摄像头捕获的内容和推理结果。 ![install](../images/lite_quick_start_install.png) + + 识别结果如下图所示。 ![result](../images/lite_quick_start_app_result.png) @@ -98,29 +102,22 @@ MindSpore Model Zoo中图像分类模型可[在此下载](https://download.minds ``` app -| -├── libs # 存放MindSpore Lite依赖的库文件 -│ └── arm64-v8a -│ ├── libopencv_java4.so -│ └── libmindspore-lite.so -│ -├── opencv # opencv 相关依赖文件 -│ └── ... -| ├── src/main │ ├── assets # 资源文件 -| | └── model.ms # 存放模型文件 +| | └── mobilenetv2.ms # 存放模型文件 │ | │ ├── cpp # 模型加载和预测主要逻辑封装类 | | ├── .. +| | ├── mindspore_lite_x.x.x-minddata-arm64-cpu # MindSpore Lite版本 | | ├── MindSporeNetnative.cpp # MindSpore调用相关的JNI方法 │ | └── MindSporeNetnative.h # 头文件 +| | └── MsNetWork.cpp # MindSpore接口封装 │ | │ ├── java # java层应用代码 -│ │ └── com.huawei.himindsporedemo +│ │ └── com.mindspore.himindsporedemo │ │ ├── gallery.classify # 图像处理及MindSpore JNI调用相关实现 │ │ │ └── ... -│ │ └── obejctdetect # 开启摄像头及绘制相关实现 +│ │ └── widget # 开启摄像头及绘制相关实现 │ │ └── ... │ │ │ ├── res # 存放Android相关的资源文件 @@ -129,6 +126,7 @@ app ├── CMakeList.txt # cmake编译入口文件 │ ├── build.gradle # 其他Android配置文件 +├── download.gradle # 工程依赖文件下载 └── ... ``` @@ -136,19 +134,11 @@ app Android JNI层调用MindSpore C++ API时,需要相关库文件支持。可通过MindSpore Lite[源码编译](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html)生成`libmindspore-lite.so`库文件。 -本示例中,bulid过程由download.gradle文件配置自动下载`libmindspore-lite.so`以及OpenCV的`libopencv_java4.so`库文件,并放置在`app/libs/arm64-v8a`目录下。 +本示例中,build过程由download.gradle文件自动从华为服务器下载MindSpore Lite版本文件,并放置在`app/src/ main/cpp/mindspore_lite_x.x.x-minddata-arm64-cpu`目录下。 注: 若自动下载失败,请手动下载相关库文件并将其放在对应位置: -libmindspore-lite.so [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/libmindspore-lite.so) - -libmindspore-lite include文件 [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/include.zip) - -libopencv_java4.so [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/opencv%204.4.0/libopencv_java4.so) - -libopencv include文件 [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/opencv%204.4.0/include.zip) - - +MindSpore Lite版本 [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz) ``` android{ @@ -169,23 +159,29 @@ android{ 在`app/CMakeLists.txt`文件中建立`.so`库文件链接,如下所示。 ``` -# Set MindSpore Lite Dependencies. -include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include/MindSpore) +# ============== Set MindSpore Dependencies. ============= +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/third_party/flatbuffers/include) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/include) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/include/ir/dtype) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/include/schema) + add_library(mindspore-lite SHARED IMPORTED ) -set_target_properties(mindspore-lite PROPERTIES - IMPORTED_LOCATION "${CMAKE_SOURCE_DIR}/libs/libmindspore-lite.so") +add_library(minddata-lite SHARED IMPORTED ) -# Set OpenCV Dependecies. 
-include_directories(${CMAKE_SOURCE_DIR}/opencv/sdk/native/jni/include) -add_library(lib-opencv SHARED IMPORTED ) -set_target_properties(lib-opencv PROPERTIES - IMPORTED_LOCATION "${CMAKE_SOURCE_DIR}/libs/libopencv_java4.so") +set_target_properties(mindspore-lite PROPERTIES IMPORTED_LOCATION + ${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/lib/libmindspore-lite.so) +set_target_properties(minddata-lite PROPERTIES IMPORTED_LOCATION + ${CMAKE_SOURCE_DIR}/src/main/cpp/${MINDSPORELITE_VERSION}/lib/libminddata-lite.so) +# --------------- MindSpore Lite set End. -------------------- # Link target library. target_link_libraries( ... - mindspore-lite - lib-opencv + # --- mindspore --- + minddata-lite + mindspore-lite ... ) ``` @@ -218,13 +214,12 @@ target_link_libraries( *labelEnv = labelNet; // Create context. - lite::Context *context = new lite::Context; - context->device_ctx_.type = lite::DT_CPU; - context->thread_num_ = numThread; //Specify the number of threads to run inference + mindspore::lite::Context *context = new mindspore::lite::Context; + context->thread_num_ = num_thread; // Create the mindspore session. - labelNet->CreateSessionMS(modelBuffer, bufferLen, "device label", context); - delete(context); + labelNet->CreateSessionMS(modelBuffer, bufferLen, context); + delete (context); ``` @@ -245,7 +240,7 @@ target_link_libraries( ```cpp // Convert the Bitmap image passed in from the JAVA layer to Mat for OpenCV processing - BitmapToMat(env, srcBitmap, matImageSrc); + BitmapToMat(env, srcBitmap, matImageSrc); // Processing such as zooming the picture size. matImgPreprocessed = PreProcessImageData(matImageSrc); @@ -270,7 +265,38 @@ target_link_libraries( delete[] (dataHWC); ``` -3. 对输入Tensor按照模型进行推理,获取输出Tensor,并进行后处理。 +3. 对输入数据进行处理。 + + ```cpp + bool PreProcessImageData(const LiteMat &lite_mat_bgr, LiteMat *lite_norm_mat_ptr) { + bool ret = false; + LiteMat lite_mat_resize; + LiteMat &lite_norm_mat_cut = *lite_norm_mat_ptr; + ret = ResizeBilinear(lite_mat_bgr, lite_mat_resize, 256, 256); + if (!ret) { + MS_PRINT("ResizeBilinear error"); + return false; + } + LiteMat lite_mat_convert_float; + ret = ConvertTo(lite_mat_resize, lite_mat_convert_float, 1.0 / 255.0); + if (!ret) { + MS_PRINT("ConvertTo error"); + return false; + } + LiteMat lite_mat_cut; + ret = Crop(lite_mat_convert_float, lite_mat_cut, 16, 16, 224, 224); + if (!ret) { + MS_PRINT("Crop error"); + return false; + } + float means[3] = {0.485, 0.456, 0.406}; + float vars[3] = {1.0 / 0.229, 1.0 / 0.224, 1.0 / 0.225}; + SubStractMeanNormalize(lite_mat_cut, lite_norm_mat_cut, means, vars); + return true; + } + ``` + +4. 对输入Tensor按照模型进行推理,获取输出Tensor,并进行后处理。 - 图执行,端测推理。 @@ -281,7 +307,12 @@ target_link_libraries( - 获取输出数据。 ```cpp - auto msOutputs = mSession->GetOutputs(); + auto names = mSession->GetOutputTensorNames(); + std::unordered_map msOutputs; + for (const auto &name : names) { + auto temp_dat =mSession->GetOutputByTensorName(name); + msOutputs.insert(std::pair {name, temp_dat}); + } std::string retStr = ProcessRunnetResult(msOutputs, ret); ``` @@ -290,39 +321,34 @@ target_link_libraries( std::string ProcessRunnetResult(std::unordered_map msOutputs, int runnetRet) { - // Get model output results. - std::unordered_map::iterator iter; - iter = msOutputs.begin(); - auto brach1_string = iter->first; - auto branch1_tensor = iter->second; + std::unordered_map::iterator iter; + iter = msOutputs.begin(); - int OUTPUTS_LEN = branch1_tensor->ElementsNum(); + // The mobilenetv2.ms model output just one branch. 
+ auto outputTensor = iter->second; + int tensorNum = outputTensor->ElementsNum(); - float *temp_scores = static_cast(branch1_tensor->MutableData()); - float scores[RET_CATEGORY_SUM]; - for (int i = 0; i < RET_CATEGORY_SUM; ++i) { - scores[i] = temp_scores[i]; - } + // Get a pointer to the first score. + float *temp_scores = static_cast(outputTensor->MutableData()); - // Converted to text information that needs to be displayed in the APP. - std::string retStr = ""; - if (runnetRet == 0) { - for (int i = 0; i < RET_CATEGORY_SUM; ++i) { - if (scores[i] > 0.3){ - retStr += g_labels_name_map[i]; - retStr += ":"; - std::string score_str = std::to_string(scores[i]); - retStr += score_str; - retStr += ";"; - } - } - else { - MS_PRINT("MindSpore run net failed!"); - for (int i = 0; i < RET_CATEGORY_SUM; ++i) { - retStr += " :0.0;"; - } - } + float scores[RET_CATEGORY_SUM]; + for (int i = 0; i < RET_CATEGORY_SUM; ++i) { + if (temp_scores[i] > 0.5) { + MS_PRINT("MindSpore scores[%d] : [%f]", i, temp_scores[i]); + } + scores[i] = temp_scores[i]; + } - return retStr; + // Score for each category. + // Converted to text information that needs to be displayed in the APP. + std::string categoryScore = ""; + for (int i = 0; i < RET_CATEGORY_SUM; ++i) { + categoryScore += labels_name_map[i]; + categoryScore += ":"; + std::string score_str = std::to_string(scores[i]); + categoryScore += score_str; + categoryScore += ";"; + } + return categoryScore; } ``` diff --git a/lite/tutorials/source_zh_cn/use/benchmark_tool.md b/lite/tutorials/source_zh_cn/use/benchmark_tool.md index 83c6aadc638de8c469b46875c7a1f863e148c539..69d329a0860a786b702463a34342d6634f487274 100644 --- a/lite/tutorials/source_zh_cn/use/benchmark_tool.md +++ b/lite/tutorials/source_zh_cn/use/benchmark_tool.md @@ -63,12 +63,6 @@ Mean bias of all nodes: 0% ======================================================= ``` -原模型输入输出数据类型为uint8时,需要将其减去128再转换为int8类型后才能作为标杆数据验证精度,输出数据类型为int8时需要在参数中指定calibDataType为INT8。 - -```bash -./benchmark --modelPath=./models/test_benchmark_int8.ms --inDataPath=./input/test_benchmark_int8.bin --device=CPU --accuracyThreshold=3 --calibDataPath=./output/test_benchmark_int8.out --calibDataType=INT8 -``` - ## 参数说明 @@ -76,11 +70,10 @@ Mean bias of all nodes: 0% ```bash ./benchmark [--modelPath=] [--accuracyThreshold=] - [--calibDataPath=] [--cpuBindMode=] - [--device=] [--help] [--inDataPath=] - [--inDataType=] [--loopCount=] - [--numThreads=] [--omModelPath=] - [--resizeDims=] [--warmUpLoopCount=] + [--calibDataPath=] [--calibDataType=] + [--cpuBindMode=] [--device=] [--help] + [--inDataPath=] [--loopCount=] + [--numThreads=] [--warmUpLoopCount=] [--fp16Priority=] ``` @@ -91,7 +84,7 @@ Mean bias of all nodes: 0% | `--modelPath=` | 必选 | 指定需要进行基准测试的MindSpore Lite模型文件路径。 | String | null | - | | `--accuracyThreshold=` | 可选 | 指定准确度阈值。 | Float | 0.5 | - | | `--calibDataPath=` | 可选 | 指定标杆数据的文件路径。标杆数据作为该测试模型的对比输出,是该测试模型使用相同输入并由其它深度学习框架前向推理而来。 | String | null | - | -| `--calibDataType=` | 可选 | 指定标杆数据类型。 | String | FLOAT | FLOAT、INT8 | +| `--calibDataType=` | 可选 | 指定标杆数据类型。 | String | FLOAT | FLOAT、INT8、UINT8 | | `--cpuBindMode=` | 可选 | 指定模型推理程序运行时绑定的CPU核类型。 | Integer | 1 | -1:表示中核
1:表示大核
0:表示不绑定 | | `--device=` | 可选 | 指定模型推理程序运行的设备类型。 | String | CPU | CPU、GPU | | `--help` | 可选 | 显示`benchmark`命令的帮助信息。 | - | - | - | diff --git a/lite/tutorials/source_zh_cn/use/converter_tool.md b/lite/tutorials/source_zh_cn/use/converter_tool.md index 1b9ad944df5fa482e4e91a49b80a0234a86cc8f9..122ded8747984115886f42af209ef9847272426a 100644 --- a/lite/tutorials/source_zh_cn/use/converter_tool.md +++ b/lite/tutorials/source_zh_cn/use/converter_tool.md @@ -35,7 +35,7 @@ MindSpore Lite提供离线转换模型功能的工具,支持多种类型的模 ### 使用示例 -首先,在源码根目录下,输入命令进行编译,可参考`build.md`。 +在源码根目录下,输入命令进行编译,可参考`build.md`。 ```bash bash build.sh -I x86_64 ``` @@ -53,7 +53,7 @@ bash build.sh -I x86_64 结果显示为: ``` - INFO [converter/converter.cc:190] Runconverter] CONVERTER RESULT: SUCCESS! + CONVERTER RESULT SUCCESS:0 ``` 这表示已经成功将Caffe模型转化为MindSpore Lite模型,获得新文件`lenet.ms`。 @@ -61,7 +61,7 @@ bash build.sh -I x86_64 - MindSpore模型`model.mindir` ```bash - ./converter_lite --fmk=MS --modelFile=model.mindir --outputFile=model + ./converter_lite --fmk=MINDIR --modelFile=model.mindir --outputFile=model ``` - TensorFlow Lite模型`model.tflite` @@ -79,16 +79,17 @@ bash build.sh -I x86_64 ./converter_lite --fmk=TFLITE --modelFile=model_quant.tflite --outputFile=model --quantType=AwareTraining ``` - - 感知量化模型输入设置为int8,输出设置为int8 + - 感知量化模型输入输出类型设置为float ```bash - ./converter_lite --fmk=TFLITE --modelFile=model_quant.tflite --outputFile=model --quantType=AwareTraining --inputInferenceType=INT8 --inferenceType=INT8 + ./converter_lite --fmk=TFLITE --modelFile=model_quant.tflite --outputFile=model --quantType=AwareTraining --inferenceType=FLOAT ``` 以上几种情况下,均显示如下转换成功提示,且同时获得`model.ms`目标文件。 ``` - INFO [converter/converter.cc:190] Runconverter] CONVERTER RESULT: SUCCESS! + CONVERTER RESULT SUCCESS:0 ``` +- 如果转换命令执行失败,程序会返回一个[错误码](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/errorcode_and_metatype.html)。 > 训练后量化示例请参考。 @@ -101,15 +102,18 @@ MindSpore Lite模型转换工具提供了多种参数设置,用户可根据需 | 参数 | 是否必选 | 参数说明 | 取值范围 | 默认值 | | -------- | ------- | ----- | --- | ---- | | `--help` | 否 | 打印全部帮助信息。 | - | - | -| `--fmk=` | 是 | 输入模型的原始格式。 | MS、CAFFE、TFLITE、ONNX | - | +| `--fmk=` | 是 | 输入模型的原始格式。 | MINDIR、CAFFE、TFLITE、ONNX | - | | `--modelFile=` | 是 | 输入模型的路径。 | - | - | | `--outputFile=` | 是 | 输出模型的路径(不存在时将自动创建目录),不需加后缀,可自动生成`.ms`后缀。 | - | - | | `--weightFile=` | 转换Caffe模型时必选 | 输入模型weight文件的路径。 | - | - | -| `--quantType=` | 否 | 设置模型的量化类型。 | PostTraining:训练后量化
AwareTraining:感知量化。 | - | -|` --inputInferenceType=` | 否 | 设置感知量化模型输入数据类型,如果和原模型不一致则转换工具会在模型前插转换算子,使得转换后的模型输入类型和inputInferenceType保持一致。 | FLOAT、INT8 | FLOAT | -| `--inferenceType=` | 否 | 设置感知量化模型输出数据类型,如果和原模型不一致则转换工具会在模型前插转换算子,使得转换后的模型输出类型和inferenceType保持一致。 | FLOAT、INT8 | FLOAT | +| `--quantType=` | 否 | 设置模型的量化类型。 | WeightQuant:训练后量化(权重量化)
PostTraining:训练后量化(全量化)
AwareTraining:感知量化 | - | +|` --inferenceType=` | 否 | 设置感知量化模型输入输出数据类型,如果和原模型不一致则转换工具会在模型前后插转换算子,使得转换后的模型输入输出类型和inferenceType保持一致。 | UINT8、FLOAT、INT8 | FLOAT | | `--stdDev= `| 否 | 感知量化模型转换时用于设置输入数据的标准差。 | (0,+∞) | 128 | | `--mean=` | 否 | 感知量化模型转换时用于设置输入数据的均值。 | [-128, 127] | -0.5 | +| `--bitNum=` | 否 | 设定训练后量化(权重量化)的比特数,目前仅支持8bit量化 | 8 | 8 | +| `--quantSize=` | 否 | 设定参与训练后量化(权重量化)的卷积核尺寸阈值,若卷积核尺寸大于该值,则对此权重进行量化 | (0,+∞) | 0 | +| `--convWeightQuantChannelThreshold=` | 否 | 设定参与训练后量化(权重量化)的卷积通道数阈值,若卷积通道数大于该值,则对此权重进行量化 | (0,+∞) | 16 | +| `--config_file=` | 否 | 训练后量化(全量化)校准数据集配置文件路径 | - | - | > - 参数名和参数值之间用等号连接,中间不能有空格。 > - Caffe模型一般分为两个文件:`*.prototxt`模型结构,对应`--modelFile`参数;`*.caffemodel`模型权值,对应`--weightFile`参数。 @@ -120,9 +124,7 @@ MindSpore Lite模型转换工具提供了多种参数设置,用户可根据需 使用MindSpore Lite模型转换工具,需要进行如下环境准备工作。 -- 编译:模型转换工具代码在MindSpore源码的`mindspore/lite/tools/converter`目录中,参考部署文档中的[环境要求](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id5)和[编译示例](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id7)编译Windows版本。 - -- 运行:参考部署文档中的[编译输出](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id8),获得`converter`工具,,并配置MinGW环境变量(在系统变量Path里添加MinGW的bin目录)。 +- 获取工具包:下载Windows转换工具的Zip包并解压至本地目录,获得`converter`工具。 ### 参数说明 @@ -130,12 +132,7 @@ MindSpore Lite模型转换工具提供了多种参数设置,用户可根据需 ### 使用示例 -首先,使用cmd工具在源码根目录下,输入命令进行编译,可参考`build.md`。 -```bash -call build.bat lite -``` - -然后,设置日志打印级别为INFO。 +首先,设置日志打印级别为INFO。 ```bash set MSLOG=INFO ``` @@ -152,7 +149,7 @@ set MSLOG=INFO 结果显示为: ``` - INFO [converter/converter.cc:190] Runconverter] CONVERTER RESULT: SUCCESS! + CONVERTER RESULT SUCCESS:0 ``` 这表示已经成功将Caffe模型转化为MindSpore Lite模型,获得新文件`lenet.ms`。 @@ -160,7 +157,7 @@ set MSLOG=INFO - MindSpore模型`model.mindir` ```bash - call converter_lite --fmk=MS --modelFile=model.mindir --outputFile=model + call converter_lite --fmk=MINDIR --modelFile=model.mindir --outputFile=model ``` - TensorFlow Lite模型`model.tflite` @@ -180,5 +177,6 @@ set MSLOG=INFO 以上几种情况下,均显示如下转换成功提示,且同时获得`model.ms`目标文件。 ``` - INFO [converter/converter.cc:190] Runconverter] CONVERTER RESULT: SUCCESS! + CONVERTER RESULT SUCCESS:0 ``` +- 如果转换命令执行失败,程序会返回一个[错误码](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/errorcode_and_metatype.html)。 diff --git a/lite/tutorials/source_zh_cn/use/image_processing.md b/lite/tutorials/source_zh_cn/use/image_processing.md new file mode 100644 index 0000000000000000000000000000000000000000..139ad867a98de40dc0534a9c8ca74c3ce58b6c79 --- /dev/null +++ b/lite/tutorials/source_zh_cn/use/image_processing.md @@ -0,0 +1,149 @@ +# 预处理图像数据 + + + +- [预处理图像数据](#预处理图像数据) + - [概述](#概述) + - [导入图像预处理函数的库](#导入图像预处理函数的库) + - [对图像进行初始化](#对图像进行初始化) + - [使用示例](#使用示例) + - [可选的图像预处理算子](#可选的图像预处理算子) + - [对图像进行缩放操作](#对图像进行缩放操作) + - [使用示例](#使用示例-1) + - [对图像数据类型进行转换](#对图像数据类型进行转换) + - [使用示例](#使用示例-2) + - [对图像数据进行裁剪](#对图像数据进行裁剪) + - [使用示例](#使用示例-3) + - [对图像数据进行归一化处理](#对图像数据进行归一化处理) + - [使用示例](#使用示例-4) + + + +## 概述 + +图像预处理的主要目的是消除图像中无关的信息,恢复有用的真实信息,增强有关信息的可检测性和最大限度地简化数据,从而改进特征抽取、图像分割、匹配和识别的可靠性。此处是通过创建LiteMat对象,在推理前对图像数据进行处理,达到模型推理所需要的数据格式要求。 + +流程如下: + +## 导入图像预处理函数的库 + +``` +#include "lite_cv/lite_mat.h" +#include "lite_cv/image_process.h" +``` + +## 对图像进行初始化 + +这边使用的是`image_process.h`文件中的[InitFromPixel](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/dataset.html#initfrompixel)函数对图像进行初始化操作。 + +``` +bool InitFromPixel(const unsigned char *data, LPixelType pixel_type, LDataType data_type, int w, int h, LiteMat &m); +``` + +### 使用示例 + +``` +// Create the data object of the LiteMat object. 
+LiteMat lite_mat_bgr; + +// Initialize the lite_mat_bgr object. +// The image data pointer passed in by the user (The data in the Bitmap corresponding to the Android platform). +InitFromPixel(pixel_ptr, LPixelType::RGBA2GRAY, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); +``` + +## 可选的图像预处理算子 + +此处的图像处理算子,用户可以根据实际情况任意搭配使用。 + +### 对图像进行缩放操作 + +这边利用的是`image_process.h`中的[ResizeBilinear](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/dataset.html#resizebilinear)函数通过双线性算法调整图像大小,当前仅支持的数据类型为uint8,当前支持的通道为3和1。 + +``` +bool ResizeBilinear(const LiteMat &src, LiteMat &dst, int dst_w, int dst_h); +``` + +#### 使用示例 + +``` +// Initialize the image data. +LiteMat lite_mat_bgr; +InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); + +// Create a resize image data object. +LiteMat lite_mat_resize; + +// Resize the image. +ResizeBilinear(lite_mat_bgr, lite_mat_resize, 256, 256); +``` + +### 对图像数据类型进行转换 + +这边利用的是`image_process.h`中的[ConvertTo](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/dataset.html#convertto)函数对图像数据类型进行转换,目前支持的转换是将uint8转换为float。 + +``` +bool ConvertTo(const LiteMat &src, LiteMat &dst, double scale = 1.0); +``` + +#### 使用示例 + +``` +// Initialize the image data. +LiteMat lite_mat_bgr; +InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); + +// Create the converted data type object. +LiteMat lite_mat_convert_float; + +// Perform conversion type operations on the object. The currently supported conversion is to convert uint8 to float. +ConvertTo(lite_mat_bgr, lite_mat_convert_float); +``` + +### 对图像数据进行裁剪 + +这边利用的是`image_process.h`中的[Crop](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/dataset.html#crop)函数对图像进行裁剪,目前支持通道3和1。 + +``` +bool Crop(const LiteMat &src, LiteMat &dst, int x, int y, int w, int h); +``` + +#### 使用示例 + +``` +// Initialize the image data. +LiteMat lite_mat_bgr; +InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); + +// Create the cropped object. +LiteMat lite_mat_cut; + +// The image is cropped by the values of x, y, w, h. +Crop(lite_mat_bgr, lite_mat_cut, 16, 16, 224, 224); +``` + +### 对图像数据进行归一化处理 + +为了消除数据指标之间的量纲影响,通过标准化处理来解决数据指标之间的可比性问题,这边利用的是`image_process.h`中的[SubStractMeanNormalize](https://www.mindspore.cn/lite/docs/zh-CN/master/apicc/dataset.html#substractmeannormalize)函数对图像数据进行归一化处理。 + +``` +bool SubStractMeanNormalize(const LiteMat &src, LiteMat &dst, float *mean, float *norm); +``` + +#### 使用示例 + +``` +// Initialize the image data. +LiteMat lite_mat_bgr; +InitFromPixel(rgba_mat.data, LPixelType::RGBA2BGR, LDataType::UINT8, rgba_mat.cols, rgba_mat.rows, lite_mat_bgr); + +// The mean value of the image data. +// The variance of the image data. +float means[1] = {0.485}; +float norm[1] = {1.0 / 0.229}; + +// Create a normalized image object. +LiteMat lite_mat_bgr_norm; + +// The image data is normalized by the mean value and variance of the image data. 
+SubStractMeanNormalize(lite_mat_bgr, lite_mat_bgr_norm, means, norm); +``` \ No newline at end of file diff --git a/lite/tutorials/source_zh_cn/use/post_training_quantization.md b/lite/tutorials/source_zh_cn/use/post_training_quantization.md index 839a7347ac9387f3b7de95852484447a65f1f75c..edf2be8f7910701a6a3a14d055b3dac7ee3ab626 100644 --- a/lite/tutorials/source_zh_cn/use/post_training_quantization.md +++ b/lite/tutorials/source_zh_cn/use/post_training_quantization.md @@ -4,9 +4,15 @@ - [训练后量化](#训练后量化) - [概述](#概述) - - [使用示例](#使用示例) - - [部分模型精度结果](#部分模型精度结果) - - [参数说明](#参数说明) + - [权重量化](#权重量化) + - [参数说明](#参数说明) + - [使用步骤](#使用步骤) + - [部分模型精度结果](#部分模型精度结果) + - [全量化](#全量化) + - [参数说明](#参数说明-1) + - [使用步骤](#使用步骤-1) + - [部分模型精度结果](#部分模型精度结果-1) + @@ -14,14 +20,84 @@ ## 概述 -对于已经训练好的`float32`模型,通过训练后量化将模型转为`int8`模型,不仅能减小模型大小,而且能显著提高推理性能。在MindSpore端侧框架中,这部分功能集成在模型转换工具`conveter_lite`中,通过增加命令行参数,便能够转换得到量化后模型。 +对于已经训练好的`float32`模型,通过训练后量化将其转为`int8`,不仅能减小模型大小,而且能显著提高推理性能。在MindSpore Lite中,这部分功能集成在模型转换工具`conveter_lite`内,通过增加命令行参数,便能够转换得到量化后模型。 目前训练后量化属于alpha阶段(支持部分网络,不支持多输入模型),正在持续完善中。 +MindSpore Lite训练后量化分为两类: +1. 权重量化:单独对模型的权值进行量化; +2. 全量化:对模型的权值、激活值、bias值统一进行量化。 + +训练后量化在两种情况下所需的数据类型和参数设定不同,但均可通过转换工具设定。有关转换工具`converter_lite`的使用方法可参考[转换为MindSpore Lite模型](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/converter_tool.html)。在此基础之上进行配置,启用训练后量化。 + +## 权重量化 + +下面对权重量化的使用方式和效果进行阐述。 + +### 参数说明 + +权重量化转换命令的一般形式为: +``` +./converter_lite --fmk=ModelType --modelFile=ModelFilePath --outputFile=ConvertedModelPath --quantType=WeightQuant --bitNum=BitNumValue --quantSize=QuantizationSizeThresholdValue --convWeightQuantChannelThreshold=ConvWeightQuantChannelThresholdValue +``` +下面对此命令的量化相关参数进行说明: + +| 参数 | 属性 | 功能描述 | 参数类型 | 默认值 | 取值范围 | +| -------- | ------- | ----- | ----- |----- | ----- | +| `--quantType=` | 必选 | 设置为WeightQuant,启用权重量化 | String | - | 必须设置为WeightQuant | +| `--bitNum=` | 可选 | 设定权重量化的比特数,目前仅支持8bit量化 | Integer | 8 | 8 | +| `--quantSize=` | 可选 | 设定参与权重量化的卷积核尺寸阈值,若卷积核尺寸大于该值,则对此权重进行量化;建议设置为500 | Integer | 0 | (0,+∞) | +| `--convWeightQuantChannelThreshold=` | 可选 | 设定参与权重量化的卷积通道数阈值,若卷积通道数大于该值,则对此权重进行量化;建议设置为16 | Integer | 16 | (0,+∞) | + +用户可根据模型及自身需要对权重量化的参数作出调整。 + + +### 使用步骤 + +1. 正确编译出`converter_lite`可执行文件。该部分可参考构建文档[编译MindSpore Lite](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html),获得`converter_lite`工具,并配置环境变量。 +2. 以TensorFlow Lite模型为例,执行权重量化模型转换命令: + ``` + ./converter_lite --fmk=TFLITE --modelFile=Inception_v3.tflite --outputFile=Inception_v3.tflite --quantType=WeightQuant --bitNum=8 --quantSize=0 --convWeightQuantChannelThreshold=0 + ``` +3. 
上述命令执行成功后,便可得到量化后的模型`Inception_v3.tflite.ms`,量化后的模型大小通常会下降到FP32模型的1/4。 + +### 部分模型精度结果 + + | 模型 | 测试数据集 | FP32模型精度 | 权重量化精度 | + | -------- | ------- | ----- | ----- | + | [Inception_V3](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz) | [ImageNet](http://image-net.org/) | 77.92% | 77.84% | + | [Mobilenet_V1_1.0_224](https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz) | [ImageNet](http://image-net.org/) | 70.96% | 70.56% | + +> 以上所有结果均在x86环境上测得。 + +## 全量化 + +下面对全量化的使用方式和效果进行阐述。 + +### 参数说明 + +全量化转换命令的一般形式为: ``` ./converter_lite --fmk=ModelType --modelFile=ModelFilePath --outputFile=ConvertedModelPath --quantType=PostTraining --config_file=config.cfg ``` +下面对此命令的量化相关参数进行说明: -## 使用示例 +| 参数 | 属性 | 功能描述 | 参数类型 | 默认值 | 取值范围 | +| -------- | ------- | ----- | ----- |----- | ----- | +| `--quantType=` | 必选 | 设置为PostTraining,启用全量化 | String | - | 必须设置为PostTraining | +| `--config_file=` | 必选 | 校准数据集配置文件路径 | String | - | - | + +为了计算激活值的量化参数,用户需要提供校准数据集。校准数据集最好来自真实推理场景,能表征模型的实际输入情况,数量在100个左右。 +校准数据集配置文件采用`key=value`的方式定义相关参数,需要配置的`key`如下: + +| 参数名 | 属性 | 功能描述 | 参数类型 | 默认值 | 取值范围 | +| -------- | ------- | ----- | ----- | ----- | ----- | +| image_path | 必选 | 存放校准数据集的目录 | String | - | 该目录存放可直接用于执行推理的输入数据。由于目前框架还不支持数据预处理,所有数据必须事先完成所需的转换,使得它们满足推理的输入要求。 | +| batch_count | 可选 | 使用的输入数目 | Integer | 100 | (0,+∞) | +| method_x | 可选 | 网络层输入输出数据量化算法 | String | KL | KL,MAX_MIN。 KL: 基于[KL散度](http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf)对数据范围作量化校准; MAX_MIN:基于最大值、最小值计算数据的量化参数。 在模型以及数据集比较较简单的情况下,推荐使用MAX_MIN | +| thread_num | 可选 | 使用校准数据集执行推理流程时的线程数 | Integer | 1 | (0,+∞) | + + +### 使用步骤 1. 正确编译出`converter_lite`可执行文件。 2. 准备校准数据集,假设存放在`/dir/images`目录,编写配置文件`config.cfg`,内容如下: @@ -32,34 +108,17 @@ thread_num=1 ``` 校准数据集可以选择测试数据集的子集,要求`/dir/images`目录下存放的每个文件均是预处理好的输入数据,每个文件都可以直接用于推理的输入。 -3. 以MindSpore模型为例,执行带训练后量化的模型转换命令: +3. 以MindSpore模型为例,执行全量化的模型转换命令: ``` - ./converter_lite --fmk=MS --modelFile=lenet.ms --outputFile=lenet_quant --quantType=PostTraining --config_file=config.cfg + ./converter_lite --fmk=MINDIR --modelFile=lenet.mindir --outputFile=lenet_quant --quantType=PostTraining --config_file=config.cfg ``` -4. 上述命令执行成功后,便可得到量化后的模型lenet_quant.ms,通常量化后的模型大小会下降到FP32模型的1/4。 +4. 
上述命令执行成功后,便可得到量化后的模型`lenet_quant.ms`,通常量化后的模型大小会下降到FP32模型的1/4。 -## 部分模型精度结果 +### 部分模型精度结果 - | 模型 | 测试数据集 | method_x | FP32模型精度 | 训练后量化精度 | 说明 | + | 模型 | 测试数据集 | method_x | FP32模型精度 | 全量化精度 | 说明 | | -------- | ------- | ----- | ----- | ----- | ----- | | [Inception_V3](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz) | [ImageNet](http://image-net.org/) | KL | 77.92% | 77.95% | 校准数据集随机选择ImageNet Validation数据集中的100张 | | [Mobilenet_V1_1.0_224](https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz) | [ImageNet](http://image-net.org/) | KL | 70.96% | 70.69% | 校准数据集随机选择ImageNet Validation数据集中的100张 | > 以上所有结果均在x86环境上测得。 - -## 参数说明 - -| 参数 | 属性 | 功能描述 | 参数类型 | 默认值 | 取值范围 | -| -------- | ------- | ----- | ----- |----- | ----- | -| --quantType | 必选 | 设置为PostTraining,启用训练后量化 | String | - | 必须设置为PostTraining | -| --config_file | 必选 | 校准数据集配置文件路径 | String | - | - | - -为了计算激活值的量化参数,用户需要提供校准数据集。校准数据集最好来自真实推理场景,能表征模型的实际输入情况,数量在100个左右。 -校准数据集配置文件采用`key=value`的方式定义相关参数,需要配置的`key`如下: - -| 参数名 | 属性 | 功能描述 | 参数类型 | 默认值 | 取值范围 | -| -------- | ------- | ----- | ----- | ----- | ----- | -| image_path | 必选 | 存放校准数据集的目录 | String | - | 该目录存放可直接用于执行推理的输入数据。由于目前框架还不支持数据预处理,所有数据必须事先完成所需的转换,使得它们满足推理的输入要求。 | -| batch_count | 可选 | 使用的输入数目 | Integer | 100 | 大于0 | -| method_x | 可选 | 网络层输入输出数据量化算法 | String | KL | KL,MAX_MIN。 KL: 基于[KL散度](http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf)对数据范围作量化校准; MAX_MIN:基于最大值、最小值计算数据的量化参数。 在模型以及数据集比较较简单的情况下,推荐使用MAX_MIN | -| thread_num | 可选 | 使用校准数据集执行推理流程时的线程数 | Integer | 1 | 大于0 | \ No newline at end of file diff --git a/lite/tutorials/source_zh_cn/use/runtime.md b/lite/tutorials/source_zh_cn/use/runtime.md index 2ba5ab7bad1af9f591d3e7c7a2b2f92a18953c25..942784e896b537f2817907c2217ec190b9d55e7d 100644 --- a/lite/tutorials/source_zh_cn/use/runtime.md +++ b/lite/tutorials/source_zh_cn/use/runtime.md @@ -28,6 +28,10 @@ - [使用示例](#使用示例-5) - [获取版本号](#获取版本号) - [使用示例](#使用示例-6) + - [Session并行](#Session并行) + - [单Session并行](#单session并行) + - [多Session并行](#多session并行) + - [使用示例](#使用示例-7) @@ -49,7 +53,7 @@ Runtime总体使用流程如下图所示: - `Operator`:算子原型,包含算子的属性,以及shape、data type和format的推导方法。 - `Kernel`:算子库提供算子的具体实现,提供算子forward的能力。 - `Tensor`:MindSpore Lite使用的Tensor,提供了Tensor内存操作的功能和接口。 - + ## 读取模型 在MindSpore Lite中,模型文件是从模型转换工具转换得到的`.ms`文件。进行模型推理时,需要从文件系统加载模型,并进行模型解析,这部分操作主要在Model中实现。Model持有权重数据、算子属性等模型数据。 @@ -76,66 +80,16 @@ static Model *Import(const char *model_buf, size_t size); MindSpore Lite支持异构推理,推理时的主选后端由`Context`中的`device_ctx_`指定,默认为CPU。在进行图编译时,会根据主选后端进行算子选型调度。 -```cpp -/// \brief DeviceType defined for holding user's preferred backend. -typedef enum { - DT_CPU, /**< CPU device type */ - DT_GPU, /**< GPU device type */ - DT_NPU /**< NPU device type, not supported yet */ -} DeviceType; - -/// \brief DeviceContext defined for holding DeviceType. -typedef struct { - DeviceType type; /**< device type */ -} DeviceContext; - -DeviceContext device_ctx_{DT_CPU}; -``` - MindSpore Lite内置一个进程共享的线程池,推理时通过`thread_num_`指定线程池的最大线程数,默认为2线程,推荐最多不超过4个线程,否则可能会影响性能。 -```cpp -int thread_num_ = 2; /**< thread number config for thread pool */ -``` - MindSpore Lite支持动态内存分配和释放,如果没有指定`allocator`,推理时会生成一个默认的`allocator`,也可以通过`Context`方法在多个`Context`中共享内存分配器。 如果用户通过`new`创建`Context`,不再需要时,需要用户通过`delete`释放。一般在创建完Session后,Context即可释放。 -```cpp -/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically. 
-/// -/// \note List public class and interface for reference. -class Allocator; - -/// \brief Context defined for holding environment variables during runtime. -class MS_API Context { - public: - /// \brief Constructor of MindSpore Lite Context using input value for parameters. - /// - /// \param[in] thread_num Define the work thread number during the runtime. - /// \param[in] allocator Define the allocator for malloc. - /// \param[in] device_ctx Define device information during the runtime. - Context(int thread_num, std::shared_ptr allocator, DeviceContext device_ctx); - - public: - std::shared_ptr allocator = nullptr; -} -``` - ### 创建会话 用上一步创建得到的`Context`,调用LiteSession的静态`CreateSession`方法来创建`LiteSession`。函数返回的`LiteSession`实例是一个指针,通过`new`创建,不再需要时,需要用户通过`delete`释放。 -```cpp -/// \brief Static method to create a LiteSession pointer. -/// -/// \param[in] context Define the context of session to be created. -/// -/// \return Pointer of MindSpore Lite LiteSession. -static LiteSession *CreateSession(lite::Context *context); -``` - ### 使用示例 下面示例代码演示了`Context`的创建,以及在两个`LiteSession`间共享内存池的功能: @@ -147,13 +101,16 @@ if (context == nullptr) { return RET_ERROR; } // The preferred backend is GPU, which means, if there is a GPU operator, it will run on the GPU first, otherwise it will run on the CPU. -context->device_ctx_.type = lite::DT_GPU; +context->device_type_ = lite::DT_GPU; // The medium core takes priority in thread and core binding methods. This parameter will work in the BindThread interface. For specific binding effect, see the "Run Graph" section. context->cpu_bind_mode_ = MID_CPU; -// Configure the number of worker threads in the thread pool to 2, including the main thread. +// Configure the number of worker threads in the thread pool to 2, including the main thread. context->thread_num_ = 2; // Allocators can be shared across multiple Contexts. -auto *context2 = new Context(context->thread_num_, context->allocator, context->device_ctx_); +auto *context2 = new Context(); +context2->thread_num_ = context->thread_num_; +context2->allocator = context->allocator; +context2->device_type_ = context->device_type_; context2->cpu_bind_mode_ = context->cpu_bind_mode_; // Use Context to create Session. auto session1 = session::LiteSession::CreateSession(context); @@ -166,7 +123,7 @@ if (session1 == nullptr) { // session1 and session2 can share one memory pool. auto session2 = session::LiteSession::CreateSession(context2); delete (context2); -if (session == nullptr) { +if (session2 == nullptr) { MS_LOG(ERROR) << "CreateSession failed while running %s", modelName.c_str(); return RET_ERROR; } @@ -178,19 +135,7 @@ if (session == nullptr) { 使用MindSpore Lite进行推理时,在已完成会话创建与图编译之后,如果需要对输入的shape进行Resize,则可以通过对输入的tensor重新设置shape,然后调用session的Resize()接口。 -```cpp -/// \brief Get input MindSpore Lite MSTensors of model. -/// -/// \return The vector of MindSpore Lite MSTensor. -virtual std::vector GetInputs() const = 0; - -/// \brief Resize inputs shape. -/// -/// \param[in] inputs Define the new inputs shape. -/// -/// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h. -virtual int Resize(const std::vector &inputs) = 0; -``` +> 某些网络是不支持可变维度,会提示错误信息后异常退出,比如,模型中有MatMul算子,并且MatMul的一个输入Tensor是权重,另一个输入Tensor是输入时,调用可变维度接口会导致输入Tensor和权重Tensor的Shape不匹配,最终导致推理失败。 ### 使用示例 @@ -199,9 +144,10 @@ virtual int Resize(const std::vector &inputs) = 0; // Assume we have created a LiteSession instance named session. 
auto inputs = session->GetInputs(); std::vector resize_shape = {1, 128, 128, 3}; +std::vector> new_shapes; +new_shapes.push_back(resize_shape); // Assume the model has only one input,resize input shape to [1, 128, 128, 3] -inputs[0]->set_shape(resize_shape); -session->Resize(inputs); +session->Resize(inputs, new_shapes); ``` ### 图编译 @@ -321,14 +267,6 @@ memcpy(in_data, input_buf, data_size); MindSpore Lite会话在进行图编译以后,即可使用`LiteSession`的`RunGraph`进行模型推理。 ```cpp -/// \brief Run session with callback. -/// -/// \param[in] before Define a call_back_function to be called before running each node. -/// \param[in] after Define a call_back_function to be called after running each node. -/// -/// \note RunGraph should be called after CompileGraph. -/// -/// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h. virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0; ``` @@ -503,16 +441,16 @@ virtual void *MutableData() const = 0; ### 使用示例 -下面示例代码演示了使用`GetOutputMapByNode`接口获取输出`MSTensor`,并打印了每个输出`MSTensor`的前十个数据或所有数据: +下面示例代码演示了使用`GetOutputs`接口获取输出`MSTensor`,并打印了每个输出`MSTensor`的前十个数据或所有数据: ```cpp // Assume we have created a LiteSession instance named session before. -auto output_map = session->GetOutputMapByNode(); +auto output_map = session->GetOutputs(); // Assume that the model has only one output node. auto out_node_iter = output_map.begin(); std::string name = out_node_iter->first; // Assume that the unique output node has only one output tensor. -auto out_tensor = out_node_iter->second.front(); +auto out_tensor = out_node_iter->second; if (out_tensor == nullptr) { std::cerr << "Output tensor is nullptr" << std::endl; return -1; @@ -527,7 +465,7 @@ if (out_data == nullptr) { std::cerr << "Data of out_tensor is nullptr" << std::endl; return -1; } -// Print the first 10 float data or all output data of the output tensor. +// Print the first 10 float data or all output data of the output tensor. std::cout << "Output data: "; for (size_t i = 0; i < 10 && i < out_tensor->ElementsNum(); i++) { std::cout << " " << out_data[i]; @@ -536,7 +474,7 @@ std::cout << std::endl; // The elements in outputs do not need to be free by users, because outputs are managed by the MindSpore Lite. ``` -需要注意的是,`GetOutputsByNodeName`、`GetOutputMapByNode`、`GetOutputByTensorName`和`GetOutputMapByTensor`方法返回的vector或map不需要用户释放。 +需要注意的是,`GetOutputsByNodeName`、`GetOutputByTensorName`和`GetOutputs`方法返回的vector或map不需要用户释放。 下面示例代码演示了使用`GetOutputsByNodeName`接口获取输出`MSTensor`的方法: @@ -552,28 +490,16 @@ if (out_tensor == nullptr) { } ``` -下面示例代码演示了使用`GetOutputMapByTensor`接口获取输出`MSTensor`的方法: - -```cpp -// Assume we have created a LiteSession instance named session before. -auto output_map = session->GetOutputMapByTensor(); -// Assume that output node named output_node_name_0 has only one output tensor. -auto out_tensor = output_vec.front(); -if (out_tensor == nullptr) { - std::cerr << "Output tensor is nullptr" << std::endl; - return -1; -} -``` - 下面示例代码演示了使用`GetOutputByTensorName`接口获取输出`MSTensor`的方法: ```cpp +// Assume we have created a LiteSession instance named session. // We can use GetOutputTensorNames method to get all name of output tensor of model which is in order. -auto tensor_names = this->GetOutputTensorNames(); +auto tensor_names = session->GetOutputTensorNames(); // Assume we have created a LiteSession instance named session before. 
// Use output tensor name returned by GetOutputTensorNames as key for (auto tensor_name : tensor_names) { - auto out_tensor = this->GetOutputByTensorName(tensor_name); + auto out_tensor = session->GetOutputByTensorName(tensor_name); if (out_tensor == nullptr) { std::cerr << "Output tensor is nullptr" << std::endl; return -1; @@ -589,5 +515,114 @@ MindSpore Lite提供了`Version`方法可以获取版本号,包含在`include/ 下面代码演示如何获取MindSpore Lite的版本号: ```cpp #include "include/version.h" -std::string version = mindspore::lite::Version(); +std::string version = mindspore::lite::Version(); +``` + +## Session并行 +MindSpore Lite支持多个`LiteSession`并行推理,但不支持多个线程同时调用单个`LiteSession`的`RunGraph`接口。 + +### 单Session并行 + +MindSpore Lite不支持多线程并行执行单个`LiteSession`的推理,否则会得到以下错误信息: +```cpp +ERROR [mindspore/lite/src/lite_session.cc:297] RunGraph] 10 Not support multi-threading +``` + +### 多Session并行 + +MindSpore Lite支持多个`LiteSession`同时进行推理的场景,每个`LiteSession`的线程池和内存池都是独立的。 + +### 使用示例 + +下面代码演示了如何创建多个`LiteSession`,并且并行执行推理的过程: +```cpp +#include +#include "src/common/file_utils.h" +#include "include/model.h" +#include "include/version.h" +#include "include/context.h" +#include "include/lite_session.h" + +mindspore::session::LiteSession *GenerateSession(mindspore::lite::Model *model) { + if (model == nullptr) { + std::cerr << "Read model file failed while running" << std::endl; + return nullptr; + } + auto context = new (std::nothrow) mindspore::lite::Context; + if (context == nullptr) { + std::cerr << "New context failed while running" << std::endl; + return nullptr; + } + + auto session = mindspore::session::LiteSession::CreateSession(context); + delete (context); + if (session == nullptr) { + std::cerr << "CreateSession failed while running" << std::endl; + return nullptr; + } + auto ret = session->CompileGraph(model); + if (ret != mindspore::lite::RET_OK) { + std::cout << "CompileGraph failed while running" << std::endl; + delete (session); + return nullptr; + } + auto msInputs = session->GetInputs(); + for (auto msInput : msInputs) { + (void)msInput->MutableData(); + } + return session; +} + +int main(int argc, const char **argv) { + size_t size = 0; + char *graphBuf = mindspore::lite::ReadFile("test.ms", &size); + if (graphBuf == nullptr) { + std::cerr << "Read model file failed while running" << std::endl; + return -1; + } + auto model = mindspore::lite::Model::Import(graphBuf, size); + if (model == nullptr) { + std::cerr << "Import model file failed while running" << std::endl; + delete[](graphBuf); + return -1; + } + delete[](graphBuf); + auto session1 = GenerateSession(model); + if (session1 == nullptr) { + std::cerr << "Generate session 1 failed" << std::endl; + delete(model); + return -1; + } + auto session2 = GenerateSession(model); + if (session2 == nullptr) { + std::cerr << "Generate session 2 failed" << std::endl; + delete(model); + return -1; + } + + std::thread thread1([&](){ + auto status = session1->RunGraph(); + if (status != 0) { + std::cerr << "Inference error " << status << std::endl; + return; + } + std::cout << "Session1 inference success" << std::endl; + }); + + std::thread thread2([&](){ + auto status = session2->RunGraph(); + if (status != 0) { + std::cerr << "Inference error " << status << std::endl; + return; + } + std::cout << "Session2 inference success" << std::endl; + }); + + thread1.join(); + thread2.join(); + delete (session1); + delete (session2); + delete (model); + return 0; +} ``` diff --git a/lite/tutorials/source_zh_cn/use/timeprofiler_tool.md b/lite/tutorials/source_zh_cn/use/timeprofiler_tool.md 
index fbe404c17898439bb7659b9d2e5afaf841dbf5be..7c7a60576bf95fb081f42b20dfbecef92646ad02 100644 --- a/lite/tutorials/source_zh_cn/use/timeprofiler_tool.md +++ b/lite/tutorials/source_zh_cn/use/timeprofiler_tool.md @@ -20,16 +20,16 @@ 使用TimeProfiler工具,需要进行如下环境准备工作。 -- 编译:TimeProfiler工具代码在MindSpore源码的`mindspore/lite/tools/time_profile`目录中,参考构建文档中的[环境要求](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id1)和[编译示例](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id3)执行编译。 +- 编译:TimeProfiler工具代码在MindSpore源码的`mindspore/lite/tools/time_profiler`目录中,参考构建文档中的[环境要求](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id1)和[编译示例](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id3)执行编译。 -- 运行:参考部署文档中的[编译输出](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id4),获得`timeprofile`工具,并配置环境变量。 +- 运行:参考部署文档中的[编译输出](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html#id4),获得`timeprofiler`工具,并配置环境变量。 ## 使用示例 使用TimeProfiler对`test_timeprofiler.ms`模型的网络层进行耗时分析,并且设置模型推理循环运行次数为10,则其命令代码如下: ```bash -./timeprofile --modelPath=./models/test_timeprofiler.ms --loopCount=10 +./timeprofiler --modelPath=./models/test_timeprofiler.ms --loopCount=10 ``` 该条命令执行后,TimeProfiler工具会输出模型网络层运行耗时的相关统计信息。对于本例命令,输出的统计信息如下。其中统计信息按照`opName`和`optype`两种划分方式分别显示,`opName`表示算子名,`optype`表示算子类别,`avg`表示该算子的平均单次运行时间,`percent`表示该算子运行耗时占所有算子运行总耗时的比例,`calledTimess`表示该算子的运行次数,`opTotalTime`表示该算子运行指定次数的总耗时。最后,`total time`和`kernel cost`分别显示了该模型单次推理的平均耗时和模型推理中所有算子的平均耗时之和。 @@ -77,7 +77,7 @@ total time : 2.90800 ms, kernel cost : 2.74851 ms 使用编译好的TimeProfiler工具进行模型网络层耗时分析时,其命令格式如下所示。 ```bash -./timeprofile --modelPath= [--help] [--loopCount=] [--numThreads=] [--cpuBindMode=] [--inDataPath=] [--fp16Priority=] +./timeprofiler --modelPath= [--help] [--loopCount=] [--numThreads=] [--cpuBindMode=] [--inDataPath=] [--fp16Priority=] ``` 下面提供详细的参数说明。 diff --git a/resource/api_mapping.md b/resource/api_mapping.md index 0eed65d611cd8eaba77c9f0804c892e8c2913d4e..3f4a30cd61ed62dafe076ec22402dadeacff1809 100644 --- a/resource/api_mapping.md +++ b/resource/api_mapping.md @@ -36,7 +36,7 @@ Mapping between PyTorch APIs and MindSpore APIs, which is provided by the commun | torch.expm1 | mindspore.ops.operations.Expm1 | | torch.eye | mindspore.ops.operations.Eye | | torch.flatten | mindspore.ops.operations.Flatten | -| torch.flip | mindspore.ops.operations.ReverseV2 +| torch.flip | mindspore.ops.operations.ReverseV2 | | torch.floor | mindspore.ops.operations.Floor | | torch.fmod | mindspore.ops.operations.Mod | | torch.linspace | mindspore.nn.LinSpace | @@ -167,13 +167,13 @@ Mapping between PyTorch APIs and MindSpore APIs, which is provided by the commun | torch.utils.data.distributed.DistributedSampler | mindspore.dataset.DistributedSampler | | torch.zeros | mindspore.ops.operations.ZerosLike | | torch.zeros_like | mindspore.ops.operations.ZerosLike | -| torchvision.datasets.ImageFolder | mindspore.dataset.ImageFolderDatasetV2 | +| torchvision.datasets.ImageFolder | mindspore.dataset.ImageFolderDataset | | torchvision.ops.nms | mindspore.ops.operations.NMSWithMask | | torchvision.ops.roi_align | mindspore.ops.operations.ROIAlign | -| torchvision.transforms.CenterCrop | mindspore.dataset.vision.py_transforms.CenterCrop | -| torchvision.transforms.ColorJitter | mindspore.dataset.vision.py_transforms.RandomColorAdjust | -| torchvision.transforms.Compose | mindspore.dataset.vision.py_transforms.Compose | -| torchvision.transforms.Normalize | mindspore.dataset.vision.py_transforms.Normalize | -| 
torchvision.transforms.RandomHorizontalFlip | mindspore.dataset.vision.py_transforms.RandomHorizontalFlip | -| torchvision.transforms.Resize | mindspore.dataset.vision.py_transforms.Resize | -| torchvision.transforms.ToTensor | mindspore.dataset.vision.py_transforms.ToTensor | +| torchvision.transforms.CenterCrop | mindspore.dataset.vision.py_transforms.CenterCrop | +| torchvision.transforms.ColorJitter | mindspore.dataset.vision.py_transforms.RandomColorAdjust | +| torchvision.transforms.Compose | mindspore.dataset.transforms.py_transforms.Compose | +| torchvision.transforms.Normalize | mindspore.dataset.vision.py_transforms.Normalize | +| torchvision.transforms.RandomHorizontalFlip | mindspore.dataset.vision.py_transforms.RandomHorizontalFlip | +| torchvision.transforms.Resize | mindspore.dataset.vision.py_transforms.Resize | +| torchvision.transforms.ToTensor | mindspore.dataset.vision.py_transforms.ToTensor | diff --git a/tutorials/notebook/computer_vision_application.ipynb b/tutorials/notebook/computer_vision_application.ipynb index 6d8dfd2d87f44f46f8ca5573d295735a4ff30d91..2b2f978b1667398cc02fb7191d73ad3c9d875551 100644 --- a/tutorials/notebook/computer_vision_application.ipynb +++ b/tutorials/notebook/computer_vision_application.ipynb @@ -213,7 +213,7 @@ "import mindspore.common.dtype as mstype\n", "import mindspore.ops.functional as F\n", "import mindspore.dataset as ds\n", - "import mindspore.dataset.transforms.vision.c_transforms as C\n", + "import mindspore.dataset.vision.c_transforms as C\n", "import mindspore.dataset.transforms.c_transforms as C2\n", "\n", "\n", @@ -252,8 +252,8 @@ " changeswap_op]\n", "\n", " # Apply map operations on images\n", - " cifar_ds = cifar_ds.map(input_columns=\"label\", operations=type_cast_op)\n", - " cifar_ds = cifar_ds.map(input_columns=\"image\", operations=c_trans)\n", + " cifar_ds = cifar_ds.map(operations=type_cast_op, input_columns=\"label\")\n", + " cifar_ds = cifar_ds.map(operations=c_trans, input_columns=\"image\")\n", "\n", " # Apply shuffle operations\n", " cifar_ds = cifar_ds.shuffle(buffer_size=10)\n", @@ -314,7 +314,7 @@ "import matplotlib.pyplot as plt\n", "dataset_show = create_dataset()\n", "iterator_show= dataset_show.create_dict_iterator()\n", - "images = iterator_show.get_next()[\"image\"]\n", + "images = iterator_show.get_next()[\"image\"].asnumpy()\n", "# Images[0].shape is (3,224,224).We need transpose as (224,224,3) for using in plt.show().\n", "picture_show = np.transpose(images[0],(1,2,0))\n", "plt.imshow(picture_show)\n" diff --git a/tutorials/notebook/convert_dataset_to_mindspore_data_format/convert_dataset_to_mindspore_data_format.ipynb b/tutorials/notebook/convert_dataset_to_mindspore_data_format/convert_dataset_to_mindspore_data_format.ipynb index f34bc0c817f399bc5bdac90a497910d626d24d5f..0fea6b2a76021d054acc0f0e3fc7cc786c25159b 100644 --- a/tutorials/notebook/convert_dataset_to_mindspore_data_format/convert_dataset_to_mindspore_data_format.ipynb +++ b/tutorials/notebook/convert_dataset_to_mindspore_data_format/convert_dataset_to_mindspore_data_format.ipynb @@ -194,7 +194,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'data': array([255, 216, 255, 224, 0, 16, 74, 70, 73, 70, 0, 1, 1,\n", + "{'data': Tensor(shape=[803], dtype=UInt8, value= [255, 216, 255, 224, 0, 16, 74, 70, 73, 70, 0, 1, 1,\n", " 0, 0, 1, 0, 1, 0, 0, 255, 219, 0, 67, 0, 2,\n", " 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2,\n", " 2, 4, 3, 2, 2, 2, 2, 5, 4, 4, 3, 4, 6,\n", @@ -250,8 +250,7 @@ " 143, 6, 252, 112, 209, 62, 35, 120, 247, 224, 
174, 137, 168,\n", " 77, 241, 3, 92, 240, 206, 167, 29, 245, 142, 155, 115, 114,\n", " 80, 27, 5, 157, 73, 203, 164, 139, 42, 249, 103, 12, 145,\n", - " 195, 22, 229, 5, 128, 31, 149, 148, 81, 69, 21, 255, 217],\n", - " dtype=uint8), 'label': array(3, dtype=int64)}\n" + " 195, 22, 229, 5, 128, 31, 149, 148, 81, 69, 21, 255, 217]), 'label': Tensor(shape=[], dtype=Int64, value= 3)}\n" ] } ], @@ -376,7 +375,7 @@ "# create MindDataset for reading data\n", "csv_data_set = ds.MindDataset(dataset_file=csv_mindrecord_path)\n", "# create a dictionary iterator and read a data record through the iterator\n", - "print(next(csv_data_set.create_dict_iterator()))" + "print(next(csv_data_set.create_dict_iterator(output_numpy=True)))" ] }, { @@ -493,7 +492,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'data': array([255, 216, 255, ..., 35, 255, 217], dtype=uint8), 'id': array(30707, dtype=int64), 'label': array(4, dtype=int64)}\n" + "{'data': Tensor(shape=[1431], dtype=UInt8, value= [255, 216, 255, ..., 35, 255, 217]), 'id': Tensor(shape=[], dtype=Int64, value= 30707), 'label': Tensor(shape=[], dtype=Int64, value= 4)}\n" ] } ], @@ -620,7 +619,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'data': array([255, 216, 255, ..., 127, 255, 217], dtype=uint8), 'fine_label': array(88, dtype=int64), 'coarse_label': array(8, dtype=int64), 'id': array(10349, dtype=int64)}\n" + "{'data': Tensor(shape=[1374], dtype=UInt8, value= [255, 216, 255, ..., 127, 255, 217]), 'fine_label': Tensor(shape=[], dtype=Int64, value= 88), 'coarse_label': Tensor(shape=[], dtype=Int64, value= 8), 'id': Tensor(shape=[], dtype=Int64, value= 10349)}\n" ] } ], @@ -767,7 +766,7 @@ "# create MindDataset for reading data\n", "imagenet_data_set = ds.MindDataset(dataset_file=file_name)\n", "# create a dictionary iterator and read a data record through the iterator\n", - "print(next(imagenet_data_set.create_dict_iterator()))" + "print(next(imagenet_data_set.create_dict_iterator(output_numpy=True)))" ] }, { @@ -938,7 +937,7 @@ "# create MindDataset for reading data\n", "define_data_set = ds.MindDataset(dataset_file=file_name)\n", "# create a dictionary iterator and read a data record through the iterator\n", - "print(next(define_data_set.create_dict_iterator()))" + "print(next(define_data_set.create_dict_iterator(output_numpy=True)))" ] }, { diff --git a/tutorials/notebook/customized_debugging_information.ipynb b/tutorials/notebook/customized_debugging_information.ipynb index 44be7bd3a753b3c00b1851729badec85be8b4584..95b9ab65083051ebefd3f6d1084c5bcc614ada4f 100644 --- a/tutorials/notebook/customized_debugging_information.ipynb +++ b/tutorials/notebook/customized_debugging_information.ipynb @@ -18,7 +18,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "本文将使用[快速入门](https://gitee.com/mindspore/docs/blob/master/tutorials/tutorial_code/lenet.py)作为样例,并通过构建自定义调试函数:`Callback`、`metrics`、`Print算子`、日志打印等,同时将构建的自定义调试函数添加进代码中,通过运行效果来展示具体如何使用MindSpore提供给我们的自定义调试能力,帮助快速调试训练网络。\n", + "本文将使用[快速入门](https://gitee.com/mindspore/docs/blob/master/tutorials/tutorial_code/lenet/lenet.py)作为样例,并通过构建自定义调试函数:`Callback`、`metrics`、`Print算子`、日志打印等,同时将构建的自定义调试函数添加进代码中,通过运行效果来展示具体如何使用MindSpore提供给我们的自定义调试能力,帮助快速调试训练网络。\n", "体验过程如下:\n", "1. 数据集准备。\n", "2. 
定义深度学习网络LeNet5。\n", @@ -84,9 +84,9 @@ "outputs": [], "source": [ "import mindspore.dataset as ds\n", - "import mindspore.dataset.transforms.vision.c_transforms as CV\n", + "import mindspore.dataset.vision.c_transforms as CV\n", "import mindspore.dataset.transforms.c_transforms as C\n", - "from mindspore.dataset.transforms.vision import Inter\n", + "from mindspore.dataset.vision import Inter\n", "from mindspore.common import dtype as mstype\n", "\n", "def create_dataset(data_path, batch_size=32, repeat_size=1,\n", @@ -116,11 +116,11 @@ " type_cast_op = C.TypeCast(mstype.int32)\n", "\n", " # apply map operations on images\n", - " mnist_ds = mnist_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=resize_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", "\n", " # apply DatasetOps\n", " buffer_size = 10000\n", @@ -614,4 +614,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/notebook/data_loading_enhance/data_loading_enhancement.ipynb b/tutorials/notebook/data_loading_enhance/data_loading_enhancement.ipynb index fb562620846ff291b4c5438bb568de8baf6af099..6733e3aa75749174cab117c121e3a18aca7fbc5c 100644 --- a/tutorials/notebook/data_loading_enhance/data_loading_enhancement.ipynb +++ b/tutorials/notebook/data_loading_enhance/data_loading_enhancement.ipynb @@ -343,7 +343,7 @@ " print(data[\"data\"])\n", "\n", "func = lambda x:x**2\n", - "ds11 = ds10.map(input_columns=\"data\",operations=func)\n", + "ds11 = ds10.map(operations=func, input_columns=\"data\")\n", "print(\"After map:\")\n", "for data in ds11.create_dict_iterator():\n", " print(data[\"data\"])" @@ -383,11 +383,11 @@ "[ 3 2 1 0 -1]\n", "[4 3 2 1 0]\n", "After zip:\n", - "{'data': array([0, 1, 2, 3, 4], dtype=int32), 'data2': array([ 0, -1, -2, -3, -4], dtype=int32)}\n", - "{'data': array([1, 2, 3, 4, 5], dtype=int32), 'data2': array([ 1, 0, -1, -2, -3], dtype=int32)}\n", - "{'data': array([2, 3, 4, 5, 6], dtype=int32), 'data2': array([ 2, 1, 0, -1, -2], dtype=int32)}\n", - "{'data': array([3, 4, 5, 6, 7], dtype=int32), 'data2': array([ 3, 2, 1, 0, -1], dtype=int32)}\n", - "{'data': array([4, 5, 6, 7, 8], dtype=int32), 'data2': array([4, 3, 2, 1, 0], dtype=int32)}\n" + "{'data': Tensor(shape=[5], dtype=Int64, value= [0, 1, 2, 3, 4]), 'data2': Tensor(shape=[5], dtype=Int64, value= [ 0, -1, -2, -3, -4])}\n", + "{'data': Tensor(shape=[5], dtype=Int64, value= [1, 2, 3, 4, 5]), 'data2': Tensor(shape=[5], 
dtype=Int64, value= [ 1, 0, -1, -2, -3])}\n", + "{'data': Tensor(shape=[5], dtype=Int64, value= [2, 3, 4, 5, 6]), 'data2': Tensor(shape=[5], dtype=Int64, value= [ 2, 1, 0, -1, -2])}\n", + "{'data': Tensor(shape=[5], dtype=Int64, value= [3, 4, 5, 6, 7]), 'data2': Tensor(shape=[5], dtype=Int64, value= [ 3, 2, 1, 0, -1])}\n", + "{'data': Tensor(shape=[5], dtype=Int64, value= [4, 5, 6, 7, 8]), 'data2': Tensor(shape=[5], dtype=Int64, value= [4, 3, 2, 1, 0])}\n" ] } ], @@ -449,7 +449,7 @@ "outputs": [], "source": [ "DATA_DIR = \"./enhance_images\"\n", - "ds1 = ds.ImageFolderDatasetV2(DATA_DIR, decode=True)" + "ds1 = ds.ImageFolderDataset(DATA_DIR, decode=True)" ] }, { @@ -465,8 +465,8 @@ "metadata": {}, "outputs": [], "source": [ - "from mindspore.dataset.transforms.vision import Inter\n", - "import mindspore.dataset.transforms.vision.c_transforms as transforms" + "from mindspore.dataset.vision import Inter\n", + "import mindspore.dataset.vision.c_transforms as transforms" ] }, { @@ -476,7 +476,7 @@ "outputs": [], "source": [ "resize_op = transforms.Resize(size=(800,800), interpolation=Inter.LINEAR)\n", - "ds2 = ds1.map(input_columns=\"image\", operations=resize_op)" + "ds2 = ds1.map(operations=resize_op, input_columns=\"image\")" ] }, { @@ -518,7 +518,7 @@ ], "source": [ "for data in ds2.create_dict_iterator():\n", - " imgplot_resized = plt.imshow(data[\"image\"])\n", + " imgplot_resized = plt.imshow(data[\"image\"].asnumpy())\n", " plt.show()" ] }, @@ -537,14 +537,15 @@ "metadata": {}, "outputs": [], "source": [ - "import mindspore.dataset.transforms.vision.py_transforms as transforms" + "from mindspore.dataset.transforms.py_transforms import Compose\n", + "import mindspore.dataset.vision.py_transforms as transforms" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "2. 定义数据增强算子,通过`ComposeOp`接口将多个数据增强组合使用。" + "2. 定义数据增强算子,通过`Compose`接口将多个数据增强组合使用。" ] }, { @@ -578,17 +579,17 @@ } ], "source": [ - "ds3 = ds.ImageFolderDatasetV2(DATA_DIR)\n", + "ds3 = ds.ImageFolderDataset(DATA_DIR)\n", "\n", "transforms_list = [\n", " transforms.Decode(), # Decode images to PIL format.\n", " transforms.RandomCrop(size=(800,800)),\n", " transforms.ToTensor() # Convert PIL images to Numpy ndarray.\n", "]\n", - "compose = transforms.ComposeOp(transforms_list)\n", - "ds4 = ds3.map(input_columns=\"image\", operations=compose())\n", + "compose = Compose(transforms_list)\n", + "ds4 = ds3.map(operations=compose, input_columns=\"image\")\n", "for data in ds4.create_dict_iterator():\n", - " imgplot_resized = plt.imshow(data[\"image\"].transpose(1, 2, 0))\n", + " imgplot_resized = plt.imshow(data[\"image\"].asnumpy().transpose(1, 2, 0))\n", " plt.show()" ] }, diff --git a/tutorials/notebook/debugging_in_pynative_mode.ipynb b/tutorials/notebook/debugging_in_pynative_mode.ipynb index ce3d50557b55592afefaca452b1ecbd56d45521a..82d4d76daedb25fead84f8c41fc11266c2555e9f 100644 --- a/tutorials/notebook/debugging_in_pynative_mode.ipynb +++ b/tutorials/notebook/debugging_in_pynative_mode.ipynb @@ -34,7 +34,7 @@ "\n", "4. 
执行神经网络训练,查看网络各参数梯度。\n", "\n", - "> 你可以在这里找到完整可运行的样例代码:。" + "> 你可以在这里找到完整可运行的样例代码:。" ] }, { @@ -92,9 +92,9 @@ "metadata": {}, "outputs": [], "source": [ - "import mindspore.dataset.transforms.vision.c_transforms as CV\n", + "import mindspore.dataset.vision.c_transforms as CV\n", "import mindspore.dataset.transforms.c_transforms as C\n", - "from mindspore.dataset.transforms.vision import Inter\n", + "from mindspore.dataset.vision import Inter\n", "from mindspore.common import dtype as mstype\n", "import mindspore.dataset as ds\n", "import numpy as np\n", @@ -126,11 +126,11 @@ " type_cast_op = C.TypeCast(mstype.int32)\n", "\n", " # using map method to apply operations to a dataset\n", - " mnist_ds = mnist_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=resize_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", " \n", " # process the generated dataset\n", " buffer_size = 10000\n", @@ -187,8 +187,8 @@ "datas = create_dataset(train_data_path)\n", "data1 = datas.create_dict_iterator()\n", "data= data1.get_next()\n", - "images = data[\"image\"]\n", - "labels = data[\"label\"]\n", + "images = data[\"image\"].asnumpy()\n", + "labels = data[\"label\"].asnumpy()\n", "print(images.shape)\n", "count = 1\n", "for i in images:\n", @@ -600,4 +600,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/notebook/linear_regression.ipynb b/tutorials/notebook/linear_regression.ipynb index 4e3665dcf1c09e5aba7b1b2ee0527b46788b94d5..25008ff1e34df63dabb852fa6b2e3cee642b080f 100644 --- a/tutorials/notebook/linear_regression.ipynb +++ b/tutorials/notebook/linear_regression.ipynb @@ -4,33 +4,23 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##
使用MindSpore实现简单线性函数拟合" + "# 使用MindSpore实现简单线性函数拟合" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 概述" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "回归问题算法通常是利用一系列属性来预测一个值,预测的值是连续的。例如给出一套房子的一些特征数据,如面积、卧室数等等来预测房价,利用最近一周的气温变化和卫星云图来预测未来的气温情况等。如果一套房子实际价格为500万元,通过回归分析的预测值为499万元,则认为这是一个比较好的回归分析。在机器学习问题中,常见的回归分析有线性回归、多项式回归、逻辑回归等。本例子介绍线性回归算法,并通过MindSpore进行线性回归AI训练体验。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "主要流程如下:\n", + "## 概述\n", + "\n", + "回归问题算法通常是利用一系列属性来预测一个值,预测的值是连续的。例如给出一套房子的一些特征数据,如面积、卧室数等等来预测房价,利用最近一周的气温变化和卫星云图来预测未来的气温情况等。如果一套房子实际价格为500万元,通过回归分析的预测值为499万元,则认为这是一个比较好的回归分析。在机器学习问题中,常见的回归分析有线性回归、多项式回归、逻辑回归等。本例子介绍线性回归算法,并通过MindSpore进行线性回归AI训练体验。\n", + "\n", + "整体流程如下:\n", "\n", "1. 生成数据集\n", - "2. 定义前向传播网络\n", - "3. 定义反向传播网络\n", - "4. 定义线性拟合过程的可视化函数\n", + "2. 定义训练网络\n", + "3. 定义前向传播网络与反向传播网络并关联\n", + "4. 拟合过程可视化准备\n", "5. 执行训练" ] }, @@ -38,114 +28,90 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 环境准备" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "系统:Ubuntu18.04\n", - "\n", - "MindSpore版本:GPU\n", + "## 环境准备\n", "\n", - "设置MindSpore运行配置\n", - "\n", - "第三方支持包:`matplotlib`,未安装此包的,可使用命令`pip install matplotlib`预先安装。" + "设置MindSpore运行配置" ] }, { "cell_type": "code", "execution_count": 1, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.002170Z", + "start_time": "2020-09-14T10:38:39.441746Z" + } + }, "outputs": [], "source": [ "from mindspore import context\n", "\n", - "context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")" + "context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "`PYNATIVE_MODE`:自定义调试模式。\n", + "`GRAPH_MODE`:自定义调试模式。\n", "\n", - "`device_target`:设置MindSpore的训练硬件为GPU。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 生成数据集" + "`device_target`:设置MindSpore的训练硬件为CPU。\n", + "\n", + "> 本教程代码依赖`matplotlib`第三方支持包,可使用命令`pip install matplotlib`安装。" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "## 生成数据集\n", + "\n", "### 定义数据集生成函数\n", "\n", - "`get_data`用于生成训练数据集和测试数据集。由于拟合的是线性数据,假定要拟合的目标函数为:$y=2x+3$,那么我们需要的训练数据集应随机分布于函数周边,这里采用了$y=2x+3+noise$的方式生成,其中`noise`为遵循标准正态分布规律的随机数值。" + "`get_data`用于生成训练数据集和测试数据集。由于拟合的是线性数据,假定要拟合的目标函数为:$f(x)=2x+3$,那么我们需要的训练数据集应随机分布于函数周边,这里采用了$f(x)=2x+3+noise$的方式生成,其中`noise`为遵循标准正态分布规律的随机数值。" ] }, { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.007850Z", + "start_time": "2020-09-14T10:38:40.003169Z" + } + }, "outputs": [], "source": [ "import numpy as np\n", - "import mindspore as ms\n", - "from mindspore import Tensor\n", - " \n", - "def get_data(num,w=2.0, b=3.0):\n", - " np_x = np.ones([num, 1])\n", - " np_y = np.ones([num, 1])\n", + "\n", + "def get_data(num, w=2.0, b=3.0):\n", " for i in range(num):\n", " x = np.random.uniform(-10.0, 10.0)\n", - " np_x[i] = x\n", " noise = np.random.normal(0, 1)\n", " y = x * w + b + noise\n", - " np_y[i] = y\n", - " return Tensor(np_x,ms.float32), Tensor(np_y,ms.float32)" + " yield np.array([x]).astype(np.float32), np.array([y]).astype(np.float32)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "对于数据生成函数我们将有以下两个作用。\n", - "\n", - "1. 生成训练数据,对模型函数进行训练。\n", - "2. 
生成验证数据,在训练结束后,对模型函数进行精度验证。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 生成测试数据" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "使用数据生成函数`get_data`随机生成50组验证数据,并可视化展示。" + "使用`get_data`生成50组测试数据,并可视化。" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { - "scrolled": true + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.355635Z", + "start_time": "2020-09-14T10:38:40.009930Z" + } }, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXkAAAEICAYAAAC6fYRZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAX5ElEQVR4nO3dfZBcVZ3G8edJEFaUVTSTEEhiwALWaNWCdkVejARBBdY1YIkGt3YpdY2olC+lVYtSpZTW1uouirWlC4aSFbeUlxWQLIK8SSBbvMgEA4QNLAFxGRInk6ASXwprMr/94942neZ2pnv63u7bt7+fqq7pvvdOn1N3ep45c+455zoiBACopln9rgAAoDiEPABUGCEPABVGyANAhRHyAFBhhDwAVBghDwAVRsgDKdtrbf99h9+z2HbY3qeoegHdIOQxkGw/ZfsPtn/b8PhGv+u1N7aX2x7rdz0wXGh9YJD9dUTc1u9KAGVGSx6VYXs/27+2/bqGbSNpi3+u7QNt32B7wvav0ucLOixjtu0LbW+3/aSkv2ra/37bm2zvtP2k7Q+n218i6SZJBzf853Gw7aW270nrvdX2N2zvm8PpACQR8qiQiHhe0rWSzmrY/B5Jd0bENiWf93+X9CpJiyT9QVKnXTwfkvQOSUdLqkl6d9P+ben+P5f0fkkX2X59RPxO0qmStkTES9PHFkm7JH1K0hxJx0o6SdJHO6wT0BIhj0H2w7QFXH98SNL3tWfIvy/dpojYERHXRMTvI2KnpH+UdEKHZb5H0tcj4umIeFbSPzXujIgfRcQTkbhT0i2SlrV6s4hYHxH3RsRkRDwl6VszqBPQEn3yGGSnN/fJ254l6cW23yjpl5KOknRdum9/SRdJOkXSgem3HGB7dkTsarPMgyU93fD6F03lnyrpC5KOUNKI2l/Sw63ezPYRkr6m5L+C/ZX8Tq5vsy7AtGjJo1IiYkrS1Upa8++TdEPaapekT0s6UtIbI+LPJb053e4OitgqaWHD60X1J7b3k3SNpAslzYuIl0u6seH9s9b1vljSo5IOT+v0uQ7rA+wVIY8q+r6k90r6m/R53QFK+uF/bfsVSlrcnbpa0sdtL7B9oKTzGvbtK2k/SROSJtNW/dsa9o9LeqXtlzXV6TlJv7X9F5I+MoM6AS0R8hhk/9U0Tv46SYqI+yT9TknXyk0Nx39d0oslbZd0r6Qfz6DMSyXdLOlBSQ8oudCrtNydkj6u5A/Br5T8J7GmYf+jkq6Q9GR6DeFgSZ9Jj9uZvvdVM6gT0JK5MxQAVBcteQCoMEIeaGL7kqZuoPrjkn7XDegU3TUAUGGlGic/Z86cWLx4cb+rAQADZf369dsjYiRrX6lCfvHixRodHe13NQBgoNj+Rat99MkDQIUR8gBQYYQ8AFQYIQ8AFUbIA0CFEfIAUGGEPAD029SUND4uFTA5lZAHgH6ampJOPFFasEBavjx5nSNCHgD6aWJCuvtuaXIy+ToxkevbE/IA0E9z50rHHSfts0/yde7cXN++VMsaAMDQsaU77kha8HPnJq9zRMgDQL/NmiXNm1fMW3f7BrYX2r7D9ibbj9j+RLr9FbZvtf14+vXA7qsLAOhEHn3yk5I+HRGvkXSMpI/ZXqLkBse3R8Thkm7Xnjc8BgD0QNchHxFbI+KB9PlOSZskHSJphaTL08Mul3R6t2UBADqT6+ga24slHS3pPknzImKrlPwhkJR5ydj2Ktujtkcnch46BADDLreQt/1SSddI+mREPNfu90XE6oioRURtZCTzxiYAgBnKJeRtv0hJwH8vIq5NN4/bnp/uny9pWx5lAQDal8foGkv6tqRNEfG1hl1rJJ2dPj9b0vXdlgUA6Ewe4+SPl/S3kh62vSHd9jlJX5Z0te0PSvo/SWfmUBYAoANdh3xE/LekVlO0Tur2/QEAM8faNQBQYYQ8ANS1u657geu/542QBwApCe7ly6VDDpFOOKH1uu4Fr/+eN0IeAKSkZb5unbRrV/J1fDz7uILXf88bIQ8AUrLEb32Z38bnzQpe/z1vhDwASMlSv8uWJeG9bFnrpX/r67+PjUlr1+a+/nveWE8eAKTObt5R4PrveSPkAaBugMK7XXTXAECzARoiOR1CHgAaDdgQyekQ8gDQaMCGSE6HkAeARgM2RHI6XHgFMBymptobOdPJKJsBQEseQPV12s9eH2Uz4AEvEfIABkkno14aj61YP3snCHkAg6GT1njzsXPmVKqfvRP0yQMYDFmt8VYTl5qP3b69Uv3sncjrRt6X2d5me2PDtgtsP2N7Q/o4LY+yAAypTka9zJkj1WrS7Nm7j61QP3sn8mrJf0fSNyR9t2n7RRFxYU5lABhm7Y56mZqS3vIWaXRUWrpU+slPhi7YG+XSko+IuyQ9m8d7AUBL7bTGG7tq7r8/6aoZYkVfeD3X9kNpd86BWQfYXmV71PboxBBd8QZQkIpNZupWkSF/saRXSzpK0lZJX806KCJWR0QtImojIyMFVgfAUBiw9d6LVljIR8R4ROyKiClJl0paWlRZALCHIb3ImqWwkLc9v+HlGZI2tjoWAFCMXEbX2L5C0nJJc2yPSfqCpOW2j5IUkp6S9OE8ygIAtC+XkI+IszI2fzuP9wYAzBzLGgBAhRHyAFBhhDwAVBghDwAVRsgDQIUR8gDKoZMbgqBthDyA/uv09nxoGyEPoP+G+PZ8RSPkAfQfK0cWhtv/Aei/dm8Igo4R8gDKob5yJHJFdw0AVBghDwAVRsgDQIUR8gBQYYQ8gGzMQK0EQh7ACzEDtTJyCXnbl9neZntjw7ZX2L7V9uPp1wPzKAtADzADtTLyasl/R9IpTdvOk3R7RBwu6fb0NYBBwAzUysgl5CPiLknPNm1eIeny9Pnlkk7PoywAOWvse68/l5IZqGNj0tq1zEAdYEX2yc+LiK2SlH7NbArYXmV71PboBP8SAr3V2Pd+wgl79sNLyQxUAn6g9f3Ca0SsjohaRNRGRkb6XR1guDT3vdMPXzlFhvy47fmSlH7dVmBZAGaiue+9k354h
lgOhCIXKFsj6WxJX06/Xl9gWQBmonn1x4j2VoKsd/PcfXfyB+GOO5IFxlA6eQ2hvELSPZKOtD1m+4NKwv2tth+X9Nb0NYAyaGyF11d/tPd8vjcMsRwYubTkI+KsFrtOyuP9AeQoj1Z4vZun/h4MsSwt1pMHqmhqqnW3S1YrvNN13LnJx8CgEw2omlZLEtS7aEZG8pno1G7XDvqKljxQNVkt9ZGRPbtobr9d2rGDVvgQoCUPVE3WkgTNwb9jB63wIUFLHqiaen/5+PjuEM/jQune+vlRWrTkgapauVJauDDpl4/obi0alh4eWIQ8UEXj49K6dUn3zLp1yetuLpQyLn5gEfJAFdm7lxuI6L57haWHBxYhD1RF4yzWefOkZcuk2bOTr52Og29W7+dn6eGBQ8gDVdDcZx6RhPEzz0h33plPKDMufiAR8kAVZPWZE8oQIQ9UA33maIFx8kAVsJYMWqAlD5RZJzfmoHsGGQh5oKyYgIQcEPJAWTEBCTkg5IGy4mIqclD4hVfbT0naKWmXpMmIqBVdJjBwshb/4mIqctCrlvyJEXEUAQ9k2FvfOxdT0SW6a4A8dTIapo6+dxSoFyEfkm6xvd72qh6UB/THTEfD0PeOAvViMtTxEbHF9lxJt9p+NCLuqu9Mg3+VJC1atKgH1QEKMtMbZNP3jgIV3pKPiC3p122SrpO0tGn/6oioRURtZGSk6OoAxemmRU7fOwpSaEve9kskzYqInenzt0n6YpFlAn1DixwlVHR3zTxJ1zn5sO8j6fsR8eOCywT6p94iB0qi0JCPiCcl/WWRZQAAWmMIJQBUGCEPABVGyANAhRHyAFBhhDwAVBghDwAVRsgDQIUR8gBQYYQ8AFQYIQ8AFUbIA0CFEfIYPjO5exMwoAh5DI+pKWnr1uSuTZ3evQkYUL24MxTQf/Vb89Xv3CR1dvcmYEAR8hgOjbfms5N137mfKoYA3TUYDo235lu2TBobk9au5e5NqDxa8hgO3JoPQ4qQx/Dg1nwYQoV319g+xfZjtjfbPq/o8gAAuxUa8rZnS/qmpFMlLZF0lu0lRZYJANit6Jb8UkmbI+LJiPijpCslrSi4TABAquiQP0TS0w2vx9Jtf2J7le1R26MTExMFVwcAhkvRIZ81hGGPueQRsToiahFRGxkZKbg6GAgsOwDkpuiQH5O0sOH1AklbCi4Tg6w+M5VlB4BcFB3y90s63PahtveVtFLSmoLLxCBrnJlaX3YAwIwVGvIRMSnpXEk3S9ok6eqIeKTIMjHgGmem1pcdoPsGmLHCJ0NFxI2Sbiy6HFRE88zUiN0Lix13XLJvFqtxAO3itwXlU5+ZatN9A3SJkEe5ZXXfAGgba9eg3FhYDOgKIY/yY2ExYMborgGACiPkAaDCCHnkh/HsQOkQ8shH43IEJ5wgbd1K2AMlQMgjH43j2detkxYtYu0ZoAQIeeSjPp599uxkmCOTl4BSIOSRj/p49rExadkyJi8BJcE4eeRn1izpoIOYvASUCC155K9x7ZlOMDoHyB0hj3LgZiFAIQh5JJpb0Xm2qtt5L1abBApByOOFrejJyfxa1e220FltEiiEo0T9n7VaLUZHR/tdjeEzPp6E8ORkErI/+5l09NG7X4+NzXyBsOb33tt7TU1xwRaYAdvrI6KWta+wlrztC2w/Y3tD+jitqLLQpeZW9JIl+bWqO2mhz/SCLYCWih5CeVFEXFhwGehW1prteQ2DZD14oK8YJ49E85rtea7hznrwQN8UfeH1XNsP2b7M9oFZB9heZXvU9ugEIyoGC+PagdLrKuRt32Z7Y8ZjhaSLJb1a0lGStkr6atZ7RMTqiKhFRG1kZKSb6qCXGNcODISuumsi4uR2jrN9qaQbuikLJZM1rp0uGaB0ihxdM7/h5RmSNhZVFrqQ1eXSTjcM49qBgVBkn/w/237Y9kOSTpT0qQLLwkxkdbm02w3TuOrk2rWMmgFKislQwyxropLU/uQlAKXQl8lQGABZXS50wwCVwjj5YdZqohKTl4DKoCVfVe2OYc9aSoDlBYDKIOSriDHsAFKEfBXtbW12ZqkCQ4WQr6JWF09p4QNDhwuvVdTqgiqzVIGhQ0u+qrIunjI8Ehg6tOSHCWu7A0OHkB82rO0ODBW6awCgwgh5AKgwQh4AKoyQB4AKI+T7idmnAApGyPcLs08B9AAh3y97W18GAHLSVcjbPtP2I7anbNea9n3W9mbbj9l+e3fVrCBmnwLogW4nQ22U9C5J32rcaHuJpJWSXivpYEm32T4iInZ1WV51MPsUQA901ZKPiE0R8VjGrhWSroyI5yPi55I2S1raTVmVxM05ABSsqD75QyQ93fB6LN32ArZX2R61PTpBvzQA5Gra7hrbt0k6KGPX+RFxfatvy9iWOU4wIlZLWi1JtVqNsYQAkKNpQz4iTp7B+45JWtjweoGkLTN4HwBAF4rqrlkjaaXt/WwfKulwST8tqKzqYHIUgJx1O4TyDNtjko6V9CPbN0tSRDwi6WpJ/yPpx5I+xsiaaTA5CkABHCVqNdZqtRgdHe13NfpjfDwJ+MnJZOz82BjrvgNoi+31EVHL2seM17JgchSAAnBnqLJgchSAAhDyZcKt+QDkjO6aojFiBkAfEfJ5ag50RswA6DNCPi9Zgc5ywgD6jJDPS1agM2IGQJ8R8nnJCvT6iJmxMWntWkbMAOg5RtfkpdUQSEbMAOgjQj5PBDqAkqG7plMMiQQwQAj5TkxOSm96E0MiAQwMumvaNTUlLVsm3Xtv8vruu5MW/axZLEMAoLRoybdrYkK6//7dr2s16b3vpVUPoNQI+XbNnSsdf7w0e7Z0zDHStddK99zDRCcApUZ3Tbuah0hKyXj4u+9mohOA0iLkO9E8RJKlgQGUXLe3/zvT9iO2p2zXGrYvtv0H2xvSxyXdVzUHeQ9/rIc+AQ+gpLrtk98o6V2S7srY90REHJU+zumynO6xIiSAIdRVyEfEpoh4LK/K5K6x5c6KkACGUJGjaw61/TPbd9pe1uog26tsj9oencgzeJtb7nPmsCIkgKEz7YVX27dJOihj1/kRcX2Lb9sqaVFE7LD9Bkk/tP3aiHiu+cCIWC1ptSTVarWZd5bX12+vXwRtbrlv386FUgBDZ9qWfEScHBGvy3i0CnhFxPMRsSN9vl7SE5KOyK/aTbL627OW/uVCKYAhU8gQStsjkp6NiF22D5N0uKQniyhLUnZ/+7x5tNwBDL1uh1CeYXtM0rGSfmT75nTXmyU9ZPtBST+QdE5EPNtdVfei1R2YaLkDGHKOEi2ZW6vVYnR0dGbf3NwnDwBDwvb6iKhl7avO2jX1VnsE670DQKo6IS8x4QkAmlQr5JnwBAB7qFbIt7oACwBDqlqrUDYvB8wFWABDrlohL71wOWAAGGLV6q4BAOyBkAeACiPkAaDCCHkAqDBCHgAqjJAHgAor1QJltick/WKaw+ZI2t6D6nSDOuaj7HUse/0k6piXstfxVRExkrWjVCHfDtujrVZbKwvqmI+y17Hs9ZOoY14GoY6t0F0DABVGyANAhQ1iyK/udwXa
QB3zUfY6lr1+EnXMyyDUMdPA9ckDANo3iC15AECbCHkAqLBShrztM20/YnvKdq1p32dtb7b9mO23t/j+Q23fZ/tx21fZ3rfg+l5le0P6eMr2hhbHPWX74fS4Gd6xfMZ1vMD2Mw31PK3Fcaek53az7fN6WL9/sf2o7YdsX2f75S2O6/k5nO6c2N4v/QxsTj93i3tRr4byF9q+w/am9PfmExnHLLf9m4af/+d7Wce0Dnv92Tnxr+l5fMj263tcvyMbzs8G28/Z/mTTMX0/jx2LiNI9JL1G0pGS1kqqNWxfIulBSftJOlTSE5JmZ3z/1ZJWps8vkfSRHtb9q5I+32LfU5Lm9OmcXiDpM9McMzs9p4dJ2jc910t6VL+3Sdonff4VSV8pwzls55xI+qikS9LnKyVd1eOf7XxJr0+fHyDpfzPquFzSDf347LX7s5N0mqSbJFnSMZLu62NdZ0v6pZJJRqU6j50+StmSj4hNEfFYxq4Vkq6MiOcj4ueSNkta2niAbUt6i6QfpJsul3R6kfVtKvs9kq7oRXkFWCppc0Q8GRF/lHSlknNeuIi4JSIm05f3SlrQi3Lb0M45WaHkcyYln7uT0s9CT0TE1oh4IH2+U9ImSYf0qvwcrZD03UjcK+nltuf3qS4nSXoiIqabgV96pQz5vThE0tMNr8f0wg/zKyX9uiEwso4pyjJJ4xHxeIv9IekW2+ttr+pRnRqdm/4bfJntAzP2t3N+e+EDSlp0WXp9Dts5J386Jv3c/UbJ57Dn0q6ioyXdl7H7WNsP2r7J9mt7WrHEdD+7snz+pOQ/slaNtX6fx4707fZ/tm+TdFDGrvMj4vpW35axrXkMaDvHdKzN+p6lvbfij4+ILbbnSrrV9qMRcVe3dWunjpIulvQlJefiS0q6lT7Q/BYZ35vbGNt2zqHt8yVNSvpei7cp9Bxm6NtnrlO2XyrpGkmfjIjnmnY/oKTr4bfp9ZgfSjq8x1Wc7mdXlvO4r6R3Svpsxu4ynMeO9C3kI+LkGXzbmKSFDa8XSNrSdMx2Jf/m7ZO2qrKO6dh09bW9j6R3SXrDXt5jS/p1m+3rlHQF5BZQ7Z5T25dKuiFjVzvnd8baOIdnS3qHpJMi7QDNeI9Cz2GGds5J/Zix9HPwMknPFlinF7D9IiUB/72IuLZ5f2PoR8SNtv/N9pyI6NmiW2387Ar9/HXgVEkPRMR4844ynMdODVp3zRpJK9PRDIcq+Qv608YD0nC4Q9K7001nS2r1n0GeTpb0aESMZe20/RLbB9SfK7nQuLEH9aqX39i3eUaLsu+XdLiT0Un7KvmXdU2P6neKpH+Q9M6I+H2LY/pxDts5J2uUfM6k5HP3k1Z/pIqQ9v9/W9KmiPhai2MOql8nsL1Uye/+jh7WsZ2f3RpJf5eOsjlG0m8iYmuv6tig5X/k/T6PM9LvK79ZDyUhNCbpeUnjkm5u2He+ktEOj0k6tWH7jZIOTp8fpiT8N0v6T0n79aDO35F0TtO2gyXd2FCnB9PHI0q6KHp5Tv9D0sOSHlLyyzS/uY7p69OUjM54opd1TH9WT0vakD4uaa5fv85h1jmR9EUlf5Ak6c/Sz9nm9HN3WI9/tm9S0q3xUMP5O03SOfXPpKRz03P2oJIL28f1uI6ZP7umOlrSN9Pz/LAaRtb1sJ77KwntlzVsK815nMmDZQ0AoMIGrbsGANABQh4AKoyQB4AKI+QBoMIIeQCoMEIeACqMkAeACvt/V7N/5YFcCdgAAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXkAAAEICAYAAAC6fYRZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3dd3gU1f7H8fc3CR2kIyUU/YkUCxYEBAsiCoKKvSvqVQlgwYLg9YqC4kXxgqJwESvXAupVvAgo0osoiAgiINKlBgKIIFKSPb8/drKEsOnZks3n9Tx5kpmd3flmsvnk5MyZM+acQ0REYlNcpAsQEZHQUciLiMQwhbyISAxTyIuIxDCFvIhIDFPIi4jEMIW8SCZmNtPM7snltm3NbFOoaxLJL4W8FFlmtt7M/jKzfRk+Xot0XVkxszvNbG6k65DiJSHSBYgU0BXOuamRLkIkWqklLzHHzEqZ2e9mdmqGddW9Vn8NM6tsZhPMbIeZ7fa+Tszla5cxs3e95y0Hzsn0eF8zW2Nme81suZld7a1vAowEzvX+4/jdW9/ZzH40sz/MbKOZPVNYx0EEFPISg5xzB4HPgJszrL4BmOWc247/ff8OUB+oB/wF5Lab52ng/7yPDkDXTI+vAc4HKgL9gffNrJZzbgWQBHzrnCvvnKvkbf8ncAdQCegMdDezq/Lw7YpkSyEvRd3nXqs9/eNeb/2HHB3yt3jrcM7tdM596pzb75zbCwwELszl/m4ABjrndjnnNgLDMj7onPvEObfFOedzzn0ErAJaZPVizrmZzrml3vY/AWPyUItIjtQnL0XdVVn0yU8HyphZS2AbcAYwDsDMygJDgY5AZW/7CmYW75xLy2F/tYGNGZY3ZHzQzO4AHgEaeKvKA9WyejGvvkHAqUBJoBTwSQ41iOSaWvISk5xzPuBj/K35W4AJXqsd4FGgEdDSOXcccIG33nLx0luBuhmW66V/YWb1gTeA+4GqXpfMzxleN9iUrx8C44G6zrmK+Pvtc1OHSK4o5CWWfQjcCNzqfZ2uAv5++N/NrAr+fvbc+hh4wjt5mwg8kOGxcviDfAeAmd2Fv4WeLhlINLOSmWrZ5Zw7YGYt8P9BEik0Cnkp6r7INE5+XPoDzrn5+E9s1ga+zPCcl4EyQArwHfBVHvbXH38XzTrga+C9DPtbDvwL+BZ/oJ8GfJPhudOBZcA2M0vx1vUABpjZXqAf/j8iIoXGdNMQEZHYpZa8iEgMU8iLiMQwhbyISAxTyIuIxLCouhiqWrVqrkGDBpEuQ0SkSPnhhx9SnHPVgz0WVSHfoEEDFi5cGOkyRESKFDPbkNVj6q4REYlhCnkRkRimkBcRiWEKeRGRGKaQFxGJYQp5EZEYppAXEYkEnw+SkyHEk0Qq5EVEws3ng4sugsREaNvWvxwiCnkRkXDbsQPmzYPUVP/nHTtCtiuFvIhIuNWoAa1bQ0KC/3ONGiHbVVRNayAiUiyYwYwZ/hZ8jRr+5RBRyIuIREJcHBx/fOh3E/I9iIhIxCjkRURimEJeRCSGKeRFRGKYQl5EJIKcc8zeMJv9h/eH5PUV8iIiETJ7w2ziBsRx4bsXMn7l+JDsQ0MoRUTCyedj/5YN1B/TgpT9KQDUKl+LqxtfHZLdKeRFRMLF5+OlO06id8N1gVVz7prDefXOC9kuFfIiImGwZtcaTnr1JGjoX75rsfH2yK0hvyBKIS8iEkI+56Pj+x2ZsnZKYN22ofEc36zNkTlrfL6QTXGgE68iIiEyfuV44gfEBwJ+9FWjcU+lcfzKzTBzpj/QQzztsFryIiKF7PcDv1P5hcqB5TNqnsH3935PQpwXuRm7aIJNO1yIXThqyYuIFKI+U/ocFfBLkpbwY7cfjwR8ZiGedlgteRGRgvL5WLJiJmf89+LAqj5t+jCo/aCcnxviaYcV8iIiBZCaeojmfauwpMKfgXW7++ymUulKuX+REE47rO4aEZF8+s+S/1BiYKlAwI//KA6XtC1vAR9iBW7Jm1ld4D9ATcAHjHLOvWJmVYCPgAbAeuAG59zugu5PRCTStu3bRq1/1Qosd9hZmUkj9hDXuk1Ib+WXH4XRkk8FHnXONQFaAT3NrCnQF5jmnGsITPOWRUSKtLv+d9dRAb+65698teQ04izO35/uXASrO1aBQ945t9U5t8j7ei+wAqgDdAFGe5uNBq4q6L5ERAqNzwfJybkO5Tkb5mD9jXcXvwvA4EsG4552/F/acccOgYwihXri1cwaAGcC84HjnXNbwf+HwMyi638YESm+0i9AmjfPP2xxxgz/yc8g/jr8FycOO5Ft+7YBUKNcDdY/tJ4yJcr4N0gfApn+WjHYXQOAmZUHPgV6Oef+yMPz7jOzhWa2cEeU/QUUkRgV7AKkIIbOG0LZ58sGAn7WnbNIfiz5SMDDkSGQmzYduYo1ihRKS97MSuAP+A+cc595q5PNrJbXiq8FbA/2XOfcKGAUQPPmzaOrM0tEYlN2rW+fj3VrFnLihy0Dq+7YWoN3h2/B4uODv14Ih0AWVGGMrjHgLWCFc25IhofGA12BQd7n/xV0XyIihSKLC5BcWhqde1Xny2pHBgJueQlqHdgF/VOiNsizUxjdNW2A24F2ZrbY++iEP9wvMbNVwCXesohIdEhvfXsBP/HXicQ9lxAI+LfHG27yudQ6EJrpBsKlwC1559xcIKtOqIuzWC8iEhX2HNhD5Rcq4/D3Fp+6ryyLhh2kRKs2MH06pKSEZLqBcNEVryISu3IYJvnktCep9EKlQMAvum8RS1/YS4nfvKmA4+OPau0XRZq7RkRiUzbDJJcmL+X0kacHNn3s3McYfOngI88tgn3vWVHIi0hsCjJMMrV6VVq92Yoftv4Q2GzX47uoXMabGjiEd2iKFHXXiEhsyjRP+wfJUyjxbIlAwI+7cRzuaXd0wIfwDk2Ropa8iMQmb5jk9t+Wc/zo02DcbADan9ieybdN9s81k1GI79AUKWrJi0jMundCN3/Ae369/1em3D7l2ICHkN+hKVLUkheR6JaPfvJ5G+fR5u02geVBFw+iz3l9sn9SiO/QFCkKeRGJXnmYSAzgQOoBThp2Epv3bgagcunKbLr1B8rWaZC7/UXx9AT5pe4aEYleuZxIDGDY/GGUGVgmEPDTb5/KrkmnUfaEk2PqRGpeqSUvItErF9P4rv99PSe8ckJg+ZbTbuH9q9/Htm+PyROpeaWQF5HolU0/uXOOLmO78MWvXwTWbXp4E3WOq+NfiPJ53sNFIS8i0S1IP/lXq7/isg8uCyy/ccUb3HPWPUc/L0ZPpOaVQl5Eiow/Dv5BtRercdh3GIDG1RqzJGkJJeNLBn9CDJ5IzSudeBWRIqHfjH5UHFQxEPAL713Iip4rsg54AdSSF5Eot2z7Mk7996mB5V4tezG049AIVlS0KORFJCql+dJo/XZrFmxeEFi38/GdVClTJYJVFT3qrhGRqDNm6RgSnk0IBPx/L30T18+ngM8HteRFJGrs+HMHNV46MtSxbf22THsnjbjnkqD1f3K84lWOpZAXkajQfU
J3Rv4wMrD8S89faJRWCe5NLPYXNBWEQl5EImr+pvm0eqtVYPm5i57jyQue9C84pwuaCkghLyIRcTD1II1ea8SGPRsAqFCyAlsf3Uq5kuWObKQLmgpMnVsiEnbDFwyn9MDSgYCfevtU/njij6MDPl36BU0K+HxRS15Ewua3Pb9R/+X6geUbTrmBsdeOxRTgIaOQF5GQc85x7cfXMu6XcYF1G3v9RmLFuhGsqnhQd42IHMvng+Rk/4nPAj7n6zVfEzcgLhDwI78A199IvPK2YjvHezgp5EXkaOl3Y0pMzP3NNoI8Z+/BvZQZWIYO73cA4KTjTuDgwDi6/YD/D0EONwGRwqGQF5Gj5eFuTFk9Z8CXfTlu0HEcSD0AwIJ7FrCq1xpKtvLuu2qmIZFhoj55ETlafm624T1nxcpvaNo9FRYOBuD+c+7n1U6vHtlu5kx/l46ZRsyEiUJeRI6Wj7Hpac7HBXekMm9TWmDdjt47qFa22tEbxsVBrVqFXbFkQ901InKsPIxN/3jZxyQ8m8C8TfMA+Oi6j3BPu2MDXiJCLXkRyZeU/SlUH1w9sNymbhtm3TmL+Lj4CFYlmSnkRSTP7p90P8O/Hx5YXtFzBY2rNY5gRZIVhbyI5NqCzQto+WbLwHL/tv3pd2G/CFYkOVHIixQ3Pl+eJ/w6lHaIJsObsHb3WgDKlihL8mPJlC9ZPpSVSiHQiVeR4iQfFzq9vvB1Sj1XKhDwX9/2NX/+/U8FfBGhlrxIcRLsQqcsbsKxcc9G6r1cL7B8TZNr+O/1/9VkYkWMQl6kOMnFhU7OOW787418svyTwLoNvTZQr2K9Y7aV6KeQFylOcrjQadraabR/r31geXin4fQ4p0e4q5RCpJAXKW7SL3TK4M9Df1LzXzXZd2gfACdUOoEVPVdQKqFUJCqUQlQoJ17N7G0z225mP2dYV8XMppjZKu9z5cLYl4gUroGzB1L+n+UDAf/d375j7UNrFfAxorBG17wLdMy0ri8wzTnXEJjmLYtIlFiZshLrb/xjxj8ASDo7Cfe0o2ViyxyeKUVJoXTXOOdmm1mDTKu7AG29r0cDM4E+hbE/Eck/n/Nx0eiLmL1hdmDd9se2U71c9WyeJUVVKMfJH++c2wrgfQ46X6mZ3WdmC81s4Q7dQEAkpD5d/inxA+IDAf/hNR/innbBAz7YnZ7yc8coiaiIXwzlnBvlnGvunGtevbpaEiJ5lovg3bl/J9bfuO6T6wBoWaclqU+lcvNpN2f9mpkvmsrPHaMk4kIZ8slmVgvA+7w9hPsSKZ5yEby9vupFtcFHpv1d1mMZ393zXfazRQa7aCo/d4ySiAtlyI8HunpfdwX+F8J9iRRPwYLXa9n/sHkh1t94Zf4rAPS7oB/uaUfT6k1zft30i6YSEo5cNBVsnUS9QjnxamZj8J9krWZmm4CngUHAx2b2N+A34PrC2JeIZJD5CtZq1TjU7kJOP20uK73Ge8n4kqT0TqFCqQq5f92sLprK4x2jJPIKa3RNFh17XFwYry8iWcgUxm/OGsq9F80NPPxlpw/peE5Wv545CHLRVNB1EtV0xatIURcXx+YyqSQOONL72mWlMW7zeVi/myJYmEQDhbxIUeTNCe+qV+eWcbcy9uexgYfWPbCGBofLqUtFAIW8SNHjjaiZsXku7W4/MppmWMdhPNDygQgWJtFIIS9SxOzfsoE6587m9zL+5brl6/DrQ6spnVA6soVJVIr4xVAiknsvzH2Bcm+dGAj4bxY247dHNirgJUtqyYsUAat2ruLk104OLN975j2Mavmc+t0lRwp5kSjmcz4uee8Spq+bHliX/FgyNcrpQiTJHYW8SJQat2Ic13x8TWD5/avf59bTb41gRVIUKeRFoszuv3ZT5cUqgeXm1ZvxbbfvSYgvEcGqpKjSiVeRKPLY148dFfBL55/N9w8tI6Fde836KPmilrxIpPl8/LhiOmf995LAqr+f93cGnvogPJd49ORjmlJA8kghLxJBhw8f5My/V2FZ+f0AxFkcux7fRcXSFf3zw2ecfEyzPko+qLtGJELe+fEdSj5fOhDwE8bGkdZtiz/g4cjkY5s2wcyZGiop+aKWvEiYbd27ldpDageWO6dU4YuRe7DWbY5trWvWRykghbxImDjnuOPzO3j/p/cD69a+Fs8JTU+F38ZCzZpqrUuhU8iLhMGs9bNoO7ptYHlo6wH06jTgyEnVuDgFvISEQl4khPYf3k/9l+uTsj8FgNoVarPmwTWUji8FrafqpKqEnEJeJEQGfzOYx6c+Hliec9cczqt33pENdCs9CQOFvEghW7NrDSe9elJg+a4z7uLtLm8fu6FOqkoYKORFConP+ej4fkemrJ0SWLft0W0cX15BLpGjcfIi4J8yIDnZfwFSduuyMH7leOIHxAcCfvRVo3FPOwW8RJxa8iLe7fQCJ0FnzPCvz7wu7tg20e8HfqfyC5UDy2fUPIPv7/2ehDj9akl0UEteZMcOf5hnnCMm2LpM+kzpc1TAL0lawo/dflTAS1RRyIvUqOFvrSckHBnOWK0aNG8O8fHHDHFcsm0J1t94cd6LAPRp0wf3tOP040+P1HcgkiU1OUTS54hJH87oHLRrBwsXwjnnwJgxAKT6Ujl71Nn8lPxT4Km7++ymUulKkapcJEcKeRE4ejjj9u1Humrmz4d69VjcoRlntlgU2Hz8TeO5otEVESpWJPfUXSOSkc/nb8mfey4kJLCvhOPRi9No3twf8Jf+36Wk9UtTwEuRoZAXSZc+yqZuXTDj81mv0+SRkgxpDX/bWpOdvVOYfNtk4ky/NlJ0qLtGJJ03omZDuVQerDOH8VNmc1q90/io9fO0Pr2zph6QIkkhL+I5XLUyr9xYl6frr4N448X2g+jV6mFK6AbaUoQp5EWAbzd+S9LEJH5quI4r6l/Kq11ep37lBpEuS6TAFPJSrO3+azd9p/Zl1KJRJB6XyLgbx9GlURdMXTMSIxTyUiw55/hw6Yc88vUjpOxP4eFWD9O/bX8qlKoQ6dJECpVCXoqdX3f+So+JPZi2bhot6rTgq1u/4sxaZ0a6LJGQUMhLsXEg9QCD5g7in3P/SZmEMozoNIL7zr6P+Lj4SJcmEjIKeSkWpq2dRveJ3Vm1axU3nXoTQzsMpWbZGrozk8Q8XdUhMS15XzK3fXYb7d9rj8/5mHzbZMZcO8Yf8BddBImJ0Lat/0IokRiklrzEJJ/z8cYPb9B3Wl/+PPQnT13wFE+c9wRlSpTxbxBsKmHdik9iUMhb8mbW0cxWmtlqM+sb6v2J/JT8E23ebkPSxCTOqNKUn5KWMOCiAUcCHoJPLywSg0Ia8mYWDwwHLgOaAjebWdNQ7lOKIe82fX8e3Efvr3tz1utnsXrXakYvb8T0HvNpfF3Ssd0x6dMLb9oEM2eqT15iVqi7a1oAq51zawHMbCzQBVge4v1KceFNKjZ+x1we6FKC30of5J4z72FQs0eo+vfTITUt6+6YjNMLi8SoUHfX1AE2Zlje5K0LMLP7zGyhmS3cEeQWayLZ2bhuMVfVmUOXG30c98dB5nYZzxtXv
kHVeo3VHSNC6EM+2P/A7qgF50Y555o755pXr149xOVIrEj1pTLk2yE0+egCvj7JGDQtjkXLzqNNs8v9G6g7RgQIfXfNJqBuhuVEYEuI9ykxbv6m+XSb0I0lyUvo3LAzr3UcRoOe5Y4d767uGJGQh/z3QEMzOwHYDNwE3BLifUpR5PPleGHS7wd+54mpT/D6D69Tu0JtPr3hU65ufLUmExPJRki7a5xzqcD9wGRgBfCxc25ZKPcpRVD6HZmyuDDJOceYpWNo/FpjRi0axYMtH2RFzxVc0+QaBbxIDkJ+MZRzbhIwKdT7kSIsmwuTVu9aTY+JPZiydgrNazdn0q2TOKvWWREuWKTo0LQGEnlBLkw6mHqQAbMGcOqIU5m/eT6vXfYa3/3tu+wD3hsvj3NZbyNSzGhaA4m89JEwXp/8jPUzSZqYxK87f+XGU25kSIch1K5QO/vXSO/ymTfP/4dixgz/iVeRYk6/BRId4uLYXt644/OutPtPO1J9qXx565eMvW5szgEPwbt8REQteYk8n/Px1qK36DO1D/sO7ePJ85/kyfOfPHqumZykd/mkt+R18ZMIoJCXCFuavJSkiUnM2ziPC+pfwMjOI2lSvUneXyhTl48ufhLxU8hLRPx56E8GzBrAkO+GULFURd7p8g5dm3Ut2JBIXfwkcgyFvITdhF8ncP+k+9mwZwN3n3E3L17yIlXLVo10WSIxSSEvYbPpj008+OWDjPtlHE2rN2X2nbM5v/75kS5LJKYp5KVgcjEdQaovldcWvMZTM54i1ZfK8+2e59HWj1IyvmSYixUpfjSEUvIvh+kIABZsXkCLN1rw8OSHOb/e+SzrsYwn2vShZMpuXbQkEgYKeclZVleSZjM2fc+BPfSc2JNWb7Yi+c9kPrn+EybeMpETKzbQDbRFwkghL9nLrrUeZDoC5xxjfx5L4+GNGfnDSO5vcT8req7guqbX+UfO6KIlkbBSn7xkL5vJwzKPTV+zey09JvXg6zVfc3ats/ni5i9oXrv50a+ni5ZEwkohL9nLKZTj4jhYtRKD5wxk4JyBlIgrwbCOw+hxTg/i4+KPfT1dtCQSVgp5yV4OoTxz/Uy6T+zOLym/cH3T6xnaYSh1jquTxYt5dNGSSNgo5CVnQUJ5x5876D2lN6OXjKZBpQZMvGUinRp2ilCBIpIVhbzkic/5eOfHd3h86uP8cfAPnjjvCf5xwT8oW6JspEsTkSAU8pJry7YvI2liEnN/m8v59c7n353/zSk1Tol0WSKSDYW85Gj/4f08O+tZXvr2JY4rdRxvXfkWd55xJ3GmEbgi0U4hL0fLNE3BpFWT6DmpJ+t/X8+dZ9zJ4EsGU61stUhXKSK5pJCXIzLcQm9z27N56J46fPrLZzSp1oSZXWdyYYMLI12hiOSRQl6O2LGDtG+/4bWz0/jHOfNJXVWa5y56jt5temsyMZEiSiEvAQtTf6PbA2VYdNw+OuyszPBHF/B/VU+KdFkiUgA6cybsObCHByY9QIs3W7Kldnk+av86X76ccnTAZzVJmYhENbXkizHnHJ8s/4ReX/Vi275t9DynJ8+1e46KpSsevWGGvnpat/ZfARun9oFIUaCQL6bW7l5Lz0k9+Wr1V5xZ80z+d9P/OKfOOcE3zm6SMhGJamqOFTOH0g7x/JznOWXEKcz9bS4vd3iZBfcuyDrgIeiUwiJSNKglX4zM3jCbpAlJrEhZwbVNruWVjq/kPJkYaOZIkSJMIV8MpOxP4fEpj/PO4neoX7E+E26eQOeTO+ftRTRzpEiRpJCPYc453l38Lr2n9GbPwT30adOHpy54inIly0W6NBEJE4V8jFq+YzlJE5KY89sc2tRtw8jLR3JqjVMjXZaIhJlCPsbsP7yfgbMHMnjeYMqXLM8bV7zB3WfercnERIophXwM+Wr1V/SY2IN1v6+ja7OuDL5kMNXLVY90WSISQQr5GLBl7xZ6fdWLT5Z/QqOqjZjRdQZtG7SNdFkiEgUU8kVYmi+NEd+P4MnpT3Io7RDPXvQsvVv3plRCqUiXJiJRQiFfRC3auohuE7qxcMtCLjnxEkZ0HsFJVTJNJpZpbngRKX50Nq6I+ePgHzz05UOc88Y5bNyzkTHXjmHybZODB/xFF0FiIrRt618WkWJHLfkiwjnHpys+5aGvHmLr3q10b96dgRcPpFLpSsGfoPlmRIQCtuTN7HozW2ZmPjNrnumxJ8xstZmtNLMOBSuzeFu3ex2Xj7mc6z+5nhrlavDt375leOfhWQc8aL4ZEQEK3pL/GbgGeD3jSjNrCtwEnALUBqaa2cnOubQC7q948PrSD1WtxJDvhjJg1gDiLI4hlw7hgZYPkBCXix+b5psREQoY8s65FQB2bIB0AcY65w4C68xsNdAC+LYg+ysWvL70uRu/IenaUiwrv5+rG1/NKx1foW7Funl7Lc03I1LsherEax1gY4blTd66Y5jZfWa20MwW7tixI0TlhFkB7qK0c+NK7qk8h/O7prE3bT/jO/6Hz278LO8BLyJCLkLezKaa2c9BPrpk97Qg64ImnnNulHOuuXOuefXqMXB1Zj5HtTjnGL14NI0/voB3mzl6zzOW/9iGK1rcFtp6RSSm5dhd45xrn4/X3QRkbHomAlvy8TpFTz5GtazYsYLuE7sza8Mszk08l9dvm8ppPWqqL11ECixU3TXjgZvMrJSZnQA0BBaEaF/RJQ+jWv46/Bf/mP4Pmo1sxpLkJYy6fBRz757LabWa+f8wKOBFpIAKdOLVzK4GXgWqAxPNbLFzroNzbpmZfQwsB1KBnsVmZE0uR7VMXj2ZHpN6sHb3Wm4//XZeuvQlapTTMEcRKVwFHV0zDhiXxWMDgYEFef0iK5tRLVv3buXhyQ/z0bKPOLnqyUy7YxrtTmgX5gJFpLjQFa+FKZu5YtJ8aYxcOJK/T/87B1MP0r9tf/q06aPJxEQkpBTyhSV9VM28ef6++Bkz/C164MetP9JtQje+3/I97U9sz4hOI2hYtWGECxaR4kAhX1iCjKrZW6ks/Wb0Y9iCYVQrW40PrvmAm0+9OdjFYyIiIaGQLyzpo2rmzcO1PpdxO+fy4AcPsWXvFrqd3Y3nL36eymUqR7pKESlmNNVwQaVf3QowYwbrl8/jym4VufaT66hatirz/jaPf1/+bwW8iESEWvIFkaEf/nCbVgx97nL6zx6AYbx0yUs81Oqh3E0mJiISIkqggvD64b+plUrSKXP5edpcujTqwrDLhlGvYr1IVyciUoxDvhBujberQgJ97q7Gm7W3UfdAKT6/cSxdGl9VyIWKiORf8eyTL+Ct8ZxzvLfkPRoPb8I7dXbwWLPuLH9mhwJeRKJO8WzJF+DWeCtTVtJ9YndmrJ9Bq8RWTOk8hWY1m4W4YBGR/CmeLfl83BrvQOoB+s3ox+kjT+fHbT8ysvNIvrn7GwW8iES14tmSz+Ot8aasmUKPST1YvWs1t552K/+69F8cX153XBKR6Fc8Qx5ydWu8bfu28cjkRxjz8xgaVmnIlNun0P7E/EyvLyISGcU35LPhcz5eX/g6T0x7gr9S/+Lp
C5+m73l9KZ1QOtKliYjkiUI+k8XbFpM0IYn5m+fT7oR2jOg0gkbVGkW6LBGRfFHIe+Pl91Uqy9Mzn+GV+a9QpUwV3rv6PW497dYjk4kVwrh6EZFwK94h742X/zxlLg9cmcCm0oe476z7GNR+0NFzzWQzjbCISDQr1iG/Ye0iHqwzh/HtHKclH+Kjm76gdbPLj92wAOPqRUQiqVg2Rw+nHealeS/R9OMLmXqS8eLUOH5Yfh6tT+8c/An5GFcvIhINil1L/tuN39JtQjeWbl/KFSdfwasdXqF+z7LZ97XncVy9iEi0KDYhv/uv3fSd2pdRi0aReFwi424cR5dGXXJ/l6ZcjKsXEYk2MR/yzjk+WPoBj379KDv37+SRVo/wTNtnqFCqQqRLExEJuZgO+V93/kr3id2Zvm46Leq0YPJtkzmj5hmRLktEJGxiMuQPpB5g0NxB/Jt+pMsAAAb7SURBVHPuPymTUIYRnUZw39n3ER8XH+nSRETCKuZCfuraqfSY2INVu1Zx86k3M6TDEGqWrxnpskREIiJmhlAm70vmts9u45L3LsHnfEy+bTIfXvuhP+DTb7btXKTLFBEJq5gI+UmrJtF4eGM+XvYxT13wFEu7L+XS/7vU/2AB7wIlIlKUxUR3zclVT6ZVYiuGdhhK42qNj35QV6uKSDEWEy35k6qcxJe3fnlswIOuVhWRYi0mWvLZ0tWqIlKMxX7Ig65WFZFiKya6a0REJDiFvIhIDFPIi4jEMIW8iEgMU8iLiMQwhbyISAwzF0XzuZjZDmBDPp9eDUgpxHIKi+rKu2itTXXljerKm4LUVd85Vz3YA1EV8gVhZgudc80jXUdmqivvorU21ZU3qitvQlWXumtERGKYQl5EJIbFUsiPinQBWVBdeRettamuvFFdeROSumKmT15ERI4VSy15ERHJRCEvIhLDilTIm9n1ZrbMzHxm1jzTY0+Y2WozW2lmHbJ4/glmNt/MVpnZR2ZWMgQ1fmRmi72P9Wa2OIvt1pvZUm+7hYVdR5D9PWNmmzPU1imL7Tp6x3C1mfUNQ12DzewXM/vJzMaZWaUstgvL8crp+zezUt7PeLX3XmoQqloy7LOumc0wsxXe+/+hINu0NbM9GX6+/UJdV4Z9Z/uzMb9h3jH7yczOCkNNjTIci8Vm9oeZ9cq0TViOmZm9bWbbzeznDOuqmNkUL4ummFnlLJ7b1dtmlZl1zVcBzrki8wE0ARoBM4HmGdY3BZYApYATgDVAfJDnfwzc5H09Euge4nr/BfTL4rH1QLUwHrtngMdy2CbeO3YnAiW9Y9o0xHVdCiR4X78AvBCp45Wb7x/oAYz0vr4J+CgMP7tawFne1xWAX4PU1RaYEK73U15+NkAn4EvAgFbA/DDXFw9sw3/BUNiPGXABcBbwc4Z1LwJ9va/7BnvfA1WAtd7nyt7XlfO6/yLVknfOrXDOrQzyUBdgrHPuoHNuHbAaaJFxAzMzoB3wX2/VaOCqUNXq7e8GYEyo9hECLYDVzrm1zrlDwFj8xzZknHNfO+dSvcXvgMRQ7i8Hufn+u+B/74D/vXSx97MOGefcVufcIu/rvcAKoE4o91nIugD/cX7fAZXMrFYY938xsMY5l9+r6QvEOTcb2JVpdcb3UVZZ1AGY4pzb5ZzbDUwBOuZ1/0Uq5LNRB9iYYXkTx/4SVAV+zxAowbYpTOcDyc65VVk87oCvzewHM7svhHVkdL/37/LbWfx7mJvjGEp342/xBROO45Wb7z+wjfde2oP/vRUWXvfQmcD8IA+fa2ZLzOxLMzslXDWR888m0u+rm8i6sRWpY3a8c24r+P+IA8FuPl0oxy3qbv9nZlOBmkEeetI597+snhZkXeaxobnZJldyWePNZN+Kb+Oc22JmNYApZvaL9xc/37KrC/g38Cz+7/lZ/F1Jd2d+iSDPLfAY29wcLzN7EkgFPsjiZQr9eAUrNci6kL2P8srMygOfAr2cc39kengR/u6Ifd75ls+BhuGoi5x/NpE8ZiWBK4EngjwcyWOWG4Vy3KIu5J1z7fPxtE1A3QzLicCWTNuk4P83McFrgQXbplBqNLME4Brg7GxeY4v3ebuZjcPfVVCg0MrtsTOzN4AJQR7KzXEs9Lq8E0qXAxc7rzMyyGsU+vEKIjfff/o2m7yfc0WO/Ve80JlZCfwB/4Fz7rPMj2cMfefcJDMbYWbVnHMhn4grFz+bkLyvcukyYJFzLjnzA5E8ZkCymdVyzm31uq62B9lmE/7zBukS8Z+PzJNY6a4ZD9zkjXw4Af9f4wUZN/DCYwZwnbeqK5DVfwYF1R74xTm3KdiDZlbOzCqkf43/5OPPwbYtLJn6QK/OYn/fAw3NPwqpJP5/c8eHuK6OQB/gSufc/iy2Cdfxys33Px7/ewf876XpWf1hKixen/9bwArn3JAstqmZfm7AzFrg/93eGcq6vH3l5mczHrjDG2XTCtiT3lURBln+Rx2pY+bJ+D7KKosmA5eaWWWve/VSb13ehPrMcmF+4A+nTcBBIBmYnOGxJ/GPjFgJXJZh/SSgtvf1ifjDfzXwCVAqRHW+CyRlWlcbmJShjiXexzL83RahPnbvAUuBn7w3WK3MdXnLnfCP3lgTprpW4+93XOx9jMxcVziPV7DvHxiA/48QQGnvvbPaey+dGIZjdB7+f9N/ynCcOgFJ6e8z4H7v2CzBfwK7dajryu5nk6k2A4Z7x3QpGUbGhbi2svhDu2KGdWE/Zvj/yGwFDnv59Tf853GmAau8z1W8bZsDb2Z47t3ee201cFd+9q9pDUREYlisdNeIiEgQCnkRkRimkBcRiWEKeRGRGKaQFxGJYQp5EZEYppAXEYlh/w/U1890TNGtXAAAAABJRU5ErkJggg==\n", "text/plain": [ "
" ] @@ -159,10 +125,14 @@ "source": [ "import matplotlib.pyplot as plt\n", "\n", - "eval_x, eval_label = get_data(50)\n", - "x1, y1 = eval_x.asnumpy(), eval_label.asnumpy()\n", - "plt.scatter(x1, y1, color=\"red\", s=5)\n", - "plt.title(\"Eval_data\")\n", + "eval_data = list(get_data(50))\n", + "x_target_label = np.array([-10, 10, 0.1])\n", + "y_target_label = x_target_label * 2 + 3\n", + "x_eval_label,y_eval_label = zip(*eval_data)\n", + "\n", + "plt.scatter(x_eval_label, y_eval_label, color=\"red\", s=5)\n", + "plt.plot(x_target_label, y_target_label, color=\"green\")\n", + "plt.title(\"Eval data\")\n", "plt.show()" ] }, @@ -170,500 +140,426 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 定义前向传播网络" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 初始化网络模型" + "上图中绿色线条部分为目标函数,红点部分为验证数据`eval_data`。" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "使用`nn.Dense`定义了网络模型,即为线性模型,\n", + "### 定义数据增强函数\n", "\n", - "$$y=wx+b\\tag{1}$$\n", + "使用MindSpore的数据增强函数,将数据进行增强操作,操作解释如下:\n", "\n", - "其中,权重值$w$对应`weight`,$b$对应`bias`,并将其打印出来。" + "- `ds.GeneratorDataset`:将生成的数据转换为MindSpore的数据集,并且将生成的数据的x,y值存入到`data`和`label`的数组中。\n", + "- `batch`:将`batch_size`个数据组合成一个batch。\n", + "- `repeat`:将数据集数量倍增。" ] }, { "cell_type": "code", "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "weight: -0.00034249047 bias: -0.019308656\n" - ] + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.390782Z", + "start_time": "2020-09-14T10:38:40.356644Z" } - ], + }, + "outputs": [], "source": [ - "from mindspore.common.initializer import TruncatedNormal\n", - "from mindspore import nn\n", + "from mindspore import dataset as ds\n", "\n", - "net = nn.Dense(1,1,TruncatedNormal(0.02),TruncatedNormal(0.02))\n", - "print(\"weight:\", net.weight.set_data([0][0]), \"bias:\", net.bias.set_data([0]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 查看初始化的网络模型" + "def create_dataset(num_data, batch_size=16, repeat_size=1):\n", + " input_data = ds.GeneratorDataset(list(get_data(num_data)), column_names=['data','label'])\n", + " input_data = input_data.batch(batch_size)\n", + " input_data = input_data.repeat(repeat_size)\n", + " return input_data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "我们将验证数据集和初始化的模型函数可视化。" + "使用数据集增强函数生成训练数据,并查看训练数据的格式。" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { - "scrolled": true + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.435708Z", + "start_time": "2020-09-14T10:38:40.391790Z" + } }, "outputs": [ { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXkAAAEICAYAAAC6fYRZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAaVUlEQVR4nO3df7QcZZ3n8feHIMiIC8F7EwJJDDj4I7pnUdsoYCACg8g4A3hGN8weZdXdLI7sGXdnzi4uZx1Gd+aoM4575owjG0eEmREVUSSDKARIhFF+3SCEhMgQFOWSeHNDVBh12b253/2jqjeVtvre/lHVP6o/r3P6dHdVdT1PV/f99nO/9TxPKSIwM7NqOqTfFTAzs/I4yJuZVZiDvJlZhTnIm5lVmIO8mVmFOcibmVWYg7wNFUmbJf27FrddI2my7DqVQdITks7udz1s+DnIWynSIPVLSf+cuf1Vv+vVjKR/K+kf+12Pskm6WtL/6Hc9rHcO7XcFrNJ+KyJu63clzEaZW/LWU5IOl/RTSa/KLBtPW/2LJC2UdJOkaUk/SR8vbXHfR6Qt1Z9IegR4XcP6yyQ9LulZSY9IujBd/grgSuCU9D+On6bLf1PSdyU9I+lJSVfMUfac9U7TTB+R9O20/FsljWXWv1PSDyU9Lenyed7n1ZI+Jenr6b7ulfSSzPqXS9ooaZ+kRyW9I12+Dvg3wH9J3+c/tHJcbbg5yFtPRcRzwFeBizKL3wF8KyL2kHwnPwe8GFgO/BJoNc3zR8BL0tubgYsb1j8OrAaOAv4Y+HtJSyJiB3AJcHdEHBkRR6fb/xx4F3A08JvA+yRd0KTsVur9u8C7gUXAYcAfAkhaCXwaeCdwHPAiYL4ftovS97AQ2An8SbqvFwAbgWvTci4C/lrSKyNiPfB54OPp+/ytecqwCnCQtzJ9LW2112//Pl1+LQcH+d9NlxERT0fEVyLiFxHxLEnwOqPF8t4B/ElE7IuIJ4G/zK6MiC9HxK6ImI2ILwGPAaua7SwiNkfEw+n2W4EvNKtLi/X+XET8U0T8ErgOODld/jvATRFxZ/oj+N+B2Xne61cj4r6ImCEJ3PV9vRV4IiI+FxEzEfEA8JW0DBtBzslbmS5okpO/AzhC0uuBH5MEqBsAJP0a8EngXJJWKsALJS2IiP3zlHcc8GTm+Q+zKyW9C/jPwIp00ZHAGE2k9fso8CqSlvfhwJebbNtKvX+ceckv0vJ/pd4R8XNJTzer1zz7ejHw+nrKKXUo8Hfz7M8qyi1567mImCVpyV5E0oq/KW39AvwB8DLg9RHxL4DT0+VqYde7gWWZ58vrDyS9GPgMcCnwojQlsy2z37zpWK8FNgDLIuIokrx9s3oUVu/0B+NFLbwuz5Mkqa+jM7cjI+J96XpPOztiHOStX64F/jXJicBrM8tfSJLP/qmkY0jy7K26DvhgehJ0KfAfM+teQBLgpgEkvZukhV43BSyVdFhDXfZFxP+WtIrkB6mZbup9PfBWSW9My/8wnf9t3gS8ND2R+7z09rr05DIk7/PEDvdtQ8hB3sr0Dw395G+or4iIe0lObB4HfCPzmv8JHAHsBe4BvtlGeX9MkqL5AXArmRRFRDwCfAK4myTQ/Uvg25nX3gFsB34saW+67PeAD0t6FvgQyY9IMx3XOyK2A+8n+bHbDfwE6GgQV/of0TnAWmAXSVrnYySpJoDPAivTcyRf66QMGy7yRUPMzKrLLXkzswpzkDczqzAHeTOzCnOQNzOrsIEaDDU2NhYrVqzodzXMzIbKli1b9kbEeN66gQryK1asYGJiot/VMDMbKpJ+2Gyd0zVmZhXmIG9mVmEO8mZmFeYgb2ZWYQ7yZmYV5iBvZlZhDvJmZkWZnYWpKRigiR8d5M3MijA7C296EyxdCmvWJM8HgIO8mVkRpqfhO9+BmZnkfnq63zUCHOTNzIqxaBGceiocemhyv2hRv2sEDNi0BmZmQ0uCTZuSFvyiRcnzAeAgb2ZWlEMOgcWL+12Lg3SdrpG0TNImSTskbZf0++nyYyRtlPRYer+w++qamVk7isjJzwB/EBGvAN4AvF/SSuAy4PaIOAm4PX1uZmY91HWQj4jdEfFA+vhZYAdwPHA+cE262TXABd2WZWZm7Sm0d42kFcCrgXuBxRGxG5IfAiD3VLOkdZImJE1MD0iXIzOzqigsyEs6EvgK8IGIeKbV10XE+oioRURtfDz3wiZmZtahQoK8pOeRBPjPR8RX08VTkpak65cAe4ooy8zMWldE7xoBnwV2RMRfZFZtAC5OH18M3NhtWWZm1p4i+smfBrwTeFjSg+my/wZ8FLhO0nuBHwFvL6AsMzNrQ9dBPiL+EWg2tOusbvdvZmad89w1ZmYV5iBvZtXWyhzvAzgPfFEc5M2sumZm4LTT4Pjjm8/xPqDzwBfFQd7Mqml2Fk4/He65B/bvh29/O3+O9wGdB74oDvJmVk3T03D//Qeev+51+XO8D+g88EVxkDezasoG71NOSVryeXO81+eBn5yEzZsHZh74ong+eTOrpnYu4jGA88AXxUHezKqrwsG7VU7XmNloqHA3ybk4yJtZ9VW8m+RcHOTNrPoq3k1yLg7yZlZ9Fe8mORefeDWz4TY7O38PmnZ62lSMW/JmNrzaybXXe9qMUIAHB3kzGwSt9nxp3G6Ec+2tcpA3s/5qtTWet90I59pbpRigPqO1Wi0mJib6XQ0z66WpqSRwz8wkwXpyMn8AU7PtWsnJV5ykLRFRy1tX1IW8r5K0R9K2zLIrJD0l6cH0dl4RZZlZxbTaGh8bg1oNFiw4eLsRzbW3qqjeNVcDfwX8bcPyT0bEnxdUhplVUSs9X2Zn4cwzYWICVq2CO+5wUG9RIS35iLgT2FfEvsxsBM3XGs+eYL3/fti7t7f1G2Jln3i9VNLWNJ2zMG8DSeskTUiamPaZcTPL4xOsHSszyH8aeAlwMrAb+ETeRhGxPiJqEVEbHx8vsTpmNrQqPud7mUoL8hExFRH7I2IW+AywqqyyzGwE+ARrR0oL8pKWZJ5eCGxrtq2ZmZWjkN41kr4ArAHGJE0CfwSskXQyEMATwH8ooiwzM2tdIUE+Ii7KWfzZIvZtZmad87QGZmYV5iBvZlZhDvJmZhXmIG9mVmEO8mZmFeYgb2bla/WiIFY4B3kzK1c7l+izwjnIm1m5fIm+vnKQN7NyeQbJvirqoiFmZvlauSiIlcZB3szKV59B0nrO6RozswpzkDczqzAHeTOzCnOQNzOrMAd5s1HlUagjwUHebBR5FOrIKCTIS7pK0h5J2zLLjpG0UdJj6f3CIsoyswJ4FOrIKKolfzVwbsOyy4DbI+Ik4Pb0uZkNAo9CHRmFBPmIuBPY17D4fOCa9PE1wAVFlGVmHcjm32dnYc8euOMOmJyEzZs9CrXCyszJL46I3QDpfW5TQdI6SROSJqb9L6NZ8bL59zPOOPD4zDNhfNwBvuL6fuI1ItZHRC0iauPj4/2ujln1NObfnYsfKWUG+SlJSwDS+z0llmVmzTTm31vNxbuLZSWUOUHZBu
Bi4KPp/Y0llmVmzTTOAhkx/4yQ9RTPd76T/Bhs2pRMMmZDp6gulF8A7gZeJmlS0ntJgvtvSHoM+I30uZn1SrYlXp8FUjr4cTPuYlkZhbTkI+KiJqvOKmL/Ztamblvi9RRP/fXuYjm0PJ+82TCbnc1PveS1xNuZz90X+qgMJ9nMhlWzqQlmZ5MUTbeDnVpJ69jAc5A3G1Z5rfV64F+2LAn0P/qRBzuNOAd5s2GVNzVBNvDffXfSGneAH2nOyZsNq3refGrqQCDv5oRps/y+DTW35M2G3dq1SXpmzZokRbNpU/tz0njq4cpykDcbZlNTcNddSXrmrruS552cMHW/+MpykDcbZtKBaQciOk+zeOrhynKQNxsmjfPJLF4Mq1fDggXJfTt94bPq+X1PPVw5DvJmwyIvby4lQfmpp+Bb3+ouOLtffCU5yJsNi2Z5cwdnm4ODvNmwcN7cOuB+8mbDwvPJWAfckjcbFK1cpMOpGWuTg7zZIPBgJCuJg7zZIPBgJCuJg7zZIPBJVStJ6SdeJT0BPAvsB2YiolZ2mWYDLW8iMJ9UtZL0qiX/pog42QHeRt5cuXefVLUSOF1j1o1WesRkOfduPdaLIB/ArZK2SFrXg/LMeqOTHjHOvVuP9WIw1GkRsUvSImCjpO9FxJ31lWngXwewfPnyHlTHrCCdXCzbuXfrsdJb8hGxK73fA9wArGpYvz4iahFRGx8fL7s6ZsXptFXu3Lv1UKkteUkvAA6JiGfTx+cAHy6zTLOecavchkDZ6ZrFwA1KvvyHAtdGxDdLLtOsd+qtcrMBVWqQj4jvA/+qzDLMzKw5d6E0M6swB3kzswpzkDczqzAHeTOzCnOQNzOrMAd5M7MKc5A3M6swB3kzswpzkDczqzAHeTOzCnOQNzOrMAd5Gw3tXsHJrCIc5K3aZmdh9+7kyk3tXMHJrCJ6cWUos/6oX56vfvUmaP0KTmYV4SBv1ZW9PJ+UzP3u66raiHG6xqore3m+1athchI2b/YVnGykuCVv1eXL85k5yFvF+fJ8NuJKT9dIOlfSo5J2Srqs7PLMzOyAUoO8pAXAp4C3ACuBiyStLLNMMzM7oOx0zSpgZ3pBbyR9ETgfeKTIQrZtg7Vri9xj5wYl7et6HMz1OJjrcbBBqMc558Cf/mnx+y07yB8PPJl5Pgm8PruBpHXAOoDly5d3VMjznw8vf3mHNSzQoAymdD0O5noczPU42KDU46ijytlv2UE+7/fxoEMaEeuB9QC1Wq2jw/3rvw7XX9/JK23gzM66N4xZgco+8ToJLMs8XwrsKrlMG1b1EaqefsCsMGUH+fuBkySdIOkwYC2woeQybVhlR6jWpx8ws66UGuQjYga4FLgF2AFcFxHbyyzThlh2hOqpp8LYmGeONOtS6YOhIuJm4Oayy7EKyI5QHRuDM89MWvSnnposP8SzcJi1y381NljqI1T37nXqxqwADvI2mBpTN5450qwjnrvGBpMnFzMrhIO8DS5PLmbWNadrzMwqzEHezKzCHOStOLOz7tduNmAc5K0Y2SkJzjgDdu92sDcbAA7yVozslAR33QXLl3v+GbMB4CBvxaj3a1+wIOnu6EFMZgPBQd6KUe/XPjkJq1d7EJPZgHA/eSvOIYfAscd6EJPZAHFL3opXH8TUToB3zxyzUjjIW//5YiFmpXGQtwMaW9NFta7n248vFmJWGgd5SzS2pmdmimldt9JK94yTZqVRDFAOtFarxcTERL+rMZqmppJAPDOTBNvvfhde/eoDzycnO5ssrHG/zfbjC3ibdUzSloio5a0rrSUv6QpJT0l6ML2dV1ZZVoDG1vTKlcW0rlttpXdystbM5lV2F8pPRsSfl1yGFSFv/vYiukJ6XnizvnI/eTugcf72ouZz97zwZn1T9onXSyVtlXSVpIV5G0haJ2lC0sS0e1UMF/dtNxt4XQV5SbdJ2pZzOx/4NPAS4GRgN/CJvH1ExPqIqEVEbXx8vJvqWC+5b7vZUOgqXRMRZ7eynaTPADd1U5YNmLy+7U7JmA2cMnvXLMk8vRDYVlZZ1qW8tMt8qRj3bTcbCmXm5D8u6WFJW4E3Af+pxLKsU3lpl1ZSMdlZJzdvdq8ZswHlwVCjLm+wErQ2gMnMBkJfBkPZkMhLuzgVY1YZ7ic/6poNVvIAJrNKcEu+6lrpy543pYCnGTCrBAf5KnNfdrOR5yBfZXPN0+7RqmYjwUG+ypqdQHUL32xk+MRrlTU7qerRqmYjwy35qss7geoukmYjwy35UeQ53s1GhoP8qPIc72YjwekaM7MKc5A3M6swB3kzswpzkDczqzAH+UHhEahmVgIH+UHgEahmVhIH+UEw1xwzZmZd6CrIS3q7pO2SZiXVGtZ9UNJOSY9KenN31aw4j0A1s5J0OxhqG/A24H9lF0paCawFXgkcB9wm6aURsb/L8qrJI1DNrCRdteQjYkdEPJqz6nzgixHxXET8ANgJrOqmrMrzRTrMrARl5eSPB57MPJ9Ml/0KSeskTUiamHYu2sysUPOmayTdBhybs+ryiLix2ctyluX2DYyI9cB6gFqt5v6DZmYFmjfIR8TZHex3EliWeb4U2NXBfszMrAtlpWs2AGslHS7pBOAk4L6SyqomD44yswJ024XyQkmTwCnA1yXdAhAR24HrgEeAbwLvd8+aNnhwlJkVRDFALcVarRYTExP9rkb/TU0lAX5mJuk7Pznpud/NrClJWyKilrfOI14HkQdHmVlBfGWoQeTBUWZWEAf5QeXL85lZAZyu6TX3mjGzHnKQL1NjQHevGTPrMQf5suQFdE8pbGY95iBflryA7l4zZtZjDvJlyQvo9V4zk5OwebN7zZhZ6dy7pizNukG614yZ9ZCDfJkc0M2sz5yuKYK7RZrZgHKQ79bMDLzxje4WaWYDyUG+G7OzsHo13H33gV40jzziFr2ZDQwH+W5MT8P99x94fsQRcPLJbtGb2cBwkO/GokVw2mmwYAG89rXw85/D/v0e6GRmA8NBvhv1bpJPPQX33Zfk5j3QycwGiLtQdivbTdLTA5vZgOn28n9vl7Rd0qykWmb5Ckm/lPRgeruy+6qWqKgukPWA7wBvZgOi23TNNuBtwJ056x6PiJPT2yVdllMezwxpZhXWVZCPiB0R8WhRlemZbMvdM0OaWYWVeeL1BEnflfQtSaubbSRpnaQJSRPTvQiwjS33sTHPDGlmlTXviVdJtwHH5qy6PCJubPKy3cDyiHha0muBr0l6ZUQ807hhRKwH1gPUarXiRxHV53GvnwxtbLnv3esTpmZWWfO25CPi7Ih4Vc6tWYAnIp6LiKfTx1uAx4GXFlftFuXl2/OmAPYJUzOrqFK6UEoaB/ZFxH5JJwInAd8vo6w55eXbFy92y93MRka3XSgvlDQJnAJ8XdIt6arTga2SHgKuBy6JiH3dVbUDza7E5Ja7mY0IxQBNplWr1WJiYqLYnTbm5M3MKkbSloio5a2r/rQG9VZ7hOd8N7ORU/0gDx7wZGYjazSCvAc8mdmIGo0g3+wErJlZxY3GLJT1KYF9AtbMRsxoBHk4eEpgM
7MRMRrpGjOzEeUgb2ZWYQ7yZmYV5iBvZlZhDvJmZhXmIG9mVmEDNUGZpGngh13sYgzYW1B1iuR6tcf1at+g1s31ak+n9XpxRIznrRioIN8tSRPNZmLrJ9erPa5X+wa1bq5Xe8qol9M1ZmYV5iBvZlZhVQvy6/tdgSZcr/a4Xu0b1Lq5Xu0pvF6VysmbmdnBqtaSNzOzDAd5M7MKG6ogL+ntkrZLmpVUa1j3QUk7JT0q6c1NXn+CpHslPSbpS5IOK6meX5L0YHp7QtKDTbZ7QtLD6XYFX8E8t7wrJD2Vqdt5TbY7Nz2OOyVd1oN6/Zmk70naKukGSUc32a4nx2u+9y/p8PQz3pl+n1aUVZdMmcskbZK0I/0b+P2cbdZI+lnm8/1Q2fXKlD3nZ6PEX6bHbKuk1/SgTi/LHIsHJT0j6QMN2/TkmEm6StIeSdsyy46RtDGNRxslLWzy2ovTbR6TdHHbhUfE0NyAVwAvAzYDtczylcBDwOHACcDjwIKc118HrE0fXwm8rwd1/gTwoSbrngDGenj8rgD+cJ5tFqTH70TgsPS4riy5XucAh6aPPwZ8rF/Hq5X3D/wecGX6eC3wpR58dkuA16SPXwj8U0691gA39er71M5nA5wHfAMQ8Abg3h7XbwHwY5JBQz0/ZsDpwGuAbZllHwcuSx9flve9B44Bvp/eL0wfL2yn7KFqyUfEjoh4NGfV+cAXI+K5iPgBsBNYld1AkoAzgevTRdcAF5RZ37TMdwBfKLOcgq0CdkbE9yPi/wBfJDm+pYmIWyNiJn16D7C0zPLm0cr7P5/k+wPJ9+ms9LMuTUTsjogH0sfPAjuA48sss2DnA38biXuAoyUt6WH5ZwGPR0Q3I+o7FhF3AvsaFme/R83i0ZuBjRGxLyJ+AmwEzm2n7KEK8nM4Hngy83ySX/0DeBHw00wwydumaKuBqYh4rMn6AG6VtEXSupLrUndp+u/yVU3+PWzlWJbpPSQtvjy9OF6tvP//v036ffoZyferJ9L00KuBe3NWnyLpIUnfkPTKXtWJ+T+bfn+v1tK8sdWvY7Y4InZD8iMO5F18uuvjNnCX/5N0G3BszqrLI+LGZi/LWdbYN7SVbVrWYj0vYu5W/GkRsUvSImCjpO+lv/gdm6tewKeBj5C874+QpJLe07iLnNd23c+2leMl6XJgBvh8k90UfrzyqpqzrNTvUjskHQl8BfhARDzTsPoBknTEP6fnW74GnNSLejH/Z9PPY3YY8NvAB3NW9/OYtaLr4zZwQT4izu7gZZPAsszzpcCuhm32kvyLeGja+srbpmXz1VPSocDbgNfOsY9d6f0eSTeQpAq6ClqtHj9JnwFuylnVyrEsvF7pCaW3AmdFmozM2UfhxytHK++/vs1k+jkfxa/+K144Sc8jCfCfj4ivNq7PBv2IuFnSX0sai4jSJ+Jq4bMp5XvVorcAD0TEVOOKfh4zYErSkojYnaau9uRsM0ly3qBuKck5yZZVJV2zAVib9no4geSX+L7sBmng2AT8TrroYqDZfwZFOBv4XkRM5q2U9AJJL6w/Jjn5uC1v26I05EAvbFLe/cBJSnoiHUbyb+6Gkut1LvBfgd+OiF802aZXx6uV97+B5PsDyffpjmY/TEVJc/6fBXZExF802ebY+rkBSatI/r6fLrNeaVmtfDYbgHelvWzeAPysnqrogab/UffrmKWy36Nm8egW4BxJC9P06jnpstaVfVa5yBtJYJoEngOmgFsy6y4n6RXxKPCWzPKbgePSxyeSBP+dwJeBw0us69XAJQ3LjgNuztTlofS2nSRtUfbx+zvgYWBr+gVb0liv9Pl5JL03Hu9RvXaS5B0fTG9XNtarl8cr7/0DHyb5EQJ4fvr92Zl+n07swTF6I8m/6Vszx+k84JL69wy4ND02D5GcwD617HrN9dk01E3Ap9Jj+jCZ3nEl1+3XSIL2UZllPT9mJD8yu4H/m8aw95Kcx7kdeCy9Pybdtgb8Tea170m/azuBd7dbtqc1MDOrsKqka8zMLIeDvJlZhTnIm5lVmIO8mVmFOcibmVWYg7yZWYU5yJuZVdj/A9pJHOWc/ZqnAAAAAElFTkSuQmCC\n", - "text/plain": [ - "
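The printed sizes follow directly from the pipeline arithmetic of the cell above: batching 1600 samples 16 at a time yields 1600/16 = 100 batches, and repeat multiplies that count. A minimal sketch to confirm this, reusing the create_dataset function from above (ds_check is a hypothetical name, not a tutorial variable):

ds_check = create_dataset(1600, batch_size=16, repeat_size=2)
print(ds_check.get_dataset_size())  # 200 batches: (1600 / 16) * 2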
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "The dataset size of ds_train: 100\n", + "dict_keys(['data', 'label'])\n", + "The x label value shape: (16, 1)\n", + "The y label value shape: (16, 1)\n" + ] } ], "source": [ - "x = np.arange(-10, 10, 0.1)\n", - "y = x * (net.weight.set_data([0][0]).asnumpy()) + (net.bias.set_data([0]).asnumpy())\n", - "plt.scatter(x1, y1, color=\"red\", s=5)\n", - "plt.plot(x, y, \"blue\")\n", - "plt.title(\"Eval data and net\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "红色的点:为之前生成的50组验证数据集。\n", + "num_data = 1600\n", + "batch_size = 16\n", + "repeat_size = 1\n", "\n", - "蓝色的线:初始化的模型网络。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 定义损失函数" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们的网络模型表达式为:\n", + "ds_train = create_dataset(num_data, batch_size=batch_size, repeat_size=repeat_size) \n", + "print(\"The dataset size of ds_train:\", ds_train.get_dataset_size())\n", + "dict_datasets = ds_train.create_dict_iterator().get_next()\n", "\n", - "$$h(x)=wx+b\\tag{2}$$" + "print(dict_datasets.keys())\n", + "print(\"The x label value shape:\", dict_datasets[\"data\"].shape)\n", + "print(\"The y label value shape:\", dict_datasets[\"label\"].shape)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "一般地,数学上对线性回归模型采用均方差的方式来判断模型是否拟合得很好,即均方差的值$J(w)$值越小,函数模型便拟合得越好,验证数据代入后,预测得到的y值就越准确。公式2对应m个数据的均方差公式为:" + "通过定义的`create_dataset`将生成的1600个数据增强为了100组shape为16x1的数据集。" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "$$J(w)=\\frac{1}{m}\\sum_{i=1}^m(h(x_i)-y^{(i)})^2\\tag{3}$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "为了方便后续的计算,我们采用0.5倍的均方差的表达式来进行计算,均方差值整体缩小至0.5倍的计算方式对判断模型拟合的好坏没有影响。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$J(w)=\\frac{1}{2m}\\sum_{i=1}^m(h(x_i)-y^{(i)})^2\\tag{4}$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "公式4即为网络训练中的损失函数,其中参数:\n", - "\n", - "- $J(w)$为均方差。\n", + "## 定义训练网络\n", "\n", - "- $m$为样本数据的数量。\n", + "在MindSpore中使用`nn.Dense`生成单个数据输入,单个数据输出的线性函数模型:\n", "\n", - "- $h(x_i)$为第$i$个数据的$x_i$值代入模型网络(公式2)后的预测值。\n", + "$$f(x)=wx+b\\tag{1}$$\n", "\n", - "- $y^{(i)}$为第$i$个数据中的$y$值(label值)。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在MindSpore中定义损失函数的方法如下。" + "并使用Normal算子随机初始化权重$w$和$b$。" ] }, { "cell_type": "code", "execution_count": 6, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.441532Z", + "start_time": "2020-09-14T10:38:40.436780Z" + } + }, "outputs": [], "source": [ - "from mindspore.ops import operations as P\n", - "\n", - "class MyLoss(nn.loss.loss._Loss):\n", - " def __init__(self,reduction='mean'):\n", - " super().__init__(reduction)\n", - " self.square = P.Square()\n", - " def construct(self, data, label):\n", - " x = self.square(data-label) * 0.5\n", - " return self.get_loss(x)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "其中:\n", - "\n", - "- `nn.loss.loss._Loss`:是MindSpore自定义loss算子的一个基类。\n", + "from mindspore.common.initializer import Normal\n", + "from mindspore import nn\n", "\n", - "- `P.Square`:MindSpore训练的框架中的平方算子,算子需要注册过才能在框架的计算图中使用。" + "class LinearNet(nn.Cell):\n", + " def __init__(self):\n", + " super(LinearNet, self).__init__()\n", + " self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))\n", + " 
\n", + " def construct(self, x):\n", + " x = self.fc(x)\n", + " return x" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### 损失函数与网络结合\n", - "\n", - "接下来我们需要将loss函数的表达式和网络net关联在一起,在MindSpore中需要`nn.WithLossCell`,实现方法如下:" + "调用网络查看初始化的模型参数。" ] }, { "cell_type": "code", "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "criterion = MyLoss()\n", - "loss_opeartion = nn.WithLossCell(net, criterion) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.456400Z", + "start_time": "2020-09-14T10:38:40.442544Z" + }, + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Parameter (name=fc.weight, value=Tensor(shape=[1, 1], dtype=Float32,\n", + "[[3.68014202e-002]])), Parameter (name=fc.bias, value=Tensor(shape=[1], dtype=Float32, [3.68014202e-002]))]\n" + ] + } + ], "source": [ - "其中:\n", - "\n", - "- `net`:网络模型。\n", - "\n", - "- `criterion`:即为实例化的loss函数。" + "net = LinearNet()\n", + "model_params = net.trainable_params()\n", + "print(model_params)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "上述从数据代入到计算出loss值的过程为AI训练中的前向传播过程。" + "初始化网络模型后,接下来将初始化的网络函数和训练数据集进行可视化,了解拟合前的模型函数情况。" ] }, { - "cell_type": "markdown", - "metadata": {}, + "cell_type": "code", + "execution_count": 8, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.607733Z", + "start_time": "2020-09-14T10:38:40.457985Z" + }, + "scrolled": true + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYQAAAD8CAYAAAB3u9PLAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3dd3hU1dbH8e9KQpGiICCGJqKIgAhoriKoFEUQewevXS/iFaXYQFRAsCEi2FAQCza4gooionSQooJ0UfqLtFClt2T2+8dMhhAmdWYyM8nv8zx5mNOXJ+NZ2fvsYs45RERE4iIdgIiIRAclBBERAZQQRETERwlBREQAJQQREfFRQhARESAECcHMqprZFDNbZmZLzayTb30vM9tgZgt8P22CD1dERMLFgu2HYGaJQKJz7nczKw3MA64HbgX2Ouf6Bx+miIiEW0KwJ3DObQI2+T7vMbNlQOVgzysiIvkr6BLCMSczqw5MB84BugL3ALuBucBjzrmdAY5pD7QHKFmy5Plnn312yOIRESmQ/voL9u6FUqWgVi3mzZu3zTlXIdjThiwhmFkpYBrwgnPuKzOrCGwDHNAHb7XSfVmdIykpyc2dOzck8YiIFEjJyVClCqSkQEICrF+PnXrqPOdcUrCnDkkrIzMrAowGPnPOfQXgnEt2zqU65zzAUOCCUFxLRKRQO+UUaNzYmwwaN/Yuh0jQ7xDMzIBhwDLn3IB06xN97xcAbgCWBHstEZFCzwymTIGtW73JwCxkpw46IQBNgDuBxWa2wLfuaaCdmTXAW2W0FngwBNcSEZG4OKhYMeSnDUUro5+BQClqXLDnFhGR/KOeyiIiAighiIiIjxKCiIgASggiIjFr54GdVB9YPWTnC0UrIxERyUfOOdqNbsfIpSNDel6VEEREYoXHw0fTBhH3fJw/GfRs2jNkp1cJQUQkBvyRvIS679bzLzc8tSFzHphD0fii9KZ3SK6hhCAiEsX2Hd5H7bdr8/fuv/3r1rwZT/WlP0B80ZBeS1VGIiJRquO4jpR6qZQ/GXy9qA6ubwLV6zY5OoaRx0OREP1xrxKCiEiUGfPnGK4feb1/ueO/OvJmmzfB4zl2DCOPB5o3px6cG4rrKiGIiESJtf+s5fRBp/uXq55YlWUPL6Nk0ZLeFRnHMNq6FWbNwgIPH5RrSggiIhF2OPUwjd5vxPzN8/3rljy0hLqn1M36QN9Q2G769JBMbKN3CCIikeDxQHIyvaf2oljfYv5k8OF1H+J6uuyTAfiHwl4Mi0IRkkoIIiL5zeNhyg0NaXHe0ef4bXVv44ubvsByO79BXBxHICUUYSkhiIjko+S9yZz62qlwnne5WApsemAZZU+L/HzyQVcZmVlVM5tiZsvMbKmZdfKtP9nMJpjZCt+/ZYMPV0QkNqV6Umn9aWtvMvCZMxQOvmCUvetBbxVShIXiHUIK8JhzrjbQCHjYzOoA3YBJzrmawCTfsohIofPWr2+R0CeBH1f9CMCAKwbg2m/kwuQEcA5mzfK2GIqwUMyYtgnY5Pu8x8yWAZWB64Bmvt0+BqYCTwV7PRGRiMjYByAH5m2cR9LQJP9y8+rN+enOn0iI8yWCxo29yaBx46MdzSIopO8QzKw60BD4BajoSxY45zaZWeT/a0VE8sLXAcz/8J4yxdsnIBO7Du6i6utV2XN4j3/dxq4bSSydeHQnXwuh3CaZcApZs1MzKwWMBjo753bn4rj2ZjbXzOZujYIik4jIcXwdwEhJybJ6xznHHaP/TZlXyviTwYQ7J+B6umOTQZq0jmZRkAwgRAnBzIrgTQafOee+8q1ONrNE3/ZEYEugY51zQ5xzSc65pAoVKoQiHBGR0PJ1ACMh4fjqHV9/gs8WfUrc83F8tuRzAJ5eWxX3bCqX17g8QkHnXtBVRuZtNDsMWOacG5Bu07fA3cDLvn/HBHstEZGIyKx6x+Phz6supHajuf5d6yXDb0O
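The two printed parameters are the weight and bias of the single nn.Dense layer, so the network maps a batch of shape (N, 1) to predictions of shape (N, 1). A minimal shape check, assuming the net instance created in the cell above (x here is a hypothetical input, not a tutorial variable):

import numpy as np
from mindspore import Tensor

x = Tensor(np.ones((16, 1), dtype=np.float32))  # one batch of 16 samples
print(net(x).shape)  # (16, 1): net computes f(x) = w * x + b for each sample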
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-  "Where:\n",
-  "\n",
-  "- `net` is the network model.\n",
-  "\n",
-  "- `criterion` is the instantiated loss function."
+  "After initializing the network model, the initialized network function and the training dataset are visualized next, to see the state of the model function before fitting."
 ]
},
{
- "cell_type": "markdown",
- "metadata": {},
- "source": [
-  "The process above, from substituting in the data to computing the loss value, is the forward propagation process of AI training."
- ]
- },
- {
 "cell_type": "code",
 "execution_count": 8,
 "metadata": {
+  "ExecuteTime": {
+   "end_time": "2020-09-14T10:38:40.607733Z",
+   "start_time": "2020-09-14T10:38:40.457985Z"
+  },
  "scrolled": true
 },
 "outputs": [
  {
   "data": {
+   "image/png": "[...base64 PNG data omitted: initialized model (blue) plotted against the target function (green) and the eval data (red)...]",
+   "text/plain": [
+    "[figure text representation omitted]"
+   ]
  },
  "metadata": {
   "needs_background": "light"
  },
  "output_type": "display_data"
  }
 ],
 "source": [
  "from mindspore import Tensor\n",
  "\n",
  "x_model_label = np.arange(-10, 10, 0.1)\n",
  "y_model_label = (x_model_label * Tensor(model_params[0]).asnumpy()[0][0] + \n",
  "                 Tensor(model_params[1]).asnumpy()[0])\n",
  "\n",
  "plt.axis([-10, 10, -20, 25])\n",
  "plt.scatter(x_eval_label, y_eval_label, color=\"red\", s=5)\n",
  "plt.plot(x_model_label, y_model_label, color=\"blue\")\n",
  "plt.plot(x_target_label, y_target_label, color=\"green\")\n",
  "plt.show()"
 ]
},
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
+  "As the figure above shows, the initialized model function (blue) still differs considerably from the target function (green)."
 ]
},
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], "source": [ - "## 定义反向传播网络" + "from mindspore import Tensor\n", + "\n", + "x_model_label = np.array([-10, 10, 0.1])\n", + "y_model_label = (x_model_label * Tensor(model_params[0]).asnumpy()[0][0] + \n", + " Tensor(model_params[1]).asnumpy()[0])\n", + "\n", + "plt.axis([-10, 10, -20, 25])\n", + "plt.scatter(x_eval_label, y_eval_label, color=\"red\", s=5)\n", + "plt.plot(x_model_label, y_model_label, color=\"blue\")\n", + "plt.plot(x_target_label, y_target_label, color=\"green\")\n", + "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "有了损失函数后,我们如何使得损失函数最小呢?我们可以将公式1代入到损失函数公式4中展开:\n", - "\n", - "$$J(w,b)=\\frac{1}{2m}\\sum_{i=1}^m(wx_i+b-y^{(i)})^2\\tag{5}$$" + "从上图中可以看出,蓝色线条的初始化模型函数与绿色线条的目标函数还是有较大的差别的。" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "公式5可以将$J(w)$看作为凹函数,对权重值$w$微分可求得:\n", - "\n", - "$$\\frac{\\partial{J(w)}}{\\partial{w}}=\\frac{1}{m}\\sum_{i=1}^mx_i(wx_i+b-y^{(i)})\\tag{6}$$\n" + "## 定义前向传播网络与反向传播网络并关联" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "由凹函数的特性可以知道,当公式6等于0时,损失函数有最小值:\n", + "接下来需要定义模型的损失函数,这里采用均方差的方法用于判断拟合的效果如何,即均方差值越小,拟合的效果越好,其损失损失函数公式为:\n", "\n", - "$$\\sum_{i=1}^mx_i(wx_i+b-y^{(i)})=0\\tag{7}$$ \n", + "$$J(w)=\\frac{1}{2m}\\sum_{i=1}^m(h(x_i)-y^{(i)})^2\\tag{2}$$\n", "\n", - "假设有一个$w_{min}$使得公式7成立。我们如何将初始的权重$w_{s}$逐步的变成$w_{min}$,在这里采取迭代法,也就是梯度下降方法\n", + "假设训练数据第$i$个数据为$(x_i,y^{(i)})$,公式2中的参数解释如下:\n", "\n", - "当权重$w_{s}w_{min}$,权重值需要左移即权重值变小接近$w_{min}$,才能使得损失函数逐步的变小,由凹函数的性质可知,在$w_{s}$处的导数为正(损失函数在$w_{min}$右边单调上升),公式8的值为正。其权重的更新公式为:\n", - "\n", - "$$w_{ud}=w_{s}-\\alpha\\frac{\\partial{J(w_{s})}}{\\partial{w}}\\tag{10}$$\n" + "net = LinearNet()\n", + "net_loss = nn.loss.MSELoss()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "当$w_{s}=w_{min}$时,到$\\frac{\\partial{J(w_{s})}}{\\partial{w}}$=0,即梯度消失,其表达式也可写为公式9的样式。\n", + "### 定义反向传播网络\n", "\n", - "在考虑了全区间的情况后,可以得出权重$w$的更新公式即为:\n", + "反向传播网络的目标是不断变换权重值,使得loss值取得最小值,一般的在线性网络中采用权重更新公式:\n", "\n", - "$$w_{ud}=w_{s}-\\alpha\\frac{\\partial{J(w_{s})}}{\\partial{w}}\\tag{11}$$\n", + "$$w_{t}=w_{t-1}-\\alpha\\frac{\\partial{J(w_{t-1})}}{\\partial{w}}\\tag{3}$$\n", "\n", - "当权重$w$在更新的过程中假如临近$w_{min}$在增加或者减少一个$\\Delta{w}$,从左边或者右边越过了$w_{min}$,公式11都会使权重往反的方向移动,那么最终$w_{s}$的值会在$w_{min}$附近来回迭代,在实际训练中我们也是这样采用迭代的方式取得最优权重$w$,使得损失函数无限逼近局部最小值。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "同理:对于公式5中的另一个权重$b$容易得出其更新公式为:\n", + "公式3参数解释:\n", "\n", - "$$b_{ud}=b_{s}-\\alpha\\frac{\\partial{J(b_{s})}}{\\partial{b}}\\tag{12}$$\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "当所有的权重更新完成后,将新的权重赋值给初始权重:即$w_{s}$=$w_{ud}$,$b_{s}$=$b_{ud}$。将新的初始权重传递回到模型函数中,这样就完成了反向传播的过程。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> 当遇到多项式的回归模型时,上述梯度方法也适用,由于权重数量的增加,需要将权重的名称更新为$w_0,w_1,w_2,...,w_n$,引入矩阵的表达方式,公式将会更加简洁,这里就不多介绍了。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 实现梯度函数" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在MindSpore中的所有要编入计算图的类都需要继承`nn.Cell`算子,MindSpore的梯度计算函数采用如下方式。" + "- $w_{t}$为迭代后的权重值。\n", + "- $w_{t-1}$为迭代前的权重值。\n", + "- $\\alpha$为学习率。\n", + "- $\\frac{\\partial{J(w_{t-1}\\ )}}{\\partial{w}}$为损失函数对权重$w_{t-1}$的微分。\n", + "\n", + "函数中所有的权重值更新完成后,将值传入到模型函数中,这个过程就是反向传播过程,实现此过程需要使用MindSpore中的优化器函数,如下:" ] }, { "cell_type": "code", - "execution_count": 8, - "metadata": {}, + "execution_count": 10, + "metadata": { + 
"ExecuteTime": { + "end_time": "2020-09-14T10:38:40.629217Z", + "start_time": "2020-09-14T10:38:40.616392Z" + } + }, "outputs": [], "source": [ - "from mindspore.ops import composite as C\n", - "\n", - "class GradWrap(nn.Cell):\n", - " \"\"\" GradWrap definition \"\"\"\n", - " def __init__(self, network):\n", - " super().__init__(auto_prefix=False)\n", - " self.network = network\n", - " self.weights = ms.ParameterTuple(filter(lambda x: x.requires_grad,\n", - " network.get_parameters()))\n", - "\n", - " def construct(self, data, label):\n", - " weights = self.weights\n", - " return C.GradOperation(get_by_list=True) \\\n", - " (self.network, weights)(data, label)\n" + "opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "上述代码中`GradWrap`实现的是对各个权重的微分$\\frac{\\partial{J(w)}}{\\partial{w}}$,其展开式子参考公式6。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 反向传播更新权重" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`nn.RMSProp`为完成权重更新的函数,更新方式大致为公式11,但是考虑的因素更多,具体信息请参考[官网说明](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html?highlight=rmsprop#mindspore.nn.RMSProp)。" + "### 关联前向和反向传播网络\n", + "\n", + "定义完成前向传播和反向传播后,在MindSpore中需要调用`Model`函数,将前面定义的网络,损失函数,优化器函数关联起来,使之变成完整的计算网络。" ] }, { "cell_type": "code", - "execution_count": 9, - "metadata": {}, + "execution_count": 11, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.645718Z", + "start_time": "2020-09-14T10:38:40.630789Z" + } + }, "outputs": [], "source": [ - "train_network = GradWrap(loss_opeartion) \n", - "train_network.set_train()\n", - "optim = nn.RMSProp(params=net.trainable_params(),learning_rate=0.02)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过以上操作,我们就完成了前向传播网络和反向传播网络的定义,接下来可以加载训练数据进行线性拟合了。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 定义模型拟合过程可视化函数" + "from mindspore.train import Model\n", + "\n", + "model = Model(net, net_loss, opt)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "定义一个可视化函数`plot_model_and_datasets`,将模型函数和验证数据集打印出来,观察其变化。" + "## 拟合过程可视化准备\n", + "\n", + "### 定义绘图函数\n", + "\n", + "为了使得整个训练过程更容易理解,需要将训练过程的测试数据、目标函数和模型网络进行可视化,这里定义了可视化函数,将在每个step训练结束后调用,展示模型网络的拟合过程。" ] }, { "cell_type": "code", - "execution_count": 10, - "metadata": {}, + "execution_count": 12, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.680586Z", + "start_time": "2020-09-14T10:38:40.646738Z" + } + }, "outputs": [], "source": [ - "import time \n", + "import matplotlib.pyplot as plt\n", + "import time\n", "\n", - "def plot_model_and_datasets(weight, bias, data_x, data_y):\n", + "def plot_model_and_datasets(net, eval_data):\n", + " weight = net.trainable_params()[0]\n", + " bias = net.trainable_params()[1]\n", " x = np.arange(-10, 10, 0.1)\n", - " y = x * ((weight[0][0]).asnumpy()) + ((bias[0]).asnumpy())\n", - " plt.scatter(x1,y1,color=\"red\",s=5)\n", - " plt.scatter(data_x.asnumpy(), data_y.asnumpy(), color=\"black\", s=5)\n", - " plt.plot(x, y, \"blue\")\n", + " y = x * Tensor(weight).asnumpy()[0][0] + Tensor(bias).asnumpy()[0]\n", + " x1, y1 = zip(*eval_data)\n", + " x_target = x\n", + " y_target = x_target * 2 + 3\n", + " \n", " plt.axis([-11, 11, -20, 25])\n", + " plt.scatter(x1, y1, color=\"red\", s=5)\n", + " plt.plot(x, y, color=\"blue\")\n", + " plt.plot(x_target, y_target, color=\"green\")\n", " plt.show()\n", - " time.sleep(0.02)" + " time.sleep(0.2)" ] }, { 
"cell_type": "markdown", "metadata": {}, "source": [ - "上述函数的参数:\n", - "\n", - "- `weight`:模型函数的权重,即$w$。\n", + "### 定义回调函数\n", "\n", - "- `bias`:模型函数的权重,即$b$。\n", + "MindSpore提供的工具,可对模型训练过程进行自定义控制,这里在`step_end`中调用可视化函数,展示拟合过程。更多的使用可参考[官网说明](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/customized_debugging_information.html#callback)\n", "\n", - "- `data_x`:训练数据的x值。\n", - "\n", - "- `data_y`:训练数据的y值。" + "- `display.clear_output`:清除打印内容,实现动态拟合效果。" ] }, { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> 可视化过程中,红色的点是验证数据集,黑色的点是单个batch的训练数据,蓝色的线条是正在训练的回归模型。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, + "cell_type": "code", + "execution_count": 13, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T10:38:40.706063Z", + "start_time": "2020-09-14T10:38:40.681635Z" + } + }, + "outputs": [], "source": [ - "## 执行训练" + "from IPython import display\n", + "from mindspore.train.callback import Callback\n", + "\n", + "class ImageShowCallback(Callback):\n", + " def __init__(self, net, eval_data):\n", + " self.net = net\n", + " self.eval_data = eval_data\n", + " \n", + " def step_end(self, run_context):\n", + " plot_model_and_datasets(self.net, self.eval_data)\n", + " display.clear_output(wait=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "其训练过程如下:\n", + "## 执行训练\n", "\n", - "1. 设置训练的迭代次数`step_size`。\n", - "2. 设置单次迭代的训练数据量`batch_size`。\n", - "3. 正向传播训练`grads`。\n", - "4. 反向传播训练`optim`。\n", - "5. 图形展示模型函数和数据集。\n", - "6. 清除本轮迭代的输出`display.clear_output`,起到动态可视化效果。\n", + "完成以上过程后,可以使用训练数`ds_train`对模型训练,这里调用`model.train`进行,其中参数解释:\n", "\n", - "迭代完成后,输出网络模型的权重值$w和b$。" + "- `epoch`:训练迭代的整个数据集的次数。\n", + "- `ds_train`:训练数据集。\n", + "- `callbacks`:训练过程中需要调用的回调函数。\n", + "- `dataset_sink_model`:数据集下沉模式,支持Ascend、GPU计算平台,本例为CPU计算平台设置为False。" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 14, "metadata": { - "scrolled": true + "ExecuteTime": { + "end_time": "2020-09-14T10:47:22.917679Z", + "start_time": "2020-09-14T10:38:40.707096Z" + } }, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "loss_value: 0.42879593\n" - ] - }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXkAAAD6CAYAAABEUDf/AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3deXxU1f3/8dcniWKLVEEzgbKIWytRkSWgBNCgKJafBbW14lJtQaCodf1ZQani169rxaUqUlBcqRY3QAURMKgQQAOyxwWXliCGYRFUVAz3fP+4N3aIk5BkZjKTyfv5eOSRyb135n5yM/nk5NxzPsecc4iISHrKSHYAIiKSOEryIiJpTEleRCSNKcmLiKQxJXkRkTSmJC8iksZiTvJm1tbMCs2sxMxWm9nlwfYxZrbezJYFH/1jD1dERGrDYh0nb2atgFbOuaVm1gxYApwO/A74yjl3V01f68ADD3Tt27ePKR4RkcZmyZIlm5xz2dH2ZcX64s65DcCG4PGXZlYCtK7La7Vv357i4uJYQxIRaVTM7N9V7Ytrn7yZtQc6A4uDTZea2Qozm2Rmzat4zjAzKzaz4nA4HM9wREQavbgleTPbF3geuMI5tx14CDgU6ITf0h8b7XnOuQnOuTznXF52dtT/NkREpI7ikuTNbC/8BD/ZOfcCgHOuzDm3yznnAROB7vE4l4iI1Fw8RtcY8AhQ4py7O2J7q4jDzgBWxXouERGpnZhvvAI9gd8DK81sWbDtOuAcM+sEOOBTYHgcziUiIrUQj9E18wGLsmtGrK8tIiKx0YxXEZE0piQvIpJEnudRVlZGohZwUpIXEUkSz/Po06cPbdq0oaCgAM/z4n4OJXkRkSQJh8MUFRVRXl5OUVERiZgQqiQvIpIkoVCIHj16kpl5CPn5+YRCobifIx5DKEVEpA4++MAwKyQnx+OllzLwpx3Fl1ryIiL1bOdOuOUWOOYYWLHCuPnmTJo1i3+CByV5EZG4qOkomcWLIS8PRo+GAQOgpAQGD4YENOIBJXkRkZjVZJTMV1/BFVdAjx6wZQtMnQpTpkDLlomNTUleRCRGexolM3MmHHkk3HcfjBgBa9bAwIH1E5uSvIhIjEKhEPn5+WRlZe02SiYchvPPh/79oWlTmD8fHnwQfvaz+otNo2tERGJkZhQWFhIuKyNkBg6efAquvBK2b4cbb4RRo6BJk/qPTS15EZEYVNxwNefIGTSIT9v0pt8BxVxwARx+OLz7LowZk5wED0ryIiJ1FnnD9YReBYx9qxtH7VrGwi+O4P5btjN/vt8Xn0zqrhERqaP/3nDN5a1F9/AWeZxmLzOu22O0HfVs9CLs9UwteRGROmrWLESrVo8CS9hrr0N4+p8e0z/rRttFzyZu4HstxWP5v7ZmVmhmJWa22swuD7a3MLPZZvZh8Ll57OGKiKSGwkI45hhj3brzGTToezZsaM6gczKwljkpk+AhPi35cuBq51wH4DjgEjPLBUYCc51zhwNzg69FRBq0rVth6FA48UTwPJg9G55++icccEDqJPZIMSd559wG59zS4PGXQAnQGhgIPB4c9jhweqznEhFJFufgueegQwd49FG45hpYuRL69k12ZNWL641XM2sPdAYWAznOuQ3g/yEws6g1NM1sGDAMoF27dvEMR0QkLtavh0sugWnToHNnmDEDunRJdlQ1E7cbr2a2L/A8cIVzbntNn+ecm+Ccy3PO5WVnZ8crHBGRmHkePPQQ5ObCrFlw553w9tsNJ8FDnFryZrYXfoKf7Jx7IdhcZmatglZ8K2BjPM4lIlIfSkr8vvcFC+Ckk+Af/4BDD012VLUXj9E1BjwClDjn7o7YNR24MHh8ITAt1nOJiCTazp1w883QqZNfSOzRR/2bqw0xwUN8WvI9gd8DK81sWbDtOuB2YIqZDQH+A5wVh3OJiCTMwoV+6331ahg0CO69F3Jykh1VbGJO8s65+VQ9r+ukWF9fRCSRPM/jk082ce+92Tz4oNG6Nbz0Epx2WrIjiw/NeBWRtOWVl1O2ahUuyiIe4Cf4jh1Hcdhh3/LAA46LL3asWZM+CR6U5EUkTXnl5fQ54ADaHH00Bc2b45WX77Z/40Y488xvWb36DuBLMunFX6//nGbNkhNvoijJi0haCq9ZQ9H27ZQDRdu3E16zBvAnNT3+OHTo4Jg+PQu4AehCPgv9WvBpRkleRNJSKDubfPwbj/nB1x9/DKecAn/4Axx22PdkZHQFbiaLnUw59lisod9ljUJJXkTSkrVsSWHv3pRmZjKnVwFjn2rJUUfB4sUwbhwUFe1Fz54t/CX7evQgp6gopQqLxYvqyYtIejIjY948Pnt9K/2vbcHSvxi//rWf4Nu0AQiW7AuHCYVCWBomeFBLXkTS1I4dcO2oDLqdegDr1xtTpvi1Z/wE78vIyCAnJydtEzyoJS8iaej112HYMPjoIxgyBP52h0fz8jAQIiWWa6pHasmLSNrYsgUGD/ZrzZj5yf7hCR7Nz+zjN+ELCvyqY42IkryINHjOwZQpfq33J56AkSNhxQro0wcIh6GoCMrL/c/hcLLDrVdK8iLSoK1bBwMHwtlnQ9u2UFwMt90GP/lJcEAoBPn5kJXlfw5FXdoibalPXkQaHM/zKCsL8/xz2YwaBbs8Y+xY47LL/Fy+GzN/QdZw2E/waXyTNRq15EWk4fA8vA0b6N79D/z85x/x58syOO7ruaw66hyuusL7cYKvkJHhl5NsZAke1JIXkVTgeXtuaXse351wCtcvOJ4l7mHgSzK4gCd5kpbvZvnPT8MZq7FSS15Eksvz/Dukexj9UjTjCzrP/ztj3Q1k8yyZmUfTa7/p5GRmNsq+9pqKS5I3s0lmttHMVkVsG2Nm681sWfDRPx7nEpE0s4fRL9u3w6WXOHoNaM7XTVrwSsav+bz3P1hfupR5mzdj69fDvHmNsiumJuLVkn8MODXK9nucc52CjxlxOpeIpJNqRr+89BLk5jrGjXNcxv2szvsD/UsnkPHGG+S0bIllZjbavvaaikufvHPuTTNrH4/XEpFGJsrol88/h8sv98e+H3VEOc9nFnDsriJYnOXfRFVSr7FE98lfamYrgu6c5tEOMLNhZlZsZsXhRjZJQUQCwegXhzFpkj+paepUf0HtJcuyOLZnVqMd5x6rRCb5h4BDgU7ABmBstIOccxOcc3nOubzs7OwEhiMiqWztWujb1681c/TRsHw5jB4NezcJWvqlpep7r4OEJXnnXJlzbpdzzgMmAt0TdS4RabjKy+HOO/3EXlwM48f7ufyIIyIOasTj3GOVsHHyZtbKObch+PIMYFV1x4tI47NkCVx0ESxbBqefDg88AK1bJzuq9BKXJG9mTwMFwIFmVgrcCBSYWSfAAZ8Cw+NxLhFp+HbsgBtvhLvv9rvYn38ezjwz2VGlp3iNrjknyuZH4vHaIpJeZs+G4cPhk09g6FC/q2b//ZMdVfrSjFcRqRebN/sLaJ9yij9QZt48mDBBCT7RlORFJKGcg2ee8YdFTp4M113n13o/4YRkR9Y4qE
CZiCTMf/4DI0bAjBnQrRvMmQMdOyY7qsZFLXkRibtdu+D++yE31++WueceWLhQCT4Z1JIXkbhatcofFrl4MfTrB+PHebRvGoaMxreIdipQS15EfuCvuFSGc67Wz/3uO7jhBujSxZ+9+tRTMPMVj/Z/bLyLaKcCJXkRAfwE36dPH9q0aUNBQQFeLRLy/PnQqZNfa+bss6GkBM47D2xT415EOxUoyYsIAOFwmKKiIsrLyykqKqImBQO3bfNvrPbuDd98A6++Ck8+CT+UoWrki2inAiV5EQEgFAqRn59PVlYW+fn5hPaQkKdN82+sTpgAV17p98X361fpIFNxsWTTjVcRAcDMKCwsJBwOEwqFsCoS8oYNcNll8Nxz/miZqVP94ZFVqiguJkmhlryI/CAjI4OcnJyoCd45ePhhf1LTSy/Brbf6VSOrTfCSdGrJi8geffABDBsGb7zhz1SdMAF+8YtkRyU1oZa8iFTp++/httv8bplly/zk/vrrSvANiVryIo2V5+22rmpl77zjT2pasQJ+8xt/BmurVkmIU2KilrxIY+R50Cf6JKWvv4arr4bjjoNNm+DFF/2brErwDVNcknywUPdGM1sVsa2Fmc02sw+Dz1EX8haRJAhHn6Q0axYcdZS/mMewYbBmjb9ikzRc8WrJPwacWmnbSGCuc+5wYG7wtYikgkqTlDZlhLjgAjj1VGjSBN58Ex56CPbbL9mBSqzikuSdc28CWyptHgg8Hjx+HFB7QCRZPA/KyvzykGVl/rbCQty6UiYPnUeHXOPpp2H0aP8Ga+/eyQ1X4ieRffI5FQt5B581n1kkGSr631u3hgMO+KEf/t//hv5/zOH83xuHHgpLl/q1Z/bZJ9kBSzwlfXSNmQ0DhgG0a9cuydGIpKGK/vddu2DbNnaRwf3z8xh9FGBw331wySWQmZnsQCUREtmSLzOzVgDB543RDnLOTXDO5Tnn8rJ/qGokInFT0f+emcmKffPpwSKu9MZy/AnG6tV+iYKoCb6ii6cOZYcldSQyyU8HLgweXwhMS+C5RKQqZnw7s5DRf/6Crt/O59MDuvLPyY5XXjEOOqiK51QzxFIalngNoXwaWAj80sxKzWwIcDtwspl9CJwcfC0i9SVoib8xz3FM5wxuuXdfzjvPKHk/g3POteoLQlYxxFIanrj0yTvnzqli10nxeH0RqSXP44vev+bahaczwQ3l4IMdr71mnHxyDZ9f0cVTVKQ68A1c0m+8ikgdVVOW4IXHtnNp0UTKyOFqu5ub5pxH00NqUe63og58NWUPpGFQWQORhqiKPvPPSj3O/H/f8psh+xNq+jWLM3tyV+9pND24Di3xijrwSvANmpK8SENUqc/cKwszYbxHh/Y7mDnDcfvB43knfDB566dpRaZGTklepCGKKEvwfqez6TMoxPARGXT13mElR3Ptuj+z1/bNaomLkrxIg2TGzlmF3HLVJo5Z8QQrVsAjDzvm9hrDYVn/rv3NUo2JT1tK8iIN0OLF0DXPGH3nfgzY+Rwlu37J4At3YfPqsGi2xsSnNSV5kQbkq6/giiugRw/YGi5nGgOYwtm0/PJDeO+9ut0s1Zj4tKYkL9JAzJwJRx7puO8+GPEnx5r3Mxmw35v+zv32g9zcur1wpbLDGhOfXpTkRVJcOAznnQf9+0PTTf9mfuYJPLi6gJ/9DH/pppUrYcsWvxVfFxVj4mvbzSMNgpK8SIpyDp58Ejp0gGefhRuv/op3vzuSnrve/G+3SlaWv5RTXRN8BY2JT1tK8iIp6JNPoF8/uOACOPxwePddGPO3pjTpmaduFakVlTUQSSHl5fD3v8Nf/+o3ru+/H0aMqCgFrFIDUntK8iKpwPNYPm8rF13bguJi47TTYNw4aNu20nEV3SoiNaTuGpEk++Zrj1Ht/0nXk/bjP8u38sw/PaZPj5LgRepASV4kiQoLoePRHrevO58LeIIS7wjOPjGsnhiJGyV5kSTYuhWGDoUTTwTPMpl99FVMyhpOi54ddENV4irhffJm9inwJbALKHfO5SX6nCKpyu3yeP7R7Vw6ej82bTL+8he48Ubjp/vcBeFrdUNV4q6+brz2cc5tqqdziSSM53mEw2FCoRBmVu3CHZWtX+dxSecFTNvcm877fsCMRYfRJa/in2ndUJXEUHeNSA15nkefPn1o06YNBQUFeOXlNSrs5Xnw0EPQIRdmbc7jTq7h7W860qWtasRI4tVHknfAa2a2xMyGVd5pZsPMrNjMisMqjCQpLBwOU1RURHl5OUVFRYTfe2+Phb1KSuD44+Hii6H7scaqboO5Jutesnoeq753qRf1keR7Oue6AL8CLjGz4yN3OucmOOfynHN52dnZ9RCOSN2EQiHy8/PJysoiPz+fUG5ulYW9du6Em2+GTp1gzRp49FGYPds4dNFk1YiRepXwPnnn3GfB541m9iLQHXgz0ecViTczo7CwcPc++SgzUBcu9EfOrF4NgwbBvfdGdLeb+t6lfiW0JW9mTc2sWcVj4BRgVSLPKZJIGRkZ5OTk+Ane3/BDYa8vv4TLLoOePWHbNnjpJXj6aeV0Sa5Et+RzgBeDX4gs4J/OuVcTfE6RevfKK36NmdJSuOQSuPVWaNYs2VGJJDjJO+c+Bo5J5DlEkmnjRrj8cnjmGX/NjgUL/FWbRFKFhlCK1IFz8Pjjfq33F16Am27yywErwUuqURVKkVr66CP4059gzhy//33iRD/Zi6QiteRFaqi8HO66C44+GhYv9ksBv/mmErykNrXkRWrg3Xfhootg6VIYMAAefNCf6CqS6tSSF6nGjh1w7bXQrRusX++vtTp1qhK8NBxqyYtUYe5cGD7c74MfMgT+9jdo3jzZUYnUjlryIpVs2QKDB0Pfvv4k1tdfh4cfVoKXhklJXiTgHEyZ4t9IfeIJGDkSVqzwC02KNFTqrhEB1q3zZ6q+9BJ07QqzZvnFxUQaOrXkpXHwPCgr85vrlTY/+KA/W3XOHBg7FhYtUoKX9KEkL+nN82DDBn9Rj0qLe6xeDb16waWX+jNVV62Cq67yKweLpAu9nSV9eZ7foR4s7OEB4QUL2G/dRm5/tOUPRcQefxx+/3uVd5f0pCQv6Ssc3i3B9wEW7DqWJrnfsmMHnHsu3HOPFmiS9KbuGklfodAPKzd9nHcib9mD7OItduwwJk/eyuTJSvCS/tSSl/QVrNw0/antXHzdfjjnMHuA/PwZnHPOzGRHJ1IvEt6SN7NTzex9M1trZiMTfT6RCp9/Dr8blMHAC/eneXOjqAg2bDibt96a+d+VnUTSXKKX/8sEHsRfxDsXOMfMchN5ThHnYNIkf1LTtGn+gtpLlkCPHpWW7hNpBBLdku8OrHXOfeyc2wk8AwxM8DmlEVu71i9HMGSIXxJ4+XIYPRr23jvZkYkkR6KTfGtgXcTXpcE2kbj6/nu44w4/sRcXw/jxMG8eHHFEsiMTSa5E33iN9n/xblMOzWwYMAygXbt2CQ5HUp7n+UMfQ6EaD1xfssSv9b5sGZx+OjzwALRWU0IESHxLvhRoG/F1G+CzyAOccxOcc3nOubzs7
OwEhyMprWLyUqWZqVXZsQOuuQa6d/dvsj7/PLz4ohK8SKREJ/l3gMPN7GAz2xsYBExP8DmloYqYvERRkf91FWbPhqOO8pfjGzIESkrgzDPrMVaRBiKhSd45Vw5cCswCSoApzrnViTynNGARk5fIz4cDD/xRUbHNm+EPf4BTTvEPmzcPJkyA/fdPWtQiKS3hk6GcczOAGYk+j6SBYPIS4bCf4E880W/R5+fjXi/kmSkZXH45bN0K110Hf/0r7LNPsoMWSW2a8SqpJSMDcnL8FnzQdfOfBesY0e97ZsxtQrdufkngjh2THahIw6DaNZKaQiF29ejF/RmXk+tWMW/h3txzDyxcqAQvUhtqyUtKWrXauGjn6yz2jH79HOPHG+3bJzsqkYZHLXlJKd9+CzfcAF26wNq1xlNPwcyZSvAidaWWvKSMt96CoUPh/ffh/PPh7rtBUydEYqOWvCTdtm0wYgQcf7zfkn/1VXjySSV4kXhQkpf4qWKx7OpMm+Yvoj1hAlx5pb/Oar9+CYxRpJFRkpf4qChJ0Lq1P5Fp165qD9+wAX77W7/WzIEHwqJFfvfMvvvWU7wijYSSvMRHOAwLFvjJfdEi6N07au0Z5+Dhh/1a7y+/DLfe6leN7NYtCTGLNAJK8hIfodDumfqdd35Ue+aDD/zG/tCh0KkTrFgBo0bBXnvVc6wijYiSvMSHGcyfDz16/Lf2TLBK9vffw223+ZOYli3z+99ffx1+8YskxyzSCGgIpcRPZqaf6CPqwb/zjl/rfcUK+M1v4P77oVWrKM+tQx15EdkzteQlvoLaM1/vMK66Co47DjZt8uu8P/dcNQm+FnXkRaTmlOSlalGGRHqeR1lZGa7SMMnI7bNm+bXe77kHhg11rJm3kdMHVjOsshZ15EWkdpTk5b8ik3qU1rXnefTp04c2bdpQUFCAF7S4K7a3bn0MLVu+xqmnQpMm8OY8j4dKCtgvt3X1LfTKdeSDvnwRiZ1VbpElU15enisuLk52GI1TRVIP6rfzzDPQrp3fus7KgtJSyoA2bdpQXl5OVlYWpaWl5OTk8PnnZbRufQ2eNxbYnyuv/I5bb92XfbaV+X8kIl6DnJyqz68+eZE6MbMlzrm8aPsS1pI3szFmtt7MlgUf/RN1LomDoMvEKy+nbMECf7X1Sq3rUChEfn4+WVlZ5OfnEwqF+PRT+OMfQ3jeE8DHdO06jLFjm/qLedSmhV5RR14JXiSuEtaSN7MxwFfOubtq+hy15JPIObwTTqDPW29RZEZ+794Uzp1LxubNu7WuPc8jHA5zwAEhHnjAuP56f9ctt3icdVaYVq1CWGSiVgtdJOGqa8lrCKX4zAj/618UtWtHeXk5RUVFhDdvJqdS90pGRgZlZTn8+tf+fKf+/WHcODjooAwgSldMRQtdRJIi0TdeLzWzFWY2ycyaRzvAzIaZWbGZFYc1qiKpQi1b/qg7JtK338Lo0dC1K3z6KfzzKY+XHynjoHapc19HRHYXU3eNmc0BWkbZdT2wCNgEOOBmoJVzbnB1r6fumuSr6I4JhXbvdnnjDRg2zC9NcOGFMPZvHgf8NuJGbWGh32oXkXqXsO4a51zfGgYwEXg5lnNJ/cjIyNiti+aLL+Avf4GJE+Hgg+G11+Dkk4GyKGPb1S0jknISObomcm7jGcCqRJ1LYhStDrzn8cKkL8jNdTzyCFx9NaxcGSR40Nh2kQYikTde7zSzTvjdNZ8CwxN4LqmryuPjCwv57DO4tNN8Xtx8PJ2afsj0hYeS171Se8DM76LRyBmRlJawlrxz7vfOuaOdcx2dcwOccxsSdS6JQURJAW/BQibc/RUdcmHm5m7czrW8/W1H8g6q4oa4xraLpDzdKWvsgm6X9zNzKWj6DsOv+Rld84yV3YZwbdbd7NWzu7piRBowJfl0VoM1V3d+b9xyciEdM1exMqMjjzwCc+cahy16yi9DMG+eWuoiDZiSfLqqosBYZAXJxYv9Me+j/5rBwIFGSYkxeHCQ09UVI5IWlOTTVaXyvV5Z2Q8VJHv3/hWXD/2aHj0cW7fCtGkwZQq0jDbjQUQaNCX5dFVpiGPYjKKiIsrL+7JgwT/4+8NNGdFyKmtWeQwYkOxgRSRRVLsmXVUa4mhhaN58BuHwyfyUNcyiJ73Cb8N3pUStOSMiaUEt+XSWkYEL5fDkU0ZurvHFF325+uqv2Nzzz/TKeluTmEQaAbXk09gnn8Dw4TB7NvToARMnGkceuS94szWJSaSRUEs+DZWXw913++usLlwIDzwA8+fDkUcGB2jkjEijoZZ8mlm2DC66CJYsgdNO82u9t22b7KhEJFnUkk9Blcez18Q338CoUZCXB+vW+Uu0Tp+uBC/S2CnJpxjP834Yz15QUIDneXt8TmEhdOwIt98OF1wAJSVw9tnqjRERJfmUEw6Hg/HswRJ81ayWtXUrDB0KJ57oT3CdMwcmTYIWLeoxYBFJaUryqSKoMxPKzq52CT7wS9E89xx06ACPPuov6rFyJZx0UhLiFpGUphuvqSCiprvl51M4dy7hzZt/tAQf+DXDLrnE72/v3BlmzIAuXZIUt4ikvJha8mZ2lpmtNjPPzPIq7RtlZmvN7H0z6xdbmGmuUp2ZjM2bycnJ2S3Bex489BDk5vrj3u+8E95+WwleRKoXa3fNKuBM4M3IjWaWCwwCjgROBcaZWWaM50pfe1hKr6QEjj8eLr4Yunf3u2auucY/XESkOrEu5F0C/KhLARgIPOOc+w74xMzWAt2BhbGcL21VsZTezp3+iJlbboGmTf3+9wsv1KgZEam5RLUFWwOLIr4uDbb9iJkNA4YBtGvXLkHhNAAVs1ADCxf6I2dWr4ZBg+Dee3fbLSJSI3vsrjGzOWa2KsrHwOqeFmVb1Jk9zrkJzrk851xednZ2TeNOW19+CZddBj17wrZt8NJL8PTTSvAiUjd7bMk75/rW4XVLgci5lm2Az+rwOo3KK6/AiBH/HUFz663QrFmyoxKRhixR4+SnA4PMrImZHQwcDrydoHM1eBs3wjnn+LVmmjWDBQvg/vs8mu2ofn1WEZE9iXUI5RlmVgr0AF4xs1kAzrnVwBRgDfAqcIlzbleswaYb5+Cxx/xJTS+8ADfdBO++Cz2O/fH6rCIidWG1KYKVaHl5ea64uDjZYdSLjz7ya73Pnev3v0+c6Cd7AMrK/ARfXu6PkywtVae8iFTJzJY45/Ki7VNZg3pWXg533QVHH+1PZho3Dt58MyLBwx7HzYuI1JSm0ySY53mEw2FCoRDLlhkXXQRLl8KAAfDgg36D/UeqGDcvIlJbasknUEXZ4NatD6ddu6fp1s2xfj08+yxMnVpFgq+g1ZtEJA7Ukk+gcFkZ8+c3wfPepbT0UM499xse+HsTmpeHgRDRpxOIiMSPWvIJsmWTx8jOS/G81wCPjh2v4Kkn9qb5mRo1IyL1R0k+zpyDKVOgQwfHk2X9uJbb+CSjM8tmjcQ2bdqt2iTVLAgiIhIPSvJxtG6df0P17LOh7UEZFHcZzu1ZN9C+V1csJ0ejZkSk3qlPPg4qar2PHAm7dsHYsXDZ
ZUZWxkQI37r7CBmNmhGReqQkH6PVq/1qkQsXwsknw/jxcMghFXszfjyJKSPKNhGRBFF3TR199x2MGQOdOzvef8/j8fu+YNarLiLBi4gkn5J8HRQV+eur3nQTnLX/HEq2tuSCK1pgfQo0YkZEUoqSfC1s3+6XAO7VC77+GmZM3srkLb8iRNgfVlNUBGvWqHKkiKQMJfkamj7dX0T7oYf8RT1Wr4ZfnbO/P0oG/JuoP/0pdOqkMfAikjKU5Pfg88/hd7+DgQOheXP/Buu998K+++In9nnz4LPPYPlyv3m/a5fGwItIylCSr4JzMGmSXx1y2jT43/+FJUvg2GMrHZiRAa1awVFH+TWDNQZeRFJIrIuGnGVmq83MM7O8iO3tzewbM1sWfIyPPdT6s3Yt9O0LQ4b4JYGXL4frr4e9967mSRWVI0tL/da9xsCLSAqIdZz8KuBM4JXqPaIAAAjPSURBVB9R9n3knOsU4+vXD8+DcJjvm4e4+x5jzBg/oY8f74+Bz6jpn0KNgReRFBNTknfOlQBYQ2u1BkmdUMjvl+nThyULvuWifZ5i2deHc8YZcP/90Lp1sgMVEYlNIvvkDzazd83sDTPrncDz1I63+/qpOz4p45q3BtB9VxFlXzfl+Ue+4IUXlOBFJD3ssSVvZnOAllF2Xe+cm1bF0zYA7Zxzm82sKzDVzI50zm2P8vrDgGEA7dq1q3nkNRXZajfzHweVIGfP/wnDT8zmE3c1w2wid/SYyv5/fDn+MYiIJMkeW/LOub7OuaOifFSV4HHOfeec2xw8XgJ8BPyiimMnOOfynHN52dnZdf0+oqvUasfzIBRic7dTudCe4BTvVbKaZDLvdY9/bBjA/vNf1g1TEUkrCSlQZmbZwBbn3C4zOwQ4HPg4EeeqVkSrnaIi3MYwzxTmcPna6WzNhOuucfz1BmOffQzQDVMRST+xDqE8w8xKgR7AK2Y2K9h1PLDCzJYDzwF/cs5tiS3UOoio3/6fLqdz2pAQ554L7dsbS5YYt9xq7LNPvUclIlJvzKVQnZW8vDxXXFwc19fc9b3HuDu/YtRtzXDOuOUW+POfITMzrqcREUkaM1vinMuLti+t68mvWgUXXZTB4sU/o98pjvH/G6Z93oHqdxeRRiMtyxp8+y3ccINfDnjtWnjqCY+Z3xTQPv/nKh4mIo1K2iX5t97yC0HefDMMGgQlJXDeKWFsoRbQFpHGJ22S/LZtMGIEHH+835J/9VV48knIzkYLaItIo5UWffLFxX4p4M8/hyuvhP/5n6AUcIWK4mFaQFtEGpm0SPKHHAJHHglTp0K3blUcpOJhItIIpUWSb9ECXnst2VGIiKSetOmTFxGRH1OSFxFJY0ryIiJpTEleRCSNKcmLiKQxJXkRkTSmJC8iksaU5EVE0lhK1ZM3szDw7xhe4kBgU5zCiSfFVTuKq3YUV+2lamx1jesg51zU9VNTKsnHysyKqyqcn0yKq3YUV+0ortpL1dgSEZe6a0RE0piSvIhIGku3JD8h2QFUQXHVjuKqHcVVe6kaW9zjSqs+eRER2V26teRFRCSCkryISBprUEnezM4ys9Vm5plZXqV9o8xsrZm9b2b9qnj+wWa22Mw+NLN/mdneCYrzX2a2LPj41MyWVXHcp2a2MjiuOBGxVDrfGDNbHxFb/yqOOzW4jmvNbGQ9xPU3M3vPzFaY2Ytmtn8Vx9XL9drT929mTYKf8drg/dQ+UbFEnLOtmRWaWUnwO3B5lGMKzGxbxM/3hkTHFZy32p+L+f4eXK8VZtalHmL6ZcR1WGZm283sikrH1Nv1MrNJZrbRzFZFbGthZrODfDTbzJpX8dwLg2M+NLMLa31y51yD+QA6AL8E5gF5EdtzgeVAE+Bg4CMgM8rzpwCDgsfjgRH1EPNY4IYq9n0KHFiP128M8P/3cExmcP0OAfYOrmtuguM6BcgKHt8B3JGs61WT7x+4GBgfPB4E/KsefnatgC7B42bAB1HiKgBerq/3U01/LkB/YCZgwHHA4nqOLxP4HH/CUFKuF3A80AVYFbHtTmBk8HhktPc90AL4OPjcPHjcvDbnblAteedciXPu/Si7BgLPOOe+c859AqwFukceYGYGnAg8F2x6HDg9kfEG5/wd8HQizxNn3YG1zrmPnXM7gWfwr2/COOdec86VB18uAtok8nx7UJPvfyD++wf899NJwc86YZxzG5xzS4PHXwIlQOtEnjOOBgJPON8iYH8za1WP5z8J+Mg5F8ts+pg4594EtlTaHPk+qiof9QNmO+e2OOe2ArOBU2tz7gaV5KvRGlgX8XUpP/4FOAD4IiKZRDsm3noDZc65D6vY74DXzGyJmQ1LcCwVLg3+ZZ5Uxb+HNbmWiTQYv9UXTX1cr5p8/z8cE7yftuG/v+pF0D3UGVgcZXcPM1tuZjPN7Mh6CmlPP5dkv6cGUXVDKxnXq0KOc24D+H/EgVCUY2K+dim3kLeZzQFaRtl1vXNuWlVPi7Kt8tjQmhxTYzWM8xyqb8X3dM59ZmYhYLaZvRf8xa+z6uICHgJuxv++b8bvShpc+SWiPDfmcbY1uV5mdj1QDkyu4mXifr2ihRplW0LfS7VhZvsCzwNXOOe2V9q9FL9L4qvgfstU4PB6CGtPP5dkXq+9gQHAqCi7k3W9aiPma5dySd4517cOTysF2kZ83Qb4rNIxm/D/TcwKWl/RjqmxPcVpZlnAmUDXal7js+DzRjN7Eb+rIKakVdPrZ2YTgZej7KrJtYx7XMENpdOAk1zQGRnlNeJ+vaKoyfdfcUxp8HPejx//Kx53ZrYXfoKf7Jx7ofL+yKTvnJthZuPM7EDnXEILcdXg55KQ91QN/QpY6pwrq7wjWdcrQpmZtXLObQi6rzZGOaYU/95BhTb49yRrLF26a6YDg4JRDwfj/zV+O/KAIHEUAr8NNl0IVPWfQTz0Bd5zzpVG22lmTc2sWcVj/JuPq6IdGy+V+kHPqOJ87wCHmz8SaW/8f3WnJziuU4FrgQHOuR1VHFNf16sm3/90/PcP+O+n16v6wxQvQZ//I0CJc+7uKo5pWXFvwMy64/9+b05wXDX5uUwHLghG2RwHbKvopqgHVf43nYzrVUnk+6iqfDQLOMXMmgfdq6cE22quPu4sx+sDPzGVAt8BZcCsiH3X44+KeB/4VcT2GcDPg8eH4Cf/tcCzQJMExvoY8KdK234OzIiIZXnwsRq/2yLR1+9JYCWwIniDtaocV/B1f/zRGx/VU1xr8fsdlwUf4yvHVZ/XK9r3D/wP/h8hgH2C98/a4P10SD1co174/6aviLhO/YE/VbzPgEuDa7Mc/wZ2fj3EFfXnUikuAx4MrudKIkbGJTi2n+In7f0itiXleuH/odkAfB/ksCH493HmAh8Gn1sEx+YBD0c8d3DwXlsL/LG251ZZAxGRNJYu3TUiIhKFkryISBpTkhcRSWNK8iIiaUxJXkQkjSnJi4ikMSV5EZE09n8dJUn9W3RDugAAAABJRU5ErkJggg==\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXkAAAD8CAYAAACSCdTiAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3deZzN5fvH8dc1Y18jOwmpDEoyWVqlkvaviqRSX2UIiVZL31LaqFQkW0obSYSQsiZEluxbFBrE2JdsM+f+/XGOn2mcMTPOOXNmzryfj8c85pzPdl/zmeNyz/25F3POISIikSkq3AGIiEjoKMmLiEQwJXkRkQimJC8iEsGU5EVEIpiSvIhIBAs4yZvZeWY208zWmNkqM3vSt72nmW01s6W+r1sDD1dERDLCAu0nb2ZlgbLOuSVmVhhYDPwHaA4ccs69HXiYIiJyNnIFegHn3HZgu+/1QTNbA5QP9LoiIhK4gGvy/7qYWSVgNlATeAp4BDgALAKeds7t9XNOHBAHULBgwTrVqlULWjwiIlnWunVw6BAUKgQXXxzQpRYvXrzLOVfS376gJXkzKwT8BLzmnBtrZqWBXYADeuFt0ml9pmvExsa6RYsWBSUeEZEsa8cOqFABEhMhVy6Ij4fSpc/6cma22DkX629fUHrXmFluYAzwpXNuLIBzbodzLsk55wGGAnWDUZaISLZXqhRceaU3wV95pfd9iATcJm9mBgwD1jjn+ibbXtbXXg/QFFgZaFkiIhHBDGbOhIQEb4I3C1lRASd54CrgIWCFmS31besO3G9ml+FtrtkEtA1CWSIikSEqKqAmmvQKRu+aOYC//4YmB3ptEREJjEa8iohEMCV5EZEIpiQvIhImmzfDs89CUlLoylCSFxHJZB4PDBgANWvChwMdK1aEriwleRGRTLR2LVx7LXTs6KhyxygqvXE5lartC1l5SvIiIpngxAl4/XWoVcuxcssmavW+g+UXtyB/3mh2/7M7ZOUGo5+8iIicweLF8OijsGx5ErWuf5Lf63/I74eMvk3e4Yn6ncgVFbpUrJq8iEiIHDkCzz8P9epB/InlXPhaPZZdO4BrNztWfWh0ueCBkCZ4UJIXEQmJn36CWrWgT98j1HiyG/tb1GFfri2MWFWNyaOiqVTjqlNz1ng83knLgjgr8ElK8iIiQXTgADz+ODRsCAdLTKfcq5ewvMibPHTpQ6ztuJb7v1qFxW+FWbO8c9Z4PHD99d5ZKRs29L4PIrXJi4gEyaRJ0K4dbN27m+rdn2Z1nk+pWqAq05tPp1HlRqcOTD5nTUICzJvnnXZ43jzv+yDOaaOavIhIgBIS4IEH4PbbHa7mCM7pEcP6fF/S/eruLG+3/N8JPqUQTzusmryIyFlyDr76Cjp1cuxjE1Vfas8Gm0LdknUZesc0Li19adoXCfG0w0ryIiJnIT7e2/Y+cXIi513bncNXvsPfSUa/296nfd0OREdFp/9iIZx2WM01IiIZ4PHAoEFQvTpMXfEbFV6sy18N3+KGPz2sHuB4olLzjCX4EAs4yZvZeWY208zWmNkqM3vSt724mU01s99934sFHq6ISPisX+/tCPN4p38o1vxZEltfwYmC2/h6RTUmjITz9jm4776g95AJRDBq8onA0865GKA+0MHMqgNdgenOuQuB6b73IiLZTmIi9Onj7fe+aO+PlHixJlvOe5vWtVuzpsMamvWfgeXK5W2kP9lDJosIxspQ24HtvtcHzWwNUB64C2joO+xTYBbwfKDliYgEhceTroedS5d6pyRYsjaBCnFPEV/8CyqccxHf3D6L6ypd5z0on/P2jJk3L+QLc2dUUNvkzawSUBtYAJQ+uZC373vW+alFJGdLxwCko0ehRw+oE+v4veBnFO4Ww44So/jftf9jWbtlpxI8nOohEx9/apBTFhG03jVmVggYA3R2zh2wdP6QZhYHxAFUrFgxWOGIiKQujQFIc+d6a+/rdm6kbJc4theawZXlrmTI7UOoUaqG/2tm0sLcGRWUmryZ5cab4L90zo31bd5hZmV9+8sCO/2d65wb4pyLdc7FlixZMhjhiIicWSoDkA4ehCc6Oq6+7jg7qvYmb4dqHMo9gw/XVeXnh39KPcFnYQHX5M1bZR8GrHHO9U22awLwMPCm7/v4QMsSEQkKPwOQpkyBtm0dWxIXUqLN3ewqs5X/rDU+mATlj2yCd3dnyZp6WoJRk78KeAhoZGZLfV+34k3uN5nZ78BNvvciIlmDr3ll9x6jVSu45a5DHGzQhajH6pG74FbGfh3Ft5vrU/5IaKYbyCzB6F0zB0itAf6GQK8vIhIKzsHo0dCxI+wu/j1Fuz3OXtvM4/FleWPkTorWuQpmzIBdu0Iy3UBm0bQGIhK5UukmuW0btG8P46ftoFjLznjKf0W5EjFMumMOV1VoAK8mOycbNtEkp2kNRCQy+ekm6Rx89BHEVHdM2v4x+Z+N4XDFsbzc8GV+a/sbV1W86lQvmWxac09JNXkRiUwpukluXLiHNt1KMHPZ7xR9JI7EYrNoUPEahtwxhGolqnnPSecAqexENXkRiUy+bpJJ0Xl4p+J71LyhMPOiXydXp0ugzG8MuX0Isx6Z9e8EH8IVmsJFNXkRiUxmrOg3k0cfSWJhwhIKd4jlYIGV3Fv9Xvo16UfZwmX/fXyIV2gKF9XkRSTiHDsGL70Eta88zIrKT2NtGlC0zD7GtxjP6GajT0/wEPIVmsJFNXkRydoy2E4+f753SoLVid9RoEt7juTeSocrOvDaDa9RJG+R1E8M8QpN4aKavIhkXRloJz90CDp3hgaNt/NnbHNoeSdVyp3DvKYT6X9LvzMn+JMirGcNKMmLSFbmr53cj6lToeYlHt6fM4Q8XWLwXDiB165/lcVfn0P9OndF1IPUjFKSF5GsK4128r17oXVraNxyLTtvbQh3tOXKKrVZ/vhyul/8GHnmzk/zP4hIpzZ5Ecm6ztBOPnYstH/iODsvepPoDq+RL39BPmg8jP9e9l/MzDtvQRZdyCMzKcmLSNaWYp72v//2zjcz5te55GsRhyuymmY1W/Deze9RulCyLo8R+iA1o9RcIyLZgnMwfDhUq7Wfb0+0h0evplSFQ0xqOYmR94z8d4I/KQIfpGaUavIikuX9+Se0bQtT//qWPK07Qv6/6Vy3M70a9aJQnkLhDi9LU5IXkSwrKQk++AC6vr6VEzc+AVd9S0zpWgy9YxxXlL8i3OFlC0ryIpIlrV4NrR/1sCBxMLniupIr73Fea/gmTzV4itzRucMdXrYRrDVePzaznWa2Mtm2nma2NcVqUSIiZ3T8OLzyCtS6cRWLL70Gbm9Pwwvrsqr9Sp6/+nkl+AwKVk1+OPAB8FmK7e86594OUhkiEuEWLoRHHjvK6nNfJyruTc7JX4R3G3/CQ2WbYMWy/2Rh4RCUmrxzbjawJxjXEpGc559/4JlnoF7z2axvdBlc14uWte5jbftVtOr8CXbeeTl61GogQt2FsqOZLfc15xTzd4CZxZnZIjNblJBDR6SJ5GQzZ0L1y/fxzv
o43CPXUa7iMaY8MIXPm35OyX9I17QGkrpQJvmBwAXAZcB24B1/BznnhjjnYp1zsSVLlgxhOCKSlezbB23iHI06jib+rhii6gzjmQbPsLrDSm6uerP3oAid/jczhax3jXNux8nXZjYUmBiqskQke5kwAeKe+YsdsR2g+XdcUvpyht01icvLXv7vAzVqNWAhS/JmVtY5t933timw8kzHi0jk27kTOnZKYvSfHxLVvDv58nl4tdHbPFn/SXJFpZKOUkxrIBkTlCRvZiOBhkAJM4sHXgIamtllgAM2AW2DUZaIZD/OwRdfQMdeyznYMA5uXcANlW9m8B0DqVyscrjDi2hBSfLOufv9bB4WjGuLSPa2eTO0efwIU4/3wlq8RbGjjv7LqnJ/j4lYLo3HDDVNUCYip/N4YMcObxX8LM/xeGDAAKh2y0ymXXQpXPMGrVYksf6DJFp+uwG79lp1icwESvIi8m8ZWHIvtXPWrvbQoNEeOk5tzdH7GlGxomPag1MZ/nc9zj3iO2fhQnWJzARK8iLyb+lccs/fOScS4bU5V1PzgVEsrBdDVO3PeP6qrqx5YgU3XHAjzJkDDRqoS2QmUoOYiPzbyb7pGVlRqVQpllzyMA/+cQ9rbu8PF77OZSWvYPjdP1KrTK1Tx0VHexO9ukRmGiV5Efm3DPZNP3IEXnrZw9v5q0PHe8mX13jzpvfoWLcj0VHRp5+gLpGZSkleRE6XzkQ8ezY89OxSttRqA40XcVOlWxl614ecf875mRCkpIeSvIhk2IED8HTXf/jo95ehyTsUy1uCQXeNoln1Zt5FtCXL0INXEcmQSZPggsZT+Sj3JXB1Hx6p9V82dllD8xrNleCzINXkRSRdEhKg3VO7GHv4KbjlcyoWuIhPm82kYaWG4Q5NzkA1eZGcJoMDnZyDESMcVf7zBWPLxhBVayTdrnyBdV2WKcFnA0ryIjlJBgc6xcfDDff+wQPfN+FQ44e49LyqLHv8N16/qRf5cuXLnJglIEryIjlJOgc6eTwwYGAiVVu9zcyYmuSt+gv9mnzAkg5zqFmqZiYHLYFQm7xITpKOgU6//w73dVnMbxXawHW/cWOFO/mk2QAqFKkQhoAlUEryIjnJGQY6JSbCm+8cpufsF0mKfY+iuUrx0T3fcE/M3eo1k40pyYvkNH4GOi1bBvc8P4WNMe2g7mYerNaW/ne9yTn5zglTkBIswVo05GPgdmCnc66mb1txYBRQCe+iIc2dc3uDUZ6IBMfRo9Dt1Z28v64LrsEIyuepxsiWP3PN+VeHOzQJkmA9eB0ONEmxrSsw3Tl3ITDd915Esog5cxyV7x7OeydisBqjeb5uTzY+u1QJPsIEa2Wo2WZWKcXmu/AuCQjwKTALeD4Y5YnI2Tt4ENq/sIEv9rWFejOoXvgqvnloKDElY8IdmoRAKLtQlj65kLfvu9/5Ss0szswWmdmiBC0gIBJSEyef4LyWb/JF4UvIU2kR7984iBVdZvtP8P4GTZ3NilESVmHvJ++cG+Kci3XOxZYsWTLc4YhkP+lIvLt3w21xv3LHd7Hsj+3GdeVv5c9n1tDpqrZEmZ804G/Q1NmsGCVhF8okv8PMygL4vu8MYVkiOVMaidc5+Oyrg5zXpjOTy9WncOndjGr6LbMeH0O5wuVSv66/QVNns2KUhF0ok/wE4GHf64eB8SEsSyRn8pd4fTX7bVsdDR6eyMO/1uDIpf24r0p74rutpvml/0n7uicHTSVfps/fNsnygtWFciTeh6wlzCweeAl4E/jazB4FtgDNglGWiCSTcgRriRK4htfTd2lFujX5hxM1xlLaajC61VyuqdQg/ddNbdBUBlaMkqwhWL1r7k9l1w3BuL6IpCJFMt6wYBd3HI5lbfuPsdyH6Vy1K71bvEye6DwZv7a/1aG0dF+2oxGvItldVBRJJUrT/e11vL22LZ47f+KiTWUZt/1CYl55XTXuHC7svWtEJDBLlh3n/Idepc+BWkSXW0afqwaz5o3FxHy/QAleVJMXya6OHfHw+Esz+eTwk3DxKuoXbs7YNu9TtnCZcIcmWYiSvEg2NH32Ppq904m9tb+ggKcEQ28ZT8u6d4Y7LMmClORFspHDh6HFy+OYmNQRam/jzgUV+WJ2PIWfrRfu0CSLUpu8SDbx1aRtlOp4DxMLNuXc/OcyY0E9xk/bSuHYq9RnXVKlmrxIFrd7j4c7Xx7CvALPYxWOE1flDT5o+TS5LVp91iVNSvIiWVi/EWt4ZnYcJ8rO4XxPIya1G0yNslVPHaA+65IGJXmRLGjz1mPc8tobrCnxOtHnFqbnZZ/w4p0Paxk+yTAleZEsxDnoMXgOvde0wVN6LbWi7mfSQ90pX7WGmmTkrOjBq0gWsWzdPs5r3443dlxD7vxHGHLNRJZO20r56rU1ta+cNdXkRcIsMdER9+43DN/xJK7UDhrlf4pxz79C4f2HTp9hUm3wkkGqyYuE0YxF8ZTq9B8++ac5hQ8VYOK865n+zFsUzldQU/tKUKgmLxIGR44m0azPQCYd7Q7FE2nx4+V8Nn8JuaM2n6qxpzbdr0gGKMmLZLKvZqyk9bg2HDl3PqV31GHC+C3UTdwIFn16jV1T+0qAQp7kzWwTcBBIAhKdc7GhLlMkK9pz4Ci39X6V+dG9iSpwDl1KDuKdVztgiUneJpmlS6GGetFIcGVWTf5659yuTCpLJMt579ufeO7nOE4UXc9F/7Ti+y7vUKX0uTBqxKlVnZTgJQTUXCMSQpv+3kuTvs+yruAwckVV4a1LfuSZu286dYDa3CXEMiPJO+BHM3PAYOfckOQ7zSwOiAOoWLFiJoQjEnrOOZ777Gv6rn4ST/5d1D3+HJP+9xIlihb494Fqc5cQy4wkf5VzbpuZlQKmmtla59zskzt9SX8IQGxsrMuEeERCasnGLdw+sD3bC08i/7E6DG38PQ/cUDvcYUkOFfIk75zb5vu+08y+BeoCs898lkj2k5iUxCMDP+DL7T0gL9xi7/LNmx0pkE+tohI+IR0MZWYFzazwyddAY2BlKMsUOSseD+zY4Z085kzbUjFl6TJKdG3Al7s7c87+a5nadBWTX+ysBC9hF+oRr6WBOWa2DPgVmOScmxLiMkUyxuOB66+HChVOzRHjb5sfh48doXGfbtzybR3222YeLjiShPcmcWPs+Zn6I4ikJqTVDOfcH0CtUJYhErCEhNPniAGYOxeSkrzf/cwbM/yn6Tw+uS1HC2ykfEJrJj75FpddXDwMP4BI6jR3jYi/OWJKlIBChbz7CxXyvvf5e/9u6vR6hP/OupHjx43nS8/grwHDlOAlS1KDoYi/OWJ27fKumg3e7wkJuNKlee27EfSc35mkXPuotrs7k7u9QOUK+cMbv8gZKMmLwOn91U/W7n/+GRITWfPgPdx2ZWH+jP6B3Pvq8tbVQ+nS8tLwxSuSTkryIv6YwVdfkVixAk9eUYmB9X7DJUZT/0A/vuvVnhLnRoc7QpF0UZIXScnjgYQEZh/cyj1tK7Gr5B8U+P0mhrb4iJa3a1S2ZC9K8iLJeTwcvuFaWuX5m7H1N0G+k
ty+93NGDmxJocLqpyDZj5K8SDJfz/yG1rXWc7hYAsUW38OoNr256Y4Lwh2WyFlTkhcBdh5K4J4hTzHn4BeQdBGPDL+NwRW2kOf2KuEOTSQgSvKSoznneH/W5zw3/SlORB2g3B8vMuHp56nzwkFN/ysRQUlecqzfd23kzqHtWHt8GlE7ruS5i4fw+qc1iI4GKJDW6SLZgpK85Dgnkk7QfWJf+i7uiScxNxdt+ZBJL7el6gV6sCqRR0lecpRftizknuFt2O6WkeuPpvS+tj9Pv1ZerTISsZTkJUc4dPwQ7b7+H19u6AcHy1B391i+7dOUcuXCHZlIaCnJS8Qbt2oyD49+nAO2hfyrHmfgvW/QqnlRzHlgh9ZXlcimRkiJWDsO7eDGQffT9JvbOJBQiFu2zSF+8Ic8fJ8vwadjvniR7C7kSd7MmpjZOjPbYGZdQ12eiHOOD3/5mPPfimH61rEUXdKTCbcvYfLgqyh+cjbg1OaQF4kwIW2uMbNoYABwExAPLDSzCc651aEsV3Ku9bvX0+yztiw/MAv+uoYHv7uWgRf/QqGbc//7wJOzTM6bd2oOeZEIFOo2+brABt8KUZjZV8BdgJK8BI/Hw/G/t/Ly8k9585dX8RzPR5mlAxk99UuuTnoN9uU6fWUnf3PIi0SgUCf58sBfyd7HA/WSH2BmcUAcQMWKmuFPMsjj4Zc763BfxT/5q/R+bG0znrzofd4cW4Z8TUbCvFyp19RTziEvEoFCneT9VY/cv944NwQYAhAbG+v8HC/i18FjB3lyVBc+iV0GB8pzwYiXGDOwFbUanes9QDV1kZAn+XjgvGTvKwDbQlym5ADj107gv990YG/iVqJ/fZyeM3PT9YrvyHV951MHqaYuEvIkvxC40MwqA1uBFkDLEJcpEWz7we20/qYTU7Z8AztqUjt+NF+/V5eqRVVjF/EnpF0onXOJQEfgB2AN8LVzblUoy5RsyuOBHTvA+W+x8zgPgxYOoUrfGKb88R1557zGh7WWsGhcfape5KuxK8GLnCbkI16dc5OByaEuR7Ixj29g0snujDNneptafNbuWssDo+JYsutn+PN6Gh4azOefXEiFCmGMWSSb0IhXCb9UBiYdSzzGi9NfoeYHtVjy10oKTR/GiJunM2O0ErxIemnuGgk/PwOT5m6Zy4Nft2HT4TWwsgV3F3yPwWNLU6LEGa7jW4BbbfMipyjJS/glG5i0v0henh7fnmHLBsG+ipw7fxKf/e9Wbr01jWuk0eQjklMpyUvWEBXFt3vm0ebjjuw++jcs6EKbqq/w9g+FKFIkHef7a/JR90kRJXkJv60HttJ2fEcm/TEO/q5FxaXj+KLPFVxzTQYuorloRPxSkpew8TgPgxYN4pkpXTly/AQ2qzfPXtOFl2fmJl++DF5Mc9GI+KUkL2Gxaucq/js2joU75sHGG4nZOIgvP7iA2rUDuKhGuIqcRk+mJFMdTTzK/2a8SK2BtVm0aR25vvuU12N+ZNmsABO8iPilmrxkmtmbZ/PfsXH8cWAdLHuQevv68umXJbn44nBHJhK5VJOXwKQxHQHA3iN7eWx8G64bfh1/bjlOvtFTGFB3APOmllCCFwkxJXlJH3/J3HPmdVKdc4xeNZoL349h2JJPYO6z3Lh+OesKfkT7/51LVKPTzxGR4FKSl7SllszPsE7qX/v/4vYRd9L8m+bs2VSeoqMW8sXDffhh2GEqLhmntVVFMomSvKQttWR+sm96rlOrLyV5kui/oD/V+lfn+7Uz4Id3aLZ/Aetn1+aBB8BKn36OiISOHrxK2lIbaJSib/qKnSt5dHwbFm5fABtuptTCgQztU5k770x2LfVnF8lUSvKStjMl5qgojhQvQq8ZPegz9y3ckWIw6UvirryfPguMokX9XE/92UUyTciSvJn1BNoAJxtdu/vmlpfsKJXEPOPPGbSZ0JY/9m2A3x6h0vq3+eTDc2nYMPNDFJHThbom/65z7u0QlyFhsPuf3Tw79Vk+WfoJ0fsvwCZM45m7b6DnCChQINzRichJaq6RDHHO8dXKr3hi8pPsObIH5nSl2q4XGf5VfmJjwx2diKQU6t41Hc1suZl9bGbF/B1gZnFmtsjMFiWoO12WtmnfJm4bcRstx7Zk36ZKRH+0hF4N32DJr0rwIlmVuTOMVEzzZLNpQBk/u3oA84FdgAN6AWWdc63PdL3Y2Fi3aNGis45HQiPRk0j/Bf3pMeMFjh8zkn58nfpRHfh4WDQxMeGOTkTMbLFzzm9VK6DmGufcjekMYCgwMZCyJJOkWEJv6d9LeWzCYyzevpjojbeRZ+qH9O5ekfbtITo63MGKSFpC2bumrHNuu+9tU2BlqMqSIEm2hN4/V9fj5W5X8s78vkQdLQETRtGofDOG/GJUqhTuQEUkvUL54LWPmV2Gt7lmE9A2hGVJMPhGtk6tmEi7S+fxxy9ziVr6GAXm9+H9N4vRqpXGLolkNyFL8s65h0J1bQmNXYWieOrR4nxedid591SC4R9zd2xD+v8GZfw9eRGRLE9z1wjOOT5f9jnVBsTwZZk92M89OGfMKsb2bcjo0b4En44phUUk61GSz+H+2PsHN39xM63GteLwlovwDPyN1pVeZc2K/DRt6jsojSmFRSTr0mCoHCrRk8i7v7zLS7NeIvF4Lvj+A8rsepyhX0ZxY8o+U/5modTcMyLZgmryOdDibYu5YugVPDftOdyGxiS+v5qnrunAyhV+Ejz4nVJYRLIH1eRzkMPHD/PizBd5b8F75D1RGsaMoUpUUz7+wahX7wwnanpgkWxLST6HmLJhCu0mtmPz/s3kW9mWE1PepOcz59CtG+TJk44LaHpgkWxJST7C7Ty8ky4/dGHEihEUOlINRv7MpWWuZtgvULNmuKMTkVBTko9QzjmGLx3O0z8+zYGjh8gzryeJ87rS95W8dOqkKQlEcgol+Qi0Yc8G2k5sy4w/Z1Bk39UkfTGE6y6NYegyqFIl3NGJSGZSko8gJ5JO8Pa8t3ll9iuQmIdcUwbBmjZ89E4UrVvrealITqQkHyEWxC+gzXdtWLFzBedsu4d9I/vxnxvKMWAMlCsX7uhEJFyU5LO5g8cO8sKMF+j/a38KuXJEjR5Hnl13MfpjuOce1d5Fcjol+Wxs4vqJtJ/UnvgD8RRd3559Y17n4RZF6NsXihfHO/3ATvVtF8nJNOI1G/r70N/c98193DHyDg7vKYIbNpeicz9gyvgiDB+eLMFrvhmRHE9JPhvxOA9DFw8lZkAM364eR9HFvdjzxhI6NW3AypVw883JDvY334yI5DgBJXkza2Zmq8zMY2axKfZ1M7MNZrbOzG5O7RqSPut2reP6T68nbmIceffW4kS/5ZT9/QXmzs7D++9DoUIpTtB8MyJC4G3yK4G7gcHJN5pZdaAFUAMoB0wzs4ucc0kBlpczJFtn9bjnBL3n9ObVn18ltytA4ZkfsWtua17oavToAfnypXINzTcjIgS+kPcaADs9gdwFfOWcOwb8aWYbgLrAL4GUlyMkW2f1lyY1aXPLCVYlrKL83vvYOuw96lxchmGLoFat
dFxL882I5HihapMvD/yV7H28b9tpzCzOzBaZ2aKESGk3DmQVpYQEDiyaS4fGiVxVZynbd+2nwLiJ7B78FW+9VIb589OZ4EVESEeSN7NpZrbSz9ddZzrNzza/Gc85N8Q5F+uciy1ZsmR64866AuzVMm7PPKp3imbgFVB+6f3seW0NVxS9jRUr4JlnvE3sIiLplWbKcM75W0YiLfHAecneVwC2ncV1sp+zXEVp28FtPPH9E4xdM5ayBS4lz/AhHNhVl8H9jcce87a8iIhkVKhSxwSghZnlNbPKwIXAryEqK2vJYK8Wj/MwaNEgYgbEMGndZCqsfYPtPRdxU/V6rFplxMUpwYvI2Qvoj38zawr0B0oCk8xsqXPuZufcKjP7GlgNJAIdckzPmgz0almdsJq47+KY+9dcKrtG/DVwMEc9VRn5Jdx3nzrEiEjgzJ3Nw8EQiY2NdSRXKCYAAA0ZSURBVIsWLQp3GCF3LPEYb8x5g9d/fp380YUpPLcvWye34oEHjPfegxIlwh2hiGQnZrbYORfrb58e42Wynzf/TNzEONbuWku14w+w9v2+FClWikmT4NZbwx2diEQatfYG0xm6Tu47uo+237Xl2uHXsu/gUUpP/Z61r39B+4dLsWqVEryIhIaSfLCk0nXSOceY1WOoPqA6H/32ETUPPM3fL66kaEITZs+GAQOgSJHwhi4ikUvNNcHip+tkfP4TdJjcgQnrJlA5X22Kjf6ONWvq0O05ePHFM0xJICISJErywXKy6+S8eSRd2YCBm76m+4wenPAkcsn2t1gxtDOXXZqLqQuhdu1wBysiOYWaa4LB44GdO2HGDFYsm8rVD53giSmdqBhVnzxDV7J++DO88Voufv1VCV5EMpdq8oHytcUfXTCXV+8vR+8q2ymS+xwuWf8FK0a05OqrjY9+gIsvDnegIpITKckHKiGBWVvnEBfn4fdz/+IKdz+r3unHn0dLMGAAtGunEasiEj45N/0EMlOkz54je3hsQXeuf8jDUctDzDdDWPjSCK67ogSrVkH79krwIhJeOTMFBThTpHOOUStHETMghuFLP+UanuPvj/awc/tjfPEFTJoEFSuGJnQRkYzImc01ZzlTJMCW/VtoP6k9k36fREzRWIpO+4Gff76MFi3g/fe1yp6IZC05syZ/FuufJnmSeH/++1QfUJ1Zm2Zx/bF3WfvMfA5vvIzx42HkSCV4Ecl6cmZNPoPrny77exltvmvDwm0LqVf8FrZ/NJCZy8+nbVvo3RuKFs2kuEVEMihn1uTh1PqnZ0jwR04coeu0rtQZUodN+zbTaM9IFnSaRO7D5zNzJgwapAQvIllbzqzJp8O0P6bRbmI7Nu7dyI3ntmZl37eYtbk4zz4LPXtCgQLhjlBEJG0B1eTNrJmZrTIzj5nFJtteycyOmNlS39egwEPNHLv/2c0j4x7hps9vwnmiaLh5BtOeGEbJQsVZsAD69FGCF5HsI9Ca/ErgbmCwn30bnXOXBXj90PN4ICEBV7IkI1aOpPMPndl3dB93ntOdn197gfj9+enVC557DvLk8sCO9LXji4hkBQEleefcGgDLrgnP11/+z9Vzefz+Ivxw7l5ql6xHtflDmTD2Eho0gGHDICbm1LHMm+ftkTNzpkY6iUiWF8osVdnMfjOzn8zsmhCWc9YSd2znnaQ51IxLYm6hvTTL9Rbru87ltx8uoV8/+PlnX4IH/33rRUSyuDRr8mY2DSjjZ1cP59z4VE7bDlR0zu02szrAODOr4Zw74Of6cUAcQMVMHCa6ZPsS2nzXhiU3ebh+fSEOzRrD6G2NadwYBg+GSpVSnJBsKuH09q0XEQm3NJO8c+7GjF7UOXcMOOZ7vdjMNgIXAaet0u2cGwIMAe9C3hktK6MOHz/MS7Ne4t3571KqQCla5hrFN2PupWABY/hwaNUqleb2DPatFxHJCkLShdLMSgJ7nHNJZlYFuBD4IxRlZcQPG36g3aR2bNq3ibvPj2P9h28yYmEx7r0X+veHMv7+XknuZN96EZFsItAulE3NLB5oAEwysx98u64FlpvZMuAboJ1zbk9goZ69hMMJPDj2QZp82YQ8UXlpeewnxj82mF1/FWPsWBg9Oh0JXkQkGwq0d823wLd+to8BxgRy7WBwzvHZss946senOHjsII9UepE5b3ZnxNq8PPoovPUWFCsW7ihFREInYke8btyzkbYT2zL9z+nUK3sllVYMZfiL1alcGaZOhRsz/KRBRCT7ibgkfyLpBH1/6UvPn3qSJzoP7c//kPH/a8vCbVE89RS88goULBjuKEVEMkdEJfmFWxfS5rs2LNuxjNuqNCXPtP582K08NWo4xgzZTb0mxdUrRkRylIgYsnno+CG6TOlC/WH1SfgngafKjWXBU2OZOLI8PV/ysKTYjdS7s8xZrQIlIpKdRUSSX75jOf1+7ccDF7fl0tmr6RvXlCpVYMkSeOnxBPLMn62RqiKSI0VEc0398lfSq8QGesdV5sQJ6NsXOnWC6GjAaaSqiORcEZHkZ8yAHh0q06gRDB0KVaok26mRqiKSg0VEkr/hBpgyBRo3TiWHa6SqiORQEZHkzeDmm8MdhYhI1hMRD15FRMQ/JXkRkQimJC8iEsGU5EVEIpiSvIhIBFOSFxGJYEryIiIRLNCVod4ys7VmttzMvjWzc5Lt62ZmG8xsnZmpF7uISBgEWpOfCtR0zl0KrAe6AZhZdaAFUANoAnxoZtEBliUiIhkUUJJ3zv3onEv0vZ0PVPC9vgv4yjl3zDn3J7ABqBtIWSIiknHBnNagNTDK97o83qR/Urxv22nMLA6I8709ZGbrAoihBLArgPNDRXFljOLKGMWVMZEY1/mp7UgzyZvZNKCMn109nHPjfcf0ABKBL0+e5ud45+/6zrkhwJC04kgPM1vknIsNxrWCSXFljOLKGMWVMTktrjSTvHPujEtem9nDwO3ADc65k4k8Hjgv2WEVgG1nG6SIiJydQHvXNAGeB+50zv2TbNcEoIWZ5TWzysCFwK+BlCUiIhkXaJv8B0BeYKp5J3Kf75xr55xbZWZfA6vxNuN0cM4lBVhWegSl2ScEFFfGKK6MUVwZk6PislMtLCIiEmk04lVEJIIpyYuIRLBsleTNrJmZrTIzj5nFptiX5jQKZlbZzBaY2e9mNsrM8oQozlFmttT3tcnMlqZy3CYzW+E7blEoYklRXk8z25ostltTOa6J7z5uMLOumRBXqtNjpDgu5PcrrZ/d15lglG//AjOrFIo4/JR7npnNNLM1vn8DT/o5pqGZ7U/2+30xk2I74+/FvPr57tlyM7s8E2K6ONl9WGpmB8ysc4pjMuV+mdnHZrbTzFYm21bczKb6ctFUMyuWyrkP+4753deTMeOcc9nmC4gBLgZmAbHJtlcHluF9CFwZ2AhE+zn/a6CF7/Ug4PFMiPkd4MVU9m0CSmTi/esJPJPGMdG++1cFyOO7r9VDHFdjIJfvdW+gdzjuV3p+dqA9MMj3ugUwKpN+d2WBy32vC+OdRiRlbA2BiZn1eUrv7wW4Ffge7/iZ+sCCTI4vGvgbOD8
c9wu4FrgcWJlsWx+gq+91V3+feaA48IfvezHf62IZLT9b1eSdc2ucc/5GxKY5jYJ5u/80Ar7xbfoU+E8o4/WV2RwYGcpygqwusME594dz7jjwFd77GzIu9ekxMlt6fva78H52wPtZusH3ew4p59x259wS3+uDwBpSGUWeBd0FfOa85gPnmFnZTCz/BmCjc25zJpb5/5xzs4E9KTYn/xyllotuBqY65/Y45/binSusSUbLz1ZJ/gzKA38le+9vGoVzgX3JkkmqUy0E0TXADufc76nsd8CPZrbYN71DZujo+5P541T+REzPvQyl1nhrff6E+n6l52f//2N8n6X9eD9bmcbXRFQbWOBndwMzW2Zm35tZjUwKKa3fS7g/Uy1IvaIVjvsFUNo5tx28/4EDpfwcE5T7Fsy5a4LC0jGNgr/T/GxL2Tc03VMtpEc647yfM9fir3LObTOzUnjHGqz1/a9/1s4UFzAQ6IX35+6FtympdcpL+Dk34H626blfdvr0GCkF/X6lDNPPtpB+jjLKzAoBY4DOzrkDKXYvwdskccj3vGUc3oGIoZbW7yVs98z33O1OfDPkphCu+5VeQblvWS7JuzSmUUhFeqZR2IX3z8RcvhpYQFMtpBWnmeUC7gbqnOEa23zfd5rZt3ibCwJKWum9f2Y2FJjoZ1dIpqRIx/3yNz1GymsE/X6lkJ6f/eQx8b7fcVFO/1M8JMwsN94E/6VzbmzK/cmTvnNuspl9aGYlnHMhnYwrHb+XcE5zcguwxDm3I+WOcN0vnx1mVtY5t93XdLXTzzHxeJ8bnFQB7/PIDImU5po0p1HwJY6ZwL2+TQ8Dqf1lEAw3Amudc/H+dppZQTMrfPI13oePK/0dGywp2kGbplLeQuBC8/ZEyoP3T90JIY4rtekxkh+TGfcrPT/7BLyfHfB+lmak9p9SMPna/YcBa5xzfVM5pszJ5wNmVhfvv+/dIY4rPb+XCUArXy+b+sD+k00VmSDVv6bDcb+SSf45Si0X/QA0NrNivqbVxr5tGRPqJ8vB/MKbmOKBY8AO4Idk+3rg7RmxDrgl2fbJQDnf6yp4k/8GYDSQN4SxDgfapdhWDpicLJZlvq9VeJstQn3/PgdWAMt9H7KyKePyvb8Vb++NjZkU1wa8bY9LfV+DUsaVWffL388OvIL3PyCAfL7PzgbfZ6lKqO+Pr9yr8f6pvjzZfboVaHfycwZ09N2bZXgfYF+ZCXH5/b2kiMuAAb57uoJkPeNCHFsBvEm7aLJtmX6/8P4nsx044ctfj+J9jjMd+N33vbjv2Fjgo2TntvZ91jYA/z2b8jWtgYhIBIuU5hoREfFDSV5EJIIpyYuIRDAleRGRCKYkLyISwZTkRUQimJK8iEgE+z9Ig4J+qLqGCAAAAABJRU5ErkJggg==\n", "text/plain": [ "
" ] @@ -677,49 +573,41 @@ "name": "stdout", "output_type": "stream", "text": [ - "weight: 1.9990227 bias: 2.9115517\n" + "Parameter (name=fc.weight, value=[[2.0405223]]) \n", + "Parameter (name=fc.bias, value=[2.9574146])\n" ] } ], "source": [ - "from IPython import display\n", "\n", - "step_size = 200\n", - "batch_size = 16\n", + "from mindspore.train.callback import LossMonitor\n", "\n", - "for i in range(step_size):\n", - " data_x,data_y = get_data(batch_size)\n", - " grads = train_network(data_x,data_y) \n", - " optim(grads)\n", - " plot_model_and_datasets(net.weight.data, \n", - " net.bias.data, data_x, data_y)\n", - " display.clear_output(wait=True)\n", + "epoch = 1\n", + "imageshow_cb = ImageShowCallback(net, eval_data)\n", + "model.train(epoch, ds_train, callbacks=[imageshow_cb], dataset_sink_mode=False)\n", "\n", - "output = net(eval_x)\n", - "loss_output = criterion(output, eval_label)\n", - "print(\"loss_value:\", loss_output.asnumpy())\n", - "plot_model_and_datasets(net.weight.data, net.bias.data, data_x,data_y)\n", - "print(\"weight:\", net.weight.set_data([0][0]), \"bias:\", net.bias.set_data([0]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "可以看到最终得到的线性拟合的权重值非常接近目标函数权重weight=2、bias=3。" + "plot_model_and_datasets(net,eval_data)\n", + "print(net.trainable_params()[0], \"\\n%s\" % net.trainable_params()[1])" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2020-09-14T04:00:18.787349Z", + "start_time": "2020-09-14T04:00:18.784236Z" + } + }, "source": [ - "## 总结" + "训练完成后打印出最终模型的权重参数,其中weight接近于2.0,bias接近于3.0,模型训练完成,符合预期。" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "## 总结\n", + "\n", "本次体验我们了解了线性拟合的算法原理,并在MindSpore框架下实现了相应的算法定义,了解了线性拟合这类的线性回归模型在MindSpore中的训练过程,并最终拟合出了一条接近目标函数的模型函数。另外有兴趣的可以调整数据集的生成区间从(-10,10)扩展到(-100,100),看看权重值是否更接近目标函数;调整学习率大小,看看拟合的效率是否有变化;当然也可以探索如何使用MindSpore拟合$f(x)=ax^2+bx+c$这类的二次函数或者更高次的函数。" ] } @@ -745,4 +633,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/notebook/loading_dataset.ipynb b/tutorials/notebook/loading_dataset.ipynb index d1865e5fd7e5ef2a2db65bae5fc165cfc6feec09..61f899959097a3471b7734ac711f103657c3bf9d 100644 --- a/tutorials/notebook/loading_dataset.ipynb +++ b/tutorials/notebook/loading_dataset.ipynb @@ -8,7 +8,7 @@ "\n", "## 概述\n", "\n", - "MindSpore可以帮助你加载常见的数据集、特定数据格式的数据集或自定义的数据集。加载数据集时,需先导入所需要依赖的库`mindspore.dataset`。\n", + "MindSpore可以帮助你加载常用的数据集、特定数据格式的数据集或自定义的数据集。加载数据集时,需先导入所需要依赖的库`mindspore.dataset`。\n", "\n", "接下来,以加载数常用数据集(CIFAR-10数据集)、特定格式数据集以及自定义数据集为例来体验MindSpore加载数据集操作。" ] @@ -90,9 +90,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 加载常见的数据集\n", + "## 加载常用的数据集\n", "\n", - "MindSpore可以加载常见的标准数据集。支持的数据集如下表:\n", + "MindSpore可以加载常用的标准数据集。支持的数据集如下表:\n", "\n", "| 数据集: | 简要说明 |\n", "| :---------: | :-------------:|\n", @@ -103,7 +103,7 @@ "| PASCAL-VOC | 数据内容多样,可用于训练计算机视觉模型(分类、定位、检测、分割、动作识别等)。|\n", "| CelebA | CelebA人脸数据集包含上万个名人身份的人脸图片,每张图片有40个特征标记,常用于人脸相关的训练任务。 |\n", "\n", - "加载常见数据集的详细步骤如下,以创建`CIFAR-10`对象为例,用于加载支持的数据集。\n", + "加载常用数据集的详细步骤如下,以创建`CIFAR-10`对象为例,用于加载支持的数据集。\n", "\n", "1. 
使用二进制格式的数据集(CIFAR-10 binary version),配置数据集目录,定义需要加载的数据集实例。" ] @@ -283,7 +283,7 @@ "count = 0\n", "for data in cifar10_dataset.create_dict_iterator():\n", "# In CIFAR-10 dataset, each dictionary of data has keys \"image\" and \"label\".\n", - " image = data[\"image\"]\n", + " image = data[\"image\"].asnumpy()\n", " print(f\"The data of image {count+1} is below:\")\n", " print(image)\n", " plt.figure(count)\n", @@ -308,7 +308,7 @@ "\n", "MindSpore天然支持读取MindSpore数据格式——`MindRecord`存储的数据集,在性能和特性上有更好的支持。 \n", "\n", - "> 阅读[将数据集转换为MindSpore数据格式](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/converting_datasets.html),了解如何将数据集转换为MindSpore数据格式。\n", + "> 阅读[将数据集转换为MindSpore数据格式](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_conversion.html),了解如何将数据集转换为MindSpore数据格式。\n", "\n", "可以通过`MindDataset`对象对数据集进行读取。详细方法如下所示:" ] @@ -407,7 +407,7 @@ "## 加载自定义数据集\n", "\n", "现实场景中,数据集的种类多种多样,对于自定义数据集或者目前不支持直接加载的数据集,有两种方法可以处理。\n", - "一种方法是将数据集转成MindRecord格式(请参考[将数据集转换为MindSpore数据格式](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/converting_datasets.html)章节),另一种方法是通过`GeneratorDataset`对象加载,以下将展示如何使用`GeneratorDataset`。\n", + "一种方法是将数据集转成MindRecord格式(请参考[将数据集转换为MindSpore数据格式](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_conversion.html)章节),另一种方法是通过`GeneratorDataset`对象加载,以下将展示如何使用`GeneratorDataset`。\n", "\n", "1. 定义一个可迭代的对象,用于生成数据集。以下展示了两种示例,一种是含有`yield`返回值的自定义函数,另一种是含有`__getitem__`的自定义类。两种示例都将产生一个含有从0到9数字的数据集。\n", " \n", @@ -491,27 +491,27 @@ "output_type": "stream", "text": [ "dataset1:\n", - "[array([0], dtype=int32)]\n", - "[array([1], dtype=int32)]\n", - "[array([2], dtype=int32)]\n", - "[array([3], dtype=int32)]\n", - "[array([4], dtype=int32)]\n", - "[array([5], dtype=int32)]\n", - "[array([6], dtype=int32)]\n", - "[array([7], dtype=int32)]\n", - "[array([8], dtype=int32)]\n", - "[array([9], dtype=int32)]\n", + "[Tensor(shape=[1], dtype=Int32, value= [0])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [1])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [2])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [3])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [4])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [5])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [6])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [7])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [8])]\n", + "[Tensor(shape=[1], dtype=Int32, value= [9])]\n", "dataset2:\n", - "[array([0], dtype=int64)]\n", - "[array([1], dtype=int64)]\n", - "[array([2], dtype=int64)]\n", - "[array([3], dtype=int64)]\n", - "[array([4], dtype=int64)]\n", - "[array([5], dtype=int64)]\n", - "[array([6], dtype=int64)]\n", - "[array([7], dtype=int64)]\n", - "[array([8], dtype=int64)]\n", - "[array([9], dtype=int64)]\n" + "[Tensor(shape=[1], dtype=Int64, value= [0])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [1])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [2])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [3])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [4])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [5])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [6])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [7])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [8])]\n", + "[Tensor(shape=[1], dtype=Int64, value= [9])]\n" ] } ], @@ -617,4 +617,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb b/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb index 
39bffb88e5b016f15bd05e5057c4450eaf9d103f..2e9bf71b08b1113b9a8fcf4cbcf59fafeff7f08d 100644 --- a/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb +++ b/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb @@ -142,9 +142,9 @@ "outputs": [], "source": [ "import mindspore.dataset as ds\n", - "import mindspore.dataset.transforms.vision.c_transforms as CV\n", + "import mindspore.dataset.vision.c_transforms as CV\n", "import mindspore.dataset.transforms.c_transforms as C\n", - "from mindspore.dataset.transforms.vision import Inter\n", + "from mindspore.dataset.vision import Inter\n", "from mindspore.common import dtype as mstype\n", "\n", "\n", @@ -177,11 +177,11 @@ " type_cast_op = C.TypeCast(mstype.int32)\n", "\n", " # using map method to apply operations to a dataset\n", - " mnist_ds = mnist_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=resize_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", " \n", " # process the generated dataset\n", " buffer_size = 10000\n", @@ -368,11 +368,11 @@ "2. 
下面代码为上面的 `create_dataset` 函数中作数据预处理与数据增强的相关操作。可以从数据图中清晰地看到数据处理的流程。通过查看数据图,可以帮助分析是否存在不恰当的数据处理流程。\n", "\n", "```\n", - "mnist_ds = mnist_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\n", - "mnist_ds = mnist_ds.map(input_columns=\"image\", operations=resize_op, num_parallel_workers=num_parallel_workers)\n", - "mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_op, num_parallel_workers=num_parallel_workers)\n", - "mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)\n", - "mnist_ds = mnist_ds.map(input_columns=\"image\", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)\n", + "mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n", + "mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + "mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + "mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + "mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", "\n", "mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script\n", "mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n", @@ -418,4 +418,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb b/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb index 082a64b00e3f4f48d27703742e137aa6f11e6d64..689d0a1bcd80ab0a4fcf3c14e1dbc310b35aa840 100644 --- a/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb +++ b/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb @@ -157,7 +157,7 @@ "source": [ "import mindspore.dataset as ds\n", "import mindspore.dataset.transforms.c_transforms as C\n", - "import mindspore.dataset.transforms.vision.c_transforms as CV\n", + "import mindspore.dataset.vision.c_transforms as CV\n", "from mindspore.common import dtype as mstype\n", "\n", "\n", @@ -177,14 +177,14 @@ " random_horizontal_op = CV.RandomHorizontalFlip()\n", " channel_swap_op = CV.HWC2CHW()\n", " typecast_op = C.TypeCast(mstype.int32)\n", - " cifar_ds = cifar_ds.map(input_columns=\"label\", operations=typecast_op)\n", + " cifar_ds = cifar_ds.map(operations=typecast_op, input_columns=\"label\")\n", " if status == \"train\":\n", - " cifar_ds = cifar_ds.map(input_columns=\"image\", operations=random_crop_op)\n", - " cifar_ds = cifar_ds.map(input_columns=\"image\", operations=random_horizontal_op)\n", - " cifar_ds = cifar_ds.map(input_columns=\"image\", operations=resize_op)\n", - " cifar_ds = cifar_ds.map(input_columns=\"image\", operations=rescale_op)\n", - " cifar_ds = cifar_ds.map(input_columns=\"image\", operations=normalize_op)\n", - " cifar_ds = cifar_ds.map(input_columns=\"image\", operations=channel_swap_op)\n", + " cifar_ds = cifar_ds.map(operations=random_crop_op, input_columns=\"image\")\n", + " cifar_ds = cifar_ds.map(operations=random_horizontal_op, input_columns=\"image\")\n", + " cifar_ds = cifar_ds.map(operations=resize_op, input_columns=\"image\")\n", + " cifar_ds = cifar_ds.map(operations=rescale_op, input_columns=\"image\")\n", + " cifar_ds = 
cifar_ds.map(operations=normalize_op, input_columns=\"image\")\n", + " cifar_ds = cifar_ds.map(operations=channel_swap_op, input_columns=\"image\")\n", "\n", " cifar_ds = cifar_ds.shuffle(buffer_size=1000)\n", " cifar_ds = cifar_ds.batch(batch_size, drop_remainder=True)\n", @@ -240,8 +240,8 @@ "ds_iterator = ds_train.create_dict_iterator()\n", "ds_iterator.get_next()\n", "batch_1 = ds_iterator.get_next()\n", - "batch_image = batch_1[\"image\"]\n", - "batch_label = batch_1[\"label\"]\n", + "batch_image = batch_1[\"image\"].asnumpy()\n", + "batch_label = batch_1[\"label\"].asnumpy()\n", "%matplotlib inline\n", "plt.figure(dpi=144)\n", "for i,image in enumerate(batch_image):\n", @@ -305,10 +305,10 @@ "\n", "当前支持的Summary算子:\n", "\n", - "- [ScalarSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html?highlight=scalarsummary#mindspore.ops.operations.ScalarSummary): 记录标量数据\n", - "- [TensorSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html?highlight=tensorsummary#mindspore.ops.operations.TensorSummary): 记录张量数据\n", - "- [ImageSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html?highlight=imagesummary#mindspore.ops.operations.ImageSummary): 记录图片数据\n", - "- [HistogramSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html?highlight=histogramsummar#mindspore.ops.operations.HistogramSummary): 将张量数据转为直方图数据记录" + "- [ScalarSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html?highlight=scalarsummary#mindspore.ops.ScalarSummary): 记录标量数据\n", + "- [TensorSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html?highlight=tensorsummary#mindspore.ops.TensorSummary): 记录张量数据\n", + "- [ImageSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html?highlight=imagesummary#mindspore.ops.ImageSummary): 记录图片数据\n", + "- [HistogramSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html?highlight=histogramsummar#mindspore.ops.HistogramSummary): 将张量数据转为直方图数据记录" ] }, { diff --git a/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb b/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb index b76c1500a9b8676a02f7fda84fdac21a8fae9fda..d8a27986e8305b21408dc974927cadc8a791be15 100644 --- a/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb +++ b/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb @@ -165,9 +165,9 @@ "metadata": {}, "outputs": [], "source": [ - "import mindspore.dataset.transforms.vision.c_transforms as CV\n", + "import mindspore.dataset.vision.c_transforms as CV\n", "import mindspore.dataset.transforms.c_transforms as C\n", - "from mindspore.dataset.transforms.vision import Inter\n", + "from mindspore.dataset.vision import Inter\n", "from mindspore.common import dtype as mstype\n", "import mindspore.dataset as ds\n", "\n", @@ -200,11 +200,11 @@ " type_cast_op = C.TypeCast(mstype.int32)\n", "\n", " # using map method to apply operations to a dataset\n", - " mnist_ds = mnist_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=resize_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_op, 
num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", " \n", " # process the generated dataset\n", " buffer_size = 10000\n", @@ -493,4 +493,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/notebook/mixed_precision.ipynb b/tutorials/notebook/mixed_precision.ipynb index 149d1ebf8a7d53e9d159029703272b483c8a05b4..69aa651439c59bdde6ce5b0babaf7ccfe6ef23c3 100644 --- a/tutorials/notebook/mixed_precision.ipynb +++ b/tutorials/notebook/mixed_precision.ipynb @@ -169,7 +169,7 @@ "datas = dict1.get_next()\n", "image = datas[\"image\"]\n", "print(\"the tensor of image is:\", image.shape)\n", - "plt.imshow(np.array(image))\n", + "plt.imshow(np.array(image.asnumpy()))\n", "plt.show()" ] }, @@ -196,7 +196,7 @@ "import os\n", "import mindspore.common.dtype as mstype\n", "import mindspore.dataset.engine as de\n", - "import mindspore.dataset.transforms.vision.c_transforms as C\n", + "import mindspore.dataset.vision.c_transforms as C\n", "import mindspore.dataset.transforms.c_transforms as C2\n", "\n", "def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target=\"GPU\"):\n", @@ -220,8 +220,8 @@ "\n", " type_cast_op = C2.TypeCast(mstype.int32)\n", "\n", - " ds = ds.map(input_columns=\"label\", num_parallel_workers=8, operations=type_cast_op)\n", - " ds = ds.map(input_columns=\"image\", num_parallel_workers=8, operations=trans)\n", + " ds = ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=8)\n", + " ds = ds.map(operations=trans, input_columns=\"image\", num_parallel_workers=8)\n", "\n", " # apply batch operations\n", " ds = ds.batch(batch_size, drop_remainder=True)\n", @@ -282,7 +282,7 @@ "print(\"the cifar dataset size is:\", ds.get_dataset_size())\n", "dict1 = ds.create_dict_iterator()\n", "datas = dict1.get_next()\n", - "image = datas[\"image\"]\n", + "image = datas[\"image\"].asnumpy()\n", "single_pic = np.transpose(image[0], (1,2,0))\n", "print(\"the tensor of image is:\", image.shape)\n", "plt.imshow(np.array(single_pic))\n", @@ -991,4 +991,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/notebook/model_security.ipynb b/tutorials/notebook/model_security.ipynb index d958155df781b04021d7fa920f1c03b65b69f6a9..46965199e2f60df5452003230e85c9fe2dcae96f 100644 --- a/tutorials/notebook/model_security.ipynb +++ b/tutorials/notebook/model_security.ipynb @@ -184,9 +184,9 @@ "outputs": [], "source": [ "import mindspore.dataset as ds\n", - "import mindspore.dataset.transforms.vision.c_transforms as CV\n", + "import mindspore.dataset.vision.c_transforms as CV\n", "import mindspore.dataset.transforms.c_transforms as C\n", - 
"from mindspore.dataset.transforms.vision import Inter\n", + "from mindspore.dataset.vision import Inter\n", "from mindspore.common import dtype as mstype\n", "\n", "\n", @@ -214,16 +214,16 @@ " # apply map operations on images\n", " if not sparse:\n", " one_hot_enco = C.OneHot(10)\n", - " ds1 = ds1.map(input_columns=\"label\", operations=one_hot_enco,\n", + " ds1 = ds1.map(operations=one_hot_enco, input_columns=\"label\",\n", " num_parallel_workers=num_parallel_workers)\n", " type_cast_op = C.TypeCast(mstype.float32)\n", - " ds1 = ds1.map(input_columns=\"label\", operations=type_cast_op,\n", + " ds1 = ds1.map(operations=type_cast_op, input_columns=\"label\",\n", " num_parallel_workers=num_parallel_workers)\n", - " ds1 = ds1.map(input_columns=\"image\", operations=resize_op,\n", + " ds1 = ds1.map(operations=resize_op, input_columns=\"image\",\n", " num_parallel_workers=num_parallel_workers)\n", - " ds1 = ds1.map(input_columns=\"image\", operations=rescale_op,\n", + " ds1 = ds1.map(operations=rescale_op,input_columns=\"image\",\n", " num_parallel_workers=num_parallel_workers)\n", - " ds1 = ds1.map(input_columns=\"image\", operations=hwc2chw_op,\n", + " ds1 = ds1.map(operations=hwc2chw_op, input_columns=\"image\",\n", " num_parallel_workers=num_parallel_workers)\n", "\n", " # apply DatasetOps\n", @@ -281,8 +281,8 @@ "ds_iterator = ds_train.create_dict_iterator()\n", "ds_iterator.get_next()\n", "batch_1 = ds_iterator.get_next()\n", - "batch_image = batch_1[\"image\"]\n", - "batch_label = batch_1[\"label\"]\n", + "batch_image = batch_1[\"image\"].asnumpy()\n", + "batch_label = batch_1[\"label\"].asnumpy()\n", "%matplotlib inline\n", "plt.figure(dpi=144)\n", "for i,image in enumerate(batch_image):\n", @@ -506,8 +506,8 @@ "i = 0\n", "for data in ds_test.create_tuple_iterator():\n", " i += 1\n", - " images = data[0].astype(np.float32)\n", - " labels = data[1]\n", + " images = data[0].asnumpy().astype(np.float32)\n", + " labels = data[1].asnumpy()\n", " test_images.append(images)\n", " test_labels.append(labels)\n", " pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),\n", @@ -579,7 +579,7 @@ "source": [ "### 攻击模型\n", "\n", - "调用MindArmour提供的FGSM接口(`FastGradientSignMethod`),使用被攻击前抽取的96张数据图像`test_images`作为被攻击数据集,保存被攻击后数据集图像到当前notebook目录下的`ada_data`文件中。其中,参数`eps`为攻击对数据范围产生的单步对抗性摄动的比例,该值越大,则攻击程度越大。关于`FastGradientSignMethod`的详细使用说明,可参考[官方API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindarmour/mindarmour.attacks.html?highlight=fastgradientsignmethod#mindarmour.attacks.FastGradientSignMethod)。" + "调用MindArmour提供的FGSM接口(`FastGradientSignMethod`),使用被攻击前抽取的96张数据图像`test_images`作为被攻击数据集,保存被攻击后数据集图像到当前notebook目录下的`ada_data`文件中。其中,参数`eps`为攻击对数据范围产生的单步对抗性摄动的比例,该值越大,则攻击程度越大。关于`FastGradientSignMethod`的详细使用说明,可参考[官方API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindarmour/mindarmour.adv_robustness.attacks.html?#mindarmour.adv_robustness.attacks.FastGradientSignMethod)。" ] }, { @@ -589,7 +589,7 @@ "outputs": [], "source": [ "import time\n", - "from mindarmour.attacks.gradient_method import FastGradientSignMethod\n", + "from mindarmour.adv_robustness.attacks import FastGradientSignMethod\n", "\n", "\n", "# attacking\n", @@ -635,7 +635,7 @@ ], "source": [ "from scipy.special import softmax\n", - "from mindarmour.evaluations.attack_evaluation import AttackEvaluate\n", + "from mindarmour.adv_robustness.evaluations import AttackEvaluate\n", "\n", "\n", "pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()\n", @@ -749,7 +749,7 @@ ], "source": [ "from mindspore.nn import 
SoftmaxCrossEntropyWithLogits\n", - "from mindarmour.defenses import NaturalAdversarialDefense\n", + "from mindarmour.adv_robustness.defenses import NaturalAdversarialDefense\n", "\n", "\n", "loss = SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')\n", @@ -843,4 +843,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/notebook/nlp_application.ipynb b/tutorials/notebook/nlp_application.ipynb index 02cf130217ea5634fb6d87cc6059dc4c71809427..d2df6a718d19955f19505f1d7c82a8a91ce638fd 100644 --- a/tutorials/notebook/nlp_application.ipynb +++ b/tutorials/notebook/nlp_application.ipynb @@ -652,8 +652,8 @@ ], "source": [ "iterator = ds_train.create_dict_iterator().get_next()\n", - "first_batch_label = iterator[\"label\"]\n", - "first_batch_first_feature = iterator[\"feature\"][0]\n", + "first_batch_label = iterator[\"label\"].asnumpy()\n", + "first_batch_first_feature = iterator[\"feature\"].asnumpy()[0]\n", "print(f\"The first batch contains label below:\\n{first_batch_label}\\n\")\n", "print(f\"The feature of the first item in the first batch is below vector:\\n{first_batch_first_feature}\")" ] @@ -673,17 +673,43 @@ "metadata": {}, "outputs": [], "source": [ + "import math\n", + "\n", "import numpy as np\n", - "from mindspore import Tensor, nn, context\n", - "from mindspore.ops import operations as P\n", - "from mindspore.train.serialization import load_param_into_net, load_checkpoint" + "\n", + "from mindspore import Tensor, nn, context, Parameter, ParameterTuple\n", + "from mindspore.common.initializer import initializer\n", + "from mindspore.ops import operations as P" ] }, + { + "cell_type": "markdown", + "source": [ + "2. 定义需要单层LSTM小算子堆叠的设备类型。" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "STACK_LSTM_DEVICE = [\"CPU\"]" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "2. 定义`lstm_default_state`函数来初始化网络参数及网络状态。" + "3. 定义`lstm_default_state`函数来初始化网络参数及网络状态。" ] }, { @@ -695,38 +721,144 @@ "# Initialize short-term memory (h) and long-term memory (c) to 0\n", "def lstm_default_state(batch_size, hidden_size, num_layers, bidirectional):\n", " \"\"\"init default input.\"\"\"\n", - " num_directions = 1\n", - " if bidirectional:\n", - " num_directions = 2\n", - "\n", - " if context.get_context(\"device_target\") == \"CPU\":\n", - " h_list = []\n", - " c_list = []\n", - " i = 0\n", - " while i < num_layers:\n", - " hi = Tensor(np.zeros((num_directions, batch_size, hidden_size)).astype(np.float32))\n", - " h_list.append(hi)\n", - " ci = Tensor(np.zeros((num_directions, batch_size, hidden_size)).astype(np.float32))\n", - " c_list.append(ci)\n", - " i = i + 1\n", - " h = tuple(h_list)\n", - " c = tuple(c_list)\n", - " return h, c\n", - "\n", - " h = Tensor(\n", - " np.zeros((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32))\n", - " c = Tensor(\n", - " np.zeros((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32))\n", + " num_directions = 2 if bidirectional else 1\n", + " h = Tensor(np.zeros((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32))\n", + " c = Tensor(np.zeros((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32))\n", " return h, c" ] }, + { + "cell_type": "markdown", + "source": [ + "4. 
定义`stack_lstm_default_state`函数来初始化小算子堆叠所需的网络参数及网络状态。"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "def stack_lstm_default_state(batch_size, hidden_size, num_layers, bidirectional):\n",
+    "    \"\"\"init default input.\"\"\"\n",
+    "    num_directions = 2 if bidirectional else 1\n",
+    "\n",
+    "    # use two separate lists: a chained assignment (h_list = c_list = []) would make\n",
+    "    # h and c alias the same underlying list\n",
+    "    h_list, c_list = [], []\n",
+    "    for _ in range(num_layers):\n",
+    "        h_list.append(Tensor(np.zeros((num_directions, batch_size, hidden_size)).astype(np.float32)))\n",
+    "        c_list.append(Tensor(np.zeros((num_directions, batch_size, hidden_size)).astype(np.float32)))\n",
+    "    h, c = tuple(h_list), tuple(c_list)\n",
+    "    return h, c\n"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "5. 针对CPU场景,自定义单层LSTM小算子堆叠,来实现多层LSTM大算子功能。"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "class StackLSTM(nn.Cell):\n",
+    "    \"\"\"\n",
+    "    Stack multi-layers LSTM together.\n",
+    "    \"\"\"\n",
+    "\n",
+    "    def __init__(self,\n",
+    "                 input_size,\n",
+    "                 hidden_size,\n",
+    "                 num_layers=1,\n",
+    "                 has_bias=True,\n",
+    "                 batch_first=False,\n",
+    "                 dropout=0.0,\n",
+    "                 bidirectional=False):\n",
+    "        super(StackLSTM, self).__init__()\n",
+    "        self.num_layers = num_layers\n",
+    "        self.batch_first = batch_first\n",
+    "        self.transpose = P.Transpose()\n",
+    "\n",
+    "        # direction number\n",
+    "        num_directions = 2 if bidirectional else 1\n",
+    "\n",
+    "        # input_size list\n",
+    "        input_size_list = [input_size]\n",
+    "        for i in range(num_layers - 1):\n",
+    "            input_size_list.append(hidden_size * num_directions)\n",
+    "\n",
+    "        # layers\n",
+    "        layers = []\n",
+    "        for i in range(num_layers):\n",
+    "            layers.append(nn.LSTMCell(input_size=input_size_list[i],\n",
+    "                                      hidden_size=hidden_size,\n",
+    "                                      has_bias=has_bias,\n",
+    "                                      batch_first=batch_first,\n",
+    "                                      bidirectional=bidirectional,\n",
+    "                                      dropout=dropout))\n",
+    "\n",
+    "        # weights\n",
+    "        weights = []\n",
+    "        for i in range(num_layers):\n",
+    "            # weight size\n",
+    "            weight_size = (input_size_list[i] + hidden_size) * num_directions * hidden_size * 4\n",
+    "            if has_bias:\n",
+    "                bias_size = num_directions * hidden_size * 4\n",
+    "                weight_size = weight_size + bias_size\n",
+    "\n",
+    "            # numpy weight\n",
+    "            stdv = 1 / math.sqrt(hidden_size)\n",
+    "            w_np = np.random.uniform(-stdv, stdv, (weight_size, 1, 1)).astype(np.float32)\n",
+    "\n",
+    "            # lstm weight\n",
+    "            weights.append(Parameter(initializer(Tensor(w_np), w_np.shape), name=\"weight\" + str(i)))\n",
+    "\n",
+    "        # register the stacked cells and their weights\n",
+    "        self.lstms = layers\n",
+    "        self.weight = ParameterTuple(tuple(weights))\n",
+    "\n",
+    "    def construct(self, x, hx):\n",
+    "        \"\"\"construct\"\"\"\n",
+    "        if self.batch_first:\n",
+    "            x = self.transpose(x, (1, 0, 2))\n",
+    "        # stack lstm\n",
+    "        h, c = hx\n",
+    "        hn = cn = None\n",
+    "        for i in range(self.num_layers):\n",
+    "            x, hn, cn, _, _ = self.lstms[i](x, h[i], c[i], self.weight[i])\n",
+    "        if self.batch_first:\n",
+    "            x = self.transpose(x, (1, 0, 2))\n",
+    "        return x, (hn, cn)"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "6. 
使用`Cell`方法,定义网络结构(`SentimentNet`网络)。" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, { "cell_type": "code", "execution_count": 11, @@ -753,14 +885,25 @@ " self.embedding.embedding_table.requires_grad = False\n", " self.trans = P.Transpose()\n", " self.perm = (1, 0, 2)\n", - " self.encoder = nn.LSTM(input_size=embed_size,\n", - " hidden_size=num_hiddens,\n", - " num_layers=num_layers,\n", - " has_bias=True,\n", - " bidirectional=bidirectional,\n", - " dropout=0.0)\n", "\n", - " self.h, self.c = lstm_default_state(batch_size, num_hiddens, num_layers, bidirectional)\n", + " if context.get_context(\"device_target\") in STACK_LSTM_DEVICE:\n", + " # stack lstm by user\n", + " self.encoder = StackLSTM(input_size=embed_size,\n", + " hidden_size=num_hiddens,\n", + " num_layers=num_layers,\n", + " has_bias=True,\n", + " bidirectional=bidirectional,\n", + " dropout=0.0)\n", + " self.h, self.c = stack_lstm_default_state(batch_size, num_hiddens, num_layers, bidirectional)\n", + " else:\n", + " # standard lstm\n", + " self.encoder = nn.LSTM(input_size=embed_size,\n", + " hidden_size=num_hiddens,\n", + " num_layers=num_layers,\n", + " has_bias=True,\n", + " bidirectional=bidirectional,\n", + " dropout=0.0)\n", + " self.h, self.c = lstm_default_state(batch_size, num_hiddens, num_layers, bidirectional)\n", "\n", " self.concat = P.Concat(1)\n", " if bidirectional:\n", @@ -783,7 +926,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "4. 实例化`SentimentNet`,创建网络,此步骤用时约1分钟。" + "7. 实例化`SentimentNet`,创建网络,此步骤用时约1分钟。" ] }, { @@ -976,4 +1119,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/notebook/optimize_the_performance_of_data_preparation/optimize_the_performance_of_data_preparation.ipynb b/tutorials/notebook/optimize_the_performance_of_data_preparation/optimize_the_performance_of_data_preparation.ipynb index dd9aa3ad20d655cf532ca9f3742c4434112d5e04..659c0fc204e3dc09c3cf5302e254ca8ed5c12965 100644 --- a/tutorials/notebook/optimize_the_performance_of_data_preparation/optimize_the_performance_of_data_preparation.ipynb +++ b/tutorials/notebook/optimize_the_performance_of_data_preparation/optimize_the_performance_of_data_preparation.ipynb @@ -148,7 +148,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "MindSpore为用户提供了多种数据加载方式,其中包括常用数据集加载、用户自定义数据集加载、MindSpore数据格式加载,详情内容请参考[加载数据集](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/loading_the_datasets.html)。对于数据集加载,底层实现方式的不同,会导致数据集加载的性能存在差异,如下所示:" + "MindSpore为用户提供了多种数据加载方式,其中包括常用数据集加载、用户自定义数据集加载、MindSpore数据格式加载,详情内容请参考[加载数据集](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_loading.html)。对于数据集加载,底层实现方式的不同,会导致数据集加载的性能存在差异,如下所示:" ] }, { @@ -181,7 +181,7 @@ "source": [ "数据加载性能优化建议如下:\n", "- 已经支持的数据集格式优选内置加载算子,具体内容请参考[内置加载算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html),如果性能仍无法满足需求,则可采取多线程并发方案,请参考本文[多线程优化方案](#多线程优化方案)。\n", - "- 不支持的数据集格式,优选转换为MindSpore数据格式后再使用`MindDataset`类进行加载,具体内容请参考[将数据集转换为MindSpore数据格式](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/converting_datasets.html),如果性能仍无法满足需求,则可采取多线程并发方案,请参考本文[多线程优化方案](#多线程优化方案)。\n", + "- 不支持的数据集格式,优选转换为MindSpore数据格式后再使用`MindDataset`类进行加载,具体内容请参考[将数据集转换为MindSpore数据格式](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_conversion.html),如果性能仍无法满足需求,则可采取多线程并发方案,请参考本文[多线程优化方案](#多线程优化方案)。\n", "- 不支持的数据集格式,算法快速验证场景,优选用户自定义`GeneratorDataset`类实现,如果性能仍无法满足需求,则可采取多进程并发方案,请参考本文[多进程优化方案](#多进程优化方案)。" ] }, 
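Editor's note: the loading advice above distinguishes three entry points — built-in loading operators, `MindDataset` over converted MindRecord files, and `GeneratorDataset` for quick algorithm validation. A minimal side-by-side sketch follows; the CIFAR-10 binary directory and the `cifar10.mindrecord` path are placeholders, and the generator mirrors the notebook's own example:

```python
import numpy as np
import mindspore.dataset as ds

# 1. Built-in loading operator: preferred for supported formats;
#    num_parallel_workers applies the multithreading optimization described above.
cifar10_ds = ds.Cifar10Dataset("./dataset/Cifar10Data/cifar-10-batches-bin/",
                               num_parallel_workers=4)

# 2. Unsupported format: convert to MindRecord once, then load with MindDataset.
#    "./cifar10.mindrecord" is a placeholder for a previously converted file.
mind_ds = ds.MindDataset("./cifar10.mindrecord", num_parallel_workers=4)

# 3. Quick algorithm validation: user-defined GeneratorDataset.
def generator_func():
    for i in range(10):
        yield (np.array([i]),)

gen_ds = ds.GeneratorDataset(generator_func, ["data"])
# Heavy per-sample Python work can additionally use the multiprocessing optimization.
gen_ds = gen_ds.map(operations=lambda x: x ** 2, input_columns="data",
                    python_multiprocessing=True, num_parallel_workers=4)
```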
@@ -210,7 +210,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'image': array([[[235, 235, 235],\n", + "{'image': Tensor(shape=[32, 32, 3], dtype=UInt8, value=\n", + " [[[235, 235, 235],\n", " [230, 230, 230],\n", " [234, 234, 234],\n", " ...,\n", @@ -258,7 +259,7 @@ " ...,\n", " [120, 120, 119],\n", " [146, 146, 146],\n", - " [177, 174, 190]]], dtype=uint8), 'label': array(9, dtype=uint32)}\n" + " [177, 174, 190]]]), 'label': Tensor(shape=[], dtype=UInt32, value= 9)}\n" ] } ], @@ -287,7 +288,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'data': array([255, 216, 255, ..., 63, 255, 217], dtype=uint8), 'id': array(30474, dtype=int64), 'label': array(2, dtype=int64)}\n" + "{'data': Tensor(shape=[1431], dtype=UInt8, value= [255, 216, 255, ..., 63, 255, 217]), 'id': Tensor(shape=[], dtype=Int64, value= 30474), 'label': Tensor(shape=[], dtype=Int64, value= 2)}\n" ] } ], @@ -323,7 +324,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'data': array([0], dtype=int64)}\n" + "{'data': Tensor(shape=[1], dtype=Int64, value= [0])}\n" ] } ], @@ -349,7 +350,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "shuffle操作主要是对有序的数据集或者进行过repeat的数据集进行混洗,MindSpore专门为用户提供了`shuffle`函数,其中设定的`buffer_size`参数越大,混洗程度越大,但时间、计算资源消耗也会大。该接口支持用户在整个pipeline的任何时候都可以对数据进行混洗,具体内容请参考[shuffle处理](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/data_processing_and_augmentation.html#shuffle)。但是因为底层的实现方式不同,该方式的性能不如直接在[内置加载算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html)中设置`shuffle`参数直接对数据进行混洗。" + "shuffle操作主要是对有序的数据集或者进行过repeat的数据集进行混洗,MindSpore专门为用户提供了`shuffle`函数,其中设定的`buffer_size`参数越大,混洗程度越大,但时间、计算资源消耗也会大。该接口支持用户在整个pipeline的任何时候都可以对数据进行混洗,具体内容请参考[shuffle处理](https://www.mindspore.cn/api/zh-CN/master/programming_guide/augmentation.html)。但是因为底层的实现方式不同,该方式的性能不如直接在[内置加载算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html)中设置`shuffle`参数直接对数据进行混洗。" ] }, { @@ -400,7 +401,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'image': array([[[254, 254, 254],\n", + "{'image': Tensor(shape=[32, 32, 3], dtype=UInt8, value=\n", + " [[[254, 254, 254],\n", " [255, 255, 254],\n", " [255, 255, 254],\n", " ...,\n", @@ -448,7 +450,7 @@ " ...,\n", " [ 64, 61, 63],\n", " [ 63, 58, 60],\n", - " [ 61, 56, 58]]], dtype=uint8), 'label': array(9, dtype=uint32)}\n" + " [ 61, 56, 58]]]), 'label': Tensor(shape=[], dtype=UInt32, value= 9)}\n" ] } ], @@ -526,7 +528,7 @@ "- 使用内置Python算子(`py_transforms`模块)进行数据增强。\n", "- 用户可根据自己的需求,自定义Python函数进行数据增强。\n", "\n", - "具体的内容请参考[数据增强](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/data_processing_and_augmentation.html#id3)。因为底层的实现方式不同,所以性能还是有一定的差异,如下所示:" + "具体的内容请参考[数据增强](https://www.mindspore.cn/api/zh-CN/master/programming_guide/augmentation.html#id3)。因为底层的实现方式不同,所以性能还是有一定的差异,如下所示:" ] }, { @@ -600,7 +602,7 @@ ], "source": [ "import mindspore.dataset.transforms.c_transforms as c_transforms\n", - "import mindspore.dataset.transforms.vision.c_transforms as C\n", + "import mindspore.dataset.vision.c_transforms as C\n", "import matplotlib.pyplot as plt\n", "cifar10_path = \"./dataset/Cifar10Data/cifar-10-batches-bin/\"\n", "\n", @@ -608,10 +610,10 @@ "cifar10_dataset = ds.Cifar10Dataset(cifar10_path,num_parallel_workers=4)\n", "transforms = C.RandomResizedCrop((800,800))\n", "# apply the transform to the dataset through dataset.map()\n", - "cifar10_dataset = 
cifar10_dataset.map(input_columns=\"image\",operations=transforms,num_parallel_workers=4)\n", + "cifar10_dataset = cifar10_dataset.map(operations=transforms,input_columns=\"image\",num_parallel_workers=4)\n", "\n", "data = next(cifar10_dataset.create_dict_iterator())\n", - "plt.imshow(data[\"image\"])\n", + "plt.imshow(data[\"image\"].asnumpy())\n", "plt.show()" ] }, @@ -657,7 +659,7 @@ " print(data[\"data\"])\n", "\n", "func = lambda x:x**2\n", - "ds4 = ds3.map(input_columns=\"data\",operations=func,python_multiprocessing=True,num_parallel_workers=4)\n", + "ds4 = ds3.map(operations=func,input_columns=\"data\",python_multiprocessing=True,num_parallel_workers=4)\n", "print(\"after map:\")\n", "for data in ds4.create_dict_iterator():\n", " print(data[\"data\"])" @@ -737,7 +739,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "提供某些融合算子,这些算子将两个或多个算子的功能聚合到一个算子中。具体内容请参考[数据增强算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.transforms.vision.html),与它们各自组件的流水线相比,这种融合算子提供了更好的性能。如图所示:" + "提供某些融合算子,这些算子将两个或多个算子的功能聚合到一个算子中。具体内容请参考[数据增强算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.vision.html),与它们各自组件的流水线相比,这种融合算子提供了更好的性能。如图所示:" ] }, { @@ -769,4 +771,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/notebook/quick_start.ipynb b/tutorials/notebook/quick_start.ipynb index 80dec971e2e6a692f62f0a1c18a580772c2c3a9f..99757efd668765bbf059c0cb8ed1a1b24023d4de 100644 --- a/tutorials/notebook/quick_start.ipynb +++ b/tutorials/notebook/quick_start.ipynb @@ -250,15 +250,15 @@ "\n", "dic_ds = mnist_ds.create_dict_iterator()\n", "item = dic_ds.get_next()\n", - "img = item[\"image\"]\n", - "label = item[\"label\"]\n", + "img = item[\"image\"].asnumpy()\n", + "label = item[\"label\"].asnumpy()\n", "\n", "print(\"The item of mnist_ds:\", item.keys())\n", "print(\"Tensor of image in item:\", img.shape) \n", "print(\"The label of item:\", label)\n", "\n", "plt.imshow(np.squeeze(img))\n", - "plt.title(\"number:%s\"% item[\"label\"])\n", + "plt.title(\"number:%s\"% item[\"label\"].asnumpy())\n", "plt.show()" ] }, @@ -336,11 +336,11 @@ " type_cast_op = C.TypeCast(mstype.int32)\n", "\n", " # using map to apply operations to a dataset\n", - " mnist_ds = mnist_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=resize_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", " \n", " # process the generated 
dataset\n", " buffer_size = 10000\n", @@ -1062,8 +1062,8 @@ "source": [ "ds_test = create_dataset(test_data_path).create_dict_iterator()\n", "data = ds_test.get_next()\n", - "images = data[\"image\"]\n", - "labels = data[\"label\"]\n", + "images = data[\"image\"].asnumpy()\n", + "labels = data[\"label\"].asnumpy()\n", "\n", "output = model.predict(Tensor(data['image']))\n", "prb = output.asnumpy()\n", diff --git a/tutorials/notebook/synchronization_training_and_evaluation.ipynb b/tutorials/notebook/synchronization_training_and_evaluation.ipynb index 80f857391986c557ac75db948419f81a400a3473..8c22d397b8a86c04a7d178190ce9d0f91748261b 100644 --- a/tutorials/notebook/synchronization_training_and_evaluation.ipynb +++ b/tutorials/notebook/synchronization_training_and_evaluation.ipynb @@ -94,9 +94,9 @@ "source": [ "import os\n", "import mindspore.dataset as ds\n", - "import mindspore.dataset.transforms.vision.c_transforms as CV\n", + "import mindspore.dataset.vision.c_transforms as CV\n", "import mindspore.dataset.transforms.c_transforms as C\n", - "from mindspore.dataset.transforms.vision import Inter\n", + "from mindspore.dataset.vision import Inter\n", "from mindspore.common import dtype as mstype\n", "\n", "def create_dataset(data_path, batch_size=32, repeat_size=1,\n", @@ -112,9 +112,9 @@ " type_cast_op = C.TypeCast(mstype.int32) \n", "\n", " # apply map operations on images\n", - " mnist_ds = mnist_ds.map(input_columns=\"label\", operations=type_cast_op, num_parallel_workers=num_parallel_workers)\n", - " mnist_ds = mnist_ds.map(input_columns=\"image\", operations=[resize_op,rescale_op,rescale_nml_op,hwc2chw_op],\n", - " num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n", + " mnist_ds = mnist_ds.map(operations=[resize_op,rescale_op,rescale_nml_op,hwc2chw_op],\n", + " input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n", "\n", " # apply DatasetOps\n", " buffer_size = 10000\n", diff --git a/tutorials/source_en/_static/logo_source.png b/tutorials/source_en/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/tutorials/source_en/_static/logo_source.png and b/tutorials/source_en/_static/logo_source.png differ diff --git a/tutorials/source_en/advanced_use/auto_augmentation.md b/tutorials/source_en/advanced_use/auto_augmentation.md new file mode 100644 index 0000000000000000000000000000000000000000..f16244960aa5571755a27c480671ea7e2447f1b6 --- /dev/null +++ b/tutorials/source_en/advanced_use/auto_augmentation.md @@ -0,0 +1 @@ +# Auto Augmentation diff --git a/tutorials/source_en/advanced_use/computer_vision_application.md b/tutorials/source_en/advanced_use/computer_vision_application.md index 8ead2a76fd2e0f4abca0363f9cd3947462b07221..950540f3ae8168cbdc67cc32499b9bf92ab397cd 100644 --- a/tutorials/source_en/advanced_use/computer_vision_application.md +++ b/tutorials/source_en/advanced_use/computer_vision_application.md @@ -22,13 +22,13 @@ ## Overview -Computer vision is the most widely researched and mature technology field of deep learning, and is widely used in scenarios such as mobile phone photographing, intelligent security protection, and automated driving. Since AlexNet won the ImageNet competition in 2012, deep learning has greatly promoted the development of the computer vision field. Almost all the most advanced computer vision algorithms are related to deep learning. 
Deep neural network can extract image features layer by layer and retain local invariance. It is widely used in visual tasks such as classification, detection, segmentation, tracking, retrieval, recognition, promotion, and reconstruction. +Computer vision is one of the most widely researched and mature technology fields of deep learning, and is widely applied to scenarios such as mobile phone photographing, intelligent security protection, and automated driving. Since AlexNet won the ImageNet competition in 2012, deep learning has greatly promoted the development of the computer vision field. Almost all the most advanced computer vision algorithms are related to deep learning. Deep neural network can extract image features layer by layer and retain local invariance. It is widely used in visual tasks such as classification, detection, segmentation, tracking, retrieval, recognition, promotion, and reconstruction. This chapter describes how to apply MindSpore to computer vision scenarios based on image classification tasks. ## Image Classification -Image classification is the most basic computer vision application and belongs to the supervised learning category. For example, determine the class of a digital image, such as cat, dog, airplane, or car. The function is as follows: +Image classification is one of the most basic computer vision applications and belongs to the supervised learning category. For example, determine the class of a digital image, such as cat, dog, airplane, or car. The function is as follows: ```python def classify(image): @@ -49,9 +49,9 @@ MindSpore supports the following image classification networks: LeNet, AlexNet, Figure 1: CIFAR-10 dataset [1] -Figure 1 shows that the CIFAR-10 dataset contains 10 classes of 60,000 images. Each class contains 6000 images. 50,000 images are for training and 10,000 images are for testing. The size of each image is 32 x 32 pixels. +The CIFAR-10 dataset contains 10 classes of 60,000 images. Each class contains 6000 images. 50,000 images are for training and 10,000 images are for testing. The size of each image is 32 x 32 pixels. -Generally, a training indicator of image classification is accuracy, that is, a ratio of a quantity of accurately predicted examples to a total quantity of predicted examples. +Generally, a training indicator of image classification is accuracy, that is, a ratio of the quantity of accurately predicted examples to the total quantity of predicted examples. Next, let's use MindSpore to solve the image classification task. The overall process is as follows: 1. Download the CIFAR-10 dataset. @@ -61,12 +61,12 @@ Next, let's use MindSpore to solve the image classification task. The overall pr 5. Call the high-level `Model` API to train and save the model file. 6. Load the saved model for inference. -> This example is for the hardware platform of the Ascend 910 AI processor. You can find the complete executable sample code at: . +> This example uses the hardware platform of the Ascend 910 AI processor. You can find the complete executable sample code at: . The key parts of the task process code are explained below. ### Downloading the CIFAR-10 Dataset -CIFAR-10 dataset download address: [the website of Cifar-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar.html) In this example, the data is in binary format. In the Linux environment, run the following command to download the dataset: +CIFAR-10 dataset download address: [the website of Cifar-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar.html). 
In this example, the data is in binary format. In the Linux environment, run the following command to download the dataset:
 
 ```shell
 wget https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz
@@ -119,8 +119,8 @@ tar -zvxf cifar-10-binary.tar.gz
         c_trans += [resize_op, rescale_op, normalize_op, changeswap_op]
 
         # apply map operations on images
-        cifar_ds = cifar_ds.map(input_columns="label", operations=type_cast_op)
-        cifar_ds = cifar_ds.map(input_columns="image", operations=c_trans)
+        cifar_ds = cifar_ds.map(operations=type_cast_op, input_columns="label")
+        cifar_ds = cifar_ds.map(operations=c_trans, input_columns="image")
     ```
 
 3. Shuffle and batch process the data.
diff --git a/tutorials/source_en/advanced_use/customized_debugging_information.md b/tutorials/source_en/advanced_use/customized_debugging_information.md
index 4d2add7d7ada5eb6138d28fa3be072cd894f3e79..c5acc66fd282f17ed80d414904d9d5228843a072 100644
--- a/tutorials/source_en/advanced_use/customized_debugging_information.md
+++ b/tutorials/source_en/advanced_use/customized_debugging_information.md
@@ -11,7 +11,9 @@
     - [Custom Callback](#custom-callback)
     - [MindSpore Metrics](#mindspore-metrics)
     - [MindSpore Print Operator](#mindspore-print-operator)
-    - [Asynchronous Data Dump](#asynchronous-data-dump)
+    - [Data Dump Introduction](#data-dump-introduction)
+        - [Synchronous Dump](#synchronous-dump)
+        - [Asynchronous Dump](#asynchronous-dump)
     - [Log-related Environment Variables and Configurations](#log-related-environment-variables-and-configurations)
 
@@ -118,8 +120,8 @@ Here are two examples to further understand the usage of custom Callback.
             loss = cb_params.net_outputs
             cur_time = time.time()
             if (cur_time - cb_params.init_time) > self.run_time:
-                print("epoch: ", epoch_num, " step: ", step_num, " loss: ", loss)
-                run_context.request_stop()
+               print("epoch: ", epoch_num, " step: ", step_num, " loss: ", loss)
+               run_context.request_stop()
 
     stop_cb = StopAtTime(run_time=10)
     model.train(100, dataset, callbacks=stop_cb)
@@ -259,50 +261,108 @@ val:[[1 1]
  [1 1]]
 ```
 
-## Asynchronous Data Dump
+## Data Dump Introduction
 
-When the training result deviates from the expectation on Ascend, the input and output of the operator can be dumped for debugging through Asynchronous Data Dump.
+When the training result deviates from the expectation, the input and output of the operator can be saved for debugging through the data dump. Data dump includes Synchronous Dump and Asynchronous Dump.
 
-> `comm_ops` operators are not supported by Asynchronous Data Dump. `comm_ops` can be found in [Operator List](https://www.mindspore.cn/docs/en/master/operator_list.html).
+### Synchronous Dump
 
-1. Turn on the switch to save graph IR: `context.set_context(save_graphs=True)`.
-2. Execute training script.
-3. Open `hwopt_d_end_graph_{graph id}.ir` in the directory you execute the script and find the name of the operators you want to Dump.
-4. Configure json file: `data_dump.json`.
+1. Create a dump JSON file: `data_dump.json`.
+
+    The name and location of the JSON file can be customized.
 
    ```json
   {
-        "DumpSettings": {
+        "common_dump_settings": {
+            "dump_mode": 0,
+            "path": "/tmp/net/",
             "net_name": "ResNet50",
+            "iteration": 0,
+            "input_output": 0,
+            "kernels": ["Default/Conv-op12"],
+            "support_device": [0,1,2,3,4,5,6,7]
+        },
+        "e2e_dump_settings": {
+            "enable": false,
+            "trans_flag": false
+        }
+    }
+    ```
+
+    - `dump_mode`: 0: dump all kernels in the graph; 1: dump the kernels in the kernels list.
+    - `path`: the absolute path where the dump data is saved.
+    - `net_name`: the net name, e.g. ResNet50.
+    - `iteration`: specify the iteration to dump. All kernels in the graph will be dumped.
+    - `input_output`: 0: dump both the input and output of the kernel; 1: dump the input of the kernel; 2: dump the output of the kernel. This parameter does not take effect on the GPU, where only the output of the operator is dumped.
+    - `kernels`: the full name of the kernel. Enable `context.set_context(save_graphs=True)` and get the full name of the kernel from the `ir` file: from `hwopt_d_end_graph_{graph_id}.ir` when `device_target` is `Ascend`, and from `hwopt_pm_7_getitem_tuple.ir` when `device_target` is `GPU`.
+    - `support_device`: the supported devices; the default setting is `[0,1,2,3,4,5,6,7]`. You can specify device ids to dump the data of specific devices.
+    - `enable`: enable synchronous dump.
+    - `trans_flag`: enable the trans flag to transform the device data format into NCHW.
+
+2. Specify the location of the JSON file.
+
+   ```bash
+   export MINDSPORE_DUMP_CONFIG={Absolute path of data_dump.json}
+   ```
+
+   - Set the environment variable before executing the training script. Setting it during training will not take effect.
+   - Dump environment variables need to be configured before calling `mindspore.communication.management.init`.
+
+3. Execute the training script to dump data.
+
+   You can set `context.set_context(reserve_class_name_in_scope=False)` in your training script to avoid dump failures caused by overlong file names.
+
+4. Parse the Dump file
+
+   Call `numpy.fromfile` to parse the dump data file, for example `numpy.fromfile(file_path, dtype=...)`, where the `dtype` must match the data type of the dumped tensor.
+
+### Asynchronous Dump
+
+1. Create a dump JSON file: `data_dump.json`.
+
+   The name and location of the JSON file can be customized.
+   ```json
+   {
+       "common_dump_settings": {
            "dump_mode": 0,
-        "op_debug_mode": 0,
+           "path": "/relative_path",
+           "net_name": "ResNet50",
            "iteration": 0,
-        "kernels": ["Default/Conv2D-op2", "Default/TensorAdd-op10"]
+           "input_output": 0,
+           "kernels": ["Default/Conv-op12"],
+           "support_device": [0,1,2,3,4,5,6,7]
+       },
+       "async_dump_settings": {
+           "enable": false,
+           "op_debug_mode": 0
       }
   }
   ```
 
-    > - `net_name`: net name eg:ResNet50.
-    > - `dump_mode`: 0: dump all kernels, 1: dump kernels in kernels list.
-    > - `op_debug_mode`: please set to 0.
-    > - `iteration`: specified iteration to dump. `iteration` should be set to 0 when `dataset_sink_mode` is False and data of every iteration will be dumped.
-    > - `kernels`: `fullname_with_scope` of kernel which need to dump.
+    - `dump_mode`: 0: dump all kernels in the graph; 1: dump the kernels in the kernels list.
+    - `path`: the relative path where the dump data is saved; e.g. the data will be saved in `/var/log/npu/ide_daemon/dump/relative_path`.
+    - `net_name`: the net name, e.g. ResNet50.
+    - `iteration`: specify the iteration to dump. `iteration` should be set to 0 when `dataset_sink_mode` is False, and data of every iteration will be dumped.
+    - `input_output`: 0: dump both the input and output of the kernel; 1: dump the input of the kernel; 2: dump the output of the kernel.
+    - `kernels`: the full name of the kernel. Enable `context.set_context(save_graphs=True)` and get the full name of the kernel from `hwopt_d_end_graph_{graph_id}.ir`. `kernels` only supports TBE operators, AiCPU operators and communication operators. The data of the input operator of a communication operator will be dumped if `kernels` is set to the name of the communication operator.
+    - `support_device`: the supported devices; the default setting is `[0,1,2,3,4,5,6,7]`. You can specify device ids to dump the data of specific devices.
+    - `enable`: enable asynchronous dump.
+    - `op_debug_mode`: please set it to 0.
 
-5. Set environment variables.
+2. Specify the JSON configuration file of Dump.
```bash
-    export ENABLE_DATA_DUMP=1
-    export DATA_DUMP_PATH=/test
-    export DATA_DUMP_CONFIG_PATH=data_dump.json
+   export MINDSPORE_DUMP_CONFIG={Absolute path of data_dump.json}
    ```
 
-    > - Set the environment variables before executing the training script. Setting environment variables during training will not take effect.
-    > - Dump environment variables need to be configured before calling `mindspore.communication.management.init`.
+   - Set the environment variables before executing the training script. Setting environment variables during training will not take effect.
+   - Dump environment variables need to be configured before calling `mindspore.communication.management.init`.
+
+3. Execute the training script to dump data.
 
-6. Execute the training script again.
-7. Parse the Dump file.
+4. Parse the Dump file
 
-    Change directory to `/var/log/npu/ide_daemon/dump/` after training and execute the following commands to parse Dump data file:
+   Change directory to `/var/log/npu/ide_daemon/dump/` after training and execute the following command to parse the Dump data file:
 
    ```bash
   python /usr/local/Ascend/toolkit/tools/operator_cmp/compare/dump_data_conversion.pyc -type offline -target numpy -i ./{Dump file path} -o ./{output file path}
diff --git a/tutorials/source_en/advanced_use/dashboard.md b/tutorials/source_en/advanced_use/dashboard.md
index 7c875e1c8151661194b42f4f8c26825cb76b6d2d..b3e3660fd1ab0eee35ddac21f30f75373c1aef5f 100644
--- a/tutorials/source_en/advanced_use/dashboard.md
+++ b/tutorials/source_en/advanced_use/dashboard.md
@@ -20,7 +20,7 @@
 
 ## Overview
 
-Training dashboard is an important part of mindinsight's visualization component, and its tags include scalar visualization, parameter distribution visualization, computational visualization, data visualization, image visualization and tensor visualization.
+Training dashboard is an important part of MindInsight's visualization component, and its tags include scalar visualization, parameter distribution visualization, computational graph visualization, data graph visualization, image visualization and tensor visualization.
 
 Access the Training Dashboard by selecting a specific training from the training list.
 
@@ -159,8 +159,7 @@ Figure 12: Table display
 
 Figure 12 shows tensors recorded by a user in a form of a table which includes the following function:
 
 - Click the small square button on the right side of the table to zoom in the table.
-- The white box in the table shows the tensor data under which dimension is currently displayed, where the colon `:` represents all values of the current dimension, you can enter the corresponding index or `:` in the box and press `Enter` or click the button of tick on the back to query tensor data for specific dimensions.
-  Assuming a certain dimension is 32, the index range is -32 to 31. Note: tensor data from 0 to 2 dimensions can be queried. Tensor data of more than two dimensions is not supported, in other word, the query conditions of more than two colons `:` cannot be set.
+- The white box in the table shows the dimension of the tensor data currently displayed. The colon `:` indicates an index range for the current dimension, with basically the same meaning as Python slicing. If no specific index is specified, it indicates all values of the current dimension, while `2:5` indicates the values at indices 2 to 5 (excluding 5).
You can enter the corresponding index, or an index range containing `:`, in the box and press `Enter` or click the tick button next to it to query tensor data for specific dimensions. Assuming a certain dimension is 32, the index range is -32 to 31. Note: tensor data of 0 to 2 dimensions can be queried; tensor data of more than two dimensions is not supported, in other words, query conditions with more than two colons `:` cannot be set.
+- Query the tensor data of a specific step by dragging the hollow circle below the table.
 
 ![tensor_histogram.png](./images/tensor_histogram.png)
diff --git a/tutorials/source_en/advanced_use/dataset_conversion.md b/tutorials/source_en/advanced_use/dataset_conversion.md
new file mode 100644
index 0000000000000000000000000000000000000000..3bfbb3df49a953184848cf7250ea68f7fb64e818
--- /dev/null
+++ b/tutorials/source_en/advanced_use/dataset_conversion.md
@@ -0,0 +1 @@
+# Convert Dataset to MindRecord
diff --git a/tutorials/source_en/advanced_use/debugger.md b/tutorials/source_en/advanced_use/debugger.md
new file mode 100644
index 0000000000000000000000000000000000000000..94121a490e8a8f7956ad1067a17d70bc01079eff
--- /dev/null
+++ b/tutorials/source_en/advanced_use/debugger.md
@@ -0,0 +1,187 @@
+# Using Debugger
+
+`Linux` `Ascend` `GPU` `Graph Mode` `Debug Training` `Intermediate` `Expert`
+
+
+
+- [Using Debugger](#using-debugger)
+    - [Overview](#overview)
+    - [Operation Process](#operation-process)
+    - [Debugger Environment Preparation](#debugger-environment-preparation)
+    - [Debugger UI Introduction](#debugger-ui-introduction)
+        - [Computational Graph](#computational-graph)
+        - [Node List](#node-list)
+        - [Graph Node Details](#graph-node-details)
+        - [Conditional Breakpoint](#conditional-breakpoint)
+        - [Training Control](#training-control)
+    - [Debugger Usage Example](#debugger-usage-example)
+    - [Notices](#notices)
+
+
+
+
+
+## Overview
+
+MindSpore Debugger is a debugging tool for training in `Graph Mode`. It can be applied to visualize and analyze the intermediate computation results of the computational graph.
+
+In `Graph Mode` training, the computation results of intermediate nodes in the computational graph cannot be acquired from the Python layer, which makes it difficult for users to debug. By applying MindSpore Debugger, users can:
+
+- Visualize the computational graph on the UI and analyze the output of the graph node;
+- Set conditional breakpoints to monitor training exceptions (such as INF); when a condition is met, users can track the cause of the bug;
+- Visualize and analyze the change of parameters, such as weights.
+
+## Operation Process
+
+- Launch MindInsight in debugger mode, and set Debugger environment variables for the training;
+- At the beginning of the training, set conditional breakpoints;
+- Analyze the training progress on the MindInsight Debugger UI.
+
+## Debugger Environment Preparation
+
+First, install MindInsight and launch it in debugger mode. In debugger mode, MindSpore sends training information to the MindInsight Debugger Server, and users can analyze the information on the MindInsight UI.
+
+The command to launch MindInsight in debugger mode is as follows:
+
+```shell script
+mindinsight start --port {PORT} --enable-debugger True --debugger-port {DEBUGGER_PORT}
+```
+
+The Debugger related parameters:
+
+|Name|Argument|Description|Type|Default|Scope|
+|---|---|---|---|---|---|
+|`--port {PORT}`|Optional|Specifies the port number of the web visualization service.|Integer|8080|1~65535|
+|`--enable-debugger {ENABLE_DEBUGGER}`|Required|Should be set to `True`; this will launch the MindInsight debugger server.|Boolean|False|True/False|
+|`--debugger-port {DEBUGGER_PORT}`|Optional|Specifies the port number of the debugger server.|Integer|50051|1~65535|
+
+For more launch parameters, please refer to [MindInsight Commands](https://www.mindspore.cn/tutorial/en/master/advanced_use/mindinsight_commands.html).
+
+Then, set `export ENABLE_MS_DEBUGGER=1` to specify that the training runs in debugger mode, and set the debugger host and port to which the training is connected:
+`export MS_DEBUGGER_HOST=127.0.0.1` (the service address must be consistent with the MindInsight host address);
+`export MS_DEBUGGER_PORT=50051` (the port must be consistent with the MindInsight debugger-port).
+
+If the memory space of your device is limited, you can enable the memory reuse mode before starting the training to reduce memory usage: `export MS_DEBUGGER_PARTIAL_MEM=1`.
+
+Besides, do not use the dataset sink mode (set the parameter `dataset_sink_mode` in `model.train` to `False`) to ensure that the Debugger can acquire information for all steps.
+
+## Debugger UI Introduction
+
+After the Debugger environment preparation, users can run the training script.
+Before the execution of the computational graph, the MindInsight Debugger UI will show the information of the optimized computational graph.
+The following are the Debugger UI components.
+
+![debugger_init_page](./images/debugger_init_page.png)
+
+Figure 1: The initial UI of debugger
+
+### Computational Graph
+
+Debugger will display the optimized computational graph in the upper middle area of the page.
+Users can click a box (standing for one `scope`) to expand the graph and analyze the nodes contained in that `scope`.
+
+In the GPU environment, there are `Current Node` and `Next Node` buttons in the upper right corner of the computational graph panel,
+which are used to return to the current execution node and execute the next node, respectively. Users can easily execute one node at a time.
+
+The area on the top shows the training metadata, such as the `Client IP` (address and port of the training script process),
+the `Device ID` being used and the current training `Step`.
+
+### Node List
+
+As shown in Figure 1, the Computational Graph `Node List` will be displayed on the left of the page.
+The `Node List` can be expanded according to the `scope` of the nodes.
+When clicking a node in the list, the computational graph on the right will also be expanded and the corresponding node will be selected automatically.
+
+The search bar on the top can be used to search for nodes in the graph by node name.
+
+### Graph Node Details
+
+![debugger_tensor_info](./images/debugger_tensor_info.png)
+
+Figure 2: The Graph Node Details
+
+When choosing a node on the graph, the details of this node will be displayed at the bottom.
+The `Tensor Value Overview` area will show the input nodes and the outputs of this node. The `Type`, `Shape` and `Value` of the `Tensor` can also be viewed.
+
+In the GPU environment, after selecting an executable node on the graph, you can right-click and choose `Continue to` on this node,
+which means running the training script to the selected node within one step.
+After clicking `Continue to`, the training script runs to this node and then pauses.
+
+![debugger_tensor_value](./images/debugger_tensor_value.png)
+
+Figure 3: `Tensor` Value Visualization
+
+Some node outputs contain too many dimensions to be shown in the overview.
+For these `Tensors`, users can click the `View` link and visualize the `Tensor` in a new panel, as shown in Figure 3.
+
+![debugger_tensor_compare](./images/debugger_tensor_compare.png)
+
+Figure 4: Previous-Step Value Comparison for Parameter Nodes
+
+In addition, the output of the parameter nodes can be compared with their output in the previous step.
+Click the `Compare with Previous Step` button to enter the comparison interface, as shown in Figure 4.
+
+### Conditional Breakpoint
+
+![debugger_set_watch_point](./images/debugger_set_watch_point.png)
+
+Figure 5: Set Conditional Breakpoint (Watch Point)
+
+In order to monitor the training and find bugs, users can set conditional breakpoints (called `Watch Point List` on the UI) to analyze the outputs of the
+specified nodes automatically. Figure 5 displays how to set a `Watch Point`:
+- First, click the `+` button in the upper right corner, and then choose a watch condition;
+- Select the nodes to be watched in the `Node List` and tick the boxes in front of the chosen nodes;
+- Click the `OK` button to add this `Watch Point`.
+
+The outputs of the watched nodes will be checked against the corresponding conditions. Once a condition is satisfied, the training will pause, and users can analyze
+the triggered `Watch Point List` on the Debugger UI.
+
+![debugger_watch_point_hit](./images/debugger_watch_point_hit.png)
+
+Figure 6: The Triggered `Watch Point List`
+
+Figure 6 displays the triggered `Watch Point List`; the displayed area is the same as that of the `Node List`.
+The triggered nodes and corresponding conditions are displayed in the execution order. Click a line in the list, and the node will be shown in the computational graph automatically.
+Users can further trace the cause of the bug by analyzing the node details.
+
+### Training Control
+
+At the bottom of the watchpoint setting panel is the training control panel, which shows the training control functions of the debugger,
+with four buttons: `CONTINUE`, `PAUSE`, `TERMINATE` and `OK`:
+
+- `OK` stands for executing the training for several steps; the number of steps can be specified in the bar above.
+The training will pause once the `Watch Point List` is triggered or the specified number of steps has been executed.
+- `CONTINUE` stands for executing the training until the `Watch Point List` is triggered, or the training is finished.
+- `PAUSE` stands for pausing the training.
+- `TERMINATE` stands for terminating the training.
+
+## Debugger Usage Example
+
+1. Prepare the debugger environment, and open the MindInsight Debugger UI.
+
+    ![debugger_waiting](./images/debugger_waiting.png)
+
+    Figure 7: Debugger Start and Waiting for the Training
+
+    The Debugger server is launched and waiting for the training to connect.
+
+2. Run the training script. After a while, the computational graph will be displayed on the Debugger UI, as shown in Figure 1.
+
+3. Set conditional breakpoints for the training, as shown in Figure 5.
+
+    In Figure 5, conditions are selected and some nodes are watched, which means the debugger monitors whether any output of these nodes meets the conditions during training.
+    After setting the conditional breakpoints, users can set the number of steps in the control panel and click `OK` or `CONTINUE` to continue training.
+
+4. The conditional breakpoints are triggered, as shown in Figure 6.
+
+    When the conditional breakpoints are triggered, users can analyze the corresponding node details to find out the cause of the bug.
+
+## Notices
+
+- Debugger will slow down the training performance.
+- A single Debugger Server can only be connected to one training process.
+- The debugger does not support distributed training scenarios.
+- The debugger does not support multi-graph training scenarios.
+- When too many `Watch Points` are set, the system may run out of memory.
+- When training on a Davinci device, the Debugger cannot get the initialization parameters of the neural network.
+- In the GPU environment, only parameter nodes that meet the following conditions can be compared with their own results from the previous step: nodes executed with `Next Node` or `Continue to`, and parameter nodes used as inputs of the `Watch Points`. Otherwise, `Compare with Previous Step` cannot be used.
diff --git a/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md b/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
index 98c55656a9f80709e92f5e33176c7ac958c0a274..bbb9ad368809a108989deda5640cc9aea751642d 100644
--- a/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
+++ b/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
@@ -17,7 +17,7 @@
 
 ## Overview
 
-MindSpore supports the following running modes which are optimized in terms of debugging or running:
+MindSpore supports the following running modes which are optimized for debugging or running:
 
 - PyNative mode: dynamic graph mode. In this mode, operators in the neural network are delivered and executed one by one, facilitating the compilation and debugging of the neural network model.
 - Graph mode: static graph mode. In this mode, the neural network model is compiled into an entire graph and then delivered for execution. This mode uses technologies such as graph optimization to improve the running performance and facilitates large-scale deployment and cross-platform running.
@@ -105,12 +105,12 @@ print(output.asnumpy())
 [3. 3. 3.]]
 ```
 
-> Parallel execution and summary is not supported in PyNative mode, so parallel and summary related operators can not be used.
+> Parallel execution and summary are not supported in PyNative mode, so parallel and summary related operators cannot be used.
 
 ### Improving PyNative Performance
 
-MindSpore provides the staging function to improve the execution speed of inference tasks in PyNative mode. This function compiles Python functions or Python class methods into computational graphs in PyNative mode and improves the execution speed by using graph optimization technologies, as shown in the following example:
+MindSpore provides the Staging function to improve the execution speed of inference tasks in PyNative mode.
This function compiles Python functions or Python class methods into computational graphs in PyNative mode and improves the execution speed by using graph optimization technologies, as shown in the following example:
 
 ```python
 import numpy as np
diff --git a/tutorials/source_en/advanced_use/differential_privacy.md b/tutorials/source_en/advanced_use/differential_privacy.md
index 57bd79f4adb8ef69eb377f3b42265c972142be9a..00e7743f9978913614d1f9bef7c18351d2414b01 100644
--- a/tutorials/source_en/advanced_use/differential_privacy.md
+++ b/tutorials/source_en/advanced_use/differential_privacy.md
@@ -45,7 +45,7 @@ MindArmour differential privacy module Differential-Privacy implements the diffe
 
 The LeNet model and MNIST dataset are used as an example to describe how to use the differential privacy optimizer to train a neural network model on MindSpore.
 
-> This example is for the Ascend 910 AI processor. You can download the complete sample code from .
+> This example is for the Ascend 910 AI processor. You can download the complete sample code from .
 
 ## Implementation
 
@@ -70,13 +70,11 @@
 import mindspore.dataset.transforms.c_transforms as C
 from mindspore.dataset.vision import Inter
 import mindspore.common.dtype as mstype
 
-from mindarmour.diff_privacy import DPModel
-from mindarmour.diff_privacy import PrivacyMonitorFactory
-from mindarmour.diff_privacy import NoiseMechanismsFactory
-from mindarmour.diff_privacy import ClipMechanismsFactory
+from mindarmour.privacy.diff_privacy import DPModel
+from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory
+from mindarmour.privacy.diff_privacy import NoiseMechanismsFactory
+from mindarmour.privacy.diff_privacy import ClipMechanismsFactory
 from mindarmour.utils.logger import LogUtil
-from lenet5_net import LeNet5
-from lenet5_config import mnist_cfg as cfg
 
 LOGGER = LogUtil.get_instance()
 LOGGER.set_level('INFO')
@@ -85,7 +83,7 @@ TAG = 'Lenet5_train'
 
 ### Configuring Parameters
 
-1. Set the running environment, dataset path, model training parameters, checkpoint storage parameters, and differential privacy parameters. Replace 'data_path' with you data path. For more configurations, see .
+1. Set the running environment, dataset path, model training parameters, checkpoint storage parameters, and differential privacy parameters. Replace 'data_path' with your data path. For more configurations, see .
```python
    cfg = edict({
@@ -99,7 +97,7 @@ TAG = 'Lenet5_train'
         'save_checkpoint_steps': 234,  # the interval steps for saving checkpoint file of the model
         'keep_checkpoint_max': 10,  # the maximum number of checkpoint files would be saved
         'device_target': 'Ascend',  # device used
-        'data_path': './MNIST_unzip',  # the path of training and testing data set
+        'data_path': '../../common/dataset/MNIST',  # the path of training and testing data set
         'dataset_sink_mode': False,  # whether deliver all training data to device one time
         'micro_batches': 32,  # the number of small batches split from an original batch
         'norm_bound': 1.0,  # the clip bound of the gradients of model's training parameters
diff --git a/tutorials/source_en/advanced_use/distributed_training_ascend.md b/tutorials/source_en/advanced_use/distributed_training_ascend.md
index 81415d95d3f5639ca735a47a0a568165458fdac9..1fe5d9380d06b54fbdca971dc97ca84fb186d7fa 100644
--- a/tutorials/source_en/advanced_use/distributed_training_ascend.md
+++ b/tutorials/source_en/advanced_use/distributed_training_ascend.md
@@ -17,6 +17,11 @@
     - [Defining the Optimizer](#defining-the-optimizer)
     - [Training the Network](#training-the-network)
     - [Running the Script](#running-the-script)
+    - [Distributed Training Model Parameters Saving and Loading](#distributed-training-model-parameters-saving-and-loading)
+        - [Auto Parallel Mode](#auto-parallel-mode)
+        - [Data Parallel Mode](#data-parallel-mode)
+        - [Semi Auto Parallel Mode](#semi-auto-parallel-mode)
+        - [Hybrid Parallel Mode](#hybrid-parallel-mode)
 
 
 
@@ -219,7 +224,7 @@ The `Momentum` optimizer is used as the parameter update tool. The definition is
 
 > You are advised to set `device_num` and `global_rank` to their default values. The framework calls the HCCL API to obtain the values.
 
-If multiple network cases exist in the script, call `context.reset_auto_parallel_context()` to restore all parameters to default values before executing the next case.
+If multiple network cases exist in the script, call `context.reset_auto_parallel_context` to restore all parameters to default values before executing the next case.
 
 In the following sample code, the automatic parallel mode is specified. To switch to the data parallel mode, you only need to change `parallel_mode` to `DATA_PARALLEL`.
 
@@ -334,3 +339,190 @@ epoch: 8 step: 156, loss is 1.2943741
 epoch: 9 step: 156, loss is 1.2316195
 epoch: 10 step: 156, loss is 1.1533381
 ```
+
+## Distributed Training Model Parameters Saving and Loading
+
+The following describes how to save and load model parameters under the four distributed parallel training modes. Before saving model parameters for distributed training, it is necessary to configure the distributed environment variables and the collective communication library in accordance with this tutorial.
+
+### Auto Parallel Mode
+
+It is convenient to save and load the model parameters in auto parallel mode. Just add the `CheckpointConfig` and `ModelCheckpoint` configurations to the `test_train_cifar` method in the training network steps of this tutorial, and the model parameters can be saved.
The code is as follows:
+
+```python
+def test_train_cifar(epoch_size=10):
+    context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL, gradients_mean=True)
+    loss_cb = LossMonitor()
+    dataset = create_dataset(data_path)
+    batch_size = 32
+    num_classes = 10
+    net = resnet50(batch_size, num_classes)
+    loss = SoftmaxCrossEntropyExpand(sparse=True)
+    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
+    save_path = '...'
+    ckpt_config = CheckpointConfig()
+    ckpt_callback = ModelCheckpoint(prefix='auto_parallel', directory=save_path, config=ckpt_config)
+    model = Model(net, loss_fn=loss, optimizer=opt)
+    model.train(epoch_size, dataset, callbacks=[loss_cb, ckpt_callback], dataset_sink_mode=True)
+```
+
+After saving the checkpoint file, users can easily load model parameters for inference or retraining. For example, the following code can be used for retraining:
+
+```python
+net = Net()
+param_dict = load_checkpoint(save_path)
+load_param_into_net(net, param_dict)
+```
+
+For checkpoint configuration policy and saving method, please refer to [Saving and Loading Model Parameters](https://www.mindspore.cn/tutorial/en/master/use/saving_and_loading_model_parameters.html#checkpoint-configuration-policies).
+
+### Data Parallel Mode
+
+Under Data Parallel Mode, checkpoints can be used as shown in the following example:
+
+```python
+from mindspore.train import Model
+from mindspore import context
+from mindspore.context import ParallelMode
+from mindspore.nn import Momentum, Cell, Flatten, ReLU
+from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor
+from mindspore.communication.management import get_rank
+from mindspore.common.parameter import Parameter
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+# define network
+class DataParallelNet(Cell):
+    def __init__(self, test_size, transpose_a=False, transpose_b=False, strategy=None, layerwise_parallel=True):
+        super().__init__()
+        weight_np = np.full(test_size, 0.1, dtype=np.float32)
+        self.weight = Parameter(Tensor(weight_np), name="fc_weight", layerwise_parallel=layerwise_parallel)
+        self.relu = ReLU()
+        self.fc = P.MatMul(transpose_a=transpose_a, transpose_b=transpose_b)
+        if strategy is not None:
+            self.fc.shard(strategy)
+
+    def construct(self, inputs, label):
+        x = self.relu(inputs)
+        x = self.fc(x, self.weight)
+        return x
+```
+
+Assuming that the Data Parallel mode is used to train and save the model on an 8P machine, the data needs to be obtained first, and the parallel strategy and parallel mode need to be set. The code is as follows:
+
+```python
+# create data sets
+parallel_dataset = CreateData()
+# set parallel strategy
+strategy = ((1, 1), (1, 8))
+# create network model
+net = DataParallelNet(strategy=strategy)
+# reset parallel mode
+context.reset_auto_parallel_context()
+# set parallel mode, data parallel mode is selected for training and model saving. If you want to choose auto parallel
+# mode, you can simply change the value of the parallel_mode parameter to ParallelMode.AUTO_PARALLEL.
+context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, device_num=8)
+```
+
+Then set the checkpoint saving policy, optimizer and loss function as required.
The code is as follows:
+
+```python
+# config checkpoint
+ckpt_config = CheckpointConfig(keep_checkpoint_max=1)
+# define checkpoint save path (get_rank() requires the communication library to be initialized first)
+ckpt_path = './rank_{}_ckpt'.format(get_rank())
+# create a ModelCheckpoint object
+ckpt_callback = ModelCheckpoint(prefix='data_parallel', directory=ckpt_path, config=ckpt_config)
+# set optimizer and loss function
+opt = Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
+loss = SoftmaxCrossEntropyExpand()
+model = Model(net, loss_fn=loss, optimizer=opt)
+# During training, the system will automatically save the checkpoint file; 10 epochs here is an illustrative value.
+model.train(10, train_dataset=parallel_dataset, callbacks=[ckpt_callback, LossMonitor()])
+# After training, reset the parallel mode to avoid unnecessary trouble when retraining.
+context.reset_auto_parallel_context()
+```
+
+After saving the checkpoint file, users can also use `load_checkpoint` and `load_param_into_net` to load the model parameters.
+
+### Semi Auto Parallel Mode
+
+The whole process of using checkpoints in Semi Auto Parallel mode also starts with defining a network model.
+
+```python
+class SemiAutoParallelNet(Cell):
+    def __init__(self, mul_size, test_size, strategy=None, strategy2=None):
+        super().__init__()
+        mul_np = np.full(mul_size, 0.5, dtype=np.float32)
+        equal_np = np.full(test_size, 0.1, dtype=np.float32)
+        self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
+        self.equal_weight = Parameter(Tensor(equal_np), name="equal_weight")
+        self.mul = P.Mul()
+        self.equal = P.Equal()
+        if strategy is not None:
+            self.mul.shard(strategy)
+            self.equal.shard(strategy2)
+
+    def construct(self, inputs, label):
+        x = self.mul(inputs, self.mul_weight)
+        x = self.equal(x, self.equal_weight)
+        return x
+```
+
+It is assumed that the model is also trained and saved on an 8P machine in Semi Auto Parallel mode. The code for getting data and setting the parallel strategy and parallel mode is as follows:
+
+```python
+# create data sets
+parallel_dataset = CreateData()
+# set parallel strategy
+strategy = ((1, 1), (1, 8))
+# create network model
+net = SemiAutoParallelNet(strategy=strategy, strategy2=strategy)
+# reset parallel mode
+context.reset_auto_parallel_context()
+# set parallel mode, semi auto parallel mode is selected for training and model saving. If you want to choose auto
+# parallel mode, you can simply change the value of the parallel_mode parameter to ParallelMode.AUTO_PARALLEL.
+context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
+                                  strategy_ckpt_save_file='./rank_{}_ckpt/strategy.txt'.format(get_rank()))
+```
+
+Then set the checkpoint saving policy, optimizer and loss function as required. The code is as follows:
+
+```python
+# config checkpoint
+ckpt_config = CheckpointConfig(keep_checkpoint_max=1)
+# define checkpoint save path
+ckpt_path = './rank_{}_ckpt'.format(get_rank())
+# create a ModelCheckpoint object
+ckpt_callback = ModelCheckpoint(prefix='semi_auto_parallel', directory=ckpt_path, config=ckpt_config)
+# set optimizer and loss function
+opt = Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
+loss = SoftmaxCrossEntropyExpand()
+model = Model(net, loss_fn=loss, optimizer=opt)
+# After training, the system will automatically save the checkpoint file; 10 epochs here is an illustrative value.
+model.train(10, train_dataset=parallel_dataset, callbacks=[ckpt_callback, LossMonitor()])
+# After training, reset the parallel mode to avoid unnecessary trouble when retraining.
+context.reset_auto_parallel_context()
+```
+
+After saving the checkpoint file, users can also use `load_checkpoint` and `load_param_into_net` to load the model parameters.
+
+For the three parallel training modes described above, a complete checkpoint file is saved on each card. Users can instead save only each card's own slice of the checkpoint; the following takes Semi Auto Parallel mode as an example.
+
+To save only each card's own checkpoint slice, users only need to change the code that sets the checkpoint saving policy. The specific changes are as follows:
+
+Change the checkpoint configuration policy from:
+```python
+# config checkpoint
+ckpt_config = CheckpointConfig(keep_checkpoint_max=1)
+```
+
+to:
+```python
+# config checkpoint
+ckpt_config = CheckpointConfig(keep_checkpoint_max=1, integrated_save=False)
+```
+
+It should be noted that if users choose this checkpoint saving policy, they need to save and load the segmented checkpoints for subsequent inference or retraining. For specific usage, refer to [Integrating the Saved Checkpoint Files](https://www.mindspore.cn/tutorial/en/master/advanced_use/checkpoint_for_hybrid_parallel.html#integrating-the-saved-checkpoint-files).
+
+### Hybrid Parallel Mode
+
+For model parameter saving and loading in Hybrid Parallel Mode, please refer to [Saving and Loading Model Parameters in the Hybrid Parallel Scenario](https://www.mindspore.cn/tutorial/en/master/advanced_use/checkpoint_for_hybrid_parallel.html).
\ No newline at end of file
diff --git a/tutorials/source_en/advanced_use/distributed_training_gpu.md b/tutorials/source_en/advanced_use/distributed_training_gpu.md
new file mode 100644
index 0000000000000000000000000000000000000000..f7fd304a3354b7c9644d6c8f8b104f375f6a333a
--- /dev/null
+++ b/tutorials/source_en/advanced_use/distributed_training_gpu.md
@@ -0,0 +1,147 @@
+# Distributed Parallel Training (GPU)
+
+`Linux` `GPU` `Model` `Training` `Intermediate` `Expert`
+
+
+
+- [Distributed Parallel Training (GPU)](#distributed-parallel-training-gpu)
+    - [Overview](#overview)
+    - [Preparation](#preparation)
+        - [Downloading the Dataset](#downloading-the-dataset)
+        - [Configuring Distributed Environment](#configuring-distributed-environment)
+        - [Calling the Collective Communication Library](#calling-the-collective-communication-library)
+    - [Defining the Network](#defining-the-network)
+    - [Running the Script](#running-the-script)
+    - [Running the Multi-Host Script](#running-the-multi-host-script)
+
+
+
+
+
+## Overview
+
+This tutorial describes how to train the ResNet-50 network using MindSpore data parallelism and automatic parallelism on the GPU hardware platform.
+
+## Preparation
+
+### Downloading the Dataset
+
+The `CIFAR-10` dataset is used as an example. The method of downloading and loading the dataset is the same as that for the Ascend 910 AI processor.
+
+> The method of downloading and loading the dataset:
+>
+> 
+
+### Configuring Distributed Environment
+
+- `OpenMPI-3.1.5`: multi-process communication library used by MindSpore.
+
+  > Download the OpenMPI-3.1.5 source code package `openmpi-3.1.5.tar.gz` from .
+  >
+  > For details about how to install OpenMPI, see the official tutorial: .
+
+- `NCCL-2.4.8`: Nvidia collective communication library.
+
+  > Download NCCL-2.4.8 from .
+  >
+  > For details about how to install NCCL, see the official tutorial: .
+
+- Password-free login between hosts (required for multi-host training).
If multiple hosts are involved in the training, you need to configure password-free login between them. The procedure is as follows:
+    1. Ensure that the same user is used to log in to each host. (The root user is not recommended.)
+    2. Run the `ssh-keygen -t rsa -P ""` command to generate a key.
+    3. Run the `ssh-copy-id DEVICE-IP` command to set the IP address of the host that requires password-free login.
+    4. Run the `ssh DEVICE-IP` command. If you can log in without entering the password, the configuration is successful.
+    5. Run the preceding commands on all hosts to ensure that every two hosts can communicate with each other.
+
+### Calling the Collective Communication Library
+
+On the GPU hardware platform, MindSpore parallel distributed training uses NCCL for communication.
+
+> On the GPU platform, MindSpore does not support the following operations:
+>
+> `get_local_rank`, `get_local_size`, `get_world_rank_from_group_rank`, `get_group_rank_from_world_rank` and `create_group`
+
+The sample code for calling the NCCL is as follows:
+
+```python
+from mindspore import context
+from mindspore.communication.management import init
+
+if __name__ == "__main__":
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    init("nccl")
+    ...
+```
+
+In the preceding information,
+
+- `mode=context.GRAPH_MODE`: sets the running mode to graph mode for distributed training. (The PyNative mode does not support parallel running.)
+- `init("nccl")`: enables NCCL communication and completes the distributed training initialization.
+
+## Defining the Network
+
+On the GPU hardware platform, the network definition is the same as that for the Ascend 910 AI processor.
+
+> For details about the definitions of the network, optimizer, and loss function, see .
+
+## Running the Script
+
+On the GPU hardware platform, MindSpore uses OpenMPI `mpirun` for distributed training. The following takes the distributed training script for eight devices as an example to describe how to run the script:
+
+> Obtain the running script of the example from:
+>
+> 
+>
+> If the script is executed by the root user, the `--allow-run-as-root` parameter must be added to `mpirun`.
+
+```bash
+#!/bin/bash
+
+DATA_PATH=$1
+export DATA_PATH=${DATA_PATH}
+
+rm -rf device
+mkdir device
+cp ./resnet50_distributed_training.py ./resnet.py ./device
+cd ./device
+echo "start training"
+mpirun -n 8 pytest -s -v ./resnet50_distributed_training.py > train.log 2>&1 &
+```
+
+The script requires the variable `DATA_PATH`, which indicates the path of the dataset. In addition, you need to modify the `resnet50_distributed_training.py` file. Since the `DEVICE_ID` environment variable does not need to be set on the GPU, you do not need to call `int(os.getenv('DEVICE_ID'))` in the script to obtain the physical sequence number of the device, and `context` does not require `device_id`. You need to set `device_target` to `GPU` and call `init("nccl")` to enable the NCCL. The log file is saved in the `device` directory, and the loss results are saved in `train.log`.
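+
+For reference, a minimal sketch of extracting the loss values from the log (assuming the `train.log` path produced by the script above; adjust the path if yours differs):
+
+```bash
+grep "loss is" train.log
+```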
+The output loss values of the `grep` command are as follows:
+
+```
+epoch: 1 step: 1, loss is 2.3025854
+epoch: 1 step: 1, loss is 2.3025854
+epoch: 1 step: 1, loss is 2.3025854
+epoch: 1 step: 1, loss is 2.3025854
+epoch: 1 step: 1, loss is 2.3025854
+epoch: 1 step: 1, loss is 2.3025854
+epoch: 1 step: 1, loss is 2.3025854
+epoch: 1 step: 1, loss is 2.3025854
+```
+
+## Running the Multi-Host Script
+
+If multiple hosts are involved in the training, you need to set the multi-host configuration in the `mpirun` command. You can use the `-H` option in the `mpirun` command. For example, `mpirun -n 16 -H DEVICE1_IP:8,DEVICE2_IP:8 python hello.py` indicates that eight processes are started on each of the hosts whose IP addresses are DEVICE1_IP and DEVICE2_IP. Alternatively, you can create a hostfile similar to the following and transfer its path to the `--hostfile` option of `mpirun`. Each line in the hostfile is in the format of `[hostname] slots=[slotnum]`, where hostname can be an IP address or a host name.
+
+```bash
+DEVICE1 slots=8
+DEVICE2 slots=8
+```
+
+The following is the execution script of the 16-device two-host cluster. The variables `DATA_PATH` and `HOSTFILE` need to be transferred, indicating the dataset path and the hostfile path. For more details about `mpirun` options, see the OpenMPI official website.
+
+```bash
+#!/bin/bash
+
+DATA_PATH=$1
+HOSTFILE=$2
+
+rm -rf device
+mkdir device
+cp ./resnet50_distributed_training.py ./resnet.py ./device
+cd ./device
+echo "start training"
+mpirun -n 16 --hostfile $HOSTFILE -x DATA_PATH=$DATA_PATH -x PATH -mca pml ob1 pytest -s -v ./resnet50_distributed_training.py > train.log 2>&1 &
+```
+
+When running on GPU, the model parameters can also be saved and loaded; for details, see [Distributed Training Model Parameters Saving and Loading](https://www.mindspore.cn/tutorial/en/master/advanced_use/distributed_training_tutorials.html).
diff --git a/tutorials/source_en/advanced_use/distributed_training_tutorials.rst b/tutorials/source_en/advanced_use/distributed_training_tutorials.rst
index 4807338b07f88a8ef709abf861f1af9334deb256..3fd6919a96bce0b89fdcedf52f8e5668f9373348 100644
--- a/tutorials/source_en/advanced_use/distributed_training_tutorials.rst
+++ b/tutorials/source_en/advanced_use/distributed_training_tutorials.rst
@@ -17,6 +17,7 @@ MindSpore also provides the parallel distributed training function. It supports
    :maxdepth: 1
 
    distributed_training_ascend
+   distributed_training_gpu
   host_device_training
   checkpoint_for_hybrid_parallel
   parameter_server_training
diff --git a/tutorials/source_en/advanced_use/fuzzer.md b/tutorials/source_en/advanced_use/fuzzer.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab03dd2f72c6040ec1ac4996c20924d0413042ce
--- /dev/null
+++ b/tutorials/source_en/advanced_use/fuzzer.md
@@ -0,0 +1,211 @@
+# AI Model Security Test
+
+`Ascend` `GPU` `CPU` `Data Preparation` `Model Development` `Model Training` `Model Optimization` `Enterprise` `Expert`
+
+
+
+- [AI Model Security Test](#ai-model-security-test)
+    - [Overview](#overview)
+    - [Implementation](#implementation)
+        - [Importing Library Files](#importing-library-files)
+        - [Parameter Configuration](#parameter-configuration)
+        - [Fuzz Testing Application](#fuzz-testing-application)
+
+
+
+
+## Overview
+
+The decision logic of traditional software is determined by the code logic. Traditional software determines whether the test is adequate based on the code line coverage rate. Ideally, the higher the coverage rate is, the more adequate the code test is.
However, for deep neural networks, the decision logic of the program is determined by the training data, network structure, and parameters through a black-box mechanism, and the code line coverage fails to evaluate the test adequacy. A more suitable test evaluation criterion needs to be selected according to the characteristics of deep networks, to guide more adequate testing of the neural network and to find more corner error cases, thereby ensuring the universality and robustness of the model.
+
+The fuzz testing module of MindArmour uses the neuron coverage rate as the test evaluation criterion. Neuron coverage is the range of neurons observed to be activated and the range of neuron output values for a given set of inputs. It is used to guide input mutation so that the inputs activate more neurons and the neuron values are distributed over a wider range. In this way, we can explore different types of model outputs and incorrect behaviors.
+
+The LeNet model and MNIST dataset are used as an example to describe how to use fuzz testing.
+
+> This example is for CPUs, GPUs, and Ascend 910 AI processors. You can download the complete sample code at .
+
+## Implementation
+
+### Importing Library Files
+
+The following lists the required common modules, MindSpore-related modules, and fuzz testing modules, and configures the log label and log level.
+
+```python
+import numpy as np
+from mindspore import Model
+from mindspore import context
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+
+from mindarmour.fuzz_testing import Fuzzer
+from mindarmour.fuzz_testing import ModelCoverageMetrics
+from mindarmour.utils.logger import LogUtil
+
+from examples.common.dataset.data_processing import generate_mnist_dataset
+from examples.common.networks.lenet5.lenet5_net import LeNet5
+
+LOGGER = LogUtil.get_instance()
+TAG = 'Fuzz_testing'
+LOGGER.set_level('INFO')
+```
+
+### Parameter Configuration
+
+Configure necessary information, including the environment information and execution mode.
+
+```python
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+```
+
+For details about the API configuration, see `context.set_context`.
+
+### Fuzz Testing Application
+
+1. Create a LeNet model and load the MNIST dataset. The operation is the same as that for [Model Security]().
+
+    ```python
+    ...
+    # Lenet model
+    model = Model(net)
+    # get training data
+    data_list = "../common/dataset/MNIST/train"
+    batch_size = 32
+    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)
+    train_images = []
+    for data in ds.create_tuple_iterator():
+        images = data[0].asnumpy().astype(np.float32)
+        train_images.append(images)
+    train_images = np.concatenate(train_images, axis=0)
+
+    # get test data
+    data_list = "../common/dataset/MNIST/test"
+    batch_size = 32
+    ds = generate_mnist_dataset(data_list, batch_size, sparse=False)
+    test_images = []
+    test_labels = []
+    for data in ds.create_tuple_iterator():
+        images = data[0].asnumpy().astype(np.float32)
+        labels = data[1].asnumpy()
+        test_images.append(images)
+        test_labels.append(labels)
+    test_images = np.concatenate(test_images, axis=0)
+    test_labels = np.concatenate(test_labels, axis=0)
+    ```
+
+2. Configure Fuzzer parameters.
+
+    Set the data mutation methods and parameters. Multiple methods can be configured at the same time. Currently, the following data mutation methods are supported:
+
+    - Image affine transformation methods: Translate, Scale, Shear, and Rotate.
+    - Methods based on image pixel value changes: Contrast, Brightness, Blur, and Noise.
+    - Methods for generating adversarial examples based on white-box and black-box attacks: FGSM, PGD, and MDIIM.
+
+    The configured data mutation methods must include a method based on image pixel value changes.
+
+    The first two types of image transform methods support both user-defined configuration parameters and parameters randomly generated by the algorithm. For user-defined configuration parameters, see the corresponding class methods in https://gitee.com/mindspore/mindarmour/blob/master/mindarmour/fuzz_testing/image_transform.py. For parameters randomly generated by the algorithm, you can set the method's params to `'auto_param': [True]`; the mutation parameters are then randomly generated within the recommended range.
+
+    For details about how to set parameters based on the attack defense method, see the corresponding attack method class.
+
+    The following is an example of configuring Fuzzer parameters.
+
+    ```python
+    mutate_config = [{'method': 'Blur',
+                      'params': {'radius': [0.1, 0.2, 0.3],
+                                 'auto_param': [True, False]}},
+                     {'method': 'Contrast',
+                      'params': {'auto_param': [True]}},
+                     {'method': 'Translate',
+                      'params': {'auto_param': [True]}},
+                     {'method': 'Brightness',
+                      'params': {'auto_param': [True]}},
+                     {'method': 'Noise',
+                      'params': {'auto_param': [True]}},
+                     {'method': 'Scale',
+                      'params': {'auto_param': [True]}},
+                     {'method': 'Shear',
+                      'params': {'auto_param': [True]}},
+                     {'method': 'FGSM',
+                      'params': {'eps': [0.3, 0.2, 0.4], 'alpha': [0.1]}}
+                    ]
+    ```
+
+    Set evaluation metrics. Currently, the following evaluation metrics are supported:
+
+    - General evaluation metric: accuracy
+    - Neuron coverage rate metrics: kmnc, nbc, and snac
+    - Adversarial attack evaluation metric: attack_success_rate.
+
+    You can set this parameter to `auto`; by default, all evaluation metrics are used.
+
+    ```python
+    eval_metrics = ['accuracy', 'kmnc', 'attack_success_rate']
+    ```
+
+3. Initialize the seed queue. Each seed in the seed queue has two values: the original image and the image label. Here we select 100 samples as the initial seed queue.
+
+    ```python
+    # make initial seeds
+    initial_seeds = []
+    for img, label in zip(test_images, test_labels):
+        initial_seeds.append([img, label])
+    initial_seeds = initial_seeds[:100]
+    ```
+
+4. Test the neuron coverage rate before the fuzz testing.
+
+    ```python
+    segmented_num = 1000
+    neuron_num = 10
+    model_coverage_test = ModelCoverageMetrics(model, segmented_num, neuron_num, train_images)
+    model_coverage_test.calculate_coverage(np.array(test_images[:100]).astype(np.float32))
+    LOGGER.info(TAG, 'KMNC of this test is : %s', model_coverage_test.get_kmnc())
+    ```
+
+    Result:
+
+    ```python
+    KMNC of this test is : 0.0851
+    ```
+
+5. Perform the fuzz testing.
+
+    ```python
+    eval_metrics = 'auto'
+    model_fuzz_test = Fuzzer(model, train_images, neuron_num, segmented_num)
+    _, _, _, _, metrics = model_fuzz_test.fuzzing(mutate_config, initial_seeds, eval_metrics=eval_metrics)
+    ```
+
+6. Experiment results.
+
+    The fuzz testing returns five results:
+
+    - fuzz_samples: mutated samples in fuzz testing.
+    - true_labels: the ground truth labels of fuzz_samples.
+    - fuzz_pred: predictions of the tested model on fuzz_samples.
+    - fuzz_strategies: the methods used to mutate fuzz_samples.
+    - metrics_report: the metrics report of fuzz testing.
+
+    The first four returns can be used to further calculate complex metrics and analyze the robustness of the model, as in the sketch below.
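+
+    For example, a minimal sketch of computing per-mutation-strategy accuracy from these returns. It assumes the four returns are kept instead of discarded as in step 5, are index-aligned, and that labels and predictions are one-hot style arrays (consistent with `sparse=False` above):
+
+    ```python
+    # fuzz_samples, true_labels, fuzz_pred, fuzz_strategies, metrics = \
+    #     model_fuzz_test.fuzzing(mutate_config, initial_seeds, eval_metrics=eval_metrics)
+    import numpy as np
+    from collections import defaultdict
+
+    # Group sample indices by the mutation strategy that produced them.
+    per_strategy = defaultdict(list)
+    for idx, strategy in enumerate(fuzz_strategies):
+        per_strategy[strategy].append(idx)
+
+    # Accuracy of the model on the samples generated by each strategy.
+    for strategy, indices in per_strategy.items():
+        preds = np.argmax(np.array([fuzz_pred[i] for i in indices]), axis=1)
+        truths = np.argmax(np.array([true_labels[i] for i in indices]), axis=1)
+        print(strategy, float(np.mean(preds == truths)))
+    ```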
+
+    Run the following command to view the result:
+
+    ```python
+    if metrics:
+        for key in metrics:
+            LOGGER.info(TAG, key + ': %s', metrics[key])
+    ```
+
+    The fuzz testing result is as follows:
+
+    ```python
+    Accuracy: 0.7929
+    Attack_success_rate: 0.3939
+    Neural_coverage_KMNC: 0.4797
+    ```
+
+    Before the fuzz testing, the KMNC neuron coverage rate of the seeds is 8.5%. After the fuzz testing, the KMNC neuron coverage rate rises to 47.97%, so both the neuron coverage rate and the sample diversity increase. The accuracy of the model on the generated samples is 79.29%, and the attack success rate is 39.39% for samples generated using adversarial attack methods. Because the initial seeds, the mutation methods and the corresponding parameters are all randomly selected, it is normal for the results to fluctuate to some extent.
+
+    Original image:
+
+    ![fuzz_seed](./images/fuzz_seed.png)
+
+    Mutation images generated by fuzzing:
+
+    ![fuzz_res](./images/fuzz_res.png)
\ No newline at end of file
diff --git a/tutorials/source_en/advanced_use/gradient_accumulation.md b/tutorials/source_en/advanced_use/gradient_accumulation.md
index acb3af83cf4afb4606793916738daf181310ba2a..f1d150dc290405bebd01b14118246fa624afce9b 100644
--- a/tutorials/source_en/advanced_use/gradient_accumulation.md
+++ b/tutorials/source_en/advanced_use/gradient_accumulation.md
@@ -1,6 +1,6 @@
 # Gradient Accumulation
 
-`Linux` `Ascend` `GPU` `Model Optimization` `Intermediate` `Expert`
+`Linux` `GPU` `Model Optimization` `Intermediate` `Expert`
 
 
 
@@ -29,7 +29,7 @@ Different from the traditional training method, the concept of mini-batch is int
 
 The ultimate objective is to achieve the same effect as training with N x mini-batch data.
 
-> This tutorial is applicable to GPUs and Ascend 910 AI Processors. You can download the main training sample code from .
+> This tutorial is applicable to GPUs. You can download the main training sample code from .
 
 ## Creating a Gradient Accumulation Model
 
@@ -129,8 +129,8 @@ class TrainClear(Cell):
         self.hyper_map = C.HyperMap()
 
     def construct(self):
-        seccess = self.hyper_map(F.partial(_clear_op), self.grad_sum, self.zeros)
-        return seccess
+        success = self.hyper_map(F.partial(_clear_op), self.grad_sum, self.zeros)
+        return success
 ```
 
 ### Defining the Training Process
 
@@ -207,8 +207,8 @@ Call the network, optimizer, and loss function, and then customize the `train_pr
 ```python
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='MindSpore Gard Cumulative Example')
-    parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'],
-                        help='device where the code will be implemented (default: Ascend)')
+    parser.add_argument('--device_target', type=str, default="GPU", choices=['GPU'],
+                        help='device where the code will be implemented (default: GPU)')
     parser.add_argument('--data_path', type=str, default="./Data",
                        help='path where the dataset is saved')
    args = parser.parse_args()
@@ -230,9 +230,11 @@ After 10 epochs, the accuracy on the test set is about 96.31%.
 
 **Training Execution**
 
 1. Run the training code and view the running result.
+
   ```shell
   $ python train.py --data_path=./MNIST_Data
   ```
+
   The output is as follows. The loss value decreases during training.
 
   ```shell
@@ -245,7 +247,7 @@ After 10 epochs, the accuracy on the test set is about 96.31%.
   epoch: 10 step: 448 loss is 0.06443884
   epoch: 10 step: 449 loss is 0.0067842817
   ```
-   
+
2. Check the saved checkpoint files. The model file `gradient_accumulation.ckpt` is saved during training.
@@ -255,7 +257,7 @@ After 10 epochs, the accuracy on the test set is about 96.31%.
 
 Use the saved checkpoint file to load the validation dataset through [eval.py]() in the lenet directory of model_zoo.
 
 ```shell
-$ python eval.py --data_path=./MNIST_Data --ckpt_path=./gradient_accumulation.ckpt
+$ python eval.py --data_path=./MNIST_Data --ckpt_path=./gradient_accumulation.ckpt --device_target=GPU
 ```
 
 The output is as follows. The accuracy of the validation dataset is about 96.31%, which is the same as the result when the value of batch_size is 32.
diff --git a/tutorials/source_en/advanced_use/hub_tutorial.md b/tutorials/source_en/advanced_use/hub_tutorial.md
index 13e98abd3fa8aa362ba1b7613cd5bb613a82a43a..1b5c5d35ae7c2ad3b1f76c82ece4e6de57a268c8 100644
--- a/tutorials/source_en/advanced_use/hub_tutorial.md
+++ b/tutorials/source_en/advanced_use/hub_tutorial.md
@@ -1,52 +1,87 @@
-## Submitting, Loading and Fine-tuning Models using MindSpore Hub
+# Submitting, Loading and Fine-tuning Models using MindSpore Hub
 
-`Ascend` `GPU` `MindSpore Hub` `Model Submission` `Model Loading` `Model Fine-tuning` `Beginner` `Intermediate` `Expert`
+`Linux` `Ascend` `GPU` `MindSpore Hub` `Model Submission` `Model Loading` `Model Fine-tuning` `Beginner` `Intermediate` `Expert`
 
 
 
 - [Submitting, Loading and Fine-tuning Models using MindSpore Hub](#submitting-loading-and-fine-tuning-models-using-mindspore-hub)
-  - [Overview](#overview)
-  - [How to submit models](#how-to-submit-models)
-    - [Steps](#steps)
-  - [How to load models](#how-to-load-models)
-  - [Model Fine-tuning](#model-fine-tuning)
+    - [Overview](#overview)
+    - [How to submit models](#how-to-submit-models)
+        - [Steps](#steps)
+    - [How to load models](#how-to-load-models)
+    - [Model Fine-tuning](#model-fine-tuning)
 
 
 
-### Overview
+## Overview
 
-For algorithm developers who are interested in publishing models into MindSpore Hub, this tutorial introduces the specific steps to submit models using GoogleNet as an example. It also describes how to load/fine-tune MindSpore Hub models for application developers who aim to do inference/transfer learning on new dataset. In summary, this tutorial helps the algorithm developers submit models efficiently and enables the application developers to perform inference or fine-tuning using MindSpore Hub APIs quickly.
+MindSpore Hub is a pre-trained model application tool of the MindSpore ecosystem, which serves as a channel for model developers and application developers. It not only provides model developers with a convenient and fast channel for model submission, but also provides application developers with simple model loading and fine-tuning APIs. For model developers who are interested in publishing models into MindSpore Hub, this tutorial introduces the specific steps to submit models using GoogleNet as an example. It also describes how to load/fine-tune MindSpore Hub models for application developers who aim to do inference/transfer learning on a new dataset. In summary, this tutorial helps the model developers submit models efficiently and enables the application developers to perform inference or fine-tuning using MindSpore Hub APIs quickly.
 
-### How to submit models
+## How to submit models
 
-We accept publishing models to MindSpore Hub via PR in `hub` repo. Here we use GoogleNet as an example to list the steps of model submission to MindSpore Hub.
+We accept publishing models to MindSpore Hub via PR in [hub](https://gitee.com/mindspore/hub) repo.
Here we use GoogleNet as an example to list the steps of model submission to MindSpore Hub. -#### Steps +### Steps 1. Host your pre-trained model in a storage location where we are able to access. -2. Add a model generation python file called `mindspore_hub_conf.py` in your own repo using this [template](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/googlenet/mindspore_hub_conf.py). +2. Add a model generation python file called `mindspore_hub_conf.py` in your own repo using this [template](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/googlenet/mindspore_hub_conf.py). The location of the `mindspore_hub_conf.py` file is shown below: + + ```shell script + googlenet + ├── src + │   ├── googlenet.py + ├── script + │   ├── run_train.sh + ├── train.py + ├── test.py + ├── mindspore_hub_conf.py + ``` -3. Create a `{model_name}_{model_version}_{dataset}.md` file in `hub/mshub_res/assets` using this [template](https://gitee.com/mindspore/hub/blob/master/mshub_res/assets/mindspore/gpu/0.6/alexnet_v1_cifar10.md). For each pre-trained model, please run the following command to obtain a hash value required at `asset-sha256` of this `.md` file: +3. Create a `{model_name}_{model_version}_{dataset}.md` file in `hub/mshub_res/assets/mindspore/ascend/0.7` using this [template](https://gitee.com/mindspore/hub/blob/master/mshub_res/assets/mindspore/ascend/0.7/googlenet_v1_cifar10.md). Here `ascend` refers to the hardware platform for the pre-trained model, and `0.7` indicates the MindSpore version. The structure of the `hub/mshub_res` folder is as follows: + + ```shell script + hub + ├── mshub_res + │   ├── assets + │   ├── mindspore + | ├── gpu + | ├── 0.7 + | ├── ascend + | ├── 0.7 + | ├── googlenet_v1_cifar10.md + │   ├── tools + | ├── get_sha256.py + | └── md_validator.py + ``` + + Note that it is required to fill in the `{model_name}_{model_version}_{dataset}.md` template by providing `file-format`, `asset-link` and `asset-sha256` below, which refer to the model file format, the model storage location from step 1, and the model hash value, respectively. The MindSpore Hub supports multiple model file formats including [MindSpore CKPT](https://www.mindspore.cn/tutorial/en/master/use/saving_and_loading_model_parameters.html#checkpoint-configuration-policies), [AIR](https://www.mindspore.cn/tutorial/en/master/use/multi_platform_inference.html), [MindIR](https://www.mindspore.cn/tutorial/en/master/use/saving_and_loading_model_parameters.html#export-mindir-model), [ONNX](https://www.mindspore.cn/tutorial/en/master/use/multi_platform_inference.html) and [MSLite](https://www.mindspore.cn/lite/tutorial/en/master/use/converter_tool.html). + + ```shell script + file-format: ckpt + asset-link: https://download.mindspore.cn/model_zoo/official/cv/googlenet/goolenet_ascend_0.2.0_cifar10_official_classification_20200713/googlenet.ckpt + asset-sha256: 114e5acc31dad444fa8ed2aafa02ca34734419f602b9299f3b53013dfc71b0f7 + ``` + For each pre-trained model, please run the following command to obtain a hash value required at `asset-sha256` of this `.md` file. Here the pre-trained model `googlenet.ckpt` is accessed from the storage location in step 1 and then saved in the `tools` folder. The output hash value is: `114e5acc31dad444fa8ed2aafa02ca34734419f602b9299f3b53013dfc71b0f7`. ```python cd ../tools python get_sha256.py ../googlenet.ckpt ``` -4. Check the format of the markdown file locally using `hub/mshub_res/tools/md_validator.py` by running the following command: +4.
Check the format of the markdown file locally using `hub/mshub_res/tools/md_validator.py` by running the following command. The output is `All Passed`, which indicates that the format and content of the `.md` file meet the requirements. ```python python md_validator.py ../assets/mindspore/ascend/0.7/googlenet_v1_cifar10.md ``` -5. Create a PR in `mindspore/hub` repo. +5. Create a PR in `mindspore/hub` repo. See our [Contributor Wiki](https://gitee.com/mindspore/mindspore/blob/master/CONTRIBUTING.md) for more information about creating a PR. -Once your PR is merged into master branch here, your model will show up in [MindSpore Hub Website](https://hub.mindspore.com/mindspore) within 24 hours. For more information, please refer to the [README](https://gitee.com/mindspore/hub/blob/master/mshub_res/README.md). +Once your PR is merged into the master branch here, your model will show up in [MindSpore Hub Website](https://hub.mindspore.com/mindspore) within 24 hours. Please refer to [README](https://gitee.com/mindspore/hub/blob/master/mshub_res/README.md) for more information about model submission. -### How to load models +## How to load models `mindspore_hub.load` API is used to load the pre-trained model in a single line of code. The main process of model loading is as follows: @@ -56,92 +91,119 @@ Once your PR is merged into [Mind - Complete the task of loading model using `url` , as shown in the example below: -```python -import mindspore_hub as mshub -import mindspore -from mindspore import context, Tensor, nn -from mindspore.train.model import Model -from mindspore.common import dtype as mstype -from mindspore.dataset.transforms import py_transforms -from PIL import Image -import cv2 - -context.set_context(mode=context.GRAPH_MODE, - device_target="Ascend", - device_id=0) - -model = "mindspore/ascend/0.7/googlenet_v1_cifar10" + ```python -image = Image.open('cifar10/a.jpg') -transforms = py_transforms.ComposeOp([py_transforms.ToTensor()]) + import mindspore_hub as mshub + import mindspore + from mindspore import context, Tensor, nn + from mindspore.train.model import Model + from mindspore.common import dtype as mstype + import mindspore.dataset.vision.py_transforms as py_transforms + + context.set_context(mode=context.GRAPH_MODE, + device_target="Ascend", + device_id=0) + + model = "mindspore/ascend/0.7/googlenet_v1_cifar10" + + # Initialize the number of classes based on the pre-trained model. + network = mshub.load(model, num_classes=10) + network.set_train(False) + + # ... -# Initialize the number of classes based on the pre-trained model. -network = mshub.load(model, num_classes=10) -network.set_train(False) -out = network(transforms(image)) -``` + ``` +- After loading the model, you can use MindSpore to do inference. For details, see [here](https://www.mindspore.cn/tutorial/en/master/use/multi_platform_inference.html). -### Model Fine-tuning +## Model Fine-tuning -When loading a model with `mindspore_hub.load` API, we can add an extra argument to load the feature extraction part of the model only. So we can easily add new layers to perform transfer learning. *This feature can be found in the related model page when an extra argument (e.g., include_top) has been integrated into the model construction by the algorithm engineer.* +When loading a model with `mindspore_hub.load` API, we can add an extra argument to load the feature extraction part of the model only. So we can easily add new layers to perform transfer learning.
This feature can be found in the related model page when an extra argument (e.g., include_top) has been integrated into the model construction by the model developer. The value of `include_top` is True or False, indicating whether to keep the top layer in the fully-connected network. -We use Googlenet as example to illustrate how to load a model trained on ImageNet dataset and then perform transfer learning (re-training) on specific sub-task dataset. The main steps are listed below: +We use GoogleNet as an example to illustrate how to load a model trained on the ImageNet dataset and then perform transfer learning (re-training) on a specific sub-task dataset. The main steps are listed below: 1. Search the model of interest on [MindSpore Hub Website](https://hub.mindspore.com/mindspore) and get the related `url`. -2. Load the model from MindSpore Hub using the `url`. *Note that the parameter `include_top` is provided by the model developer*. +2. Load the model from MindSpore Hub using the `url`. Note that the parameter `include_top` is provided by the model developer. ```python import mindspore - from mindspore import nn - from mindspore import context + from mindspore import nn, context, Tensor + from mindspore.train.serialization import save_checkpoint + from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits + from mindspore.ops import operations as P + from mindspore.nn import Momentum + + import math + import numpy as np + import mindspore_hub as mshub + from src.dataset import create_dataset context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False) - - network = mshub.load('mindspore/ascend/0.7/googlenet_v1_cifar10', include_top=False) + model_url = "mindspore/ascend/0.7/googlenet_v1_cifar10" + network = mshub.load(model_url, include_top=False, num_classes=1000) network.set_train(False) ``` 3. Add a new classification layer into current model architecture. ```python + class ReduceMeanFlatten(nn.Cell): + def __init__(self): + super(ReduceMeanFlatten, self).__init__() + self.mean = P.ReduceMean(keep_dims=True) + self.flatten = nn.Flatten() + + def construct(self, x): + x = self.mean(x, (2, 3)) + x = self.flatten(x) + return x + # Check MindSpore Hub website to conclude that the last output shape is 1024. last_channel = 1024 # The number of classes in target task is 26. num_classes = 26 + + reducemean_flatten = ReduceMeanFlatten() + classification_layer = nn.Dense(last_channel, num_classes) classification_layer.set_train(True) - train_network = nn.SequentialCell([network, classification_layer]) + train_network = nn.SequentialCell([network, reducemean_flatten, classification_layer]) ``` 4. Define `loss` and `optimizer` for training. ```python - from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits + epoch_size = 60 # Wrap the backbone network with loss. - loss_fn = SoftmaxCrossEntropyWithLogits() + loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") loss_net = nn.WithLossCell(train_network, loss_fn) - # Create an optimizer. - optim = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), Tensor(lr), config.momentum, config.weight_decay) + # get_lr is assumed to come with the tutorial's sample scripts; it builds the learning-rate schedule. + lr = get_lr(global_step=0, + lr_init=0, + lr_max=0.05, + lr_end=0.001, + warmup_epochs=5, + total_epochs=epoch_size) + # Create an optimizer. + optim = Momentum(filter(lambda x: x.requires_grad, loss_net.get_parameters()), Tensor(lr), 0.9, 4e-5) train_net = nn.TrainOneStepCell(loss_net, optim) ``` -5. Create dataset and start fine-tuning. +5. Create the dataset and start fine-tuning.
As shown below, the new dataset used for fine-tuning is the garbage classification data located in the `/ssd/data/garbage/train` folder. ```python - from src.dataset import create_dataset - from mindspore.train.serialization import _exec_save_checkpoint - - dataset = create_dataset("/ssd/data/garbage/train", do_train=True, batch_size=32) - - epoch_size = 15 + dataset = create_dataset("/ssd/data/garbage/train", + do_train=True, + batch_size=32, + platform="Ascend", + repeat_num=1) + for epoch in range(epoch_size): for i, items in enumerate(dataset): data, label = items @@ -149,10 +211,10 @@ We use Googlenet as example to illustrate how to load a model trained on ImageNe label = mindspore.Tensor(label) loss = train_net(data, label) - print(f"epoch: {epoch}, loss: {loss}") + print(f"epoch: {epoch}/{epoch_size}, loss: {loss}") # Save the ckpt file for each epoch. ckpt_path = f"./ckpt/garbage_finetune_epoch{epoch}.ckpt" - _exec_save_checkpoint(train_network, ckpt_path) + save_checkpoint(train_network, ckpt_path) ``` 6. Eval on test set. @@ -160,22 +222,31 @@ We use Googlenet as example to illustrate how to load a model trained on ImageNe ```python from mindspore.train.serialization import load_checkpoint, load_param_into_net - network = mshub.load('mindspore/ascend/0.7/googlenet_v1_cifar10', include_top=False) - train_network = nn.SequentialCell([network, nn.Dense(last_channel, num_classes)]) + network = mshub.load('mindspore/ascend/0.7/googlenet_v1_cifar10', pretrained=False, + include_top=False, num_classes=1000) + + reducemean_flatten = ReduceMeanFlatten() + + classification_layer = nn.Dense(last_channel, num_classes) + classification_layer.set_train(False) + softmax = nn.Softmax() + network = nn.SequentialCell([network, reducemean_flatten, + classification_layer, softmax]) # Load a pre-trained ckpt file. - ckpt_path = "./ckpt/garbage_finetune_epoch15.ckpt" + ckpt_path = "./ckpt/garbage_finetune_epoch59.ckpt" trained_ckpt = load_checkpoint(ckpt_path) - load_param_into_net(train_network, trained_ckpt) + load_param_into_net(network, trained_ckpt) # Define loss and create model.
- loss = SoftmaxCrossEntropyWithLogits() - model = Model(network, loss_fn=loss, metrics={'acc'}) + model = Model(network, metrics={'acc'}, eval_network=network) - eval_dataset = create_dataset("/ssd/data/garbage/train", do_train=False, - batch_size=32) + eval_dataset = create_dataset("/ssd/data/garbage/test", + do_train=True, + batch_size=32, + platform="Ascend", + repeat_num=1) res = model.eval(eval_dataset) print("result:", res, "ckpt=", ckpt_path) - ``` - + ``` \ No newline at end of file diff --git a/tutorials/source_en/advanced_use/images/cifar10_c_transforms.png b/tutorials/source_en/advanced_use/images/cifar10_c_transforms.png new file mode 100644 index 0000000000000000000000000000000000000000..10dc267dc650764566f6d20b7f090e20c12f8e11 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/cifar10_c_transforms.png differ diff --git a/tutorials/source_en/advanced_use/images/compose.png b/tutorials/source_en/advanced_use/images/compose.png new file mode 100644 index 0000000000000000000000000000000000000000..97b8ca59f4438852526b56a8a7ce00ff63771b40 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/compose.png differ diff --git a/tutorials/source_en/advanced_use/images/data_chart.png b/tutorials/source_en/advanced_use/images/data_chart.png index f698c682119efc886b46a911d3c61f50ab017879..9f1d5f4247472602823649909d934ad6f7160005 100644 Binary files a/tutorials/source_en/advanced_use/images/data_chart.png and b/tutorials/source_en/advanced_use/images/data_chart.png differ diff --git a/tutorials/source_en/advanced_use/images/data_enhancement_performance_scheme.png b/tutorials/source_en/advanced_use/images/data_enhancement_performance_scheme.png new file mode 100644 index 0000000000000000000000000000000000000000..6417031a63dd2bade4902a83934c05aeee6be195 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/data_enhancement_performance_scheme.png differ diff --git a/tutorials/source_en/advanced_use/images/data_label.png b/tutorials/source_en/advanced_use/images/data_label.png index f76c645e26b28401285f00dd0613d27e3506982c..ac79c2d53fe416e96b9ac841692b26f3eaf6ddd2 100644 Binary files a/tutorials/source_en/advanced_use/images/data_label.png and b/tutorials/source_en/advanced_use/images/data_label.png differ diff --git a/tutorials/source_en/advanced_use/images/data_loading_performance_scheme.png b/tutorials/source_en/advanced_use/images/data_loading_performance_scheme.png new file mode 100644 index 0000000000000000000000000000000000000000..44c84c1f14dee40cdd76926994ab670494abc006 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/data_loading_performance_scheme.png differ diff --git a/tutorials/source_en/advanced_use/images/data_table.png b/tutorials/source_en/advanced_use/images/data_table.png index 65dcd39049b2754ef9ed22641981743f985e2b85..c9f73cd59b8202eff0121b4c57466f9b39d1d0b9 100644 Binary files a/tutorials/source_en/advanced_use/images/data_table.png and b/tutorials/source_en/advanced_use/images/data_table.png differ diff --git a/tutorials/source_en/advanced_use/images/debugger_init_page.png b/tutorials/source_en/advanced_use/images/debugger_init_page.png new file mode 100644 index 0000000000000000000000000000000000000000..e0fedfd5e48d8679ea601c390411a47bdb564881 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/debugger_init_page.png differ diff --git a/tutorials/source_en/advanced_use/images/debugger_set_watch_point.png b/tutorials/source_en/advanced_use/images/debugger_set_watch_point.png new file mode 
100644 index 0000000000000000000000000000000000000000..5b984c5ce447b2dd68e3e5295d67d79bb2920985 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/debugger_set_watch_point.png differ diff --git a/tutorials/source_en/advanced_use/images/debugger_tensor_compare.png b/tutorials/source_en/advanced_use/images/debugger_tensor_compare.png new file mode 100644 index 0000000000000000000000000000000000000000..8e82a42a8b9addb6ea87f7841f09e1b2596902b2 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/debugger_tensor_compare.png differ diff --git a/tutorials/source_en/advanced_use/images/debugger_tensor_info.png b/tutorials/source_en/advanced_use/images/debugger_tensor_info.png new file mode 100644 index 0000000000000000000000000000000000000000..ac4246a4865f4bbc7e724d057582f5c1e5f8bc5e Binary files /dev/null and b/tutorials/source_en/advanced_use/images/debugger_tensor_info.png differ diff --git a/tutorials/source_en/advanced_use/images/debugger_tensor_value.png b/tutorials/source_en/advanced_use/images/debugger_tensor_value.png new file mode 100644 index 0000000000000000000000000000000000000000..faa2dc2992e528bdc46804be558497580c4e904b Binary files /dev/null and b/tutorials/source_en/advanced_use/images/debugger_tensor_value.png differ diff --git a/tutorials/source_en/advanced_use/images/debugger_waiting.png b/tutorials/source_en/advanced_use/images/debugger_waiting.png new file mode 100644 index 0000000000000000000000000000000000000000..8171fce24fdef74135dfe0f0368bdfebadca1c4b Binary files /dev/null and b/tutorials/source_en/advanced_use/images/debugger_waiting.png differ diff --git a/tutorials/source_en/advanced_use/images/debugger_watch_point_hit.png b/tutorials/source_en/advanced_use/images/debugger_watch_point_hit.png new file mode 100644 index 0000000000000000000000000000000000000000..c8920281f0200a2fe37bd28fe5f299205599d8bb Binary files /dev/null and b/tutorials/source_en/advanced_use/images/debugger_watch_point_hit.png differ diff --git a/tutorials/source_en/advanced_use/images/fuzz_res.png b/tutorials/source_en/advanced_use/images/fuzz_res.png new file mode 100644 index 0000000000000000000000000000000000000000..be6d022850438ff4b9c070f7225cbd950e1e3686 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/fuzz_res.png differ diff --git a/tutorials/source_en/advanced_use/images/fuzz_seed.png b/tutorials/source_en/advanced_use/images/fuzz_seed.png new file mode 100644 index 0000000000000000000000000000000000000000..cb138aebfabea1a1f778fbb65b6a0ee4533974e2 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/fuzz_seed.png differ diff --git a/tutorials/source_en/advanced_use/images/lineage_label.png b/tutorials/source_en/advanced_use/images/lineage_label.png index 56f6eb7dfd4cd39ce7c8ebf6fa5e2b0d61ea5871..15c88f91edb7e870246b85f9f4d96f00145d9199 100644 Binary files a/tutorials/source_en/advanced_use/images/lineage_label.png and b/tutorials/source_en/advanced_use/images/lineage_label.png differ diff --git a/tutorials/source_en/advanced_use/images/lineage_model_chart.png b/tutorials/source_en/advanced_use/images/lineage_model_chart.png index 32e307551e210a48cfbd5022fc2901e841dd9b8a..56d08cc34e51293a82aa63dd50fc1fa1c90e7ab3 100644 Binary files a/tutorials/source_en/advanced_use/images/lineage_model_chart.png and b/tutorials/source_en/advanced_use/images/lineage_model_chart.png differ diff --git a/tutorials/source_en/advanced_use/images/lineage_model_table.png b/tutorials/source_en/advanced_use/images/lineage_model_table.png index 
923b3ee95c08f2a32437988aae99c1aba6d191ef..a288ac6aa099c69a8b7f5cf97183992adb94b71a 100644 Binary files a/tutorials/source_en/advanced_use/images/lineage_model_table.png and b/tutorials/source_en/advanced_use/images/lineage_model_table.png differ diff --git a/tutorials/source_en/advanced_use/images/operator_fusion.png b/tutorials/source_en/advanced_use/images/operator_fusion.png new file mode 100644 index 0000000000000000000000000000000000000000..4aa6ee89a0970889abc84f1b74b95297f2ae2db4 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/operator_fusion.png differ diff --git a/tutorials/source_en/advanced_use/images/pipeline.png b/tutorials/source_en/advanced_use/images/pipeline.png new file mode 100644 index 0000000000000000000000000000000000000000..bbb1a391f8378bc02f4d821d657f2c74c21ff24e Binary files /dev/null and b/tutorials/source_en/advanced_use/images/pipeline.png differ diff --git a/tutorials/source_en/advanced_use/images/shuffle_performance_scheme.png b/tutorials/source_en/advanced_use/images/shuffle_performance_scheme.png new file mode 100644 index 0000000000000000000000000000000000000000..f4c72a99fbade41067f9e6dfe6383634d06433a8 Binary files /dev/null and b/tutorials/source_en/advanced_use/images/shuffle_performance_scheme.png differ diff --git a/tutorials/source_en/advanced_use/lineage_and_scalars_comparision.md b/tutorials/source_en/advanced_use/lineage_and_scalars_comparision.md index b120a5f0fffc6e70905efce9869d2922333af9ec..aaebbfcd1d649672ad4dbd85e5d823926a29da01 100644 --- a/tutorials/source_en/advanced_use/lineage_and_scalars_comparision.md +++ b/tutorials/source_en/advanced_use/lineage_and_scalars_comparision.md @@ -47,7 +47,7 @@ The overview page on the left shows information about optimization objective and Figure 4: Overview page -Figure 4 shows the optimization objective distribution, parameter importance, and scatter plots. +Figure 4 shows the optimization objective distribution, parameter importance, and scatter plots. You can select the optimization objective to view the importance of the parameters, and then click the histogram to view the scatter plot of the parameters and optimization objective. 
## Dataset Lineage diff --git a/tutorials/source_en/advanced_use/mindinsight_commands.md b/tutorials/source_en/advanced_use/mindinsight_commands.md index 8ed9fcbed9126ed1ea78140626d7b5bc2411b317..73ffc4d884c85048eaaf25f309bc52b11790f162 100644 --- a/tutorials/source_en/advanced_use/mindinsight_commands.md +++ b/tutorials/source_en/advanced_use/mindinsight_commands.md @@ -30,10 +30,12 @@ mindinsight --version ## Start the Service ```shell -mindinsight start [-h] [--config ] [--workspace ] - [--port ] [--url-path-prefix ] - [--reload-interval ] - [--summary-base-dir ] +mindinsight start [-h] [--config {CONFIG}] [--workspace {WORKSPACE}] + [--port {PORT}] [--url-path-prefix {URL_PATH_PREFIX}] + [--reload-interval {RELOAD_INTERVAL}] + [--summary-base-dir {SUMMARY_BASE_DIR}] + [--enable-debugger {ENABLE_DEBUGGER}] + [--debugger-port {DEBUGGER_PORT}] ``` Optional parameters as follows: @@ -41,12 +43,14 @@ Optional parameters as follows: |Name|Argument|Description|Type|Default|Scope|Specifications| |---|---|---|---|---|---|---| |`-h, --help`|Optional|Displays the help information about the start command.|-|-|-|-| -|`--config `|Optional|Specifies the configuration file or module.|String|Empty string|-|Physical file path (file:/path/to/config.py) or a module path (python:path.to.config.module) that can be identified by Python.| -|`--workspace `|Optional|Specifies the working directory.|String|$HOME/mindinsight|-|-| -|`--port `|Optional|Specifies the port number of the web visualization service.|Integer|8080|1~65535|-| -|`--url-path-prefix `|Optional|Specifies the URL path prefix of the web visualization service.|String|Empty string|-|URL path prefix consists of segments separated by slashes. Each segment supports alphabets / digits / underscores / dashes / dots, but not single dot or double dots.| -|`--reload-interval `|Optional|Specifies the interval (unit: second) for loading data.|Integer|3|-|The value 0 indicates that data is loaded only once.| -|`--summary-base-dir `|Optional|Specifies the root directory for loading training log data.|String|./|-|MindInsight traverses the direct subdirectories in this directory and searches for log files. If a direct subdirectory contains log files, it is identified as the log file directory. If a root directory contains log files, it is identified as the log file directory.| +|`--config {CONFIG}`|Optional|Specifies the configuration file or module.|String|Empty string|-|Physical file path (file:/path/to/config.py) or a module path (python:path.to.config.module) that can be identified by Python.| +|`--workspace {WORKSPACE}`|Optional|Specifies the working directory.|String|$HOME/mindinsight|-|-| +|`--port {PORT}`|Optional|Specifies the port number of the web visualization service.|Integer|8080|1~65535|-| +|`--url-path-prefix {URL_PATH_PREFIX}`|Optional|Specifies the URL path prefix of the web visualization service.|String|Empty string|-|URL path prefix consists of segments separated by slashes. Each segment supports alphabets / digits / underscores / dashes / dots, but not single dot or double dots.| +|`--reload-interval {RELOAD_INTERVAL}`|Optional|Specifies the interval (unit: second) for loading data.|Integer|3|-|The value 0 indicates that data is loaded only once.| +|`--summary-base-dir {SUMMARY_BASE_DIR}`|Optional|Specifies the root directory for loading training log data.|String|./|-|MindInsight traverses the direct subdirectories in this directory and searches for log files. 
If a direct subdirectory contains log files, it is identified as the log file directory. If the root directory contains log files, it is also identified as a log file directory.| +|`--enable-debugger {ENABLE_DEBUGGER}`|Optional|Whether to launch the MindInsight Debugger.|Boolean|False|True/False|-| +|`--debugger-port {DEBUGGER_PORT}`|Optional|Specifies the port number of the debugger server.|Integer|50051|1~65535|-| > When the service is started, the parameter values of the command line are saved as the environment variables of the process and start with `MINDINSIGHT_`, for example, `MINDINSIGHT_CONFIG`, `MINDINSIGHT_WORKSPACE`, and `MINDINSIGHT_PORT`. diff --git a/tutorials/source_en/advanced_use/mixed_precision.md b/tutorials/source_en/advanced_use/mixed_precision.md index b211b1737c85729308459f6cb6e71bd15ad1b977..a49dbaf22adeb18c727998bddb7dda36b7eac78a 100644 --- a/tutorials/source_en/advanced_use/mixed_precision.md +++ b/tutorials/source_en/advanced_use/mixed_precision.md @@ -39,14 +39,14 @@ This document describes the computation process by using examples of automatic a ## Automatic Mixed Precision -To use the automatic mixed precision, you need to invoke the corresponding API, which takes the network to be trained and the optimizer as the input. This API converts the operators of the entire network into FP16 operators (except the `BatchNorm` and Loss operators). +To use the automatic mixed precision, you need to invoke the corresponding API, which takes the network to be trained and the optimizer as the input. This API converts the operators of the entire network into FP16 operators (except the `BatchNorm` and Loss operators). You can use automatic mixed precision through the `amp` API or the `Model` API. -The procedure is as follows: -1. Introduce the MindSpore mixed precision API. +The procedure of using automatic mixed precision via the `amp` API is as follows: +1. Introduce the MindSpore mixed precision API `amp`. 2. Define the network. This step is the same as the common network definition. (You do not need to manually configure the precision of any specific operator.) -3. Use the `amp.build_train_network` API to encapsulate the network model and optimizer. In this step, MindSpore automatically converts the operators to the required format. +3. Use the `amp.build_train_network` API to encapsulate the network model and optimizer. You can learn how to set the parameter `level` through . In this step, MindSpore automatically converts the operators to the required format. A code example is as follows: @@ -92,6 +92,77 @@ train_network = amp.build_train_network(net, optimizer, loss, level="O3", loss_s output = train_network(predict, label) ``` +The procedure of using automatic mixed precision via the `Model` API is as follows: +1. Introduce the MindSpore model API `Model`. + +2. Define the network. This step is the same as the common network definition. (You do not need to manually configure the precision of any specific operator.) + +3. Create the dataset. You can learn the detailed steps at . + +4. Use the `Model` API to encapsulate the network model and optimizer. You can learn how to set the parameter `amp_level` through . In this step, MindSpore automatically converts the operators to the required format.
+ +A code example is as follows: + +```python +import numpy as np +import mindspore.nn as nn +from mindspore import context +from mindspore.common.initializer import Normal +from mindspore.train import Model +from mindspore.nn.metrics import Accuracy +from src.dataset import create_dataset + +context.set_context(mode=context.GRAPH_MODE) +context.set_context(device_target="Ascend") + +# Define network +class LeNet5(nn.Cell): + """ + Lenet network + + Args: + num_class (int): Number of classes. Default: 10. + num_channel (int): Number of channels. Default: 1. + + Returns: + Tensor, output tensor + Examples: + >>> LeNet(num_class=10) + + """ + def __init__(self, num_class=10, num_channel=1): + super(LeNet5, self).__init__() + self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid') + self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid') + self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02)) + self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02)) + self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02)) + self.relu = nn.ReLU() + self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) + self.flatten = nn.Flatten() + + def construct(self, x): + x = self.max_pool2d(self.relu(self.conv1(x))) + x = self.max_pool2d(self.relu(self.conv2(x))) + x = self.flatten(x) + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return x + +# create dataset +ds_train = create_dataset("/dataset/train", 32) + +# Initialize network +network = LeNet5(10) + +# Define Loss and Optimizer +net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") +net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9) +model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O3") + +# Run training +model.train(epoch=10, train_dataset=ds_train) +``` ## Manual Mixed Precision diff --git a/tutorials/source_en/advanced_use/model_scripts_transformation.md b/tutorials/source_en/advanced_use/model_scripts_transformation.md new file mode 100644 index 0000000000000000000000000000000000000000..5e4ba4dc0d26c89abba7ac34209d49aef13af325 --- /dev/null +++ b/tutorials/source_en/advanced_use/model_scripts_transformation.md @@ -0,0 +1,205 @@ +# Migrate From Third Party Framework + +`Linux` `Ascend` `Model Development` `Beginner` + + + +- [Model Scripts Transformation](#Model-Scripts-Transformation) + - [Overview](#Overview) + - [Installation](#Installation) + - [Usage](#Usage) + - [Scenario](#Scenario) + - [Example](#Example) + - [AST-Based Conversion](#AST-Based-Conversion) + - [Graph-Based Conversion](#Graph-Based-Conversion) + - [Caution](#Caution) + + + + + +## Overview + +MindConverter is a migration tool to transform the model scripts from PyTorch to MindSpore. Users can migrate their PyTorch models to MindSpore rapidly with minor changes according to the conversion report. + + +## Installation + +MindConverter is a submodule of MindInsight. Please follow the [Guide](https://www.mindspore.cn/install/en) to install MindInsight. + + +## Usage + +MindConverter currently only provides a command-line interface. Here is the manual page. + +```bash +usage: mindconverter [-h] [--version] [--in_file IN_FILE] + [--model_file MODEL_FILE] [--shape SHAPE] + [--output OUTPUT] [--report REPORT] + [--project_path PROJECT_PATH] + +optional arguments: + -h, --help show this help message and exit + --version show program version number and exit + --in_file IN_FILE Specify path for script file to use AST schema to do + script conversation.
+ --model_file MODEL_FILE + PyTorch .pth model file path to use graph based schema + to do script generation. When `--in_file` and + `--model_file` are both provided, use AST schema as + default. + --shape SHAPE Optional, excepted input tensor shape of + `--model_file`. It is required when use graph based + schema. Usage: --shape 3,244,244 + --output OUTPUT Optional, specify path for converted script file + directory. Default output directory is `output` folder + in the current working directory. + --report REPORT Optional, specify report directory. Default is + converted script directory. + --project_path PROJECT_PATH + Optional, PyTorch scripts project path. If PyTorch + project is not in PYTHONPATH, please assign + `--project_path` when use graph based schema. Usage: + --project_path ~/script_file/ + +``` + +**MindConverter provides two modes:** + +1. **Abstract Syntax Tree (AST) based conversion**: Using the argument `--in_file` enables the AST mode. +2. **Computational Graph based conversion**: Using the `--model_file` and `--shape` arguments enables the Graph mode. + +> The AST mode will be enabled if both `--in_file` and `--model_file` are specified. + +For the Graph mode, `--shape` is mandatory. + +For the AST mode, `--shape` is ignored. + +`--output` and `--report` are optional. MindConverter creates an `output` folder under the current working directory, and outputs generated scripts and conversion reports to it. + +Please ensure that your original PyTorch project is included in the module search path (PYTHONPATH). Use the Python interpreter to test that your module can be successfully loaded by the `import` command. If your project is not in PYTHONPATH, use `--project_path` to ensure MindConverter can load it. + +> Assume the project is located at `/home/user/project/model_training`, users can use this command to add the project to `PYTHONPATH`: `export PYTHONPATH=/home/user/project/model_training:$PYTHONPATH` + +> MindConverter needs the original PyTorch scripts because of the reverse serialization. + + + +## Scenario + +MindConverter provides two modes for different migration demands. + +1. Keep original scripts' structures, including variables, functions, and libraries. +2. Keep extra modifications as few as possible, or no modifications are required after conversion. + +The AST mode is recommended for the first demand. It parses and analyzes PyTorch scripts, then replaces them with the MindSpore AST to generate code. Theoretically, the AST mode supports any model script. However, the conversion result may differ depending on the coding style of the original scripts. + +For the second demand, the Graph mode is recommended. As the computational graph is a standard descriptive language, it is not affected by the user's coding style. This mode may have more operators converted as long as these operators are supported by MindConverter. + +Some typical image classification networks such as ResNet and VGG have been tested for the Graph mode. Note that: + +> 1. Currently, the Graph mode does not support models with multiple inputs. Only models with a single input and single output are supported. +> 2. The Dropout operator will be lost after conversion because the inference mode is used to load the PyTorch model. Manual re-implementation is necessary. +> 3. The Graph-based mode will be continuously developed and optimized with further updates.
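As a quick sanity check before invoking MindConverter, one can verify from Python that the project is importable; this is only a sketch, and both `model_training` (taken from the example path above) and the top-level module name `model` are hypothetical:

```python
# A sketch: confirm the PyTorch project is on the module search path before
# running MindConverter. The path and module name are assumptions from the
# example above; substitute your own project's values.
import importlib
import sys

sys.path.insert(0, "/home/user/project/model_training")
try:
    importlib.import_module("model")  # hypothetical top-level module of the project
    print("Project can be imported; MindConverter should be able to load it.")
except ImportError as err:
    print(f"Fix PYTHONPATH or pass --project_path: {err}")
```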
+ + +## Example + +### AST-Based Conversion + +Assume the PyTorch script is located at `/home/user/model.py`, and output the transformed MindSpore script to `/home/user/output`, with the conversion report to `/home/user/output/report`. Use the following command: + +```bash +mindconverter --in_file /home/user/model.py \ + --output /home/user/output \ + --report /home/user/output/report +``` + +In the conversion report, non-transformed code is listed as follows: + +```text +line : [UnConvert] 'operator' didn't convert. ... +``` + +For non-transformed operators, the original code is kept. Please migrate them manually. [Click here](https://www.mindspore.cn/docs/en/master/index.html#operator_api) for more information about operator mapping. + + +Here is an example of the conversion report: +```text + [Start Convert] + [Insert] 'import mindspore.ops.operations as P' is inserted to the converted file. + line 1:0: [Convert] 'import torch' is converted to 'import mindspore'. + ... + line 157:23: [UnConvert] 'nn.AdaptiveAvgPool2d' didn't convert. Maybe could convert to mindspore.ops.operations.ReduceMean. + ... + [Convert Over] +``` + +For non-transformed operators, suggestions are provided in the report. For instance, MindConverter suggests replacing `torch.nn.AdaptiveAvgPool2d` with `mindspore.ops.operations.ReduceMean`. + + +### Graph-Based Conversion + +Assume the PyTorch model (.pth file) is located at `/home/user/model.pth`, with input shape (3, 224, 224) and the original PyTorch script is at `/home/user/project/model_training`. Output the transformed MindSpore script to `/home/user/output`, with the conversion report to `/home/user/output/report`. Use the following command: + +```bash +mindconverter --model_file /home/user/model.pth --shape 3,224,224 \ + --output /home/user/output \ + --report /home/user/output/report \ + --project_path /home/user/project/model_training +``` + +The Graph mode has the same conversion report as the AST mode. However, the line number and column number refer to the transformed scripts since no original scripts are used in the process. + +In addition, the input and output tensor shapes of unconverted operators are shown explicitly (`input_shape` and `output_shape`) as comments in the converted scripts to help further manual modification. Here is an example of the `Reshape` operator (not supported in the current version): + +```python +class Classifier(nn.Cell): + + def __init__(self): + super(Classifier, self).__init__() + ... + self.reshape = onnx.Reshape(input_shape=(1, 1280, 1, 1), + output_shape=(1, 1280)) + ... + + def construct(self, x): + ... + # Suppose input of `reshape` is x. + reshape_output = self.reshape(x) + ... + +``` + +It is convenient to replace the operators according to the `input_shape` and `output_shape` hints. The replacement looks like this: + +```python +from mindspore.ops import operations as P +... + +class Classifier(nn.Cell): + + def __init__(self): + super(Classifier, self).__init__() + ... + # P.Reshape takes no constructor arguments; the target shape + # from the output_shape hint is passed in construct instead. + self.reshape = P.Reshape() + ... + + def construct(self, x): + ... + # Suppose input of `reshape` is x. + reshape_output = self.reshape(x, (1, 1280)) + ... + +``` + +> Note: `--output` and `--report` are optional. MindConverter creates an `output` folder under the current working directory, and outputs generated scripts and conversion reports to it. + + +## Caution + +1. PyTorch is not an explicitly stated dependency library in MindInsight.
The Graph conversion requires the same PyTorch version as the one used to train the model. (MindConverter recommends PyTorch 1.4.0 or 1.6.0.) +2. This script conversion tool relies on operators supported by MindConverter and MindSpore. Unsupported operators may not be successfully mapped to MindSpore operators. You can edit the generated scripts manually, or implement the mapping based on MindConverter and contribute it to our MindInsight repository. We appreciate your support for the MindSpore community. + + diff --git a/tutorials/source_en/advanced_use/model_security.md b/tutorials/source_en/advanced_use/model_security.md index a13cf2a91dc17a44cb47c36ae7bd11a60784eb89..972d42574ada423ffdbe7c62df1f611d34fa30ed 100644 --- a/tutorials/source_en/advanced_use/model_security.md +++ b/tutorials/source_en/advanced_use/model_security.md @@ -59,9 +59,9 @@ from mindspore import Tensor from mindspore import context from mindspore.train.callback import LossMonitor -from mindarmour.attacks.gradient_method import FastGradientSignMethod +from mindarmour.adv_robustness.attacks import FastGradientSignMethod from mindarmour.utils.logger import LogUtil -from mindarmour.evaluations.attack_evaluation import AttackEvaluate +from mindarmour.adv_robustness.evaluations import AttackEvaluate context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") @@ -99,7 +99,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1, # apply map operations on images if not sparse: one_hot_enco = C.OneHot(10) - ds1 = ds1.map(input_columns="label", operations=one_hot_enco, + ds1 = ds1.map(operations=one_hot_enco, input_columns="label", num_parallel_workers=num_parallel_workers) type_cast_op = C.TypeCast(mstype.float32) ds1 = ds1.map(operations=type_cast_op, input_columns="label", @@ -178,7 +178,7 @@ The LeNet model is used as an example. You can also create and train your own mo 2. Train LeNet model. Use the defined data loading function `generate_mnist_dataset` to load data. ```python - mnist_path = "./MNIST_unzip/" + mnist_path = "./MNIST/" batch_size = 32 # train original model ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"), @@ -198,8 +198,8 @@ The LeNet model is used as an example. You can also create and train your own mo inputs = [] labels = [] for data in ds_test.create_tuple_iterator(): - inputs.append(data[0].astype(np.float32)) - labels.append(data[1]) + inputs.append(data[0].asnumpy().astype(np.float32)) + labels.append(data[1].asnumpy()) test_inputs = np.concatenate(inputs) test_labels = np.concatenate(labels) ``` @@ -297,7 +297,7 @@ Natural Adversarial Defense (NAD) is a simple and effective adversarial example Call the NAD API provided by MindArmour.
```python -from mindarmour.defenses import NaturalAdversarialDefense +from mindarmour.adv_robustness.defenses import NaturalAdversarialDefense # defense diff --git a/tutorials/source_en/advanced_use/optimize_the_performance_of_data_preparation.md b/tutorials/source_en/advanced_use/optimize_the_performance_of_data_preparation.md new file mode 100644 index 0000000000000000000000000000000000000000..017e6022b2377d186a6ea1f01e94a529dfaf901c --- /dev/null +++ b/tutorials/source_en/advanced_use/optimize_the_performance_of_data_preparation.md @@ -0,0 +1,389 @@ +# Optimizing the Data Preparation Performance + +`Linux` `Ascend` `GPU` `CPU` `Data Preparation` `Beginner` `Intermediate` `Expert` + + + +- [Optimizing the Data Preparation Performance](#optimizing-the-data-preparation-performance) + - [Overview](#overview) + - [Overall Process](#overall-process) + - [Preparations](#preparations) + - [Importing Modules](#importing-modules) + - [Downloading the Required Dataset](#downloading-the-required-dataset) + - [Optimizing the Data Loading Performance](#optimizing-the-data-loading-performance) + - [Performance Optimization Solution](#performance-optimization-solution) + - [Code Example](#code-example) + - [Optimizing the Shuffle Performance](#optimizing-the-shuffle-performance) + - [Performance Optimization Solution](#performance-optimization-solution-1) + - [Code Example](#code-example-1) + - [Optimizing the Data Augmentation Performance](#optimizing-the-data-augmentation-performance) + - [Performance Optimization Solution](#performance-optimization-solution-2) + - [Code Example](#code-example-2) + - [Performance Optimization Solution Summary](#performance-optimization-solution-summary) + - [Multi-thread Optimization Solution](#multi-thread-optimization-solution) + - [Multi-process Optimization Solution](#multi-process-optimization-solution) + - [Compose Optimization Solution](#compose-optimization-solution) + - [Operator Fusion Optimization Solution](#operator-fusion-optimization-solution) + + + +## Overview + +Data is the most important factor of deep learning. Data quality determines the upper limit of the deep learning result, whereas model quality enables the result to approach that upper limit. Therefore, high-quality data input is beneficial to the entire deep neural network. During the entire data processing and data augmentation process, data continuously flows through a "pipeline" to the training system, as shown in the following figure: + +![title](./images/pipeline.png) + +MindSpore provides data processing and data augmentation functions for users. In the pipeline process, if each step can be properly used, the data performance will be greatly improved. This section describes how to optimize performance during data loading, data processing, and data augmentation based on the CIFAR-10 dataset. + +## Overall Process +- Prepare data. +- Optimize the data loading performance. +- Optimize the shuffle performance. +- Optimize the data augmentation performance. +- Summarize the performance optimization solution. + +## Preparations + +### Importing Modules + +The `dataset` module provides APIs for loading and processing datasets. + + +```python +import mindspore.dataset as ds +``` + +The `numpy` module is used to generate ndarrays. + + +```python +import numpy as np +``` + +### Downloading the Required Dataset + +1. Create the `./dataset/Cifar10Data` directory in the current working directory. The dataset used for this practice is stored in this directory. +2.
Create the `./transform` directory in the current working directory. The dataset generated during the practice is stored in this directory. +3. Download [the CIFAR-10 dataset in binary format](https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz) and decompress the dataset file to the `./dataset/Cifar10Data/cifar-10-batches-bin` directory. The dataset will be used during data loading. +4. Download [the CIFAR-10 Python dataset in file-format](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz) and decompress the dataset file to the `./dataset/Cifar10Data/cifar-10-batches-py` directory. The dataset will be used for data conversion. + +The directory structure is as follows: + + + dataset/Cifar10Data + ├── cifar-10-batches-bin + │   ├── batches.meta.txt + │   ├── data_batch_1.bin + │   ├── data_batch_2.bin + │   ├── data_batch_3.bin + │   ├── data_batch_4.bin + │   ├── data_batch_5.bin + │   ├── readme.html + │   └── test_batch.bin + └── cifar-10-batches-py + ├── batches.meta + ├── data_batch_1 + ├── data_batch_2 + ├── data_batch_3 + ├── data_batch_4 + ├── data_batch_5 + ├── readme.html + └── test_batch + +In the preceding information: +- The `cifar-10-batches-bin` directory is the directory for storing the CIFAR-10 dataset in binary format. +- The `cifar-10-batches-py` directory is the directory for storing the CIFAR-10 dataset in Python file format. + +## Optimizing the Data Loading Performance + +MindSpore provides multiple data loading methods, including common dataset loading, user-defined dataset loading, and MindSpore data format loading. For details, see [Loading Datasets](https://www.mindspore.cn/tutorial/en/master/use/data_preparation/loading_the_datasets.html). The dataset loading performance varies depending on the underlying implementation method. + +| | Common Dataset | User-defined Dataset | MindRecord Dataset | +| :----: | :----: | :----: | :----: | +| Underlying implementation | C++ | Python | C++ | +| Performance | High | Medium | High | + +### Performance Optimization Solution + +![title](./images/data_loading_performance_scheme.png) + +Suggestions on data loading performance optimization are as follows: +- Built-in loading operators are preferred for supported dataset formats. For details, see [Built-in Loading Operators](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.dataset.html). If the performance cannot meet the requirements, use the multi-thread concurrency solution. For details, see [Multi-thread Optimization Solution](#multi-thread-optimization-solution). +- For a dataset format that is not supported, convert the format to MindSpore data format and then use the `MindDataset` class to load the dataset. For details, see [Converting Datasets into MindSpore Data Format](https://www.mindspore.cn/tutorial/en/master/use/data_preparation/converting_datasets.html). If the performance cannot meet the requirements, use the multi-thread concurrency solution. For details, see [Multi-thread Optimization Solution](#multi-thread-optimization-solution). +- For dataset formats that are not supported, the user-defined `GeneratorDataset` class is preferred for implementing fast algorithm verification. If the performance cannot meet the requirements, the multi-process concurrency solution can be used. For details, see [Multi-process Optimization Solution](#multi-process-optimization-solution).
+ +### Code Example + +Based on the preceding suggestions of data loading performance optimization, the `Cifar10Dataset` class of built-in loading operators, the `MindDataset` class after data conversion, and the `GeneratorDataset` class are used to load data. The sample code is displayed as follows: + +1. Use the `Cifar10Dataset` class of built-in operators to load the CIFAR-10 dataset in binary format. The multi-thread optimization solution is used for data loading. Four threads are enabled to concurrently complete the task. Finally, a dictionary iterator is created for the data and a data record is read through the iterator. + + + ```python + cifar10_path = "./dataset/Cifar10Data/cifar-10-batches-bin/" + + # create Cifar10Dataset for reading data + cifar10_dataset = ds.Cifar10Dataset(cifar10_path, num_parallel_workers=4) + # create a dictionary iterator and read a data record through the iterator + print(next(cifar10_dataset.create_dict_iterator())) + ``` + + The output is as follows: + ``` + {'image': Tensor(shape=[32, 32, 3], dtype=UInt8, value= + [[[235, 235, 235], + [230, 230, 230], + [234, 234, 234], + ..., + [248, 248, 248], + [248, 248, 248], + [249, 249, 249]], + ..., + [120, 120, 119], + [146, 146, 146], + [177, 174, 190]]]), 'label': Tensor(shape=[], dtype=UInt32, value= 9)} + ``` + +2. Use the `Cifar10ToMR` class to convert the CIFAR-10 dataset into MindSpore data format. In this example, the CIFAR-10 dataset in Python file format is used. Then use the `MindDataset` class to load the dataset in MindSpore data format. The multi-thread optimization solution is used for data loading. Four threads are enabled to concurrently complete the task. Finally, a dictionary iterator is created for data and a data record is read through the iterator. + + + ```python + from mindspore.mindrecord import Cifar10ToMR + + cifar10_path = './dataset/Cifar10Data/cifar-10-batches-py/' + cifar10_mindrecord_path = './transform/cifar10.record' + + cifar10_transformer = Cifar10ToMR(cifar10_path, cifar10_mindrecord_path) + # executes transformation from Cifar10 to MindRecord + cifar10_transformer.transform(['label']) + + # create MindDataset for reading data + cifar10_mind_dataset = ds.MindDataset(dataset_file=cifar10_mindrecord_path, num_parallel_workers=4) + # create a dictionary iterator and read a data record through the iterator + print(next(cifar10_mind_dataset.create_dict_iterator())) + ``` + + The output is as follows: + ``` + {'data': Tensor(shape=[1431], dtype=UInt8, value= [255, 216, 255, ..., 63, 255, 217]), 'id': Tensor(shape=[], dtype=Int64, value= 30474), 'label': Tensor(shape=[], dtype=Int64, value= 2)} + ``` + +3. The `GeneratorDataset` class is used to load the user-defined dataset, and the multi-process optimization solution is used. Four processes are enabled to concurrently complete the task. Finally, a dictionary iterator is created for the data, and a data record is read through the iterator. + + + ```python + def generator_func(num): + for i in range(num): + yield (np.array([i]),) + + # create GeneratorDataset for reading data + dataset = ds.GeneratorDataset(source=generator_func(5), column_names=["data"], num_parallel_workers=4) + # create a dictionary iterator and read a data record through the iterator + print(next(dataset.create_dict_iterator())) + ``` + + The output is as follows: + ``` + {'data': Tensor(shape=[1], dtype=Int64, value= [0])} + ``` + +## Optimizing the Shuffle Performance + +The shuffle operation is used to shuffle ordered datasets or repeated datasets. 
MindSpore provides the `shuffle` function for users. A larger value of `buffer_size` indicates a higher shuffling degree, consuming more time and computing resources. This API allows users to shuffle the data at any time during the entire pipeline process. For details, see [Shuffle Processing](https://www.mindspore.cn/tutorial/en/master/use/data_preparation/data_processing_and_augmentation.html#shuffle). However, because the underlying implementation methods differ, the performance of this API is lower than that of shuffling data directly via the `shuffle` parameter of the [Built-in Loading Operators](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.dataset.html). + +### Performance Optimization Solution + +![title](./images/shuffle_performance_scheme.png) + +Suggestions on shuffle performance optimization are as follows: +- Use the `shuffle` parameter of built-in loading operators to shuffle data. +- If the `shuffle` function is used and the performance still cannot meet the requirements, increase the value of the `buffer_size` parameter to improve the performance. + +### Code Example + +Based on the preceding shuffle performance optimization suggestions, the `shuffle` parameter of the `Cifar10Dataset` class of built-in loading operators and the `Shuffle` function are used to shuffle data. The sample code is displayed as follows: + +1. Use the built-in loading operator `Cifar10Dataset` to load the CIFAR-10 dataset. In this example, the CIFAR-10 dataset in binary format is used, and the `shuffle` parameter is set to True to shuffle the data. Finally, a dictionary iterator is created for the data and a data record is read through the iterator. + + + ```python + cifar10_path = "./dataset/Cifar10Data/cifar-10-batches-bin/" + + # create Cifar10Dataset for reading data + cifar10_dataset = ds.Cifar10Dataset(cifar10_path, shuffle=True) + # create a dictionary iterator and read a data record through the iterator + print(next(cifar10_dataset.create_dict_iterator())) + ``` + + The output is as follows: + ``` + {'image': Tensor(shape=[32, 32, 3], dtype=UInt8, value= + [[[235, 235, 235], + [230, 230, 230], + [234, 234, 234], + ..., + [248, 248, 248], + [248, 248, 248], + [249, 249, 249]], + ..., + [120, 120, 119], + [146, 146, 146], + [177, 174, 190]]]), 'label': Tensor(shape=[], dtype=UInt32, value= 9)} + ``` + +2. Use the `shuffle` function to shuffle data. Set `buffer_size` to 3 and use the `GeneratorDataset` class to generate data. + + + ```python + def generator_func(): + for i in range(5): + yield (np.array([i, i+1, i+2, i+3, i+4]),) + + ds1 = ds.GeneratorDataset(source=generator_func, column_names=["data"]) + print("before shuffle:") + for data in ds1.create_dict_iterator(): + print(data["data"]) + + ds2 = ds1.shuffle(buffer_size=3) + print("after shuffle:") + for data in ds2.create_dict_iterator(): + print(data["data"]) + ``` + + The output is as follows: + ``` + before shuffle: + [0 1 2 3 4] + [1 2 3 4 5] + [2 3 4 5 6] + [3 4 5 6 7] + [4 5 6 7 8] + after shuffle: + [2 3 4 5 6] + [0 1 2 3 4] + [4 5 6 7 8] + [1 2 3 4 5] + [3 4 5 6 7] + ``` + +## Optimizing the Data Augmentation Performance + +During image classification training, especially when the dataset is small, users can use data augmentation to preprocess images to enrich the dataset. MindSpore provides multiple data augmentation methods, including: +- Use the built-in C operator (`c_transforms` module) to perform data augmentation.
+- Use the built-in Python operator (`py_transforms` module) to perform data augmentation. +- Users can define Python functions as needed to perform data augmentation. + +For details, see [Data Augmentation](https://www.mindspore.cn/tutorial/en/master/use/data_preparation/data_processing_and_augmentation.html#id3). The performance varies according to the underlying implementation methods. + +| Module | Underlying API | Description | +| :----: | :----: | :----: | +| c_transforms | C++ (based on OpenCV) | High performance | +| py_transforms | Python (based on PIL) | This module provides multiple image augmentation functions and the method for converting PIL images into NumPy arrays. | + + +### Performance Optimization Solution + +![title](./images/data_enhancement_performance_scheme.png) + + +Suggestions on data augmentation performance optimization are as follows: +- The `c_transforms` module is preferred for data augmentation because it delivers the highest performance. If the performance cannot meet the requirements, refer to [Multi-thread Optimization Solution](#multi-thread-optimization-solution), [Compose Optimization Solution](#compose-optimization-solution), or [Operator Fusion Optimization Solution](#operator-fusion-optimization-solution). +- If the `py_transforms` module is used to perform data augmentation and the performance still cannot meet the requirements, refer to [Multi-thread Optimization Solution](#multi-thread-optimization-solution), [Multi-process Optimization Solution](#multi-process-optimization-solution), [Compose Optimization Solution](#compose-optimization-solution), or [Operator Fusion Optimization Solution](#operator-fusion-optimization-solution). +- The `c_transforms` module maintains buffer management in C++, and the `py_transforms` module maintains buffer management in Python. Because of the performance cost of switching between Python and C++, it is advised not to use different operator types together. +- If the user-defined Python functions are used to perform data augmentation and the performance still cannot meet the requirements, use the [Multi-thread Optimization Solution](#multi-thread-optimization-solution) or [Multi-process Optimization Solution](#multi-process-optimization-solution). If the performance still cannot be improved, optimize the user-defined Python code. + +### Code Example + +Based on the preceding suggestions of data augmentation performance optimization, the `c_transforms` module and user-defined Python function are used to perform data augmentation. The code is displayed as follows: + +1. The `c_transforms` module is used to perform data augmentation. During data augmentation, the multi-thread optimization solution is used. Four threads are enabled to concurrently complete the task. The operator fusion optimization solution is used and the `RandomResizedCrop` fusion class is used to replace the `RandomResize` and `RandomCrop` classes.
+ + + ```python + import mindspore.dataset.transforms.c_transforms as c_transforms + import mindspore.dataset.vision.c_transforms as C + import matplotlib.pyplot as plt + cifar10_path = "./dataset/Cifar10Data/cifar-10-batches-bin/" + + # create Cifar10Dataset for reading data + cifar10_dataset = ds.Cifar10Dataset(cifar10_path, num_parallel_workers=4) + transforms = C.RandomResizedCrop((800, 800)) + # apply the transform to the dataset through dataset.map() + cifar10_dataset = cifar10_dataset.map(operations=transforms, input_columns="image", num_parallel_workers=4) + + data = next(cifar10_dataset.create_dict_iterator()) + plt.imshow(data["image"].asnumpy()) + plt.show() + ``` + + The output is as follows: + + ![png](./images/cifar10_c_transforms.png) + + +2. A user-defined Python function is used to perform data augmentation. During data augmentation, the multi-process optimization solution is used, and four processes are enabled to concurrently complete the task. + + + ```python + def generator_func(): + for i in range(5): + yield (np.array([i, i+1, i+2, i+3, i+4]),) + + ds3 = ds.GeneratorDataset(source=generator_func, column_names=["data"]) + print("before map:") + for data in ds3.create_dict_iterator(): + print(data["data"]) + + func = lambda x:x**2 + ds4 = ds3.map(operations=func, input_columns="data", python_multiprocessing=True, num_parallel_workers=4) + print("after map:") + for data in ds4.create_dict_iterator(): + print(data["data"]) + ``` + + The output is as follows: + ``` + before map: + [0 1 2 3 4] + [1 2 3 4 5] + [2 3 4 5 6] + [3 4 5 6 7] + [4 5 6 7 8] + after map: + [ 0 1 4 9 16] + [ 1 4 9 16 25] + [ 4 9 16 25 36] + [ 9 16 25 36 49] + [16 25 36 49 64] + ``` + +## Performance Optimization Solution Summary + +### Multi-thread Optimization Solution + +During the data pipeline process, the number of threads for related operators can be set to improve the concurrency and performance. For example: +- During data loading, the `num_parallel_workers` parameter in the built-in data loading class is used to set the number of threads. +- During data augmentation, the `num_parallel_workers` parameter in the `map` function is used to set the number of threads. +- During batch processing, the `num_parallel_workers` parameter in the `batch` function is used to set the number of threads. + +For details, see [Built-in Loading Operators](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.dataset.html). + +### Multi-process Optimization Solution + +During data processing, operators implemented by Python support the multi-process mode. For example: +- By default, the `GeneratorDataset` class is in multi-process mode. The `num_parallel_workers` parameter indicates the number of enabled processes. The default value is 1. For details, see [Generator Dataset](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.dataset.html#mindspore.dataset.GeneratorDataset) +- If the user-defined Python function or the `py_transforms` module is used to perform data augmentation and the `python_multiprocessing` parameter of the `map` function is set to True, the `num_parallel_workers` parameter indicates the number of processes and the default value of the `python_multiprocessing` parameter is False. In this case, the `num_parallel_workers` parameter indicates the number of threads. For details, see [Built-in Loading Operators](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.dataset.html). 
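+
+The following is a minimal sketch of the two solutions above, assuming the CIFAR-10 directory used in the earlier examples; the worker counts are illustrative and should be tuned to the number of available CPU cores, and `my_python_func` in the trailing comment stands for any user-defined callable:
+
+```python
+import mindspore.dataset as ds
+import mindspore.dataset.vision.c_transforms as C
+
+cifar10_path = "./dataset/Cifar10Data/cifar-10-batches-bin/"
+
+# multi-thread optimization: set num_parallel_workers at every pipeline stage
+dataset = ds.Cifar10Dataset(cifar10_path, num_parallel_workers=4)   # loading
+dataset = dataset.map(operations=C.RandomHorizontalFlip(),
+                      input_columns="image",
+                      num_parallel_workers=4)                       # augmentation
+dataset = dataset.batch(32, drop_remainder=True,
+                        num_parallel_workers=4)                     # batching
+
+# multi-process optimization: for a Python callable, switch the map workers
+# from threads to processes by adding python_multiprocessing=True, e.g.:
+# dataset = dataset.map(operations=my_python_func, input_columns="data",
+#                       num_parallel_workers=4, python_multiprocessing=True)
+```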
+
+### Compose Optimization Solution
+
+A map operator can receive a list of tensor operators and apply all of them in sequence. Compared with using a separate map operator for each tensor operator, such a fat map operator achieves better performance, as shown in the following figure:
+
+![title](./images/compose.png)
+
+### Operator Fusion Optimization Solution
+
+Some fusion operators are provided to aggregate the functions of two or more operators into one operator. For details, see [Data Augmentation Operators](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.dataset.vision.html). Compared with a pipeline of their component operators, such fusion operators provide better performance, as shown in the following figure:
+
+![title](./images/operator_fusion.png)
diff --git a/tutorials/source_en/advanced_use/parameter_server_training.md b/tutorials/source_en/advanced_use/parameter_server_training.md
index 570bb28d7f6c21d533c17dfa56ab71bb0bba744c..27200d45e2bc288c08da8c57fc691acf08e89053 100644
--- a/tutorials/source_en/advanced_use/parameter_server_training.md
+++ b/tutorials/source_en/advanced_use/parameter_server_training.md
@@ -25,7 +25,7 @@ The ps-lite architecture consists of three independent components: server, worke
 
 - Server: saves model weights and backward computation gradients, and updates the model using gradients pushed by workers.
 
-- Worker: performs forward and backward computation on the network. The gradient value for forward computation is uploaded to a server through the `Push` API, and the model updated by the server is downloaded to the worker through the `Pull` API.
+- Worker: performs forward and backward computation on the network. The gradient value from backward computation is uploaded to a server through the `Push` API, and the model updated by the server is downloaded to the worker through the `Pull` API.
 
 - Scheduler: establishes the communication relationship between the server and worker.
 
diff --git a/tutorials/source_en/advanced_use/performance_profiling.md b/tutorials/source_en/advanced_use/performance_profiling.md
index 53551b6126a1bce1e33e56ec29c2da06c1edf26f..98d79ae100acf9f9c9e6a1a130472d869afb49f9 100644
--- a/tutorials/source_en/advanced_use/performance_profiling.md
+++ b/tutorials/source_en/advanced_use/performance_profiling.md
@@ -31,7 +31,7 @@ Performance data like operators' execution time is recorded in files and can be
 
 ## Preparing the Environment
 
-Before using Profiler, make sure the process of ada in background running right. The ada process must using the root user to run. The start command is `/usr/local/Ascend/driver/tools/ada`.
+Before using Profiler, make sure the ada process is running correctly in the background. The ada process must be started by a user in the HwHiAiUser user group or by the root user, and the scripts must be run by the same user. The start command is `/usr/local/Ascend/driver/tools/ada`.
 
 ## Preparing the Training Script
 
diff --git a/tutorials/source_en/advanced_use/performance_profiling_gpu.md b/tutorials/source_en/advanced_use/performance_profiling_gpu.md
index d3327f4f1a557f7da90d3dfaff92893f65fef5d8..c84d19b695b13bab6f037cfc199b32f0e903f05b 100644
--- a/tutorials/source_en/advanced_use/performance_profiling_gpu.md
+++ b/tutorials/source_en/advanced_use/performance_profiling_gpu.md
@@ -26,6 +26,11 @@ Performance data like operators' execution time is recorded in files and can be
 
 >
 >
+> By default, common users do not have the permission to access the NVIDIA GPU performance counters on the target device.
+> If common users need to use the profiler performance statistics capability in the training script, configure the permission by referring to the following description:
+>
+>
+
 ## Preparing the Training Script
 
 To enable the performance profiling of neural networks, MindSpore Profiler APIs should be added into the script. Only the output_path parameter takes effect on GPU now. Then, at the end of the training, `Profiler.analyse()` should be called to finish profiling and generate the performance analysis results.
 
@@ -77,8 +82,9 @@ Users can access the Performance Profiler by selecting a specific training from
 
 Figure 1: Overall Performance
 
-Figure 1 displays the overall performance of the training, including the overall data of Step Trace, Operator Performance, MindData Performance and Timeline. Operator Performance Analysis is supportted only:
+Figure 1 displays the overall performance of the training, including the overall data of Step Trace, Operator Performance, MindData Performance and Timeline:
 - Operator Performance: It will collect the average execution time of operators and operator types. The overall performance page will show the pie graph for different operator types.
+- Timeline: It will collect execution time for operations and CUDA activity. The tasks will be shown on the time axis. The overall performance page will show the statistics for tasks.
 
 Users can click the detail link to see the details of each component.
diff --git a/tutorials/source_en/advanced_use/quantization_aware.md b/tutorials/source_en/advanced_use/quantization_aware.md
index 490857301540a76e6f765506417b2ab02ef4cfe5..760e6d7831d4a6a6aec9acdccb61e33d86002426 100644
--- a/tutorials/source_en/advanced_use/quantization_aware.md
+++ b/tutorials/source_en/advanced_use/quantization_aware.md
@@ -60,21 +60,19 @@ Aware quantization training specifications
 
 The procedure for the quantization aware training model is the same as that for the common training. After the network is defined and the model is generated, additional operations need to be performed. The complete process is as follows:
 
 1. Process data and load datasets.
-2. Define a network.
-3. Define a fusion network. After a network is defined, replace the specified operators to define a fusion network.
+2. Define an original unquantized network.
+3. Define a fusion network. After defining the original unquantized network, replace the specified operators to define a fusion network.
 4. Define an optimizer and loss function.
-5. Perform model training. Generate a fusion model based on the fusion network training.
-6. Generate a quantization network. After the fusion model is obtained based on the fusion network training, insert a fake quantization node into the fusion model by using a conversion API to generate a quantization network.
-7. Perform quantization training. Generate a quantization model based on the quantization network training.
+5. Generate a quantization network. Insert a fake quantization node into the fusion network by using a conversion API; a quantization network is then generated based on the fusion network.
+6. Perform quantization training. Generate a quantization model based on the quantization network training.
 
-Compared with common training, the quantization aware training requires additional steps which are steps 3, 6, and 7 in the preceding process.
+Compared with common training, the quantization aware training requires additional steps, which are steps 3, 5, and 6 in the preceding process.
 > - Fusion network: network after the specified operators (`nn.Conv2dBnAct` and `nn.DenseBnAct`) are used for replacement.
-> - Fusion model: model in the checkpoint format generated by the fusion network training.
 > - Quantization network: network obtained after the fusion network uses a conversion API (`convert_quant_network`) to insert a fake quantization node.
 > - Quantization model: model in the checkpoint format obtained after the quantization network training.
 
-Next, the LeNet network is used as an example to describe steps 3 and 6.
+Next, the LeNet network is used as an example to describe steps 2 and 3.
 
 > You can obtain the complete executable sample code at .
 
@@ -132,8 +130,8 @@ class LeNet5(nn.Cell):
         super(LeNet5, self).__init__()
         self.num_class = num_class
 
-        self.conv1 = nn.Conv2dBnAct(1, 6, kernel_size=5, batchnorm=True, activation='relu')
-        self.conv2 = nn.Conv2dBnAct(6, 16, kernel_size=5, batchnorm=True, activation='relu')
+        self.conv1 = nn.Conv2dBnAct(1, 6, kernel_size=5, activation='relu')
+        self.conv2 = nn.Conv2dBnAct(6, 16, kernel_size=5, activation='relu')
 
         self.fc1 = nn.DenseBnAct(16 * 5 * 5, 120, activation='relu')
         self.fc2 = nn.DenseBnAct(120, 84, activation='relu')
@@ -155,9 +153,9 @@ class LeNet5(nn.Cell):
 
 Use the `convert_quant_network` API to automatically insert a fake quantization node into the fusion network to convert it into a quantization network.
 
 ```python
-from mindspore.train.quant import quant as qat
+from mindspore.train.quant import quant
 
-net = qat.convert_quant_network(net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8)
+net = quant.convert_quant_network(network, quant_delay=900, bn_fold=False, per_channel=[True, False], symmetric=[False, False])
 ```
 
 ## Retraining and Inference
 
@@ -167,16 +165,16 @@ net = qat.convert_quant_network(net, quant_delay=0, bn_fold=False, freeze_bn=100
 
 The preceding describes the quantization aware training from scratch. A more common case is that an existing model file needs to be converted to a quantization model. The model file and training script obtained through common network model training are available for quantization aware training. To use a checkpoint file for retraining, perform the following steps:
 
 1. Process data and load datasets.
- 2. Define a network.
- 3. Define a fusion network.
- 4. Define an optimizer and loss function.
- 5. Load a model file and retrain the model. Load an existing model file and retrain the model based on the fusion network to generate a fusion model. For details, see .
- 6. Generate a quantization network.
- 7. Perform quantization training.
+ 2. Define an original unquantized network.
+ 3. Train the original network to generate an unquantized model.
+ 4. Define a fusion network.
+ 5. Define an optimizer and loss function.
+ 6. Generate a quantization network based on the fusion network.
+ 7. Load a model file and retrain the model. Load the unquantized model file generated in step 3 and retrain based on the quantization network to generate a quantization model. For details, see .
 
 ### Inference
 
-The inference using a quantization model is the same as common model inference. The inference can be performed by directly using the checkpoint file or converting the checkpoint file into a common model format (such as ONNX or AIR).
+The inference using a quantization model is the same as common model inference.
The inference can be performed by directly using the checkpoint file or converting the checkpoint file into a common model format (such as AIR or MINDIR).
+
 For details, see .
diff --git a/tutorials/source_en/advanced_use/second_order_optimizer_for_resnet50_application.md b/tutorials/source_en/advanced_use/second_order_optimizer_for_resnet50_application.md
new file mode 100644
index 0000000000000000000000000000000000000000..5aa73da80ac66c478049924da4cc689cdee74b2f
--- /dev/null
+++ b/tutorials/source_en/advanced_use/second_order_optimizer_for_resnet50_application.md
@@ -0,0 +1,474 @@
+# ResNet-50 Second-Order Optimization Practice
+
+`Ascend` `GPU` `Model Development` `Model Optimization` `Expert`
+
+
+
+- [ResNet-50 Second-Order Optimization Practice](#resnet-50-second-order-optimization-practice)
+    - [Overview](#overview)
+    - [Preparation](#preparation)
+        - [Preparing the Dataset](#preparing-the-dataset)
+        - [Configuring Distributed Environment Variables](#configuring-distributed-environment-variables)
+            - [Ascend 910](#ascend-910)
+            - [GPU](#gpu)
+    - [Loading the Dataset](#loading-the-dataset)
+    - [Defining the Network](#defining-the-network)
+    - [Defining the Loss Function and Optimizer THOR](#defining-the-loss-function-and-optimizer-thor)
+        - [Defining the Loss Function](#defining-the-loss-function)
+        - [Defining the Optimizer](#defining-the-optimizer)
+    - [Training the Network](#training-the-network)
+        - [Saving the Configured Model](#saving-the-configured-model)
+        - [Configuring the Network Training](#configuring-the-network-training)
+        - [Running the Script](#running-the-script)
+            - [Ascend 910](#ascend-910-1)
+            - [GPU](#gpu-1)
+    - [Model Inference](#model-inference)
+        - [Defining the Inference Network](#defining-the-inference-network)
+        - [Inference](#inference)
+            - [Ascend 910](#ascend-910-2)
+            - [GPU](#gpu-2)
+
+
+   
+
+## Overview
+
+Common optimization algorithms are classified into the first-order and the second-order optimization algorithms. Typical first-order optimization algorithms, such as stochastic gradient descent (SGD), involve a small amount of computation per step and run fast, but converge slowly and require a large number of training steps. The second-order optimization algorithms use the second-order derivative of the objective function to accelerate convergence to the optimal value of a model and require fewer training steps. However, the second-order optimization algorithms have excessively high computation costs, so their overall execution time is still longer than that of the first-order optimization algorithms. As a result, the second-order optimization algorithms are not widely used in deep neural network training. The main computation costs of the second-order optimization algorithms lie in the inverse operation of the second-order information matrices such as the Hessian matrix and the [Fisher information matrix (FIM)](https://arxiv.org/pdf/1808.07172.pdf). The time complexity is about $O(n^3)$.
+
+Based on the existing natural gradient algorithm, the MindSpore development team uses optimized acceleration methods such as approximation and sharding for the FIM, greatly reducing the computation complexity of the inverse matrix, and has developed the usable second-order optimizer THOR. With eight Ascend 910 AI processors, THOR can complete the training of the ResNet-50 v1.5 network on the ImageNet dataset within 72 minutes, which is nearly twice the speed of SGD+Momentum.
+
+
+This tutorial describes how to use the second-order optimizer THOR provided by MindSpore to train the ResNet-50 v1.5 network on the ImageNet dataset on Ascend 910 and GPU.
+> Download address of the complete code example: 
+
+
+Directory Structure of Code Examples
+
+```shell
+├── resnet_thor
+    ├── README.md
+    ├── scripts
+        ├── run_distribute_train.sh         # launch distributed training for Ascend 910
+        ├── run_eval.sh                     # launch inference for Ascend 910
+        ├── run_distribute_train_gpu.sh     # launch distributed training for GPU
+        └── run_eval_gpu.sh                 # launch inference for GPU
+    ├── src
+        ├── crossentropy.py                 # CrossEntropy loss function
+        ├── config.py                       # parameter configuration
+        ├── dataset_helper.py               # dataset helper for minddata dataset
+        ├── grad_reducer_thor.py            # grad reduce for thor
+        ├── model_thor.py                   # model for train
+        ├── resnet_thor.py                  # resnet50_thor backbone
+        ├── thor.py                         # thor optimizer
+        ├── thor_layer.py                   # thor layer
+        └── dataset.py                      # data preprocessing
+    ├── eval.py                             # infer script
+    └── train.py                            # train script
+
+```
+
+The overall execution process is as follows:
+1. Prepare the ImageNet dataset and process the required dataset.
+2. Define the ResNet-50 network.
+3. Define the loss function and the optimizer THOR.
+4. Load the dataset and perform training. After the training is complete, check the result and save the model file.
+5. Load the saved model for inference.
+
+
+## Preparation
+
+Ensure that MindSpore has been correctly installed. If not, install it by referring to [Install](https://www.mindspore.cn/install/en).
+
+### Preparing the Dataset
+
+Download the complete ImageNet2012 dataset, decompress the dataset, and save it to the `ImageNet2012/ilsvrc` and `ImageNet2012/ilsvrc_eval` directories in the local workspace.
+
+The directory structure is as follows:
+
+```
+└─ImageNet2012
+    ├─ilsvrc
+    │   n03676483
+    │   n04067472
+    │   n01622779
+    │   ......
+    └─ilsvrc_eval
+    │   n03018349
+    │   n02504013
+    │   n07871810
+    │   ......
+
+```
+### Configuring Distributed Environment Variables
+#### Ascend 910
+For details about how to configure the distributed environment variables of Ascend 910 AI processors, see [Parallel Distributed Training (Ascend)](https://www.mindspore.cn/tutorial/en/master/advanced_use/distributed_training_ascend.html#id4).
+
+#### GPU
+For details about how to configure the distributed environment of GPUs, see [Parallel Distributed Training (GPU)](https://www.mindspore.cn/tutorial/en/master/advanced_use/distributed_training_gpu.html#id4).
+
+
+## Loading the Dataset
+
+During distributed training, load the dataset in parallel mode and process it through the data augmentation API provided by MindSpore. The `src/dataset.py` script in the source code is used for loading and processing the dataset.
+```python
+import os
+import mindspore.common.dtype as mstype
+import mindspore.dataset.engine as de
+import mindspore.dataset.transforms.vision.c_transforms as C
+import mindspore.dataset.transforms.c_transforms as C2
+from mindspore.communication.management import init, get_rank, get_group_size
+
+def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
+    if target == "Ascend":
+        device_num, rank_id = _get_rank_info()
+    else:
+        init()
+        rank_id = get_rank()
+        device_num = get_group_size()
+    if device_num == 1:
+        ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
+    else:
+        ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True,
+                                     num_shards=device_num, shard_id=rank_id)
+
+    image_size = 224
+    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
+    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
+    # define map operations
+    if do_train:
+        trans = [
+            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            C.RandomHorizontalFlip(prob=0.5),
+            C.Normalize(mean=mean, std=std),
+            C.HWC2CHW()
+        ]
+    else:
+        trans = [
+            C.Decode(),
+            C.Resize(256),
+            C.CenterCrop(image_size),
+            C.Normalize(mean=mean, std=std),
+            C.HWC2CHW()
+        ]
+    type_cast_op = C2.TypeCast(mstype.int32)
+    ds = ds.map(input_columns="image", num_parallel_workers=8, operations=trans)
+    ds = ds.map(input_columns="label", num_parallel_workers=8, operations=type_cast_op)
+
+    # apply batch operations
+    ds = ds.batch(batch_size, drop_remainder=True)
+
+    # apply dataset repeat operation
+    ds = ds.repeat(repeat_num)
+
+    return ds
+```
+
+> MindSpore supports multiple data processing and augmentation operations, which are usually combined. For details, see [Data Processing and Augmentation](https://www.mindspore.cn/tutorial/en/master/use/data_preparation/data_processing_and_augmentation.html).
+
+
+## Defining the Network
+Use the ResNet-50 v1.5 network model as an example. Define the [ResNet-50 network](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/resnet/src/resnet.py), and replace the `Conv2d` and `Dense` operators with the operators customized by the second-order optimizer.
+ The defined network model is stored in the `src/resnet_thor.py` script in the source code, and the customized operators `Conv2d_thor` and `Dense_thor` are stored in the `src/thor_layer.py` script.
+
+- Use `Conv2d_thor` to replace `Conv2d` in the original network model.
+- Use `Dense_thor` to replace `Dense` in the original network model.
+
+> The `Conv2d_thor` and `Dense_thor` operators customized by THOR are used to save the second-order matrix information in model training. The backbone of the newly defined network is the same as that of the original network model.
+
+After the network is built, call the defined ResNet-50 in the `__main__` function.
+```python
+...
+from src.resnet_thor import resnet50
+...
+if __name__ == "__main__":
+    ...
+    # define the net
+    net = resnet50(class_num=config.class_num, damping=damping, loss_scale=config.loss_scale,
+                   frequency=config.frequency, batch_size=config.batch_size)
+    ...
+```
+
+
+## Defining the Loss Function and Optimizer THOR
+
+
+### Defining the Loss Function
+
+Loss functions supported by MindSpore include `SoftmaxCrossEntropyWithLogits`, `L1Loss`, and `MSELoss`. The `SoftmaxCrossEntropyWithLogits` loss function is required by THOR.
+
+The implementation procedure of the loss function is in the `src/crossentropy.py` script.
Label smoothing, a common trick in deep network model training, is used to improve the model's tolerance to misclassified labels by smoothing real labels, thereby improving the model's generalization capability.
+```python
+class CrossEntropy(_Loss):
+    """CrossEntropy"""
+    def __init__(self, smooth_factor=0., num_classes=1000):
+        super(CrossEntropy, self).__init__()
+        self.onehot = P.OneHot()
+        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
+        self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)
+        self.ce = nn.SoftmaxCrossEntropyWithLogits()
+        self.mean = P.ReduceMean(False)
+
+    def construct(self, logit, label):
+        one_hot_label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
+        loss = self.ce(logit, one_hot_label)
+        loss = self.mean(loss, 0)
+        return loss
+```
+Call the defined loss function in the `__main__` function.
+
+```python
+...
+from src.crossentropy import CrossEntropy
+...
+if __name__ == "__main__":
+    ...
+    # define the loss function
+    if not config.use_label_smooth:
+        config.label_smooth_factor = 0.0
+    loss = CrossEntropy(smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
+    ...
+```
+
+### Defining the Optimizer
+
+The parameter update formula of THOR is as follows:
+
+$$ \theta^{t+1} = \theta^t + \alpha F^{-1}\nabla E$$
+
+The meanings of parameters in the formula are as follows:
+- $\theta$: trainable parameters of the network
+- $t$: number of training steps
+- $\alpha$: learning rate, which scales the parameter update at each step
+- $F^{-1}$: inverse of the FIM obtained from the network computation
+- $\nabla E$: the first-order gradient value
+
+As shown in the parameter update formula, THOR needs to additionally compute the FIM of each layer, which is obtained in the customized network model. The FIM can adaptively adjust the parameter update step and direction of each layer, accelerating convergence and reducing parameter optimization complexity.
+
+```python
+...
+if args_opt.device_target == "Ascend":
+    from src.thor import THOR
+else:
+    from src.thor import THOR_GPU as THOR
+...
+
+if __name__ == "__main__":
+    ...
+    # learning rate setting
+    lr = get_model_lr(0, config.lr_init, config.lr_decay, config.lr_end_epoch, step_size, decay_epochs=39)
+    # define the optimizer
+    opt = THOR(filter(lambda x: x.requires_grad, net.get_parameters()), Tensor(lr), config.momentum,
+               filter(lambda x: 'matrix_A' in x.name, net.get_parameters()),
+               filter(lambda x: 'matrix_G' in x.name, net.get_parameters()),
+               filter(lambda x: 'A_inv_max' in x.name, net.get_parameters()),
+               filter(lambda x: 'G_inv_max' in x.name, net.get_parameters()),
+               config.weight_decay, config.loss_scale)
+    ...
+```
+
+## Training the Network
+
+### Saving the Configured Model
+
+MindSpore provides the callback mechanism to execute customized logic during training. The `ModelCheckpoint` function provided by the framework is used in this example.
+`ModelCheckpoint` can save the network model and parameters for subsequent fine-tuning.
+`TimeMonitor` and `LossMonitor` are callback functions provided by MindSpore. They can be used to monitor the single training step time and `loss` value changes during training, respectively.
+
+```python
+...
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, LossMonitor
+...
+if __name__ == "__main__":
+    ...
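+    # step_size is assumed to be the number of batches per epoch, so the
+    # CheckpointConfig below (save_checkpoint_epochs * step_size) saves a
+    # checkpoint every config.save_checkpoint_epochs epochs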
+ # define callbacks + time_cb = TimeMonitor(data_size=step_size) + loss_cb = LossMonitor() + cb = [time_cb, loss_cb] + if config.save_checkpoint: + config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size, + keep_checkpoint_max=config.keep_checkpoint_max) + ckpt_cb = ModelCheckpoint(prefix="resnet", directory=ckpt_save_dir, config=config_ck) + cb += [ckpt_cb] + ... +``` + +### Configuring the Network Training + +Use the `model.train` API provided by MindSpore to easily train the network. THOR reduces the computation workload and improves the computation speed by reducing the frequency of updating the second-order matrix. Therefore, the Model_Thor class is redefined to inherit the Model class provided by MindSpore. The parameter for controlling the frequency of updating the second-order matrix is added to the Model_Thor class. You can adjust this parameter to optimize the overall performance. + + +```python +... +from mindspore.train.loss_scale_manager import FixedLossScaleManager +from src.model_thor import Model_Thor as Model +... + +if __name__ == "__main__": + ... + loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False) + if target == "Ascend": + model = Model(net, loss_fn=loss, optimizer=opt, amp_level='O2', loss_scale_manager=loss_scale, + keep_batchnorm_fp32=False, metrics={'acc'}, frequency=config.frequency) + else: + model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'}, + amp_level="O2", keep_batchnorm_fp32=True, frequency=config.frequency) + ... +``` + +### Running the Script +After the training script is defined, call the shell script in the `scripts` directory to start the distributed training process. +#### Ascend 910 +Currently, MindSpore distributed execution on Ascend uses the single-device single-process running mode. That is, one process runs on a device, and the number of total processes is the same as the number of devices that are being used. For device 0, the corresponding process is executed in the foreground. For other devices, the corresponding processes are executed in the background. Create a directory named `train_parallel`+`device_id` for each process to store log information, operator compilation information, and training checkpoint files. The following takes the distributed training script for eight devices as an example to describe how to run the script: + +Run the script. +``` +sh run_distribute_train.sh [RANK_TABLE_FILE] [DATASET_PATH] [DEVICE_NUM] +``` +Variables `RANK_TABLE_FILE`, `DATASET_PATH`, and `DEVICE_NUM` need to be transferred to the script. The meanings of variables are as follows: +- `RANK_TABLE_FILE`: path for storing the networking information file +- `DATASET_PATH`: training dataset path +- `DEVICE_NUM`: the actual number of running devices. +For details about other environment variables, see configuration items in the installation guide. + +The following is an example of loss values output during training: + +```bash +... +epoch: 1 step: 5004, loss is 4.4182425 +epoch: 2 step: 5004, loss is 3.740064 +epoch: 3 step: 5004, loss is 4.0546017 +epoch: 4 step: 5004, loss is 3.7598825 +epoch: 5 step: 5004, loss is 3.3744206 +... +epoch: 40 step: 5004, loss is 1.6907625 +epoch: 41 step: 5004, loss is 1.8217756 +epoch: 42 step: 5004, loss is 1.6453942 +... +``` + +After the training is complete, the checkpoint file generated by each device is stored in the training directory. 
The following is an example of the checkpoint file generated by `device_0`: + +```bash +└─train_parallel0 + ├─resnet-1_5004.ckpt + ├─resnet-2_5004.ckpt + │ ...... + ├─resnet-42_5004.ckpt + │ ...... +``` + +In the preceding information, +`*.ckpt` indicates the saved model parameter file. The name of a checkpoint file is in the following format: *Network name*-*Number of epochs*_*Number of steps*.ckpt. + +#### GPU +On the GPU hardware platform, MindSpore uses `mpirun` of OpenMPI to perform distributed training. The process creates a directory named `train_parallel` to store log information and training checkpoint files. The following takes the distributed training script for eight devices as an example to describe how to run the script: +``` +sh run_distribute_train_gpu.sh [DATASET_PATH] [DEVICE_NUM] +``` +Variables `DATASET_PATH` and `DEVICE_NUM` need to be transferred to the script. The meanings of variables are as follows: +- `DATASET_PATH`: training dataset path +- `DEVICE_NUM`: the actual number of running devices + +During GPU-based training, the `DEVICE_ID` environment variable is not required. Therefore, you do not need to call `int(os.getenv('DEVICE_ID'))` in the main training script to obtain the device ID or transfer `device_id` to `context`. You need to set `device_target` to `GPU` and call `init()` to enable the NCCL. + +The following is an example of loss values output during training: +```bash +... +epoch: 1 step: 5004, loss is 4.2546034 +epoch: 2 step: 5004, loss is 4.0819564 +epoch: 3 step: 5004, loss is 3.7005644 +epoch: 4 step: 5004, loss is 3.2668946 +epoch: 5 step: 5004, loss is 3.023509 +... +epoch: 36 step: 5004, loss is 1.645802 +... +``` + +The following is an example of model files saved after training: + +```bash +└─train_parallel + ├─ckpt_0 + ├─resnet-1_5004.ckpt + ├─resnet-2_5004.ckpt + │ ...... + ├─resnet-36_5004.ckpt + │ ...... + ...... + ├─ckpt_7 + ├─resnet-1_5004.ckpt + ├─resnet-2_5004.ckpt + │ ...... + ├─resnet-36_5004.ckpt + │ ...... +``` + +## Model Inference + +Use the checkpoint files saved during training to perform inference and validate the model generalization capability. Load the model file using the `load_checkpoint` API, call the `eval` API of the `Model` to predict the input image class, and compare the predicted class with the actual class of the input image to obtain the final prediction accuracy. + +### Defining the Inference Network + +1. Use the `load_checkpoint` API to load the model file. +2. Use the `model.eval` API to read the test dataset for inference. +3. Compute the prediction accuracy. + +```python +... +from mindspore.train.serialization import load_checkpoint, load_param_into_net +... + +if __name__ == "__main__": + ... + # define net + net = resnet(class_num=config.class_num) + net.add_flags_recursive(thor=False) + + # load checkpoint + param_dict = load_checkpoint(args_opt.checkpoint_path) + keys = list(param_dict.keys()) + for key in keys: + if "damping" in key: + param_dict.pop(key) + load_param_into_net(net, param_dict) + net.set_train(False) + + # define model + model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'}) + + # eval model + res = model.eval(dataset) + print("result:", res, "ckpt=", args_opt.checkpoint_path) +``` + +### Inference +After the inference network is defined, the shell script in the `scripts` directory is called for inference. 
+#### Ascend 910 +On the Ascend 910 hardware platform, run the following inference command: +``` +sh run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH] +``` +Variables `DATASET_PATH` and `CHECKPOINT_PATH` need to be transferred to the script. The meanings of variables are as follows: +- `DATASET_PATH`: inference dataset path +- `CHECKPOINT_PATH`: path for storing the checkpoint file + +Currently, a single device (device 0 by default) is used for inference. The inference result is as follows: +``` +result: {'top_5_accuracy': 0.9295574583866837, 'top_1_accuracy': 0.761443661971831} ckpt=train_parallel0/resnet-42_5004.ckpt +``` +- `top_5_accuracy`: For an input image, if the labels whose prediction probability ranks top 5 contain actual labels, the classification is correct. +- `top_1_accuracy`: For an input image, if the label with the highest prediction probability is the same as the actual label, the classification is correct. +#### GPU + +On the GPU hardware platform, run the following inference command: +``` +sh run_eval_gpu.sh [DATASET_PATH] [CHECKPOINT_PATH] +``` +Variables `DATASET_PATH` and `CHECKPOINT_PATH` need to be transferred to the script. The meanings of variables are as follows: +- `DATASET_PATH`: inference dataset path +- `CHECKPOINT_PATH`: path for storing the checkpoint file + +The inference result is as follows: +``` +result: {'top_5_accuracy': 0.9287972151088348, 'top_1_accuracy': 0.7597031049935979} ckpt=train_parallel/resnet-36_5004.ckpt +``` \ No newline at end of file diff --git a/tutorials/source_en/advanced_use/summary_record.md b/tutorials/source_en/advanced_use/summary_record.md index 23ec33e637f881deebd02a9953dfad3d466d4c7a..e6b8d4c1ea9e515f12ff3ff019968a9ed78b3a0f 100644 --- a/tutorials/source_en/advanced_use/summary_record.md +++ b/tutorials/source_en/advanced_use/summary_record.md @@ -127,10 +127,10 @@ model.eval(ds_eval, callbacks=[summary_collector]) In addition to providing the `SummaryCollector` that automatically collects some summary data, MindSpore provides summary operators that enable custom collection other data on the network, such as the input of each convolutional layer, or the loss value in the loss function, etc. Summary operators currently supported: -- [ScalarSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html?highlight=scalarsummary#mindspore.ops.operations.ScalarSummary): Record a scalar data. -- [TensorSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html?highlight=tensorsummary#mindspore.ops.operations.TensorSummary): Record a tensor data. -- [ImageSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html?highlight=imagesummary#mindspore.ops.operations.ImageSummary): Record a image data. -- [HistogramSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html?highlight=histogramsummar#mindspore.ops.operations.HistogramSummary): Convert tensor data into histogram data records. +- [ScalarSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html?highlight=scalarsummary#mindspore.ops.ScalarSummary): Record a scalar data. +- [TensorSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html?highlight=tensorsummary#mindspore.ops.TensorSummary): Record a tensor data. 
+- [ImageSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html?highlight=imagesummary#mindspore.ops.ImageSummary): Record image data.
+- [HistogramSummary](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.html?highlight=histogramsummar#mindspore.ops.HistogramSummary): Convert tensor data into histogram data records.
 
 The recording method is shown in the following steps.
 
@@ -366,4 +366,6 @@ For more parameter Settings, see the [MindInsight related commands](https://www.
 
    model.train(2, train_dataset, callbacks=[confusion_callback, summary_collector])
    ```
 
-3. In each Summary log file directory, only one training data should be placed. If a summary log directory contains summary data from multiple training, MindInsight will overlay the summary data from these training when visualizing the data, which may not be consistent with the expected visualizations.
\ No newline at end of file
+3. Each summary log directory should contain data from only one training. If a summary log directory contains summary data from multiple trainings, MindInsight will overlay the summary data from these trainings when visualizing it, which may not be consistent with the expected visualizations.
+
+4. Currently, `SummaryCollector` and `SummaryRecord` do not support running with multiple GPUs.
\ No newline at end of file
diff --git a/tutorials/source_en/advanced_use/synchronization_training_and_evaluation.md b/tutorials/source_en/advanced_use/synchronization_training_and_evaluation.md
index d42c89c84014a15d0646b82ed3019ab837eb04ad..193c6f9f116c87186ff404c0a9f448cdc9fa7ae0 100644
--- a/tutorials/source_en/advanced_use/synchronization_training_and_evaluation.md
+++ b/tutorials/source_en/advanced_use/synchronization_training_and_evaluation.md
@@ -32,7 +32,7 @@ Implementation idea: The model accuracy is validated every n epochs. The model a
 
 Core implementation: Validation points are set in `epoch_end` of the callback function as follows:
 
-`cur_epoch % eval_per_epoch == 0`: indicates that the model accuracy is validated every `eval_per_epoch` epochs.
+`cur_epoch % eval_per_epoch == 0`: indicates that the model accuracy is validated every `eval_per_epoch` epoch.
 
 - `cur_epoch`: indicates the epoch value in the current training process.
 - `eval_per_epoch`: indicates the user-defined value, that is, the validation frequency.
 
@@ -40,7 +40,7 @@ Core implementation: Validation points are set in `epoch_end` of the callback fu
 
 Other parameters are described as follows:
 
 - `model`: indicates the `Model` function in MindSpore.
-- `eval_dataset`: indicates validation dataset.
+- `eval_dataset`: indicates the validation dataset.
 - `epoch_per_eval`: records the accuracy of the validation model and the corresponding number of epochs. The data format is `{"epoch": [], "acc": []}`.
 
@@ -75,7 +75,7 @@ The parameters are described as follows:
 
 - `keep_checkpoint_max`: indicates the maximum number of models that can be saved.
 - `ckpoint_cb`: defines the name and path for saving the model.
 - `model`: defines a model.
-- `model.train`: indicates model training function.
+- `model.train`: indicates the model training function.
 - `epoch_per_eval`: defines the number for collecting `epoch` and the dictionary of corresponding model accuracy information.
```python diff --git a/tutorials/source_en/advanced_use/visualization_tutorials.rst b/tutorials/source_en/advanced_use/visualization_tutorials.rst index 17b1532bba19766e25351c89b259100f1b96d47d..e2912987938bf4e475c3fa9292f1413bdae57b3a 100644 --- a/tutorials/source_en/advanced_use/visualization_tutorials.rst +++ b/tutorials/source_en/advanced_use/visualization_tutorials.rst @@ -9,4 +9,5 @@ Training Process Visualization lineage_and_scalars_comparision performance_profiling performance_profiling_gpu + debugger mindinsight_commands diff --git a/tutorials/source_en/index.rst b/tutorials/source_en/index.rst index 0c0e217dec12430a11983202537e4c2176226531..fc81a983602b23e8ffa49b3549a00773ed91cdbf 100644 --- a/tutorials/source_en/index.rst +++ b/tutorials/source_en/index.rst @@ -19,7 +19,7 @@ MindSpore Tutorials :maxdepth: 1 :caption: Use - use/data_preparation/data_preparation + use/data_preparation use/defining_the_network use/saving_and_loading_model_parameters use/multi_platform_inference @@ -31,7 +31,9 @@ MindSpore Tutorials advanced_use/computer_vision_application advanced_use/nlp_application - advanced_use/synchronization_training_and_evaluation.md + advanced_use/synchronization_training_and_evaluation + advanced_use/optimize_the_performance_of_data_preparation + advanced_use/second_order_optimizer_for_resnet50_application .. toctree:: :glob: @@ -52,6 +54,8 @@ MindSpore Tutorials advanced_use/graph_kernel_fusion advanced_use/quantization_aware advanced_use/gradient_accumulation + advanced_use/dataset_conversion + advanced_use/auto_augmentation .. toctree:: :glob: @@ -66,6 +70,7 @@ MindSpore Tutorials :caption: Network Migration advanced_use/network_migration + advanced_use/model_scripts_transformation .. toctree:: :glob: @@ -74,4 +79,4 @@ MindSpore Tutorials advanced_use/model_security advanced_use/differential_privacy - + advanced_use/fuzzer diff --git a/tutorials/source_en/quick_start/quick_video.md b/tutorials/source_en/quick_start/quick_video.md index a46f6ae41dde7d4826433e6a24140039427d6ad2..b11de9ebd0d6a767ad183dc4e19fc019ee7bcc26 100644 --- a/tutorials/source_en/quick_start/quick_video.md +++ b/tutorials/source_en/quick_start/quick_video.md @@ -269,7 +269,7 @@ Provides video tutorials from installation to try-on, helping you quickly use Mi class="video-item-wraper" style="width: 33.3%;display: flex;justify-content: center;align-items: center;padding: 10px;box-sizing: border-box;"> - (This document contains Hands-on Tutorial Series. Gitee does not support display. Please check tutorials on the official website) - -**View code**: +**View code**: **View the full tutorial**: \ No newline at end of file diff --git a/tutorials/source_en/use/data_preparation/data_preparation.rst b/tutorials/source_en/use/data_preparation.rst similarity index 44% rename from tutorials/source_en/use/data_preparation/data_preparation.rst rename to tutorials/source_en/use/data_preparation.rst index ec222aac3337623368140aa1e8c3e369d3f09796..610d74cd67f74ee26342c766fddf9812e798165f 100644 --- a/tutorials/source_en/use/data_preparation/data_preparation.rst +++ b/tutorials/source_en/use/data_preparation.rst @@ -4,6 +4,5 @@ Data Preparation .. 
toctree:: :maxdepth: 1 - loading_the_datasets - converting_datasets - data_processing_and_augmentation \ No newline at end of file + image_loading + text_loading diff --git a/tutorials/source_en/use/data_preparation/converting_datasets.md b/tutorials/source_en/use/data_preparation/converting_datasets.md deleted file mode 100644 index b1d8a21224cba6204a2d411e2dd2664e67748a61..0000000000000000000000000000000000000000 --- a/tutorials/source_en/use/data_preparation/converting_datasets.md +++ /dev/null @@ -1,241 +0,0 @@ -# Converting Datasets to the Mindspore Data Format - -`Linux` `Ascend` `GPU` `CPU` `Data Preparation` `Beginner` `Intermediate` `Expert` - - - -- [Converting Datasets to the Mindspore Data Format](#converting-datasets-to-the-mindspore-data-format) - - [Overview](#overview) - - [Converting Non-Standard Datasets to the Mindspore Data Format](#converting-non-standard-datasets-to-the-mindspore-data-format) - - [Converting Images and Labels](#converting-images-and-labels) - - [Converting Common Datasets to the MindSpore Data Format](#converting-common-datasets-to-the-mindspore-data-format) - - [Converting the CIFAR-10 Dataset](#converting-the-cifar-10-dataset) - - [Converting the CIFAR-100 Dataset](#converting-the-cifar-100-dataset) - - [Converting the ImageNet Dataset](#converting-the-imagenet-dataset) - - [Converting the MNIST Dataset](#converting-the-mnist-dataset) - - - - - -## Overview - -You can convert non-standard datasets and common datasets to the MindSpore data format so that they can be easily loaded to MindSpore for training. In addition, the performance of MindSpore in some scenarios is optimized, therefore using datasets in the MindSpore data format can deliver a better user experience. -The MindSpore data format has the following features: -1. Unified storage and access of user data are implemented, simplifying training data reading. -2. Data is aggregated for storage, efficient reading, and easy management and transfer. -3. Data encoding and decoding are efficient and transparent to users. -4. The partition size is flexibly controlled to implement distributed training. - -## Converting Non-Standard Datasets to the Mindspore Data Format - -MindSpore provides write operation tools to write user-defined raw data in MindSpore format. - -### Converting Images and Labels - -1. Import the `FileWriter` class for file writing. - - ```python - from mindspore.mindrecord import FileWriter - ``` - -2. Define a dataset schema which specifies dataset fields and field types. - - ```python - cv_schema_json = {"file_name": {"type": "string"}, "label": {"type": "int32"}, "data": {"type": "bytes"}} - ``` - Schema specifications are as follows: - A field name can contain only letters, digits, and underscores (_). - The field type can be int32, int64, float32, float64, string, or bytes. - The field shape can be a one-dimensional array represented by [-1], a two-dimensional array represented by [m, n], or a three-dimensional array represented by [x, y, z]. - > 1. The type of a field with the shape attribute can only be int32, int64, float32, or float64. - > 2. If the field has the shape attribute, prepare the data type as `numpy.ndarray` before transferring the data to the `write_raw_data` API. 
- - Examples: - - Image classification - ```python - cv_schema_json = {"file_name": {"type": "string"}, "label": {"type": "int32"}, "data": {"type": "bytes"}} - ``` - - Natural Language Processing (NLP) - ```python - cv_schema_json = {"id": {"type": "int32"}, "masks": {"type": "int32", "shape": [-1]}, "inputs": {"type": "int64", "shape": [4, 32]}, "labels": {"type": "int64", "shape": [-1]}} - ``` - -3. Prepare the data sample list to be written based on the user-defined schema format. - - ```python - data = [{"file_name": "1.jpg", "label": 0, "data": b"\x10c\xb3w\xa8\xee$o&\xd4\x00\xf8\x129\x15\xd9\xf2q\xc0\xa2\x91YFUO\x1dsE1\x1ep"}, - {"file_name": "3.jpg", "label": 99, "data": b"\xaf\xafU<\xb8|6\xbd}\xc1\x99[\xeaj+\x8f\x84\xd3\xcc\xa0,i\xbb\xb9-\xcdz\xecp{T\xb1\xdb"}] - ``` - -4. Prepare index fields. Adding index fields can accelerate data reading. This step is optional. - - ```python - indexes = ["file_name", "label"] - ``` - -5. Create a `FileWriter` object, transfer the file name and number of slices, add the schema and index, call the `write_raw_data` API to write data, and call the `commit` API to generate a local data file. - - ```python - writer = FileWriter(file_name="testWriter.mindrecord", shard_num=4) - writer.add_schema(cv_schema_json, "test_schema") - writer.add_index(indexes) - writer.write_raw_data(data) - writer.commit() - ``` - In the preceding information: - `write_raw_data`: writes data to the memory. - `commit`: writes the data in the memory to the disk. - -6. Add data to the existing data format file, call the `open_for_append` API to open the existing data file, call the `write_raw_data` API to write new data, and then call the `commit` API to generate a local data file. - ```python - writer = FileWriter.open_for_append("testWriter.mindrecord0") - writer.write_raw_data(data) - writer.commit() - ``` - -## Converting Common Datasets to the MindSpore Data Format - -MindSpore provides utility classes to convert common datasets to the MindSpore data format. The following table lists common datasets and utility classes to be called: - -| Dataset | Utility Class | -| -------- | ------------ | -| CIFAR-10 | Cifar10ToMR | -| CIFAR-100| Cifar100ToMR | -| ImageNet | ImageNetToMR | -| MNIST | MnistToMR | - - -### Converting the CIFAR-10 Dataset -You can use the `Cifar10ToMR` class to convert the raw CIFAR-10 data into the MindSpore data format. - -1. Prepare the CIFAR-10 python version dataset and decompress the file to a specified directory (the `cifar10` directory in the example), as the following shows: - ``` - % ll cifar10/cifar-10-batches-py/ - batches.meta - data_batch_1 - data_batch_2 - data_batch_3 - data_batch_4 - data_batch_5 - readme.html - test_batch - ``` - > CIFAR-10 dataset download address: - -2. Import the `Cifar10ToMR` class for dataset converting. - - ```python - from mindspore.mindrecord import Cifar10ToMR - ``` -3. Instantiate the `Cifar10ToMR` object and call the `transform` API to convert the CIFAR-10 dataset to the MindSpore data format. - - ```python - CIFAR10_DIR = "./cifar10/cifar-10-batches-py" - MINDRECORD_FILE = "./cifar10.mindrecord" - cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, MINDRECORD_FILE) - cifar10_transformer.transform(['label']) - ``` - In the preceding information: - `CIFAR10_DIR`: path where the CIFAR-10 dataset folder is stored. - `MINDRECORD_FILE`: path where the output file in the MindSpore data format is stored. 
- -### Converting the CIFAR-100 Dataset -You can use the `Cifar100ToMR` class to convert the raw CIFAR-100 data to the MindSpore data format. - -1. Prepare the CIFAR-100 dataset and decompress the file to a specified directory (the `cifar100` directory in the example). - ``` - % ll cifar100/cifar-100-python/ - meta - test - train - ``` - > CIFAR-100 dataset download address: - -2. Import the `Cifar100ToMR` class for converting the dataset. - - ```python - from mindspore.mindrecord import Cifar100ToMR - ``` -3. Instantiate the `Cifar100ToMR` object and call the `transform` API to convert the CIFAR-100 dataset to the MindSpore data format. - - ```python - CIFAR100_DIR = "./cifar100/cifar-100-python" - MINDRECORD_FILE = "./cifar100.mindrecord" - cifar100_transformer = Cifar100ToMR(CIFAR100_DIR, MINDRECORD_FILE) - cifar100_transformer.transform(['fine_label', 'coarse_label']) - ``` - In the preceding information: - `CIFAR100_DIR`: path where the CIFAR-100 dataset folder is stored. - `MINDRECORD_FILE`: path where the output file in the MindSpore data format is stored. - -### Converting the ImageNet Dataset - -You can use the `ImageNetToMR` class to convert the raw ImageNet data (images and labels) to the MindSpore data format. - -1. Download and prepare the ImageNet dataset as required. - - > ImageNet dataset download address: - - Store the downloaded ImageNet dataset in a folder. The folder contains all images and a mapping file that records labels of the images. - - In the mapping file, there are two columns, which are separated by spaces. They indicate image classes and label IDs. The following is an example of the mapping file: - ``` - n01440760 0 - n01443537 1 - n01484850 2 - n01491361 3 - n01494475 4 - n01496331 5 - ``` - -2. Import the `ImageNetToMR` class for dataset converting. - - ```python - from mindspore.mindrecord import ImageNetToMR - ``` - -3. Instantiate the `ImageNetToMR` object and call the `transform` API to convert the dataset to the MindSpore data format. - ```python - IMAGENET_MAP_FILE = "./testImageNetDataWhole/labels_map.txt" - IMAGENET_IMAGE_DIR = "./testImageNetDataWhole/images" - MINDRECORD_FILE = "./testImageNetDataWhole/imagenet.mindrecord" - PARTITION_NUMBER = 4 - imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE, IMAGENET_IMAGE_DIR, MINDRECORD_FILE, PARTITION_NUMBER) - imagenet_transformer.transform() - ``` - In the preceding information: - `IMAGENET_MAP_FILE`: path where the label mapping file of the ImageNetToMR dataset is stored. - `IMAGENET_IMAGE_DIR`: path where all ImageNet images are stored. - `MINDRECORD_FILE`: path where the output file in the MindSpore data format is stored. - -### Converting the MNIST Dataset -You can use the `MnistToMR` class to convert the raw MNIST data to the MindSpore data format. - -1. Prepare the MNIST dataset and save the downloaded file to a specified directory, as the following shows: - ``` - % ll mnist_data/ - train-images-idx3-ubyte.gz - train-labels-idx1-ubyte.gz - t10k-images-idx3-ubyte.gz - t10k-labels-idx1-ubyte.gz - ``` - > MNIST dataset download address: - -2. Import the `MnistToMR` class for dataset converting. - - ```python - from mindspore.mindrecord import MnistToMR - ``` -3. Instantiate the `MnistToMR` object and call the `transform` API to convert the MNIST dataset to the MindSpore data format. 
- - ```python - MNIST_DIR = "./mnist_data" - MINDRECORD_FILE = "./mnist.mindrecord" - mnist_transformer = MnistToMR(MNIST_DIR, MINDRECORD_FILE) - mnist_transformer.transform() - ``` - In the preceding information: - `MNIST_DIR`: path where the MNIST dataset folder is stored. - `MINDRECORD_FILE`: path where the output file in the MindSpore data format is stored. diff --git a/tutorials/source_en/use/data_preparation/data_processing_and_augmentation.md b/tutorials/source_en/use/data_preparation/data_processing_and_augmentation.md deleted file mode 100644 index 419b1edc5518dffd9b8ba545e99cf8e9bc9b85d9..0000000000000000000000000000000000000000 --- a/tutorials/source_en/use/data_preparation/data_processing_and_augmentation.md +++ /dev/null @@ -1,340 +0,0 @@ -# Data Processing and Augmentation - -`Linux` `Ascend` `GPU` `CPU` `Data Preparation` `Beginner` `Intermediate` `Expert` - - - -- [Data Processing and Augmentation](#data-processing-and-augmentation) - - [Overview](#overview) - - [Data Processing Operations Supported by Mindspore](#data-processing-operations-supported-by-mindspore) - - [repeat](#repeat) - - [batch](#batch) - - [shuffle](#shuffle) - - [map](#map) - - [zip](#zip) - - [Data Augmentation](#data-augmentation) - - [Using the `c_transforms` Module](#using-the-c_transforms-module) - - [Using the `py_transforms` Module](#using-the-py_transforms-module) - - - - - -## Overview - -Data is the basis of deep learning. Data input plays an important role in the deep neural network training. Therefore, after the original dataset is obtained and before data is loaded and trained, data processing or augmentation is often required due to data size and performance restrictions, to obtain optimized data input. -MindSpore provides users with data processing and augmentation functions. -> Essentially, data augmentation is implemented through the data processing operation `map`. Yet data augmentation is described separately due to its diversified transform operations. - -## Data Processing Operations Supported by Mindspore -MindSpore supports multiple data processing operations, including repeat, batch, shuffle, and map, as shown in the following table. - -| Operation | Description | -| -------- | -------------------------------------- | -| repeat | Repeat a dataset to increase the data size. | -| batch | Process data in batches to accelerate the training process. | -| shuffle | Shuffle data. | -| map | Apply the provided functions or operators to the specified column data. | -| zip | Combine multiple datasets into one dataset. | - - - -The operations can be performed separately. In practice, they are often used together as needed. You are advised to use them in the following sequence: - -![avatar](../images/dataset_pipeline.png) - -In the following example, the `shuffle`, `batch`, and `repeat` operations are performed when the MNIST dataset is read. - -```python -import mindspore.dataset as ds - -ds1 = ds.MnistDataset(MNIST_DATASET_PATH, MNIST_SCHEMA) # Create MNIST dataset. - -ds1 = ds1.shuffle(buffer_size=10000) -ds1 = ds1.batch(32, drop_remainder=True) -ds1 = ds1.repeat(10) -``` -In the preceding operations, data is shuffled, every 32 data records are combined into a batch, and then the dataset is repeated for 10 times. - -The following describes how to construct a simple dataset `ds1` and perform data processing operations on it. -1. Import the module on which data processing depends. - ```python - import mindspore.dataset as ds - ``` -2. 
2. Define the `generator_func` function to generate a dataset.
    ```python
    import numpy as np

    def generator_func():
        for i in range(5):
            yield (np.array([i, i+1, i+2]),)
    ```
3. Use `GeneratorDataset` to create the dataset `ds1` for data processing.
    ```python
    ds1 = ds.GeneratorDataset(generator_func, ["data"])
    print("ds1:")
    for data in ds1.create_dict_iterator():
        print(data["data"])
    ```
    The output is as follows:
    ```
    ds1:
    [0 1 2]
    [1 2 3]
    [2 3 4]
    [3 4 5]
    [4 5 6]
    ```
### repeat
When the dataset size is limited, a dataset is usually traversed multiple times so that the network can be optimized further.

![avatar](../images/repeat.png)

> In machine learning, an epoch refers to one cycle through the full training dataset.

During training, `repeat` can be used to increase the data size. The definition of `repeat` is as follows:
```python
def repeat(self, count=None):
```

You can define the dataset `ds2` and call `repeat` to increase the data size. The sample code is as follows:

```python
ds2 = ds.GeneratorDataset(generator_func, ["data"])
ds2 = ds2.repeat(2)
print("ds2:")
for data in ds2.create_dict_iterator():
    print(data["data"])
```
The repeat count is set to 2, so the data size of `ds2` is twice that of the original dataset `ds1`. The output is as follows:

```
ds2:
[0 1 2]
[1 2 3]
[2 3 4]
[3 4 5]
[4 5 6]
[0 1 2]
[1 2 3]
[2 3 4]
[3 4 5]
[4 5 6]
```
### batch
The batch operation combines data records in a dataset into batches. Training in batches reduces the number of training steps and accelerates the training process. MindSpore provides the `batch` function to implement this operation. The function is defined as follows:

![avatar](../images/batch.png)

```python
def batch(self, batch_size, drop_remainder=False, num_parallel_workers=None)
```

Use the dataset `ds1` generated by GeneratorDataset to construct two datasets.
- In the first dataset `ds2`, combine every two data records into a batch.
- In the second dataset `ds3`, combine every three data records into a batch, and drop the remaining records that cannot fill a complete batch.

The sample code of `ds2` is as follows:
```python
ds2 = ds1.batch(batch_size=2) # Default drop_remainder is False, the last remainder batch isn't dropped.
print("batch size:2 drop remainder:False")
for data in ds2.create_dict_iterator():
    print(data["data"])
```
The output is as follows:
```
batch size:2 drop remainder:False
[[0 1 2]
 [1 2 3]]
[[2 3 4]
 [3 4 5]]
[[4 5 6]]
```

The sample code of `ds3` is as follows:
```python
ds3 = ds1.batch(batch_size=3, drop_remainder=True) # When drop_remainder is True, the last remainder batch will be dropped.
print("batch size:3 drop remainder:True")
for data in ds3.create_dict_iterator():
    print(data["data"])
```
The output is as follows:
```
batch size:3 drop remainder:True
[[0 1 2]
 [1 2 3]
 [2 3 4]]
```
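Note that the order of `batch` and `repeat` matters. A small sketch, reusing `generator_func` from above (the dataset names `ds4` and `ds5` are illustrative only):

```python
# batch then repeat: the remainder batch [4 5 6] appears once in each epoch.
ds4 = ds.GeneratorDataset(generator_func, ["data"]).batch(2).repeat(2)
# repeat then batch: both epochs are batched as one stream, so a batch
# may contain records from two different epochs.
ds5 = ds.GeneratorDataset(generator_func, ["data"]).repeat(2).batch(2)
```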
### shuffle
You can shuffle ordered or repeated datasets.

![avatar](../images/shuffle.png)

The shuffle operation is used to randomize the order of the data. A larger `buffer_size` produces a more thorough shuffle, but consumes more time and computing resources.
The definition of `shuffle` is as follows:
```python
def shuffle(self, buffer_size):
```
Call `shuffle` to shuffle the dataset `ds1`. The sample code is as follows:

```python
print("Before shuffle:")
for data in ds1.create_dict_iterator():
    print(data["data"])

ds2 = ds1.shuffle(buffer_size=5)
print("After shuffle:")
for data in ds2.create_dict_iterator():
    print(data["data"])
```
A possible output is as follows. After data is shuffled, the data sequence changes randomly.
```
Before shuffle:
[0 1 2]
[1 2 3]
[2 3 4]
[3 4 5]
[4 5 6]
After shuffle:
[3 4 5]
[2 3 4]
[4 5 6]
[1 2 3]
[0 1 2]
```
### map
The map operation is used to process data. For example, you can convert a dataset of color images into a dataset of grayscale images. You can flexibly perform the operation as required.
MindSpore provides the `map` function to map datasets. You can apply the provided functions or operators to the specified column data.
You can customize the function or use `c_transforms` or `py_transforms` for data augmentation.
> For details about data augmentation operations, see the Data Augmentation section.

![avatar](../images/map.png)

The definition of `map` is as follows:

```python
def map(self, input_columns=None, operations=None, output_columns=None, columns_order=None,
        num_parallel_workers=None):
```
In the following example, the `map` function is used to apply the defined anonymous function (lambda function) to the dataset `ds1` so that the data values in the dataset are multiplied by 2.
```python
func = lambda x: x*2  # Define lambda function to multiply each element by 2.
ds2 = ds1.map(operations=func, input_columns="data")
for data in ds2.create_dict_iterator():
    print(data["data"])
```
The code output is as follows. The data values in each row of the dataset `ds2` are multiplied by 2.
```
[0 2 4]
[2 4 6]
[4 6 8]
[6 8 10]
[8 10 12]
```
### zip
MindSpore provides the `zip` function to combine multiple datasets into one dataset.

![avatar](../images/zip.png)

> If the column names in the two datasets are the same, the two datasets cannot be combined. Therefore, pay attention to column names.
> If the number of rows in the two datasets is different, the number of rows after combination is the same as the smaller number.
```python
def zip(self, datasets):
```
1. Use the preceding construction method of the dataset `ds1` to construct the dataset `ds2`.
    ```python
    def generator_func2():
        for i in range(5):
            yield (np.array([i-3, i-2, i-1]),)

    ds2 = ds.GeneratorDataset(generator_func2, ["data2"])
    ```

2. Use `zip()` to combine the `data` column of the dataset `ds1` and the `data2` column of the dataset `ds2` into the dataset `ds3`.
    ```python
    ds3 = ds.zip((ds1, ds2))
    for data in ds3.create_dict_iterator():
        print(data)
    ```
    The output is as follows:
    ```
    {'data': array([0, 1, 2], dtype=int64), 'data2': array([-3, -2, -1], dtype=int64)}
    {'data': array([1, 2, 3], dtype=int64), 'data2': array([-2, -1, 0], dtype=int64)}
    {'data': array([2, 3, 4], dtype=int64), 'data2': array([-1, 0, 1], dtype=int64)}
    {'data': array([3, 4, 5], dtype=int64), 'data2': array([0, 1, 2], dtype=int64)}
    {'data': array([4, 5, 6], dtype=int64), 'data2': array([1, 2, 3], dtype=int64)}
    ```
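Once two datasets are zipped, `map` can also consume several of the columns at once. The following is a rough sketch (the `sum` output column name is illustrative, and the exact multi-column behavior may vary by version):

```python
# Add the two zipped columns of ds3 element-wise into a new "sum" column.
add_func = lambda x, y: x + y
ds_sum = ds3.map(operations=add_func, input_columns=["data", "data2"], output_columns=["sum"])
for data in ds_sum.create_dict_iterator():
    print(data["sum"])
```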
## Data Augmentation
During image training, especially when the dataset size is relatively small, you can preprocess images with a series of data augmentation operations, thereby enriching the dataset.
MindSpore provides the `c_transforms` and `py_transforms` modules for users to perform data augmentation. You can also customize functions or operators to perform data augmentation. The following table describes the two modules provided by MindSpore. For details, see the related description in the API reference document.

| Module | Implementation | Description |
| ---------------| ------------------------------------------------------ | --- |
| `c_transforms` | C++-based [OpenCV](https://opencv.org/) implementation | High performance. |
| `py_transforms` | Python-based [PIL](https://pypi.org/project/Pillow/) implementation | This module provides multiple image augmentation functions and the method for converting between PIL images and NumPy arrays. |

For users who would like to use Python PIL in image learning tasks, the `py_transforms` module is a good tool for image augmentation. You can use Python PIL to customize extensions.
Data augmentation requires the `map` function. For details about how to use the `map` function, see [map](#map).

### Using the `c_transforms` Module

1. Import the module to the code.
    ```python
    import mindspore.dataset as ds
    from mindspore.dataset.vision import Inter
    import mindspore.dataset.vision.c_transforms as transforms
    import matplotlib.pyplot as plt
    ```
2. Define data augmentation operators. The following uses `Resize` as an example:
    ```python
    # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
    DATA_DIR = "/path/to/imagefolder_directory"
    dataset = ds.ImageFolderDataset(DATA_DIR, decode=True)  # Decode images.
    resize_op = transforms.Resize(size=(500,500), interpolation=Inter.LINEAR)
    dataset = dataset.map(operations=resize_op, input_columns="image")  # Assign the result back so that Resize takes effect.

    for data in dataset.create_dict_iterator():
        imgplot_resized = plt.imshow(data["image"])
        plt.show()
    ```
The running result shows that the original image is changed from 1024 x 683 pixels to 500 x 500 pixels after data processing by using `Resize`.

![avatar](../images/image.png)

Figure 1: Original image

![avatar](../images/image_resized.png)

Figure 2: Resized image

### Using the `py_transforms` Module

1. Import the module to the code.
    ```python
    import mindspore.dataset as ds
    import mindspore.dataset.vision.py_transforms as transforms
    from mindspore.dataset.transforms.py_transforms import Compose
    import matplotlib.pyplot as plt
    ```
2. Define data augmentation operators and use the `Compose` API to combine multiple data augmentation operations. The following uses `RandomCrop` as an example:
    ```python
    # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
    DATA_DIR = "/path/to/imagefolder_directory"
    dataset = ds.ImageFolderDataset(DATA_DIR)

    transforms_list = [
        transforms.Decode(),  # Decode images to PIL format.
        transforms.RandomCrop(size=(500,500)),
        transforms.ToTensor()  # Convert PIL images to NumPy ndarray.
    ]
    compose = Compose(transforms_list)
    dataset = dataset.map(operations=compose, input_columns="image")
    for data in dataset.create_dict_iterator():
        print(data["image"])
        imgplot_resized = plt.imshow(data["image"].transpose(1, 2, 0))
        plt.show()
    ```

The running result shows that the original image is changed from 1024 x 683 pixels to 500 x 500 pixels after data processing by using `RandomCrop`.

![avatar](../images/image.png)

Figure 1: Original image

![avatar](../images/image_random_crop.png)

Figure 2: 500 x 500 image that is randomly cropped from the original image
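Several `c_transforms` operators can also be chained in one `map` call, keeping the whole augmentation pipeline in C++. A brief sketch, using the same placeholder directory as above:

```python
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision

DATA_DIR = "/path/to/imagefolder_directory"
dataset = ds.ImageFolderDataset(DATA_DIR)
ops = [
    c_vision.Decode(),                 # decode raw image bytes
    c_vision.Resize(size=(256, 256)),  # resize the decoded image
    c_vision.CenterCrop(size=224)      # crop the central 224 x 224 region
]
dataset = dataset.map(operations=ops, input_columns="image")
```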
diff --git a/tutorials/source_en/use/data_preparation/loading_the_datasets.md b/tutorials/source_en/use/data_preparation/loading_the_datasets.md
deleted file mode 100644
index ad339502f6e2b8353adab2536e1475166947cfb3..0000000000000000000000000000000000000000
--- a/tutorials/source_en/use/data_preparation/loading_the_datasets.md
+++ /dev/null
@@ -1,262 +0,0 @@
# Loading the Dataset

`Linux` `Ascend` `GPU` `CPU` `Data Preparation` `Beginner` `Intermediate` `Expert`

- [Loading the Dataset](#loading-the-dataset)
    - [Overview](#overview)
    - [Loading Common Datasets](#loading-common-datasets)
    - [Loading Datasets of a Specific Data Format](#loading-datasets-of-a-specific-data-format)
        - [MindSpore Data Format](#mindspore-data-format)
        - [`Manifest` Data Format](#manifest-data-format)
        - [`TFRecord` Data Format](#tfrecord-data-format)
    - [Loading a Custom Dataset](#loading-a-custom-dataset)

## Overview

MindSpore helps you load common datasets, datasets of specific data formats, or custom datasets. Before loading a dataset, you need to import the required library `mindspore.dataset`.
```python
import mindspore.dataset as ds
```

## Loading Common Datasets
MindSpore can load common standard datasets. The following table lists the supported datasets:

| Dataset | Description |
| --------- | -------------------------------------------------------------------------------------------------------------------------- |
| ImageNet | An image database organized based on the WordNet hierarchical structure. Each node in the hierarchy is represented by hundreds of images. |
| MNIST | A large database of handwritten digit images, which is usually used to train various image processing systems. |
| CIFAR-10 | A collection of images that are commonly used to train machine learning and computer vision algorithms. The CIFAR-10 dataset contains 60,000 32x32 color images in 10 different classes. |
| CIFAR-100 | The dataset is similar to CIFAR-10. The difference is that this dataset has 100 classes, and each class contains 600 images, including 500 training images and 100 test images. |
| PASCAL-VOC | The data content is diversified and can be used to train computer vision models (such as classification, positioning, detection, segmentation, and action recognition). |
| CelebA | The CelebA face dataset contains tens of thousands of face images of celebrities with 40 attribute annotations, which are usually used for face-related training tasks. |

The procedure for loading common datasets is as follows. The following describes how to create a `Cifar10Dataset` object to load a supported dataset.

1. Download and decompress the [CIFAR-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz). The dataset in binary format (CIFAR-10 binary version) is used.
2. Configure the dataset directory and define the dataset instance to be loaded.
    ```python
    DATA_DIR = "cifar10_dataset_dir/"

    cifar10_dataset = ds.Cifar10Dataset(DATA_DIR)
    ```
3. Create an iterator and read data through the iterator.
    ```python
    for data in cifar10_dataset.create_dict_iterator():
        # In CIFAR-10 dataset, each dictionary of data has keys "image" and "label".
        print(data["image"])
        print(data["label"])
    ```
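To sanity-check the loaded samples visually, you can plot one with matplotlib — a minimal sketch, assuming the `cifar10_dataset` instance created above:

```python
import matplotlib.pyplot as plt

# Show the first image and its label, then stop (a sketch).
for data in cifar10_dataset.create_dict_iterator():
    plt.imshow(data["image"])
    plt.title("label: {}".format(data["label"]))
    plt.show()
    break
```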
## Loading Datasets of a Specific Data Format

### MindSpore Data Format
MindSpore supports reading datasets stored in the MindSpore data format, that is, `MindRecord` files. Because the format is designed for MindSpore, reading it can often provide better performance and characteristics.
> For details about how to convert datasets to the MindSpore data format, see [Converting the Dataset to MindSpore Data Format](converting_datasets.md).

To read a dataset using the `MindDataset` object, perform the following steps:

1. Create `MindDataset` for reading data.
    ```python
    import os
    CV_FILE_NAME = os.path.join(MODULE_PATH, "./imagenet.mindrecord")
    data_set = ds.MindDataset(dataset_file=CV_FILE_NAME)
    ```
    In the preceding information:
    `dataset_file`: specifies the MindRecord file or the list of MindRecord files.

2. Create a dictionary iterator and read data records through the iterator.
    ```python
    num_iter = 0
    for data in data_set.create_dict_iterator():
        print(data["label"])
        num_iter += 1
    ```

### `Manifest` Data Format
`Manifest` is a data format file supported by Huawei ModelArts. For details, see .

MindSpore provides dataset classes for datasets in `Manifest` format. Run the following commands to configure the dataset directory and define the dataset instance to be loaded:
```python
DATA_DIR = "manifest_dataset_path"

manifest_dataset = ds.ManifestDataset(DATA_DIR)
```
Currently, ManifestDataset supports only datasets of images and labels. The default column names are 'image' and 'label'.

### `TFRecord` Data Format
MindSpore can also read datasets in the `TFRecord` data format through the `TFRecordDataset` object.

1. Input the dataset path or the .tfrecord file list to create the `TFRecordDataset`.
    ```python
    DATA_DIR = ["tfrecord_dataset_path/train-0000-of-0001.tfrecord"]

    dataset = ds.TFRecordDataset(DATA_DIR)
    ```

2. Create schema files or schema classes to set the dataset format and features.

    The following is an example of the schema file:

    ```
    {
     "datasetType": "TF",
     "numRows": 3,
     "columns": {
         "image": {
             "type": "uint8",
             "rank": 1
         },
         "label" : {
             "type": "int64",
             "rank": 1
         }
     }
    }
    ```
    In the preceding information:
    `datasetType`: data format. TF indicates the TFRecord data format.
    `columns`: column information field, which is defined based on the actual column names of the dataset. In the preceding schema file example, the dataset columns are 'image' and 'label'.
    `numRows`: row information field, which controls the maximum number of rows for loading data. If the number of defined rows is greater than the actual number of rows, the actual number of rows prevails during loading.

    When creating the TFRecordDataset, input the schema file path. An example is as follows:
    ```python
    DATA_DIR = ["tfrecord_dataset_path/train-0000-of-0001.tfrecord"]
    SCHEMA_DIR = "dataset_schema_path/schema.json"

    dataset = ds.TFRecordDataset(DATA_DIR, schema=SCHEMA_DIR)
    ```

    An example of creating a schema class is as follows:
    ```python
    import mindspore.common.dtype as mstype
    schema = ds.Schema()
    schema.add_column('image', de_type=mstype.uint8)  # Binary data usually use uint8 here.
    schema.add_column('label', de_type=mstype.int32)

    dataset = ds.TFRecordDataset(DATA_DIR, schema=schema)
    ```
3. Create a dictionary iterator and read data through the iterator.
    ```python
    for data in dataset.create_dict_iterator():
        # The dictionary of data has keys "image" and "label" which are consistent with the column names in its schema.
        print(data["image"])
        print(data["label"])
    ```

## Loading a Custom Dataset
In real scenarios, datasets vary widely. For a custom dataset, or a dataset that cannot be loaded directly by the APIs, there are two options.
One is to convert the dataset to the MindSpore data format (for details, see [Converting Datasets to the Mindspore Data Format](https://www.mindspore.cn/tutorial/en/master/use/data_preparation/converting_datasets.html)). The other is to use the `GeneratorDataset` object.
The following shows how to use `GeneratorDataset`.

1. Define an iterable object to generate a dataset. Two examples follow: a custom function that contains `yield`, and a custom class that implements `__getitem__`.
   Both of them generate a dataset with numbers from 0 to 9.
    > The custom iterable object returns a tuple of `numpy arrays` as a row of data each time.

    An example of a custom function is as follows:
    ```python
    import numpy as np  # Import numpy lib.
    def generator_func(num):
        for i in range(num):
            yield (np.array([i]),)  # Note: a tuple with only one element needs a trailing comma.
    ```
    An example of a custom class is as follows:
    ```python
    import numpy as np  # Import numpy lib.
    class Generator():

        def __init__(self, num):
            self.num = num

        def __getitem__(self, item):
            return (np.array([item]),)  # Note: a tuple with only one element needs a trailing comma.

        def __len__(self):
            return self.num
    ```

2. Create a dataset with `GeneratorDataset`. Pass `generator_func` to `GeneratorDataset` to create `dataset1`, and pass a `Generator` instance to create `dataset2`; in both cases, set `column_names` to `["data"]`.
    ```python
    dataset1 = ds.GeneratorDataset(source=generator_func(10), column_names=["data"], shuffle=False)
    dataset2 = ds.GeneratorDataset(source=Generator(10), column_names=["data"], shuffle=False)
    ```

3. After creating a dataset, create an iterator for the dataset to obtain the corresponding data. Iterator creation methods are as follows:
    - Create an iterator whose return value is a sequence type. As shown in the following, create the iterators for `dataset1` and `dataset2`, and print the output.
    ```python
    print("dataset1:")
    for data in dataset1.create_tuple_iterator():  # each data is a sequence
        print(data[0])

    print("dataset2:")
    for data in dataset2.create_tuple_iterator():  # each data is a sequence
        print(data[0])
    ```
    The output is as follows:
    ```
    dataset1:
    [array([0], dtype=int64)]
    [array([1], dtype=int64)]
    [array([2], dtype=int64)]
    [array([3], dtype=int64)]
    [array([4], dtype=int64)]
    [array([5], dtype=int64)]
    [array([6], dtype=int64)]
    [array([7], dtype=int64)]
    [array([8], dtype=int64)]
    [array([9], dtype=int64)]
    dataset2:
    [array([0], dtype=int64)]
    [array([1], dtype=int64)]
    [array([2], dtype=int64)]
    [array([3], dtype=int64)]
    [array([4], dtype=int64)]
    [array([5], dtype=int64)]
    [array([6], dtype=int64)]
    [array([7], dtype=int64)]
    [array([8], dtype=int64)]
    [array([9], dtype=int64)]
    ```

    - Create an iterator whose return value is a dictionary type. As shown in the following, create the iterators for `dataset1` and `dataset2`, and print the output.
    ```python
    print("dataset1:")
    for data in dataset1.create_dict_iterator():  # each data is a dictionary
        print(data["data"])

    print("dataset2:")
    for data in dataset2.create_dict_iterator():  # each data is a dictionary
        print(data["data"])
    ```
    The output is as follows:
    ```
    dataset1:
    {'data': array([0], dtype=int64)}
    {'data': array([1], dtype=int64)}
    {'data': array([2], dtype=int64)}
    {'data': array([3], dtype=int64)}
    {'data': array([4], dtype=int64)}
    {'data': array([5], dtype=int64)}
    {'data': array([6], dtype=int64)}
    {'data': array([7], dtype=int64)}
    {'data': array([8], dtype=int64)}
    {'data': array([9], dtype=int64)}
    dataset2:
    {'data': array([0], dtype=int64)}
    {'data': array([1], dtype=int64)}
    {'data': array([2], dtype=int64)}
    {'data': array([3], dtype=int64)}
    {'data': array([4], dtype=int64)}
    {'data': array([5], dtype=int64)}
    {'data': array([6], dtype=int64)}
    {'data': array([7], dtype=int64)}
    {'data': array([8], dtype=int64)}
    {'data': array([9], dtype=int64)}
    ```
\ No newline at end of file
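`GeneratorDataset` is not limited to one column per row. A short sketch of a two-column generator (the column names `x` and `y` are illustrative only):

```python
import numpy as np
import mindspore.dataset as ds

def generator_multi(num):
    for i in range(num):
        yield (np.array([i]), np.array([i * 2]))  # one row, two columns

dataset3 = ds.GeneratorDataset(source=generator_multi(3), column_names=["x", "y"], shuffle=False)
for data in dataset3.create_dict_iterator():
    print(data["x"], data["y"])
```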
diff --git a/tutorials/source_en/use/image_loading.md b/tutorials/source_en/use/image_loading.md
new file mode 100644
index 0000000000000000000000000000000000000000..3993ceacf93526f96f95abd41d145fa9f591be8b
--- /dev/null
+++ b/tutorials/source_en/use/image_loading.md
@@ -0,0 +1 @@
+# Load Image Dataset
diff --git a/tutorials/source_en/use/images/batch.png b/tutorials/source_en/use/images/batch.png
deleted file mode 100644
index 9d328ec0e00fca6812c3b26109ba86f0a0e13d51..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/batch.png and /dev/null differ
diff --git a/tutorials/source_en/use/images/dataset_pipeline.png b/tutorials/source_en/use/images/dataset_pipeline.png
deleted file mode 100644
index 8d1fea9a77621077af2ca66c976c2cdd6d492a96..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/dataset_pipeline.png and /dev/null differ
diff --git a/tutorials/source_en/use/images/image.png b/tutorials/source_en/use/images/image.png
deleted file mode 100644
index dd20a4be5f2b0de0e59e8bacbc0f536b1ca59356..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/image.png and /dev/null differ
diff --git a/tutorials/source_en/use/images/image_random_crop.png b/tutorials/source_en/use/images/image_random_crop.png
deleted file mode 100644
index dff63b81b87dd3a5711c7a20dae0974a105df179..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/image_random_crop.png and /dev/null differ
diff --git a/tutorials/source_en/use/images/image_resized.png b/tutorials/source_en/use/images/image_resized.png
deleted file mode 100644
index e42005b0d1b2a3217b338c2dca3708d90f46ad70..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/image_resized.png and /dev/null differ
diff --git a/tutorials/source_en/use/images/map.png b/tutorials/source_en/use/images/map.png
deleted file mode 100644
index 91ca6b81daa1febea6067e6c8160c46848f1e09f..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/map.png and /dev/null differ
diff --git a/tutorials/source_en/use/images/mnist_5.png b/tutorials/source_en/use/images/mnist_5.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6ab8189e759f47b890e96b01cca8573774dada3
Binary files /dev/null and b/tutorials/source_en/use/images/mnist_5.png differ
diff --git a/tutorials/source_en/use/images/mnist_5_resize_crop.png b/tutorials/source_en/use/images/mnist_5_resize_crop.png
new file mode 100644
index 0000000000000000000000000000000000000000..084404666feaa0ef1c22384f7525003c3981577c
Binary files /dev/null and b/tutorials/source_en/use/images/mnist_5_resize_crop.png differ
diff --git a/tutorials/source_en/use/images/repeat.png b/tutorials/source_en/use/images/repeat.png
deleted file mode 100644
index 3446d1805cd8f7e074b56f7abeb2e4f2d609f719..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/repeat.png and /dev/null differ
diff --git a/tutorials/source_en/use/images/shuffle.png b/tutorials/source_en/use/images/shuffle.png
deleted file mode 100644
index c31b9a250e61e91898429fd680ba37cfaa585c65..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/shuffle.png and /dev/null differ
diff --git a/tutorials/source_en/use/images/zip.png b/tutorials/source_en/use/images/zip.png
deleted file mode 100644
index c9555c175e4d422cd4b6047a76a940e61e7d907c..0000000000000000000000000000000000000000
Binary files a/tutorials/source_en/use/images/zip.png and /dev/null differ
diff --git a/tutorials/source_en/use/multi_platform_inference.md b/tutorials/source_en/use/multi_platform_inference.md
index 15b2ce276ea41856f8d2c10661fa7732054c12cc..28a9c4f887677927274bed78b1ec74cd0760de9c 100644
--- a/tutorials/source_en/use/multi_platform_inference.md
+++ b/tutorials/source_en/use/multi_platform_inference.md
@@ -80,19 +80,18 @@ MindSpore supports the following inference scenarios based on the hardware platf
    `model.eval` is an API for model validation. For details about the API, see .
    > Inference sample code: .

-    1.2 Remote Storage
+    1.2 Load from MindSpore Hub

-    When the pre-trained models are saved remotely, the steps of performing inference on validation dataset are as follows: firstly creating a model, then loading model and parameters using `hub.load_weights`, and finally performing inference on validation dataset once created. The processing method of the validation dataset is the same as that of the training dataset.
+    When the models are saved in MindSpore Hub, the steps of performing inference on the validation dataset are as follows: first determine which model to use, then load the model and parameters using `mindspore_hub.load`, and finally perform inference on the created validation dataset. The processing method of the validation dataset is the same as that of the training dataset.

    ```python
-    network = LeNet5(cfg.num_classes)
+    model_uid = "mindspore/ascend/0.7/googlenet_v1_cifar10"  # using GoogleNet as an example.
+    network = mindspore_hub.load(model_uid, num_classes=10)
     net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
     model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

     print("============== Starting Testing ==============")
-    hub.load_weights(network, network_name="lenet", **{"device_target":
-                     "ascend", "dataset":"mnist", "version": "0.5.0"})
     dataset = create_dataset(os.path.join(args.data_path, "test"),
                              cfg.batch_size,
                              1)
@@ -101,7 +100,7 @@ MindSpore supports the following inference scenarios based on the hardware platf
    ```
    In the preceding information:
-    `hub.load_weights` is an API for loading model parameters. PLease check the details in .
+    `mindspore_hub.load` is an API for loading model parameters. Please check the details in .

2. Use the `model.predict` API to perform inference.
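The snippet for this step is cut off by the hunk above. For orientation, a minimal sketch of `model.predict` (the input shape is a hypothetical placeholder, and `model` is the `Model` instance built in step 1.2):

```python
import numpy as np
from mindspore import Tensor

# A sketch only: shape [1, 3, 224, 224] is a placeholder input shape.
input_data = Tensor(np.random.uniform(0.0, 1.0, size=[1, 3, 224, 224]).astype(np.float32))
output = model.predict(input_data)
print(output)
```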
   ```python
diff --git a/tutorials/source_en/use/saving_and_loading_model_parameters.md b/tutorials/source_en/use/saving_and_loading_model_parameters.md
index c361519ff9ac84c39be01ae2329190ad0c7265f9..e86ddae4e6bbc32c4ec480a138ef6664cea5e0a2 100644
--- a/tutorials/source_en/use/saving_and_loading_model_parameters.md
+++ b/tutorials/source_en/use/saving_and_loading_model_parameters.md
@@ -117,7 +117,7 @@ resnet = ResNet50()
 load_checkpoint("resnet50-2_32.ckpt", net=resnet)
 dataset_eval = create_dataset(os.path.join(mnist_path, "test"), 32, 1) # define the test dataset
 loss = CrossEntropyLoss()
-model = Model(resnet, loss)
+model = Model(resnet, loss, metrics={"accuracy"})
 acc = model.eval(dataset_eval)
 ```
@@ -149,6 +149,9 @@ The `load_checkpoint` method returns a parameter dictionary and then the `load_p
 When you have a CheckPoint file, if you want to do inference, you need to generate corresponding models based on the network and CheckPoint. The `export` interface supports exporting multiple types of model file formats for inference on different hardware platforms.

+> `input` is the input parameter of the `export` method, representing the input of the network. If the network has multiple inputs, they need to be passed to the `export` method together,
+> e.g. `export(network, Tensor(input1), Tensor(input2), file_name='network.mindir', file_format='MINDIR')`.
+
 ### Export AIR Model

 AIR format file only supports Ascend AI processor. The code example of exporting this format file is as follows:
@@ -161,8 +164,8 @@ resnet = ResNet50()
 param_dict = load_checkpoint("resnet50-2_32.ckpt")
 # load the parameter into net
 load_param_into_net(resnet, param_dict)
-input = np.random.uniform(0.0, 1.0, size = [32, 3, 224, 224]).astype(np.float32)
-export(resnet, Tensor(input), file_name = 'resnet50-2_32.air', file_format = 'AIR')
+input = np.random.uniform(0.0, 1.0, size=[32, 3, 224, 224]).astype(np.float32)
+export(resnet, Tensor(input), file_name='resnet50-2_32.air', file_format='AIR')
 ```

 Before using the `export` interface, you need to import `mindspore.train.serialization`.
@@ -183,8 +186,8 @@ resnet = ResNet50()
 param_dict = load_checkpoint("resnet50-2_32.ckpt")
 # load the parameter into net
 load_param_into_net(resnet, param_dict)
-input = np.random.uniform(0.0, 1.0, size = [32, 3, 224, 224]).astype(np.float32)
-export(resnet, Tensor(input), file_name = 'resnet50-2_32.onnx', file_format = 'ONNX')
+input = np.random.uniform(0.0, 1.0, size=[32, 3, 224, 224]).astype(np.float32)
+export(resnet, Tensor(input), file_name='resnet50-2_32.onnx', file_format='ONNX')
 ```

 It is recommended to use '.onnx' as the suffix of ONNX format files.
@@ -204,8 +207,8 @@ resnet = ResNet50()
 param_dict = load_checkpoint("resnet50-2_32.ckpt")
 # load the parameter into net
 load_param_into_net(resnet, param_dict)
-input = np.random.uniform(0.0, 1.0, size = [32, 3, 224, 224]).astype(np.float32)
-export(resnet, Tensor(input), file_name = 'resnet50-2_32.mindir', file_format = 'MINDIR')
+input = np.random.uniform(0.0, 1.0, size=[32, 3, 224, 224]).astype(np.float32)
+export(resnet, Tensor(input), file_name='resnet50-2_32.mindir', file_format='MINDIR')
 ```

-It is recommended to use '.mindir' as the suffix of MINDIR format files.
+It is recommended to use '.mindir' as the suffix of MINDIR format files.
\ No newline at end of file diff --git a/tutorials/source_en/use/text_loading.md b/tutorials/source_en/use/text_loading.md new file mode 100644 index 0000000000000000000000000000000000000000..121e6699daf57ff6b8063cecda627730a0df9dd3 --- /dev/null +++ b/tutorials/source_en/use/text_loading.md @@ -0,0 +1 @@ +# Load Text Dataset diff --git a/tutorials/source_zh_cn/_static/logo_notebook.png b/tutorials/source_zh_cn/_static/logo_notebook.png index 8b60a39049880c74956d5e37c985ebfd7f401d5d..18c2e29e4b73ee428f70253feffdd855fdf0c422 100644 Binary files a/tutorials/source_zh_cn/_static/logo_notebook.png and b/tutorials/source_zh_cn/_static/logo_notebook.png differ diff --git a/tutorials/source_zh_cn/_static/logo_online_experience.png b/tutorials/source_zh_cn/_static/logo_online_experience.png new file mode 100644 index 0000000000000000000000000000000000000000..1814278d9e3a386ad319eaa099bda518d76f7be1 Binary files /dev/null and b/tutorials/source_zh_cn/_static/logo_online_experience.png differ diff --git a/tutorials/source_zh_cn/_static/logo_source.png b/tutorials/source_zh_cn/_static/logo_source.png index fc347d271abe082ae8d16242328551648766b6fb..880f2bc87172daf487654c0ba4f1657c672bd2b8 100644 Binary files a/tutorials/source_zh_cn/_static/logo_source.png and b/tutorials/source_zh_cn/_static/logo_source.png differ diff --git a/tutorials/source_zh_cn/advanced_use/auto_augmentation.md b/tutorials/source_zh_cn/advanced_use/auto_augmentation.md index c8d4fefc225f0111614d488ba73341e4d8d5b16f..546faee4dcafe8a168a1098f603c86465ea8b9a4 100644 --- a/tutorials/source_zh_cn/advanced_use/auto_augmentation.md +++ b/tutorials/source_zh_cn/advanced_use/auto_augmentation.md @@ -1,6 +1,6 @@ # 自动数据增强 -`Ascend` `GPU` `CPU` `中级` `高级` +`Linux` `Ascend` `GPU` `CPU` `数据准备` `中级` `高级` @@ -15,11 +15,12 @@ ## 概述 -AutoAugment是在一系列图像增强子策略的搜索空间中通过搜索算法找到适合特定数据集的图像增强方案,针对ImageNet数据集的数据增强增强策略包含25条子策略,每条子策略中包含两种变换,针对一个batch中的每张图像随机挑选一个子策略的组合,以预定的概率来决定是否执行子策略中的每种变换。 +自动数据增强(AutoAugment)[1]是在一系列图像增强子策略的搜索空间中,通过搜索算法找到适合特定数据集的图像增强方案。MindSpore的`c_transforms`模块提供了丰富的C++算子来实现AutoAugment,用户也可以自定义函数或者算子来实现。 +更多MindSpore算子的详细说明参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.vision.html)。 -MindSpore的`c_transforms`模块提供了丰富的c_vision++算子来实现AutoAugment,用户也可以自定义函数或者算子来实现。更多MindSpore算子的详细说明参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.vision.html)。 +MindSpore算子和AutoAugment中的算子的对应关系如下: -| AutoAugment增强算子 | MindSpore算子 |描述 | +| AutoAugment算子 | MindSpore算子 |描述 | |:-------------------:|:------|--------------| |shearX|RandomAffine|横向剪切| |shearY|RandomAffine|纵向剪切| @@ -36,115 +37,25 @@ MindSpore的`c_transforms`模块提供了丰富的c_vision++算子来实现AutoA |equalize|Equalize|均衡图像直方图| |invert|Invert|反转图像| -ImageNet数据集的增强策略定义如下: - -```python -# define autoAugment operators - -PARAMETER_MAX = 10 - -def float_parameter(level, maxval): - return float(level) * maxval / PARAMETER_MAX - -def int_parameter(level, maxval): - return int(level * maxval / PARAMETER_MAX) - -def shear_x(level): - v = float_parameter(level, 0.3) - return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, shear=(-v,-v)), c_vision.RandomAffine(degrees=0, shear=(v, v))]) - -def shear_y(level): - v = float_parameter(level, 0.3) - return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, shear=(0, 0, -v,-v)), c_vision.RandomAffine(degrees=0, shear=(0, 0, v, v))]) - -def translate_x(level): - v = float_parameter(level, 150 / 331) - return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, 
translate=(-v,-v)), c_vision.RandomAffine(degrees=0, translate=(v, v))]) - -def translate_y(level): - v = float_parameter(level, 150 / 331) - return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, translate=(0, 0, -v,-v)), c_vision.RandomAffine(degrees=0, translate=(0, 0, v, v))]) - -def color_impl(level): - v = float_parameter(level, 1.8) + 0.1 - return c_vision.RandomColor(degrees=(v, v)) - -def rotate_impl(level): - v = int_parameter(level, 30) - return c_transforms.RandomChoice([c_vision.RandomRotation(degrees=(-v, -v)), c_vision.RandomRotation(degrees=(v, v))]) - -def solarize_impl(level): - level = int_parameter(level, 256) - v = 256 - level - return c_vision.RandomSolarize(threshold=(0, v)) - -def posterize_impl(level): - level = int_parameter(level, 4) - v = 4 - level - return c_vision.RandomPosterize(bits=(v, v)) - -def contrast_impl(level): - v = float_parameter(level, 1.8) + 0.1 - return c_vision.RandomColorAdjust(contrast=(v, v)) - -def autocontrast_impl(level): - return c_vision.AutoContrast() - -def sharpness_impl(level): - v = float_parameter(level, 1.8) + 0.1 - return c_vision.RandomSharpness(degrees=(v, v)) - -def brightness_impl(level): - v = float_parameter(level, 1.8) + 0.1 - return c_vision.RandomColorAdjust(brightness=(v, v)) - -# define AutoAugment policy -imagenet_policy = [ - [(posterize_impl(8), 0.4), (rotate_impl(9), 0.6)], - [(solarize_impl(5), 0.6), (autocontrast_impl(5), 0.6)], - [(c_vision.Equalize(), 0.8), (c_vision.Equalize(), 0.6)], - [(posterize_impl(7), 0.6), (posterize_impl(6), 0.6)], - [(c_vision.Equalize(), 0.4), (solarize_impl(4), 0.2)], - - [(c_vision.Equalize(), 0.4), (rotate_impl(8), 0.8)], - [(solarize_impl(3), 0.6), (c_vision.Equalize(), 0.6)], - [(posterize_impl(5), 0.8), (c_vision.Equalize(), 1.0)], - [(rotate_impl(3), 0.2), (solarize_impl(8), 0.6)], - [(c_vision.Equalize(), 0.6), (posterize_impl(6), 0.4)], - - [(rotate_impl(8), 0.8), (color_impl(0), 0.4)], - [(rotate_impl(9), 0.4), (c_vision.Equalize(), 0.6)], - [(c_vision.Equalize(), 0.0), (c_vision.Equalize(), 0.8)], - [(c_vision.Invert(), 0.6), (c_vision.Equalize(), 1.0)], - [(color_impl(4), 0.6), (contrast_impl(8), 1.0)], - - [(rotate_impl(8), 0.8), (color_impl(2), 1.0)], - [(color_impl(8), 0.8), (solarize_impl(7), 0.8)], - [(sharpness_impl(7), 0.4), (c_vision.Invert(), 0.6)], - [(shear_x(5), 0.6), (c_vision.Equalize(), 1.0)], - [(color_impl(0), 0.4), (c_vision.Equalize(), 0.6)], - - [(c_vision.Equalize(), 0.4), (solarize_impl(4), 0.2)], - [(solarize_impl(5), 0.6), (autocontrast_impl(5), 0.6)], - [(c_vision.Invert(), 0.6), (c_vision.Equalize(), 1.0)], - [(color_impl(4), 0.6), (contrast_impl(8), 1.0)], - [(c_vision.Equalize(), 0.8), (c_vision.Equalize(), 0.6)], - ] -``` - ## ImageNet自动数据增强 +本教程以在ImageNet数据集上实现AutoAugment作为示例。 + +针对ImageNet数据集的数据增强策略包含25条子策略,每条子策略中包含两种变换,针对一个batch中的每张图像随机挑选一个子策略的组合,以预定的概率来决定是否执行子策略中的每种变换。 -用户可以使用Mindspore提供的`RandomSelectSubpolicy`接口来实现自动数据增强, +用户可以使用MindSpore中`c_transforms`模块的`RandomSelectSubpolicy`接口来实现AutoAugment, 在ImageNet分类训练中标准的数据增强方式分以下几个步骤: -1. `RandomCropDecodeResize`:随机裁剪后进行解码。 -2. `RandomHorizontalFlip`:水平方向上随机翻转。 -3. `Normalize`:归一化。 -4. `HWC2CHW`:shape变换。 +- `RandomCropDecodeResize`:随机裁剪后进行解码。 + +- `RandomHorizontalFlip`:水平方向上随机翻转。 + +- `Normalize`:归一化。 -在步骤1后插入AutoAugment变换,如下所示: +- `HWC2CHW`:图片通道变化。 -1. 引入mindspore数据增强模块。 +在`RandomCropDecodeResize`后插入AutoAugment变换,如下所示: + +1. 引入MindSpore数据增强模块。 ```python import mindspore.common.dtype as mstype @@ -154,12 +65,113 @@ imagenet_policy = [ import matplotlib.pyplot as plt ``` -2. 
在`RandomCropDecodeResize`操作后插入AutoAugment变换。 +2. 定义MindSpore算子到AutoAugment算子的映射: + + ```python + # define AutoAugment operators + + PARAMETER_MAX = 10 + + def float_parameter(level, maxval): + return float(level) * maxval / PARAMETER_MAX + + def int_parameter(level, maxval): + return int(level * maxval / PARAMETER_MAX) + + def shear_x(level): + v = float_parameter(level, 0.3) + return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, shear=(-v,-v)), c_vision.RandomAffine(degrees=0, shear=(v, v))]) + + def shear_y(level): + v = float_parameter(level, 0.3) + return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, shear=(0, 0, -v,-v)), c_vision.RandomAffine(degrees=0, shear=(0, 0, v, v))]) + + def translate_x(level): + v = float_parameter(level, 150 / 331) + return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, translate=(-v,-v)), c_vision.RandomAffine(degrees=0, translate=(v, v))]) + + def translate_y(level): + v = float_parameter(level, 150 / 331) + return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, translate=(0, 0, -v,-v)), c_vision.RandomAffine(degrees=0, translate=(0, 0, v, v))]) + + def color_impl(level): + v = float_parameter(level, 1.8) + 0.1 + return c_vision.RandomColor(degrees=(v, v)) + + def rotate_impl(level): + v = int_parameter(level, 30) + return c_transforms.RandomChoice([c_vision.RandomRotation(degrees=(-v, -v)), c_vision.RandomRotation(degrees=(v, v))]) + + def solarize_impl(level): + level = int_parameter(level, 256) + v = 256 - level + return c_vision.RandomSolarize(threshold=(0, v)) + + def posterize_impl(level): + level = int_parameter(level, 4) + v = 4 - level + return c_vision.RandomPosterize(bits=(v, v)) + + def contrast_impl(level): + v = float_parameter(level, 1.8) + 0.1 + return c_vision.RandomColorAdjust(contrast=(v, v)) + + def autocontrast_impl(level): + return c_vision.AutoContrast() + + def sharpness_impl(level): + v = float_parameter(level, 1.8) + 0.1 + return c_vision.RandomSharpness(degrees=(v, v)) + + def brightness_impl(level): + v = float_parameter(level, 1.8) + 0.1 + return c_vision.RandomColorAdjust(brightness=(v, v)) + + ``` + +3. 
定义ImageNet数据集的AutoAugment策略: + ```python + # define AutoAugment policy + imagenet_policy = [ + [(posterize_impl(8), 0.4), (rotate_impl(9), 0.6)], + [(solarize_impl(5), 0.6), (autocontrast_impl(5), 0.6)], + [(c_vision.Equalize(), 0.8), (c_vision.Equalize(), 0.6)], + [(posterize_impl(7), 0.6), (posterize_impl(6), 0.6)], + [(c_vision.Equalize(), 0.4), (solarize_impl(4), 0.2)], + + [(c_vision.Equalize(), 0.4), (rotate_impl(8), 0.8)], + [(solarize_impl(3), 0.6), (c_vision.Equalize(), 0.6)], + [(posterize_impl(5), 0.8), (c_vision.Equalize(), 1.0)], + [(rotate_impl(3), 0.2), (solarize_impl(8), 0.6)], + [(c_vision.Equalize(), 0.6), (posterize_impl(6), 0.4)], + + [(rotate_impl(8), 0.8), (color_impl(0), 0.4)], + [(rotate_impl(9), 0.4), (c_vision.Equalize(), 0.6)], + [(c_vision.Equalize(), 0.0), (c_vision.Equalize(), 0.8)], + [(c_vision.Invert(), 0.6), (c_vision.Equalize(), 1.0)], + [(color_impl(4), 0.6), (contrast_impl(8), 1.0)], + + [(rotate_impl(8), 0.8), (color_impl(2), 1.0)], + [(color_impl(8), 0.8), (solarize_impl(7), 0.8)], + [(sharpness_impl(7), 0.4), (c_vision.Invert(), 0.6)], + [(shear_x(5), 0.6), (c_vision.Equalize(), 1.0)], + [(color_impl(0), 0.4), (c_vision.Equalize(), 0.6)], + + [(c_vision.Equalize(), 0.4), (solarize_impl(4), 0.2)], + [(solarize_impl(5), 0.6), (autocontrast_impl(5), 0.6)], + [(c_vision.Invert(), 0.6), (c_vision.Equalize(), 1.0)], + [(color_impl(4), 0.6), (contrast_impl(8), 1.0)], + [(c_vision.Equalize(), 0.8), (c_vision.Equalize(), 0.6)], + ] + + ``` + +4. 在`RandomCropDecodeResize`操作后插入AutoAugment变换。 ```python def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, shuffle=True, num_samples=5, target="Ascend"): # create a train or eval imagenet2012 dataset for resnet50 - ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, + ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=shuffle, num_samples=num_samples) image_size = 224 @@ -174,8 +186,6 @@ imagenet_policy = [ post_trans = [ c_vision.RandomHorizontalFlip(prob=0.5), - #c_vision.Normalize(mean=mean, std=std), - #c_vision.HWC2CHW() ] else: trans = [ @@ -185,22 +195,22 @@ imagenet_policy = [ c_vision.Normalize(mean=mean, std=std), c_vision.HWC2CHW() ] - ds = ds.map(input_columns="image", num_parallel_workers=8, operations=trans) + ds = ds.map(operations=trans, input_columns="image") if do_train: - ds = ds.map(input_columns=["image"], operations=c_vision.RandomSelectSubpolicy(imagenet_policy)) - ds = ds.map(input_columns=["image"], num_parallel_workers=8, operations=post_trans) + ds = ds.map(operations=c_vision.RandomSelectSubpolicy(imagenet_policy), input_columns=["image"]) + ds = ds.map(operations=post_trans, input_columns="image") type_cast_op = c_transforms.TypeCast(mstype.int32) - ds = ds.map(input_columns="label", num_parallel_workers=8, operations=type_cast_op) - # apply batch operations + ds = ds.map(operations=type_cast_op, input_columns="label") + # apply batch operation ds = ds.batch(batch_size, drop_remainder=True) - # apply dataset repeat operation + # apply repeat operation ds = ds.repeat(repeat_num) return ds ``` -3. 验证自动数据增强效果。 +5. 验证自动数据增强效果。 ```python # path to imagefolder directory. 
This directory needs to contain sub-directories which contain the images @@ -219,16 +229,16 @@ imagenet_policy = [ step_num += 1 for index in range(rows): fig.add_subplot(rows, columns, ep_num * rows + index + 1) - plt.imshow(data['image'][index]) + plt.imshow(data['image'].asnumpy()[index]) plt.show() ``` - >为了更好演示效果从数据集中只读取5张图片并且不进行`shuffle`,并且为了更好显示图片不进行`Normalize`和`HWC2CHW`操作。 - - 运行结果可以看到,batch中每张图像的增强效果,X方向表示1个batch的5张图像,Y方向表示5个bacth。 + >为了更好演示效果,从数据集中只读取5张图片并且不进行`shuffle`且不进行`Normalize`和`HWC2CHW`操作。 ![augment](./images/auto_augmentation.png) + 运行结果可以看到,batch中每张图像的增强效果,水平方向表示1个batch的5张图像,垂直方向表示5个bacth。 + ## 参考文献 [1] [AutoAugment: Learning Augmentation Policies from Data](https://arxiv.org/abs/1805.09501) diff --git a/tutorials/source_zh_cn/advanced_use/bert_poetry.md b/tutorials/source_zh_cn/advanced_use/bert_poetry.md index 238e968cfab0a324b75a7fbbe798f961db424243..53fabddff172699773bd6dc3f10ca4e449c61d8a 100644 --- a/tutorials/source_zh_cn/advanced_use/bert_poetry.md +++ b/tutorials/source_zh_cn/advanced_use/bert_poetry.md @@ -85,7 +85,7 @@ BERT采用了Encoder结构,`attention_mask`为全1的向量,即每个token ## 样例代码 -样例代码可[点击下载](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com:443/DemoCode/bert_poetry.rar),可直接运行体验实现写诗效果,代码结构如下: +样例代码可[点击下载](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com:443/DemoCode/bert_poetry_c.rar),可直接运行体验实现写诗效果,代码结构如下: ``` └─bert_poetry @@ -96,9 +96,10 @@ BERT采用了Encoder结构,`attention_mask`为全1的向量,即每个token ├── fused_layer_norm.py # 定义fused_layer_norm ├── __init__.py # __init__ ├── utils.py # 定义Fine-tuning正向网络结构 - └── poetry_dataset.py # 部分代码取自[2],解析poetry.txt,生成所需dataset + ├── poetry_utils.py # 分词器 Tokenizer + └── poetry_dataset.py # 解析poetry.txt,生成所需dataset ├── vocab.txt # 词汇表 - ├── generator.py # 部分代码取自[2],推理生成诗句使用函数 + ├── generator.py # 推理生成诗句使用函数 ├── poetry.py # 训练、推理、导出函数 ├── serving ├── ms_serving # 启动服务器侧serving @@ -113,7 +114,7 @@ BERT采用了Encoder结构,`attention_mask`为全1的向量,即每个token ### 基础信息 -基于MindSpore 0.6.0-beta版本,在Ascend 910AI处理器平台上进行训练及推理。 +基于MindSpore 0.7.0-beta版本,在Ascend 910AI处理器平台上进行训练及推理。 ### 数据准备 @@ -125,7 +126,7 @@ pip install bottle 数据集为43030首诗词:可[下载](https://github.com/AaronJny/DeepLearningExamples/tree/master/keras-bert-poetry-generator)其中的`poetry.txt`。 -BERT-Base模型的预训练ckpt:可在[MindSpore官网](https://www.mindspore.cn/docs/zh-CN/master/network_list.html)下载。 +BERT-Base模型的预训练ckpt:可在[MindSpore官网](http://download.mindspore.cn/model_zoo/official/nlp/bert/bert_base_ascend_0.5.0_cn-wiki_official_nlp_20200720.tar.gz)下载。 ### 训练 diff --git a/tutorials/source_zh_cn/advanced_use/cache.md b/tutorials/source_zh_cn/advanced_use/cache.md deleted file mode 100644 index 797cac1e8548c3b899f129efeb962a6c5b3cac7b..0000000000000000000000000000000000000000 --- a/tutorials/source_zh_cn/advanced_use/cache.md +++ /dev/null @@ -1,237 +0,0 @@ -# 单节点缓存 - -`Ascend` `GPU` `CPU` `中级` `高级` - - - -- [单节点缓存](#单节点缓存) - - [概述](#概述) - - [缓存基础使用](#缓存基础使用) - - [缓存经过数据增强的数据](#缓存经过数据增强的数据) - - [缓存共享](#缓存共享) - - - - - -## 概述 - -对于需要重复访问远程的数据集或从需要重复从磁盘中读取数据集的情况,可以使用单节点缓存算子将数据集缓存于本地内存中,以加速数据集的读取。 - -缓存算子依赖于在当前节点启动的缓存服务器,缓存服务器作为守护进程独立于用户的训练脚本而存在,主要用于提供缓存数据的管理,支持包括存储、查找、读取,以及发生缓存未命中时对于缓存数据的写入等操作。 - -若用户的内存空间不足以缓存所有数据集,则用户可以配置缓存算子使其将剩余数据缓存至磁盘。 - -## 缓存基础使用 - -- **Step 1:** - - 在使用单节点缓存服务之前,首先需要启动缓存服务器: - - ```shell - cache_admin --start - ``` - - **cache_admin命令支持以下参数:** - - `-w`:设置缓存服务器的工作线程数量,默认情况下工作线程数量为32。 - - `-s`:设置若缓存数据的大小超过内存空间,则溢出至磁盘的数据文件路径,默认为`/tmp`路径。 - - `-h`:缓存服务器的ip地址,默认为127.0.0.1。 - - `-p`:缓存服务器的端口号。 - - `-g`: 生成一个缓存会话。 - - `-d`:删除一个缓存会话。 - - `-l`:设置日志等级。 - -- **Step 2:** - - 
随后,在Python训练脚本中使用`DatasetCache` API来定义一个名为`test_cache`的缓存实例: - - ```python - import mindspore.dataset as ds - import mindspore.common.dtype as mstype - - test_cache = ds.DatasetCache(session_id=1, size=0, spilling=True) - ``` - - **DatasetCache支持以下参数:** - - `session_id`: 缓存会话的id。 - - `size`:缓存最大内存空间占用,该参数以MB为单位,例如512GB的缓存空间应设置size=524288。 - - `spilling`:当内存空间超出所设置的最大内存空间占用时,是否允许将剩余的数据溢出至磁盘,默认为False。 - - `hostname`:连接至缓存服务器的ip地址,默认为127.0.0.1。 - - `port`:连接至缓存服务器的端口号。 - - > - 在实际使用中,通常应当首先使用`cache_admin -g`命令从缓存服务器处获得一个缓存会话id并作为`session_id`参数,防止发生缓存会话冲突的状况。 - > - 设置`size=0`代表不限制缓存所使用的内存空间。使用此设置的用户需自行注意缓存的内存使用状况,防止因机器内存耗尽而导致缓存服务器进程被杀或机器重启的状况。 - > - 若设置`spilling=True`,则用户需确保所设置的磁盘路径具有写入权限以及足够的磁盘空间,以存储溢出至磁盘的缓存数据。 - > - 若设置`spilling=False`,则缓存服务器在耗尽所设置的内存空间后将不再写入新的数据。 - -- **Step 3:** - - 最后,在创建数据集算子时将所创建的`test_cache`作为其`cache`参数传入: - - ```python - schema = ds.Schema() - schema.add_column('image', de_type=mstype.uint8, shape=[2]) - schema.add_column('label', de_type=mstype.uint8, shape=[1]) - - # apply cache to dataset - data = ds.RandomDataset(schema=schema, total_rows=4, num_parallel_workers=1, cache=test_cache) - - num_iter = 0 - for item in data.create_dict_iterator(num_epochs=1): # each data is a dictionary - # in this example, each dictionary has keys "image" and "label" - print("{} image: {} label: {}".format(num_iter, item["image"], item["label"])) - num_iter += 1 - ``` - - ``` - 0 image: [135 135] label: [59] - 1 image: [53 53] label: [146] - 2 image: [99 99] label: [27] - 3 image: [208 208] label: [169] - ``` - -- **Step 4:** - - 在训练结束后,可以选择将当前的缓存销毁并释放内存: - - ```shell - # Destroy the session - cache_admin –-destroy_session $session_id - ``` - - 以上命令将销毁缓存会话id为`session_id`的缓存。 - - 若选择不销毁缓存,则该缓存会话中的缓存数据将继续存在,用户下次启动训练脚本时可以继续使用该缓存。 - -## 缓存经过数据增强的数据 - -缓存算子既支持对于原始数据集的缓存,也可以被应用于缓存经过数据增强处理后的数据。 - -直接缓存经过数据增强处理后的数据通常会带来更大的性能收益,因为被缓存的数据仅需要进行一次所需的数据增强处理,随后用户即可通过缓存直接获取经过增强处理后的数据。 - -- **Step 1:** - - 同样,缓存经过数据增强处理的数据也需要首先启动缓存服务器: - - ```shell - cache_admin --start - ``` - -- **Step 2:** - - 并在Python脚本中定义缓存实例: - - ```python - import mindspore.dataset as ds - import mindspore.common.dtype as mstype - import mindspore.dataset.transforms.vision.c_transforms as c_vision - - test_cache = ds.DatasetCache(session_id=1, size=0, spilling=True) - ``` - -- **Step 3:** - - 最后,在创建用于数据增强的`Map`算子是将所创建的缓存实例传入: - - ```python - schema = ds.Schema() - schema.add_column('image', de_type=mstype.uint8, shape=[640, 480, 3]) - schema.add_column('label', de_type=mstype.uint8, shape=[1]) - - data = ds.RandomDataset(schema=schema, total_rows=4, num_parallel_workers=1) - - # apply cache to map - rescale_op = c_vision.Rescale(1.0 / 255.0, -1.0) - data = data.map(input_columns=["image"], operations=rescale_op, cache=test_cache) - - num_iter = 0 - for item in data.create_dict_iterator(num_epochs=1): # each data is a dictionary - # in this example, each dictionary has keys "image" and "label" - print("{} image shape: {} label: {}".format(num_iter, item["image"].shape, item["label"])) - num_iter += 1 - ``` - - ``` - 0 image shape: (640, 480, 3) label: [99] - 1 image shape: (640, 480, 3) label: [203] - 2 image shape: (640, 480, 3) label: [37] - 3 image shape: (640, 480, 3) label: [242] - ``` - -- **Step 4:** - - 在训练结束后,可以选择将当前的缓存销毁并释放内存: - - ```shell - # Destroy the session - cache_admin –-destroy_session $session_id - ``` - -## 缓存共享 - -对于分布式训练的场景,缓存算子还允许多个相同的训练脚本共享同一个缓存,共同从缓存中读写数据。 - -- **Step 1:** - - 首先启动缓存服务器: - - ```shell - cache_admin --start - ``` - -- **Step 2:** - - 在启动训练脚本的shell脚本中,生成一个缓存会话id: - - ```shell - #!/bin/bash 
- # This shell script will launch parallel pipelines - - # generate a session id that these parallel pipelines can share - result=$(cache_admin -g 2>&1) - rc=$? - if [ $rc -ne 0 ]; then - echo "some error" - exit 1 - fi - - # grab the session id from the result string - session_id=$(echo $result | awk ‘{print $NF}’) - ``` - -- **Step 3:** - - 在启动训练脚本时将`session_id`以及其他参数传入: - - ```shell - # make the session_id available to the python scripts - num_devices=4 - - for p in $(seq 0 $((${num_devices}-1))); do - python my_training_script.py -–num_devices “$num_devices” –-device “$p” –-session_id $session_id & - done - ``` - -- **Step 4:** - - 在python脚本内部接收传入的`session_id`,并在定义缓存实例时将其作为参数传入: - - ```python - import mindspore.dataset as msds - import mindspore.dataset.engine as de - - parser.add_argument('--session_id', type=int, default=1, help='Device num.') - - # use the session id passed in from the outside script when defining the cache - test_cache = msds.DatasetCache(session_id = session_id, size = 0, spilling=False) - ds = de.ImageFolderDatasetV2(data_dir, num_samples=num_samples, cache = test_cache) - ``` - -- **Step 5:** - - 在训练结束后,可以选择将当前的缓存销毁并释放内存: - - ```shell - # Destroy the session - cache_admin –-destroy_session $session_id - ``` diff --git a/tutorials/source_zh_cn/advanced_use/checkpoint_for_hybrid_parallel.md b/tutorials/source_zh_cn/advanced_use/checkpoint_for_hybrid_parallel.md index 36f8ef9995c496720e36af88d46de3aa2885981c..bb1307dc069b45bcb9e1121c062a5481a8768320 100644 --- a/tutorials/source_zh_cn/advanced_use/checkpoint_for_hybrid_parallel.md +++ b/tutorials/source_zh_cn/advanced_use/checkpoint_for_hybrid_parallel.md @@ -1,5 +1,3 @@ - - # 手动设置并行场景模型参数的保存和加载 `Linux` `Ascend` `GPU` `模型训练` `中级` `高级` diff --git a/tutorials/source_zh_cn/advanced_use/computer_vision_application.md b/tutorials/source_zh_cn/advanced_use/computer_vision_application.md index 4bed55f52a566f9e0265a6a39c1beb3f80018529..c00a1d9efd3df3ff8dbafc84fd2b0f70c4426e12 100644 --- a/tutorials/source_zh_cn/advanced_use/computer_vision_application.md +++ b/tutorials/source_zh_cn/advanced_use/computer_vision_application.md @@ -9,11 +9,11 @@ - [图像分类](#图像分类) - [任务描述及准备](#任务描述及准备) - [下载CIFAR-10数据集](#下载cifar-10数据集) - - [数据预加载和预处理](#数据预加载和预处理) - - [定义卷积神经网络](#定义卷积神经网络) - - [定义损失函数和优化器](#定义损失函数和优化器) - - [调用`Model`高阶API进行训练和保存模型文件](#调用model高阶api进行训练和保存模型文件) - - [加载保存的模型,并进行验证](#加载保存的模型并进行验证) + - [数据预加载和预处理](#数据预加载和预处理) + - [定义卷积神经网络](#定义卷积神经网络) + - [定义损失函数和优化器](#定义损失函数和优化器) + - [调用`Model`高阶API进行训练和保存模型文件](#调用model高阶api进行训练和保存模型文件) + - [加载保存的模型,并进行验证](#加载保存的模型并进行验证) - [参考文献](#参考文献) @@ -122,8 +122,8 @@ tar -zvxf cifar-10-binary.tar.gz c_trans += [resize_op, rescale_op, normalize_op, changeswap_op] # apply map operations on images - cifar_ds = cifar_ds.map(input_columns="label", operations=type_cast_op) - cifar_ds = cifar_ds.map(input_columns="image", operations=c_trans) + cifar_ds = cifar_ds.map(operations=type_cast_op, input_columns="label") + cifar_ds = cifar_ds.map(operations=c_trans, input_columns="image") ``` 3. 
数据混洗和批处理 diff --git a/tutorials/source_zh_cn/advanced_use/customized_debugging_information.md b/tutorials/source_zh_cn/advanced_use/customized_debugging_information.md index 2099dded637436db862a9d39a0a104156e4036a2..a011db8aea1636a080031a875ce13bc584e3e6a4 100644 --- a/tutorials/source_zh_cn/advanced_use/customized_debugging_information.md +++ b/tutorials/source_zh_cn/advanced_use/customized_debugging_information.md @@ -11,7 +11,9 @@ - [自定义Callback](#自定义callback) - [MindSpore metrics功能介绍](#mindspore-metrics功能介绍) - [print算子功能介绍](#print算子功能介绍) - - [异步数据Dump功能介绍](#异步数据dump功能介绍) + - [数据Dump功能介绍](#数据dump功能介绍) + - [同步Dump功能介绍](#同步dump功能介绍) + - [异步Dump功能介绍](#异步dump功能介绍) - [日志相关的环境变量和配置](#日志相关的环境变量和配置) @@ -120,8 +122,8 @@ class Callback(): loss = cb_params.net_outputs cur_time = time.time() if (cur_time - cb_params.init_time) > self.run_time: - print("epoch: ", epoch_num, " step: ", step_num, " loss: ", loss) - run_context.request_stop() + print("epoch: ", epoch_num, " step: ", step_num, " loss: ", loss) + run_context.request_stop() stop_cb = StopAtTime(run_time=10) model.train(100, dataset, callbacks=stop_cb) @@ -197,7 +199,7 @@ output = model.eval(ds_eval) `model.eval`方法会返回一个字典,里面是传入metrics的指标和结果。 -在eval过程中也可以使用callback功能,用户可以调用相关API或自定义callback方法实现想要的功能。 +在eval过程中也可以使用`Callback`功能,用户可以调用相关API或自定义`Callback`方法实现想要的功能。 用户也可以定义自己的`metrics`类,通过继承`Metric`基类,并重写`clear`、`update`、`eval`三个方法即可实现。 @@ -205,7 +207,7 @@ output = model.eval(ds_eval) `accuracy`继承了`EvaluationBase`基类,重写了上述三个方法。 `clear`方法会把类中相关计算参数初始化。 -`update`方法接受预测值和标签值,更新accuracy内部变量。 +`update`方法接受预测值和标签值,更新`accuracy`内部变量。 `eval`方法会计算相关指标,返回计算结果。 调用`accuracy`的`eval`方法,即可得到计算结果。 @@ -263,48 +265,107 @@ val:[[1 1] [1 1]] ``` -## 异步数据Dump功能介绍 +## 数据Dump功能介绍 -在Ascend环境上执行训练,当训练结果和预期有偏差时,可以通过异步数据Dump功能保存算子的输入输出进行调试。 +训练网络时,当训练结果和预期有偏差时,可以通过数据Dump功能保存算子的输入输出进行调试。Dump功能分为同步Dump和异步Dump,同步Dump同时支持GPU和Ascend,而异步Dump只支持Ascend。 -> 异步数据Dump不支持`comm_ops`类别的算子,算子类别详见[算子支持列表](https://www.mindspore.cn/docs/zh-CN/master/operator_list.html)。 +### 同步Dump功能介绍 -1. 开启IR保存开关: `context.set_context(save_graphs=True)`。 -2. 执行网络脚本。 -3. 查看执行目录下的`hwopt_d_end_graph_{graph id}.ir`,找到需要Dump的算子名称。 -4. 配置Dump的json配置文件`data_dump.json`。 +1. 创建配置文件`data_dump.json`。 + + JSON文件的名称和位置可以自定义设置。 ```json { - "DumpSettings": { + "common_dump_settings": { + "dump_mode": 0, + "path": "/tmp/net/", "net_name": "ResNet50", + "iteration": 0, + "input_output": 0, + "kernels": ["Default/Conv-op12"], + "support_device": [0,1,2,3,4,5,6,7] + }, + "e2e_dump_settings": { + "enable": false, + "trans_flag": false + } + } + ``` + + - `dump_mode`:设置成0,表示Dump出改网络中的所有算子;设置成1,表示Dump`"kernels"`里面制定的算子。 + - `path`:Dump保存数据的绝对路径。 + - `net_name`:自定义的网络名称,例如:"ResNet50"。 + - `iteration`:指定需要Dump的迭代,若设置成0,表示Dump所有的迭代。 + - `input_output`:设置成0,表示Dump出算子的输入和算子的输出;设置成1,表示Dump出算子的输入;设置成2,表示Dump出算子的输出。该参数设置仅支持Ascend,GPU上只能Dump算子的输出。 + - `kernels`:算子的全称,可以通过开启IR保持开关`context.set_context(save_graphs=True)`执行用例,从生成的`ir`文件获取。例如,`device_target`为`Ascend`时,可以从`hwopt_d_end_graph_{graph_id}.ir`中获取算子全称,`device_target`为`GPU`时,可以从`hwopt_pm_7_getitem_tuple.ir`中获取算子全称。 + - `support_device`:支持的设备,默认设置成0到7即可;在分布式训练场景下,需要dump个别设备上的数据,可以只在`support_device`中指定需要Dump的设备Id。 + - `enable`:开启E2E Dump。 + - `trans_flag`:开启格式转换。将设备上的数据格式转换成NCHW格式。 + +2. 指定Dump的json配置文件。 + + ```bash + export MINDSPORE_DUMP_CONFIG={Absolute path of data_dump.json} + ``` + + - 在网络脚本执行前,设置好环境变量;网络脚本执行过程中设置将会不生效。 + - 在分布式场景下,Dump环境变量需要调用`mindspore.communication.management.init`之前配置。 + +3. 
执行用例Dump数据。 + + 可以在训练脚本中设置`context.set_context(reserve_class_name_in_scope=False)`,避免Dump文件名称过长导致Dump数据文件生成失败。 + +4. 解析Dump数据。 + + 通过`numpy.fromfile`读取Dump数据文件即可解析。 + +### 异步Dump功能介绍 + +1. 创建配置文件`data_dump.json`。 + + JSON文件的名称和位置可以自定义设置。 + + ```json + { + "common_dump_settings": { "dump_mode": 0, - "op_debug_mode": 0, + "path": "/relative_path", + "net_name": "ResNet50", "iteration": 0, - "kernels": ["Default/Conv2D-op2", "Default/TensorAdd-op10"] + "input_output": 0, + "kernels": ["Default/Conv-op12"], + "support_device": [0,1,2,3,4,5,6,7] + }, + "async_dump_settings": { + "enable": false, + "op_debug_mode": 0 } } ``` - > - `net_name`:自定义的网络名称,例如:"Resnet50"。 - > - `dump_mode`:设置成0,表示Dump所有的算子;设置成1,表示Dump`"kernel"`里面制定的算子。 - > - `op_debug_mode`:该属性用于算子溢出调试,在使用Dump功能的时候,请设置成0。 - > - `iteration`:指定需要Dump的迭代。非数据下沉模式下,`iteration`需要设置成0,并且会Dump出每个迭代的数据。 - > - `kernels`:指定需要Dump的算子名称(`fullname_with_scope`)。 + - `dump_mode`:设置成0,表示Dump出改网络中的所有算子;设置成1,表示Dump`"kernels"`里面指定的算子。 + - `path`:Dump保存数据的相对路径,异步Dump生成的数据都会保存在`/var/log/npu/ide_daemon/dump/relative_path`目录下。 + - `net_name`:自定义的网络名称,例如:"ResNet50"。 + - `iteration`:指定需要Dump的迭代。非数据下沉模式下,`iteration`需要设置成0,并且会Dump出每个迭代的数据。 + - `input_output`:设置成0,表示Dump出算子的输入和算子的输出;设置成1,表示Dump出算子的输入;设置成2,表示Dump出算子的输出。 + - `kernels`:算子的全称。开启IR保持开关`context.set_context(save_graphs=True)`并执行用例,从生成的`hwopt_d_end_graph_{graph_id}.ir`文件获取。`kernels`仅支持TBE算子、AiCPU算子、通信算子,若设置成通信算子的名称,将会Dump出通信算子的输入算子的数据。 + - `support_device`:支持的设备,默认设置成0到7即可;在分布式训练场景下,需要dump个别设备上的数据,可以只在`support_device`中指定需要Dump的设备Id。 + - `enable`:开启异步Dump。 + - `op_debug_mode`:该属性用于算子溢出调试,在使用Dump功能的时候,请设置成0。 -5. 设置数据Dump的环境变量。 +2. 设置数据Dump的环境变量。 ```bash - export ENABLE_DATA_DUMP=1 - export DATA_DUMP_PATH=/test - export DATA_DUMP_CONFIG_PATH=data_dump.json + export MINDSPORE_DUMP_CONFIG={Absolute path of data_dump.json} ``` - > - 在网络脚本执行前,设置好环境变量;网络脚本执行过程中设置将会不生效。 - > - 在分布式场景下,Dump环境变量需要调用`mindspore.communication.management.init`之前配置。 + - 在网络脚本执行前,设置好环境变量;网络脚本执行过程中设置将会不生效。 + - 在分布式场景下,Dump环境变量需要调用`mindspore.communication.management.init`之前配置。 + +3. 执行用例Dump数据。 -6. 再次执行用例进行异步数据Dump。 -7. 解析文件。 +4. 
解析文件。 执行完用例后去`/var/log/npu/ide_daemon/dump/`目录下,运行如下命令解析Dump数据: diff --git a/tutorials/source_zh_cn/advanced_use/dashboard.md b/tutorials/source_zh_cn/advanced_use/dashboard.md index 898695dfe24b58c764c62099b6188024601f250a..82e2ac38f11b4ac258ed7639660d91c074789a00 100644 --- a/tutorials/source_zh_cn/advanced_use/dashboard.md +++ b/tutorials/source_zh_cn/advanced_use/dashboard.md @@ -160,8 +160,7 @@ 图12将用户所记录的张量以表格的形式展示,包含以下功能: - 点击表格右边小方框按钮,可以将表格放大。 -- 表格中白色方框显示当前展示的是哪个维度下的张量数据,其中冒号`:`表示当前维度的所有值,可以在方框输入对应的索引或者`:`后按`Enter`键或者点击后边的打勾按钮来查询特定维度的张量数据。 - 假设某维度是32,则其索引范围是-32到31。注意:可以查询0维到2维的张量数据,不支持查询超过两维的张量数据,即不能设置超过两个冒号`:`的查询条件。 +- 表格中白色方框显示当前展示的是哪个维度下的张量数据,其中冒号`:`表示当前维度索引范围,和Python索引含义基本一致,不指定具体索引表示当前维度所有值,`2:5`表示索引2到5(不包括5)的值,可以在方框输入对应的索引或者含有`:`的索引范围后按`Enter`键或者点击后边的打勾按钮来查询特定维度的张量数据。假设某维度是32,则其索引范围是-32到31。注意:可以查询0维到2维的张量数据,不支持查询超过两维的张量数据,即不能设置超过两个冒号`:`的查询条件。 - 拖拽表格下方的空心圆圈可以查询特定步骤的张量数据。 ![tensor_histogram.png](./images/tensor_histogram.png) diff --git a/tutorials/source_zh_cn/advanced_use/data_processing_acceleration.md b/tutorials/source_zh_cn/advanced_use/data_processing_acceleration.md deleted file mode 100644 index 141b82df3c533effba56d44daa4f5fe3f150c3f6..0000000000000000000000000000000000000000 --- a/tutorials/source_zh_cn/advanced_use/data_processing_acceleration.md +++ /dev/null @@ -1,96 +0,0 @@ -# 数据处理性能调试 - -`Ascend` `GPU` `CPU` `中级` `高级` - - - -- [数据处理性能调试](#数据处理性能调试) - - [概述](#概述) - - [脚本撰写](#脚本撰写) - - [操作系统的影响](#操作系统的影响) - - - - - -## 概述 - -数据处理的性能涉及到多方面的因素,包括脚本撰写、操作系统等。 - -## 脚本撰写 - -数据处理的脚本大致分为以下几个模块: - -![dataset_pipeline](./images/dataset_pipeline.png) - -- 数据加载 - - 数据加载的方式有三种: - 1. 内置高性能的数据加载类算子,如CIFAR数据集、MNIST数据集等; - 2. 将数据集转换成MindRecord,使用MindDataset算子进行加载; - 3. 用户自定义数据集——GeneratorDataset。 - - > 优先使用内置的数据加载类算子以及MindDataset,如果无法满足用户需求,则在撰写用户自定数据集加载时,需要关注本身数据集加载的性能优化。 - -- 数据混洗 - - ![shuffle](./images/shuffle.png) - - > `shuffle`操作主要用来将数据混洗,设定的`buffer_size`越大,混洗程度越大,但时间、计算资源消耗会大。因此该算子我们不建议使用,现在数据加载类算子中可以支持`shuffle`的功能。 - -- 数据增强 - - 数据增强的方式有三种: - 1. C算子的数据增强(C++); - 2. Python算子的数据增强(Pillow); - 3. 用户自定义的数据增强(Python function)。 - - > 优先使用C算子的数据增强。根据用户自定义的数据增强的算子类型进行多线程还是多进程模式的选择,计算密集型使用多进程,IO密集型使用多线程。 - -- `batch` & `repeat` - - `batch`和`repeat`一般不会成为性能瓶颈。 - -## 操作系统的影响 - -由于数据处理是在host端进行,那么机器或者操作系统本身的一些配置会对数据处理存在影响,主要有存储、NUMA架构、CPU(计算资源)几个方面。 - -1. 存储 - - 当数据集较大时,我们推荐使用固态硬盘对数据进行存储,能够减少存储I/O对于数据处理的影响。 - - > 一般地,当数据集被加载之后,就会缓存在操作系统的page cache中,在一定程度上降低了存储开销,加快了后续epoch的数据读取。 - -2. NUMA架构 - - 非一致性内存架构(Non-uniform Memory Architecture)是为了解决传统的对称多处理(Symmetric Multi-processor)系统中的可扩展性问题而诞生的。NUMA系统拥有多条内存总线,于是将几个处理器通过内存总线与一块内存相连构成一个组,这样整个庞大的系统就可以被分为若干个组,这个组的概念在NUMA系统中被称为节点(node)。处于该节点中的内存被称为本地内存(local memory),处于其他节点中的内存对于该组而言被称为外部内存(foreign memory)。因此每个节点访问本地内存和访问其他节点的外部内存的延迟是不相同的,在数据处理的过程中需要尽可能避免这一情况的发生。一般我们可以使用以下命令进行进程与node节点的绑定: - - ```shell - numactl --cpubind=0 --membind=0 python train.py - ``` - - 上述例子表示将此次运行的`train.py`的进程绑定到`numa node` 0上。 - -3. 
CPU(计算资源) - - CPU对于数据处理的影响主要是计算资源的分配和CPU频率的设置两个方面。 - - - 计算资源的分配 - - 当我们进行分布式训练时,一台设备机器上会启动多个训练进程,而这些训练进程会通过操作系统本身的策略进行计算资源的分配与抢占,当进程较多时,可能会由于计算资源的竞争而导致数据处理性能的下降,因此这时需要进行人工分配计算资源,避免各个进程的计算资源竞争。 - - ```shell - numactl --cpubind=0 python train.py - or - taskset -c 0-15 python train.py - ``` - - > `numactl`的方式较为粗粒度,直接指定`numa node id`,而`taskset`的方式是细粒度的,它能够直接指定`numa node`上的`cpu core`,其中0-15表示的`core id`从0到15。 - - - CPU频率设置 - - 要想充分发挥host端CPU的最大算力,CPU频率的设置至关重要。一般地,linux内核支持调节CPU主频,降低功耗,已到达节能的效果。通过选择系统空闲状态不同的电源管理策略,可以实现不同程度降低服务器功耗。但是,更低的功耗策略意味着CPU唤醒更慢对性能影响更大。因此如果发现CPU模式为conservative或者powersave,可以使用cpupower设置CPU Performance模式,对数据处理的性能提升有非常大的效果。 - - ```shell - cpupower frequency-set -g performance - ``` diff --git a/tutorials/source_zh_cn/advanced_use/dataset_conversion.md b/tutorials/source_zh_cn/advanced_use/dataset_conversion.md index f4f4e73e2f349869d26988741b5c5cfa8ca3c7b7..c0b1ff0cae304f198a746a28ed4fa5ccee36a368 100644 --- a/tutorials/source_zh_cn/advanced_use/dataset_conversion.md +++ b/tutorials/source_zh_cn/advanced_use/dataset_conversion.md @@ -1,14 +1,14 @@ # MindSpore数据格式转换 -`Ascend` `GPU` `CPU` `初级` `中级` `高级` + `Linux` `Ascend` `GPU` `CPU` `数据准备` `中级` `高级` - [MindSpore数据格式转换](#mindspore数据格式转换) - [概述](#概述) - [基本概念](#基本概念) - - [相关接口说明](#相关接口说明) - [将数据集转换为MindRecord](#将数据集转换为mindrecord) + - [读取MindRecord数据集](#读取mindrecord数据集) @@ -16,7 +16,7 @@ ## 概述 -用户可以将非标准的数据集和常见的经典数据集转换为MindSpore数据格式即MindRecord,从而方便地加载到MindSpore中进行训练。同时,MindSpore在部分场景做了性能优化,使用MindSpore数据格式可以获得更好的性能体验。 +用户可以将非标准的数据集和常用的数据集转换为MindSpore数据格式即MindRecord,从而方便地加载到MindSpore中进行训练。同时,MindSpore在部分场景做了性能优化,使用MindSpore数据格式可以获得更好的性能体验。 MindSpore数据格式具备的特征如下: 1. 实现多变的用户数据统一存储、访问,训练数据读取更简便; @@ -24,17 +24,17 @@ MindSpore数据格式具备的特征如下: 3. 高效数据编解码操作,对用户透明、无感知; 4. 灵活控制分区大小,实现分布式训练。 -MindSpore的目标是将用户的数据集通过归一化操作生成MindSpore数据格式,进一步通过MindDataset实现数据的读取,并用于训练过程。 +MindSpore数据格式的目标是归一化用户的数据集,并进一步通过MindDataset实现数据的读取,并用于训练过程。 ![data_conversion_concept](./images/data_conversion_concept.png) ## 基本概念 -一个MindRecord文件由数据文件和索引文件组成: +一个MindRecord文件由数据文件和索引文件组成,且数据文件及索引文件暂不支持重命名操作: - 数据文件 - 包含文件头、标量数据页、块数据页,用于存储用户归一化后的训练数据。 + 包含文件头、标量数据页、块数据页,用于存储用户归一化后的训练数据,且单个MindRecord文件建议小于20G,用户可将大数据集进行分片存储为多个MindRecord文件。 - 索引文件 @@ -48,31 +48,13 @@ MindSpore的目标是将用户的数据集通过归一化操作生成MindSpore 文件头主要用来存储文件头大小、标量数据页大小、块数据页大小、Schema信息、索引字段、统计信息、文件分区信息、标量数据与块数据对应关系等,是MindRecord文件的元信息。 - > Schema为数据集结构定义文件,用于定义数据集包含哪些字段以及字段的类型。更多说明及介绍可参考[Schema相关规范](#将数据集转换为mindrecord)。 - - 标量数据页 标量数据页主要用来存储整型、字符串、浮点型数据,如图像的Label、图像的文件名、图像的长宽等信息,即适合用标量来存储的信息会保存在这里。 - 块数据页 - 块数据页主要用来存储二进制串、Numpy数组等数据,如二进制图像文件本身、文本转换成的字典等。 - -## 相关接口说明 - -| 接口名 | 接口说明 | -| --- | --- | -| FileWriter | 用于将用户定义的原始数据写为MindRecord文件。 | -| FileReader | 用于读取MindRecord文件。 | -| MindPage | 用于实现MindSpore数据格式的检索及统计功能。 | -| Cifar10ToMR | 用于将CIFAR-10数据集转换为MindRecord格式。 | -| Cifar100ToMR | 用于将CIFAR-100数据集转换为MindRecord格式。 | -| ImageNetToMR | 用于将ImageNet数据集转换为MindRecord格式。 | -| MnistToMR | 用于将MNIST数据集转换为MindRecord格式。 | -| TFRecordToMR | 用于将TFRecord格式数据集文件转换为MindRecord格式。 | -| CsvToMR | 用于将CSV格式数据集文件转换为MindRecord格式。 | - -更多详细接口说明,请参见[API文档](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.mindrecord.html)。 + 块数据页主要用来存储二进制串、NumPy数组等数据,如二进制图像文件本身、文本转换成的字典等。 ## 将数据集转换为MindRecord @@ -115,13 +97,15 @@ MindSpore的目标是将用户的数据集通过归一化操作生成MindSpore 5. 
创建`FileWriter`对象,传入文件名及分片数量,然后添加Schema文件及索引,调用`write_raw_data`接口写入数据,最后调用`commit`接口生成本地数据文件。 ```python - writer = FileWriter(file_name="testWriter.mindrecord", shard_num=4) + writer = FileWriter(file_name="test.mindrecord", shard_num=4) writer.add_schema(cv_schema_json, "test_schema") writer.add_index(indexes) writer.write_raw_data(data) writer.commit() ``` + 该示例会生成`test.mindrecord0`、`test.mindrecord0.db`、`test.mindrecord1`、`test.mindrecord1.db`、`test.mindrecord2`、`test.mindrecord2.db`、`test.mindrecord3`、`test.mindrecord3.db`共8个文件,称为MindRecord数据集。`test.mindrecord0`和`test.mindrecord0.db`称为1个MindRecord文件,其中`test.mindrecord0`为数据文件,`test.mindrecord0.db`为索引文件。 + **接口说明:** - `write_raw_data`:将数据写入到内存之中。 - `commit`:将最终内存中的数据写入到磁盘。 @@ -129,7 +113,28 @@ MindSpore的目标是将用户的数据集通过归一化操作生成MindSpore 6. 如果需要在现有数据格式文件中增加新数据,可以调用`open_for_append`接口打开已存在的数据文件,继续调用`write_raw_data`接口写入新数据,最后调用`commit`接口生成本地数据文件。 ```python - writer = FileWriter.open_for_append("testWriter.mindrecord0") + writer = FileWriter.open_for_append("test.mindrecord0") writer.write_raw_data(data) writer.commit() ``` + +## 读取MindRecord数据集 + +下面将简单演示如何通过`MindDataset`读取MindRecord数据集。 + +1. 导入读取类`MindDataset`。 + + ```python + import mindspore.dataset as ds + ``` + +2. 使用`MindDataset`读取MindRecord数据集。 + + ```python + data_set = ds.MindDataset(dataset_file="test.mindrecord0") # Read full data set + count = 0 + for item in data_set.create_dict_iterator(output_numpy=True): + print("sample: {}".format(item)) + count += 1 + print("Got {} samples".format(count)) + ``` diff --git a/tutorials/source_zh_cn/advanced_use/debugger.md b/tutorials/source_zh_cn/advanced_use/debugger.md new file mode 100644 index 0000000000000000000000000000000000000000..f8ef41607e6835d5ca5f68057d69a3873dc6fc79 --- /dev/null +++ b/tutorials/source_zh_cn/advanced_use/debugger.md @@ -0,0 +1,161 @@ +# 使用调试器 + +`Linux` `Ascend` `GPU` `静态图` `模型调试` `中级` `高级` + + + +- [使用调试器](#使用调试器) + - [概述](#概述) + - [操作流程](#操作流程) + - [调试器环境准备](#调试器环境准备) + - [调试器页面介绍](#调试器页面介绍) + - [计算图](#计算图) + - [节点列表](#节点列表) + - [节点信息](#节点信息) + - [条件断点](#条件断点) + - [训练控制](#训练控制) + - [使用调试器进行调试](#使用调试器进行调试) + - [注意事项](#注意事项) + + + + + +## 概述 +MindSpore调试器是为图模式训练提供的调试工具,可以用来查看并分析计算图节点的中间结果。 + +在MindSpore图模式的训练过程中,用户无法从Python层获取到计算图中间节点的结果,使得训练调试变得很困难。使用MindSpore调试器,用户可以: + +- 在MindInsight调试器界面结合计算图,查看图节点的输出结果; +- 设置条件断点,监测训练异常情况(比如INF),在异常发生时追踪错误原因; +- 查看权重等参数的变化情况。 + +## 操作流程 + +- 以调试模式启动MindInsight,配置相关环境变量; +- 训练开始,在MindInsight调试器界面设置条件断点; +- 在MindInsight调试器界面分析训练执行情况。 + +## 调试器环境准备 +开始训练前,请先安装MindInsight,并以调试模式启动。调试模式下,MindSpore会将训练信息发送给MindInsight调试服务,用户可在MindInsight调试器界面进行查看和分析。 + +MindInsight调试服务启动命令: + +```shell script +mindinsight start --port {PORT} --enable-debugger True --debugger-port {DEBUGGER_PORT} +``` + +参数含义如下: + +|参数名|属性|功能描述|参数类型|默认值|取值范围| +|---|---|---|---|---|---| +|`--port {PORT}`|可选|指定Web可视化服务端口。|Integer|8080|1~65535| +|`--enable-debugger {ENABLE_DEBUGGER}`|必选|取值为True, 开启MindInsight侧调试器|Boolean|False|True/False| +|`--debugger-port {DEBUGGER_PORT}`|可选|指定调试服务端口。|Integer|50051|1~65535| + +更多启动参数请参考[MindInsight相关命令](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/mindinsight_commands.html)。 + +然后,设置环境变量`export ENABLE_MS_DEBUGGER=1`,将训练指定为调试模式,并设置训练要连接的调试服务和端口: +`export MS_DEBUGGER_HOST=127.0.0.1`(该服务地址需与MindInsight host一致); +`export MS_DEBUGGER_PORT=50051`(该端口需与MindInsight debugger-port一致)。 + +如果用户设备的内存空间有限,可在运行训练前开启内存复用模式,以降低运行内存占用:`export MS_DEBUGGER_PARTIAL_MEM=1`。 + +此外,训练时不要使用数据下沉模式(需设置`model.train`中的`dataset_sink_mode`为`False`),以保证调试器可以获取每个step的训练信息。 + +## 调试器页面介绍 + 
+调试器环境准备完成后,开始训练。在训练正式执行前,可以在MindInsight调试器界面查看计算图等训练元信息,调试器页面布局由如下部分组成。 + +![debugger_init_page](./images/debugger_init_page.png) + +图1: 调试器初始页面 + +### 计算图 +调试器将优化后的最终执行图展示在UI的中上位置,用户可以双击打开图上的方框 (代表一个`scope`) 将计算图进一步展开,查看`scope`中的节点信息。 + +面板的最上方展示了`训练端地址`(训练脚本所在进程的地址和端口),训练使用的`卡号`, 训练的`当前轮次`等元信息。 + +在GPU环境下,训练执行图面板的右上角会有`当前节点`和`下一个节点`两个按钮,分别用于回到当前执行节点、和执行下一个节点。 +用户可以方便地执行单个节点。 + +### 节点列表 + +如图1所示,在UI的左侧会展示计算图`节点列表`,可以将计算图中的节点按`scope`分层展开。点击`节点列表`中的节点,计算图也会联动展开到选中节点的位置。 +用户也可以使用`节点列表`上方的搜索框按名称进行节点的搜索。 + +### 节点信息 + +![debugger_tensor_info](./images/debugger_tensor_info.png) + +图2: 计算图节点信息查看 + +点击计算图上的节点后,可以在UI下方查看该节点的详细信息,如图2所示。该部分展示了节点的输出和输入,训练的`轮次`数目,`张量`的`类型`、`形状`和`数值`等信息。 + +在GPU环境下,选中图上的某个可执行节点后,单击鼠标右键,可选择`运行到该节点`,代表将训练脚本运行到被选中节点(不超过一个`轮次`)。选中后单击鼠标左键,训练脚本运行到该节点后会暂停。 + +![debugger_tensor_value](./images/debugger_tensor_value.png) + +图3: 查看`张量`值 + +一些`张量`的维度过多,无法直接在主页进行展示。用户可以点击对应的查看按钮,在弹出的TAB页中查看`张量`值的详细信息,如图3所示。 + +![debugger_tensor_compare](./images/debugger_tensor_compare.png) + +图4:查看上一步对比结果 + +此外,参数类型的节点输出可以和其自身在上一轮次的输出结果进行对比,点击`上一步对比`按钮即可进入到对比界面,如图4所示。 + +### 条件断点 + +![debugger_set_watch_point](./images/debugger_set_watch_point.png) + +图5: 条件断点设置 + +为了方便地对节点的计算结果进行监测分析,用户可以给计算图中的节点设置条件断点。图5展示了条件断点的设置方法,用户首先点击监测点列表右上角的 `+` 按钮新增条件断点并监控条件,比如INF, +然后在节点列表选择要监控的节点(勾选节点前的方框)。训练时,调试器会对这些监控节点的输出进行实时分析,一旦监控条件触发,训练暂停,用户可在UI上查看触发的条件断点信息。 + +![debugger_watch_point_hit](./images/debugger_watch_point_hit.png) + +图6: 查看触发的条件断点 + +图6展示了条件断点触发后的展示页面,该页面和`节点列表`所在位置相同。触发的节点以及监控条件会按照节点的执行序排列,用户点击某一行,会在计算图中跳转到对应节点,可以进一步查看节点信息分析INF等异常结果出现的原因。 + +### 训练控制 + +监测点设置面板的下方是训练控制面板,该面板展示了调试器的训练控制功能,有`继续`、`暂停`、`结束`、`确定`四个按钮。 +- `确定`代表训练向前执行若干个`轮次`,需要用户在上方的输入框内指定执行的`轮次`数目,直到条件断点触发、或`轮次`执行完毕后暂停; +- `继续`代表训练一直执行,直到条件断点触发后暂停、或运行至训练结束; +- `暂停`代表训练暂停; +- `结束`代表终止训练。 + +## 使用调试器进行调试 + +1. 在调试器环境准备完成后,打开调试器界面,如下图所示: + + ![debugger_waiting](./images/debugger_waiting.png) + + 图7: 调试器等待训练连接 + + 此时,调试器处于等待训练启动和连接的状态。 + +2. 运行训练脚本,稍后可以看到计算图显示在调试器界面,见图1。 + +3. 设置条件断点,见图5。 + + 图5中,选中检测条件,并勾选了部分节点,代表监控这些节点在计算过程是否存在满足监控条件的输出。 + 设置完条件断点后,可以在控制面板选择设置轮次点击`确定`或者`继续`继续训练。 + +4. 
条件断点触发,见图6。
+
+   条件断点触发后,用户查看对应的节点信息,找出异常原因后修改脚本,解决bug。
+
+## 注意事项
+
+- 使用调试器时,会对训练性能产生一定影响。
+- 一个调试服务目前只能够连接一个训练进程。
+- 调试器暂不支持分布式训练场景。
+- 调试器暂不支持多图场景。
+- 设置的监测点数目过多时,可能会出现系统内存不足(Out-of-Memory)的异常。
+- 在D芯片环境下,调试器暂时无法获取神经网络的初始化参数。
+- 在GPU场景下,只有满足条件的参数节点可以与自身的上一步结果作对比:使用`下一个节点`执行过的节点、使用`运行到该节点`时选中的节点、作为`监测点`输入的参数节点。其他情况均无法使用`上一步对比`功能。
diff --git a/tutorials/source_zh_cn/advanced_use/deep_probability_program.md b/tutorials/source_zh_cn/advanced_use/deep_probability_program.md
index 13e92b1452da668a0c9f9632c0bcbf307681ff9c..c3db5bcfda3d6dda551efe26ddcc390651957479 100644
--- a/tutorials/source_zh_cn/advanced_use/deep_probability_program.md
+++ b/tutorials/source_zh_cn/advanced_use/deep_probability_program.md
@@ -5,41 +5,52 @@
 
 - [深度概率编程](#深度概率编程)
     - [概述](#概述)
-    - [贝叶斯神经网络](#贝叶斯神经网络)
+    - [使用贝叶斯神经网络](#使用贝叶斯神经网络)
         - [处理数据集](#处理数据集)
+        - [定义贝叶斯神经网络](#定义贝叶斯神经网络)
         - [定义损失函数和优化器](#定义损失函数和优化器)
         - [训练网络](#训练网络)
-    - [变分推断](#变分推断)
+    - [使用变分自编码器](#使用变分自编码器)
         - [定义变分自编码器](#定义变分自编码器)
-        - [定义损失函数和优化器](#定义损失函数和优化器)
+        - [定义损失函数和优化器](#定义损失函数和优化器-1)
         - [处理数据](#处理数据)
-        - [训练网络](#训练网络)
-    - [DNN一键转换成BNN](#DNN一键转换成BNN)
-        - [定义DNN模型](#定义DNN模型)
-        - [定义损失函数和优化器](#定义损失函数和优化器)
-        - [功能一:转换整个模型](#功能一:转换整个模型)
-        - [功能二:转换指定类型的层](#功能二:转换指定类型的层)
-    - [不确定性估计](#不确定性估计)
+        - [训练网络](#训练网络-1)
+        - [生成新样本或重构输入样本](#生成新样本或重构输入样本)
+    - [DNN一键转换成BNN](#dnn一键转换成bnn)
+        - [定义DNN模型](#定义dnn模型)
+        - [定义损失函数和优化器](#定义损失函数和优化器-2)
+        - [实例化TransformToBNN](#实例化transformtobnn)
+        - [实现功能一:转换整个模型](#实现功能一转换整个模型)
+        - [实现功能二:转换指定类型的层](#实现功能二转换指定类型的层)
+    - [使用不确定性估计工具箱](#使用不确定性估计工具箱)
 
 
 
-## 概述
-MindSpore深度概率编程(MindSpore Deep Probabilistic Programming, MDP)的目标是将深度学习和贝叶斯学习结合,并能面向不同的开发者。具体来说,对于专业的贝叶斯学习用户,提供概率采样、推理算法和模型构建库;另一方面,为不熟悉贝叶斯深度学习的用户提供了高级的API,从而不用更改深度学习编程逻辑,即可利用贝叶斯模型。
-
-本章将详细介绍深度概率编程在MindSpore上的应用。
+
 
-### 贝叶斯神经网络
-本例子利用贝叶斯神经网络实现一个简单的图片分类功能,整体流程如下:
-1. 处理MNIST数据集。
-2. 定义贝叶斯LeNet网络。
-3. 定义损失函数和优化器。
+## 概述
+深度学习模型具有强大的拟合能力,而贝叶斯理论具有很好的可解释能力。MindSpore深度概率编程(MindSpore Deep Probabilistic Programming, MDP)将深度学习和贝叶斯学习结合,通过设置网络权重为分布、引入隐空间分布等,可以对分布进行采样前向传播,由此引入了不确定性,从而增强了模型的鲁棒性和可解释性。MDP不仅包含通用、专业的概率学习编程语言,适用于“专业”用户,而且支持使用开发深度学习模型的逻辑进行概率编程,让初学者轻松上手;此外,还提供深度概率学习的工具箱,拓展贝叶斯应用功能。
+
+本章将详细介绍深度概率编程在MindSpore上的应用。在动手进行实践之前,确保你已经正确安装了MindSpore 0.7.0-beta及其以上版本。本章的具体内容如下:
+1. 介绍如何使用[bnn_layers模块](https://gitee.com/mindspore/mindspore/tree/master/mindspore/nn/probability/bnn_layers)实现贝叶斯神经网络(Bayesian Neural Network, BNN);
+2. 介绍如何使用[variational模块](https://gitee.com/mindspore/mindspore/tree/master/mindspore/nn/probability/infer/variational)和[dpn模块](https://gitee.com/mindspore/mindspore/tree/master/mindspore/nn/probability/dpn)实现变分自编码器(Variational AutoEncoder, VAE);
+3. 介绍如何使用[transforms模块](https://gitee.com/mindspore/mindspore/tree/master/mindspore/nn/probability/transforms)实现DNN(Deep Neural Network, DNN)一键转BNN;
+4. 介绍如何使用[toolbox模块](https://gitee.com/mindspore/mindspore/blob/master/mindspore/nn/probability/toolbox/uncertainty_evaluation.py)实现不确定性估计。
+
+## 使用贝叶斯神经网络
+贝叶斯神经网络是由概率模型和神经网络组成的基本模型,它的权重不再是一个确定的值,而是一个分布。本例介绍了如何使用MDP中的bnn_layers模块实现贝叶斯神经网络,并利用贝叶斯神经网络实现一个简单的图片分类功能,整体流程如下:
+1. 处理MNIST数据集;
+2. 定义贝叶斯LeNet网络;
+3. 定义损失函数和优化器;
4. 
加载数据集并进行训练。 -#### 处理数据集 +> 本例面向GPU或Ascend 910 AI处理器平台,你可以在这里下载完整的样例代码: + +### 处理数据集 本例子使用的是MNIST数据集,数据处理过程与教程中的[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html)一致。 -#### 定义贝叶斯神经网络 -本例子使用的是贝叶斯LeNet。利用bnn_layers构建贝叶斯神经网络的方法与构建普通的神经网络相同。值得注意的是,bnn_layers和普通的神经网络层可以互相组合。 +### 定义贝叶斯神经网络 +本例使用的是Bayesian LeNet。利用bnn_layers模块构建贝叶斯神经网络的方法与构建普通的神经网络相同。值得注意的是,`bnn_layers`和普通的神经网络层可以互相组合。 ``` import mindspore.nn as nn @@ -87,10 +98,13 @@ class BNNLeNet5(nn.Cell): x = self.fc3(x) return x ``` -#### 定义损失函数和优化器 +### 定义损失函数和优化器 接下来需要定义损失函数(Loss)和优化器(Optimizer)。损失函数是深度学习的训练目标,也叫目标函数,可以理解为神经网络的输出(Logits)和标签(Labels)之间的距离,是一个标量数据。 + 常见的损失函数包括均方误差、L2损失、Hinge损失、交叉熵等等。图像分类应用通常采用交叉熵损失(CrossEntropy)。 -优化器用于神经网络求解(训练)。由于神经网络参数规模庞大,无法直接求解,因而深度学习中采用随机梯度下降算法(SGD)及其改进算法进行求解。MindSpore封装了常见的优化器,如SGD、Adam、Momemtum等等。本例采用Adam优化器,通常需要设定两个参数,学习率(learnin _rate)和权重衰减项(weight decay)。 + +优化器用于神经网络求解(训练)。由于神经网络参数规模庞大,无法直接求解,因而深度学习中采用随机梯度下降算法(SGD)及其改进算法进行求解。MindSpore封装了常见的优化器,如`SGD`、`Adam`、`Momemtum`等等。本例采用`Adam`优化器,通常需要设定两个参数,学习率(`learning_rate`)和权重衰减项(`weight_decay`)。 + MindSpore中定义损失函数和优化器的代码样例如下: ``` @@ -101,7 +115,7 @@ criterion = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") optimizer = AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0001) ``` -#### 训练网络 +### 训练网络 贝叶斯神经网络的训练过程与DNN基本相同,唯一不同的是将`WithLossCell`替换为适用于BNN的`WithBNNLossCell`。除了`backbone`和`loss_fn`两个参数之外,`WithBNNLossCell`增加了`dnn_factor`和`bnn_factor`两个参数。`dnn_factor`是由损失函数计算得到的网络整体损失的系数,`bnn_factor`是每个贝叶斯层的KL散度的系数,这两个参数是用来平衡网络整体损失和贝叶斯层的KL散度的,防止KL散度的值过大掩盖了网络整体损失。 ``` @@ -129,8 +143,8 @@ def train_model(train_net, net, dataset): accs = [] loss_sum = 0 for _, data in enumerate(dataset.create_dict_iterator()): - train_x = Tensor(data['image'].astype(np.float32)) - label = Tensor(data['label'].astype(np.int32)) + train_x = Tensor(data['image'].asnumpy().astype(np.float32)) + label = Tensor(data['label'].asnumpy().astype(np.int32)) loss = train_net(train_x, label) output = net(train_x) log_output = P.LogSoftmax(axis=1)(output) @@ -146,8 +160,8 @@ def train_model(train_net, net, dataset): def validate_model(net, dataset): accs = [] for _, data in enumerate(dataset.create_dict_iterator()): - train_x = Tensor(data['image'].astype(np.float32)) - label = Tensor(data['label'].astype(np.int32)) + train_x = Tensor(data['image'].asnumpy().astype(np.float32)) + label = Tensor(data['label'].asnumpy().astype(np.int32)) output = net(train_x) log_output = P.LogSoftmax(axis=1)(output) acc = np.mean(log_output.asnumpy().argmax(axis=1) == label.asnumpy()) @@ -157,9 +171,16 @@ def validate_model(net, dataset): return acc_mean ``` -### 变分推断 -#### 定义变分自编码器 -我们只需要自定义编码器和解码器,编码器和解码器都是神经网络。 +## 使用变分自编码器 +接下来介绍如何使用MDP中的variational模块和dpn模块实现变分自编码器。变分自编码器是经典的应用了变分推断的深度概率模型,用来学习潜在变量的表示,通过该模型,不仅可以压缩输入数据,还可以生成该类型的新图像。本例的整体流程如下: +1. 定义变分自编码器; +2. 定义损失函数和优化器; +3. 处理数据; +4. 训练网络; +5. 
生成新样本或重构输入样本。 +> 本例面向GPU或Ascend 910 AI处理器平台,你可以在这里下载完整的样例代码: +### 定义变分自编码器 +使用dpn模块来构造变分自编码器尤为简单,你只需要自定义编码器和解码器(DNN模型),调用`VAE`接口即可。 ``` class Encoder(nn.Cell): @@ -198,7 +219,7 @@ decoder = Decoder() vae = VAE(encoder, decoder, hidden_size=400, latent_size=20) ``` ### 定义损失函数和优化器 -接下来需要定义损失函数(Loss)和优化器(Optimizer)。本例使用的损失函数是ELBO,ELBO是变分推断专用的损失函数;本例使用的优化器是Adam。 +接下来需要定义损失函数(Loss)和优化器(Optimizer)。本例使用的损失函数是`ELBO`,`ELBO`是变分推断专用的损失函数;本例使用的优化器是`Adam`。 MindSpore中定义损失函数和优化器的代码样例如下: ``` @@ -210,11 +231,11 @@ optimizer = nn.Adam(params=vae.trainable_params(), learning_rate=0.001) net_with_loss = nn.WithLossCell(vae, net_loss) ``` -#### 处理数据 +### 处理数据 本例使用的是MNIST数据集,数据处理过程与教程中的[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html)一致。 -#### 训练网络 -使用`SVI`接口对VAE网络进行训练。 +### 训练网络 +使用variational模块中的`SVI`接口对VAE网络进行训练。 ``` from mindspore.nn.probability.infer import SVI @@ -224,20 +245,25 @@ vae = vi.run(train_dataset=ds_train, epochs=10) trained_loss = vi.get_train_loss() ``` 通过`vi.run`可以得到训练好的网络,使用`vi.get_train_loss`可以得到训练之后的损失。 -#### 生成新样本或重构输入样本 +### 生成新样本或重构输入样本 利用训练好的VAE网络,我们可以生成新的样本或重构输入样本。 ``` IMAGE_SHAPE = (-1, 1, 32, 32) generated_sample = vae.generate_sample(64, IMAGE_SHAPE) for sample in ds_train.create_dict_iterator(): - sample_x = Tensor(sample['image'], dtype=mstype.float32) + sample_x = Tensor(sample['image'].asnumpy(), dtype=mstype.float32) reconstructed_sample = vae.reconstruct_sample(sample_x) ``` -### DNN一键转换成BNN -对于不熟悉贝叶斯模型的DNN研究人员,MDP提供了高级API`TransformToBNN`,支持DNN模型一键转换成BNN模型。 -#### 定义DNN模型 +## DNN一键转换成BNN +对于不熟悉贝叶斯模型的DNN研究人员,MDP提供了高级API`TransformToBNN`,支持DNN模型一键转换成BNN模型。本例将会介绍如何使用transforms模块中的`TransformToBNN`API实现DNN一键转换成BNN,整体流程如下: +1. 定义DNN模型; +2. 定义损失函数和优化器; +3. 实现功能一:转换整个模型; +4. 实现功能二:转换指定类型的层。 +> 本例面向GPU或Ascend 910 AI处理器平台,你可以在这里下载完整的样例代码: +### 定义DNN模型 本例使用的DNN模型是LeNet。 ``` @@ -306,8 +332,22 @@ class LeNet5(nn.Cell): x = self.fc3(x) return x ``` -#### 定义损失函数和优化器 -接下来需要定义损失函数(Loss)和优化器(Optimizer)。本例使用交叉熵损失作为损失函数,Adam作为优化器。 +LeNet的网络结构如下: + +``` +LeNet5 + (conv1) Conv2dinput_channels=1, output_channels=6, kernel_size=(5, 5),stride=(1, 1), pad_mode=valid, padding=0, dilation=(1, 1), group=1, has_bias=False + (conv2) Conv2dinput_channels=6, output_channels=16, kernel_size=(5, 5),stride=(1, 1), pad_mode=valid, padding=0, dilation=(1, 1), group=1, has_bias=False + (fc1) Densein_channels=400, out_channels=120, weight=Parameter (name=fc1.weight), has_bias=True, bias=Parameter (name=fc1.bias) + (fc2) Densein_channels=120, out_channels=84, weight=Parameter (name=fc2.weight), has_bias=True, bias=Parameter (name=fc2.bias) + (fc3) Densein_channels=84, out_channels=10, weight=Parameter (name=fc3.weight), has_bias=True, bias=Parameter (name=fc3.bias) + (relu) ReLU + (max_pool2d) MaxPool2dkernel_size=2, stride=2, pad_mode=VALID + (flatten) Flatten +``` + +### 定义损失函数和优化器 +接下来需要定义损失函数(Loss)和优化器(Optimizer)。本例使用交叉熵损失作为损失函数,`Adam`作为优化器。 ``` network = LeNet5() @@ -321,7 +361,7 @@ optimizer = AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0 net_with_loss = WithLossCell(network, criterion) train_network = TrainOneStepCell(net_with_loss, optimizer) ``` -#### 实例化TransformToBNN +### 实例化TransformToBNN `TransformToBNN`的`__init__`函数定义如下: ``` @@ -343,7 +383,7 @@ from mindspore.nn.probability import transforms bnn_transformer = transforms.TransformToBNN(train_network, 60000, 0.000001) ``` -#### 功能一:转换整个模型 +### 实现功能一:转换整个模型 `transform_to_bnn_model`方法可以将整个DNN模型转换为BNN模型。其定义如下: ``` @@ -374,12 +414,86 @@ bnn_transformer = transforms.TransformToBNN(train_network, 
60000, 0.000001) """ ``` 参数`get_dense_args`指定从DNN模型的全连接层中获取哪些参数,`get_conv_args`指定从DNN模型的卷积层中获取哪些参数,参数`add_dense_args`和`add_conv_args`分别指定了要为BNN层指定哪些新的参数值。需要注意的是,`add_dense_args`中的参数不能与`get_dense_args`重复,`add_conv_args`和`get_conv_args`也是如此。 + 在MindSpore中将整个DNN模型转换成BNN模型的代码如下: ``` train_bnn_network = bnn_transformer.transform_to_bnn_model() ``` -#### 功能二:转换指定类型的层 +整个模型转换后的结构如下: + +``` +LeNet5 + (conv1) ConvReparam + in_channels=1, out_channels=6, kernel_size=(5, 5), stride=(1, 1), pad_mode=valid, padding=0, dilation=(1, 1), group=1, weight_mean=Parameter (name=conv1.weight_posterior.mean), weight_std=Parameter (name=conv1.weight_posterior.untransformed_std), has_bias=False + (weight_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (weight_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + + (conv2) ConvReparam + in_channels=6, out_channels=16, kernel_size=(5, 5), stride=(1, 1), pad_mode=valid, padding=0, dilation=(1, 1), group=1, weight_mean=Parameter (name=conv2.weight_posterior.mean), weight_std=Parameter (name=conv2.weight_posterior.untransformed_std), has_bias=False + (weight_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (weight_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + + (fc1) DenseReparam + in_channels=400, out_channels=120, weight_mean=Parameter (name=fc1.weight_posterior.mean), weight_std=Parameter (name=fc1.weight_posterior.untransformed_std), has_bias=True, bias_mean=Parameter (name=fc1.bias_posterior.mean), bias_std=Parameter (name=fc1.bias_posterior.untransformed_std) + (weight_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (weight_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + (bias_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (bias_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + + (fc2) DenseReparam + in_channels=120, out_channels=84, weight_mean=Parameter (name=fc2.weight_posterior.mean), weight_std=Parameter (name=fc2.weight_posterior.untransformed_std), has_bias=True, bias_mean=Parameter (name=fc2.bias_posterior.mean), bias_std=Parameter (name=fc2.bias_posterior.untransformed_std) + (weight_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (weight_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + (bias_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (bias_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + + (fc3) DenseReparam + in_channels=84, out_channels=10, weight_mean=Parameter (name=fc3.weight_posterior.mean), weight_std=Parameter (name=fc3.weight_posterior.untransformed_std), has_bias=True, bias_mean=Parameter (name=fc3.bias_posterior.mean), bias_std=Parameter (name=fc3.bias_posterior.untransformed_std) + (weight_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (weight_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + (bias_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (bias_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + + (relu) ReLU + (max_pool2d) MaxPool2dkernel_size=2, stride=2, pad_mode=VALID + (flatten) Flatten +``` +可以看到,整个LeNet网络中的卷积层和全连接层都转变成了相应的贝叶斯层。 + +### 实现功能二:转换指定类型的层 `transform_to_bnn_layer`方法可以将DNN模型中指定类型的层(nn.Dense或者nn.Conv2d)转换为对应的贝叶斯层。其定义如下: ``` @@ -401,9 +515,77 @@ train_bnn_network = bnn_transformer.transform_to_bnn_model() ``` 
参数`dnn_layer`指定将哪个类型的DNN层转换成BNN层,`bnn_layer`指定DNN层将转换成哪个类型的BNN层,`get_args`和`add_args`分别指定从DNN层中获取哪些参数和要为BNN层的哪些参数重新赋值。 -### 不确定性估计 -不确定性估计工具箱基于MindSpore Deep probability Programming (MDP),适用于主流的深度学习模型,如回归、分类、目标检测等。在推理阶段,利用不确定性估计工具箱,开发人员只需通过训练模型和训练数据集,指定需要估计的任务和样本,即可得到任意不确定性(aleatoric uncertainty)和认知不确定性(epistemic uncertainty)。基于不确定性信息,开发人员可以更好地理解模型和数据集。 -以分类任务为例,本例中使用的模型是LeNet,数据集为MNist,数据处理过程与教程中的[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html)一致。为了评估测试示例的不确定性,使用工具箱的方法如下: +在MindSpore中将DNN模型中的Dense层转换成相应贝叶斯层`DenseReparam`的代码如下: + +``` +train_bnn_network = bnn_transformer.transform_to_bnn_layer(nn.Dense, bnn_layers.DenseReparam) +``` +转换后网络的结构如下: + +``` +LeNet5 + (conv1) Conv2dinput_channels=1, output_channels=6, kernel_size=(5, 5),stride=(1, 1), pad_mode=valid, padding=0, dilation=(1, 1), group=1, has_bias=False + (conv2) Conv2dinput_channels=6, output_channels=16, kernel_size=(5, 5),stride=(1, 1), pad_mode=valid, padding=0, dilation=(1, 1), group=1, has_bias=False + (fc1) DenseReparam + in_channels=400, out_channels=120, weight_mean=Parameter (name=fc1.weight_posterior.mean), weight_std=Parameter (name=fc1.weight_posterior.untransformed_std), has_bias=True, bias_mean=Parameter (name=fc1.bias_posterior.mean), bias_std=Parameter (name=fc1.bias_posterior.untransformed_std) + (weight_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (weight_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + (bias_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (bias_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + + (fc2) DenseReparam + in_channels=120, out_channels=84, weight_mean=Parameter (name=fc2.weight_posterior.mean), weight_std=Parameter (name=fc2.weight_posterior.untransformed_std), has_bias=True, bias_mean=Parameter (name=fc2.bias_posterior.mean), bias_std=Parameter (name=fc2.bias_posterior.untransformed_std) + (weight_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (weight_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + (bias_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (bias_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + + (fc3) DenseReparam + in_channels=84, out_channels=10, weight_mean=Parameter (name=fc3.weight_posterior.mean), weight_std=Parameter (name=fc3.weight_posterior.untransformed_std), has_bias=True, bias_mean=Parameter (name=fc3.bias_posterior.mean), bias_std=Parameter (name=fc3.bias_posterior.untransformed_std) + (weight_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (weight_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + (bias_prior) NormalPrior + (normal) Normalmean = 0.0, standard deviation = 0.1 + + (bias_posterior) NormalPosterior + (normal) Normalbatch_shape = None + + + (relu) ReLU + (max_pool2d) MaxPool2dkernel_size=2, stride=2, pad_mode=VALID + (flatten) Flatten +``` +可以看到,LeNet网络中的卷积层保持不变,全连接层变成了对应的贝叶斯层`DenseReparam`。 + +## 使用不确定性估计工具箱 +贝叶斯神经网络的优势之一就是可以获取不确定性,MDP在上层提供了不确定性估计的工具箱,用户可以很方便地使用该工具箱计算不确定性。不确定性意味着深度学习模型对预测结果的不确定程度。目前,大多数深度学习算法只能给出预测结果,而不能判断预测结果的可靠性。不确定性主要有两种类型:偶然不确定性和认知不确定性。 +- 偶然不确定性(Aleatoric Uncertainty):描述数据中的内在噪声,即无法避免的误差,这个现象不能通过增加采样数据来削弱。 +- 认知不确定性(Epistemic Uncertainty):模型自身对输入数据的估计可能因为训练不佳、训练数据不够等原因而不准确,可以通过增加训练数据等方式来缓解。 + +不确定性估计工具箱,适用于主流的深度学习模型,如回归、分类等。在推理阶段,利用不确定性估计工具箱,开发人员只需通过训练模型和训练数据集,指定需要估计的任务和样本,即可得到任意不确定性和认知不确定性。基于不确定性信息,开发人员可以更好地理解模型和数据集。 +> 本例面向GPU或Ascend 910 
AI处理器平台,你可以在这里下载完整的样例代码: + +以分类任务为例,本例中使用的模型是LeNet,数据集为MNIST,数据处理过程与教程中的[实现一个图片分类应用](https://www.mindspore.cn/tutorial/zh-CN/master/quick_start/quick_start.html)一致。为了评估测试示例的不确定性,使用工具箱的方法如下: ``` from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation @@ -424,9 +606,8 @@ evaluation = UncertaintyEvaluation(model=network, ale_uncer_model_path=None, save_model=False) for eval_data in ds_eval.create_dict_iterator(): - eval_data = Tensor(eval_data['image'], mstype.float32) + eval_data = Tensor(eval_data['image'].asnumpy(), mstype.float32) epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data) aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data) ``` - diff --git a/tutorials/source_zh_cn/advanced_use/differential_privacy.md b/tutorials/source_zh_cn/advanced_use/differential_privacy.md index 6813d7f001952205689e75654ab29a043128747d..5769737bbb8b3fcdbf3387398d9e84b93cefd71c 100644 --- a/tutorials/source_zh_cn/advanced_use/differential_privacy.md +++ b/tutorials/source_zh_cn/advanced_use/differential_privacy.md @@ -45,7 +45,7 @@ MindArmour的差分隐私模块Differential-Privacy,实现了差分隐私优 这里以LeNet模型,MNIST 数据集为例,说明如何在MindSpore上使用差分隐私优化器训练神经网络模型。 -> 本例面向Ascend 910 AI处理器,你可以在这里下载完整的样例代码: +> 本例面向Ascend 910 AI处理器,你可以在这里下载完整的样例代码: ## 实现阶段 @@ -67,16 +67,14 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net import mindspore.dataset as ds import mindspore.dataset.vision.c_transforms as CV import mindspore.dataset.transforms.c_transforms as C -from mindspore.dataset.vision.import Inter +from mindspore.dataset.vision import Inter import mindspore.common.dtype as mstype -from mindarmour.diff_privacy import DPModel -from mindarmour.diff_privacy import NoiseMechanismsFactory -from mindarmour.diff_privacy import ClipMechanismsFactory -from mindarmour.diff_privacy import PrivacyMonitorFactory +from mindarmour.privacy.diff_privacy import DPModel +from mindarmour.privacy.diff_privacy import NoiseMechanismsFactory +from mindarmour.privacy.diff_privacy import ClipMechanismsFactory +from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory from mindarmour.utils.logger import LogUtil -from lenet5_net import LeNet5 -from lenet5_config import mnist_cfg as cfg LOGGER = LogUtil.get_instance() LOGGER.set_level('INFO') @@ -85,7 +83,7 @@ TAG = 'Lenet5_train' ### 参数配置 -1. 设置运行环境、数据集路径、模型训练参数、checkpoint存储参数、差分隐私参数,`data_path`数据路径替换成你的数据集所在路径。更多配置可以参考。 +1. 
设置运行环境、数据集路径、模型训练参数、checkpoint存储参数、差分隐私参数,`data_path`数据路径替换成你的数据集所在路径。更多配置可以参考。 ```python cfg = edict({ @@ -99,7 +97,7 @@ TAG = 'Lenet5_train' 'save_checkpoint_steps': 234, # the interval steps for saving checkpoint file of the model 'keep_checkpoint_max': 10, # the maximum number of checkpoint files would be saved 'device_target': 'Ascend', # device used - 'data_path': './MNIST_unzip', # the path of training and testing data set + 'data_path': '../../common/dataset/MNIST', # the path of training and testing data set 'dataset_sink_mode': False, # whether deliver all training data to device one time 'micro_batches': 32, # the number of small batches split from an original batch 'norm_bound': 1.0, # the clip bound of the gradients of model's training parameters @@ -151,7 +149,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1, # apply map operations on images if not sparse: one_hot_enco = C.OneHot(10) - ds1 = ds1.map(input_columns="label", operations=one_hot_enco, + ds1 = ds1.map(operations=one_hot_enco, input_columns="label", num_parallel_workers=num_parallel_workers) type_cast_op = C.TypeCast(mstype.float32) ds1 = ds1.map(operations=type_cast_op, input_columns="label", diff --git a/tutorials/source_zh_cn/advanced_use/distributed_training_ascend.md b/tutorials/source_zh_cn/advanced_use/distributed_training_ascend.md index 570a8ea4c42d2d9dc260f2d4afe0cbb491424f4a..064ac129a809bca1e2305566d3ac94b03ea81d8c 100644 --- a/tutorials/source_zh_cn/advanced_use/distributed_training_ascend.md +++ b/tutorials/source_zh_cn/advanced_use/distributed_training_ascend.md @@ -17,6 +17,11 @@ - [定义优化器](#定义优化器) - [训练网络](#训练网络) - [运行脚本](#运行脚本) + - [分布式训练模型参数保存和加载](#分布式训练模型参数保存和加载) + - [自动并行模式](#自动并行模式) + - [数据并行模式](#数据并行模式) + - [半自动并行模式](#半自动并行模式) + - [手动混合并行模式](#手动混合并行模式) @@ -120,12 +125,12 @@ def create_dataset(data_path, repeat_num=1, batch_size=32, rank_id=0, rank_size= resize_width = 224 rescale = 1.0 / 255.0 shift = 0.0 - + # get rank_id and rank_size rank_id = get_rank() rank_size = get_group_size() data_set = ds.Cifar10Dataset(data_path, num_shards=rank_size, shard_id=rank_id) - + # define map operations random_crop_op = vision.RandomCrop((32, 32), (4, 4, 4, 4)) random_horizontal_op = vision.RandomHorizontalFlip() @@ -139,7 +144,7 @@ def create_dataset(data_path, repeat_num=1, batch_size=32, rank_id=0, rank_size= c_trans += [resize_op, rescale_op, normalize_op, changeswap_op] # apply map operations on images - data_set = data_set.map(operations=type_cast_op, operations=type_cast_op, input_columns="label") + data_set = data_set.map(operations=type_cast_op, input_columns="label") data_set = data_set.map(operations=c_trans, input_columns="image") # apply shuffle operations @@ -193,7 +198,7 @@ class SoftmaxCrossEntropyExpand(nn.Cell): self.sparse = sparse self.max = P.ReduceMax(keep_dims=True) self.sub = P.Sub() - + def construct(self, logit, label): logit_max = self.max(logit, -1) exp = self.exp(self.sub(logit, logit_max)) @@ -338,3 +343,190 @@ epoch: 8 step: 156, loss is 1.2943741 epoch: 9 step: 156, loss is 1.2316195 epoch: 10 step: 156, loss is 1.1533381 ``` + +## 分布式训练模型参数保存和加载 + +在MindSpore中,支持四种分布式并行训练模式,即自动并行模式(Auto Parallel)、数据并行模式(Data Parallel)、半自动并行模式(Semi Auto Parallel)、手动混合并行模式(Hybrid Parallel),下面分别介绍四种分布式并行训练模式下模型的保存和加载。分布式训练进行模型参数的保存之前,需要先按照本教程配置分布式环境变量和集合通信库。 + +### 自动并行模式 + +自动并行模式(Auto Parallel)下模型参数的保存和加载非常方便,只需在本教程训练网络步骤中的`test_train_cifar`方法中添加配置`CheckpointConfig`和`ModelCheckpoint`,即可实现模型参数的保存,具体代码如下: + +```python +def test_train_cifar(epoch_size=10): + 
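+    # Note: create_dataset, resnet50 and SoftmaxCrossEntropyExpand below are the helpers
+    # defined earlier in this tutorial; data_path is assumed to point to the CIFAR-10 dataset.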
context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL, gradients_mean=True)
+    loss_cb = LossMonitor()
+    dataset = create_dataset(data_path)
+    batch_size = 32
+    num_classes = 10
+    net = resnet50(batch_size, num_classes)
+    loss = SoftmaxCrossEntropyExpand(sparse=True)
+    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
+    save_path = '...'
+    ckpt_config = CheckpointConfig()
+    ckpt_callback = ModelCheckpoint(prefix='auto_parallel', directory=save_path, config=ckpt_config)
+    model = Model(net, loss_fn=loss, optimizer=opt)
+    model.train(epoch_size, dataset, callbacks=[loss_cb, ckpt_callback], dataset_sink_mode=True)
+```
+
+保存好checkpoint文件后,用户可以很方便地加载模型参数进行推理或再训练,如再训练场景可使用如下代码:
+
+```python
+net = Net()
+param_dict = load_checkpoint(save_path)
+load_param_into_net(net, param_dict)
+```
+
+checkpoint配置策略和保存方法可以参考[模型参数的保存和加载](https://www.mindspore.cn/tutorial/zh-CN/master/use/saving_and_loading_model_parameters.html#checkpoint)。
+
+### 数据并行模式
+
+数据并行模式(Data Parallel)下checkpoint的使用方法如下,首先定义一个网络模型:
+
+```python
+from mindspore.train import Model
+from mindspore import context
+from mindspore.nn import Momentum, Cell, Flatten, ReLU
+from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor
+from mindspore.communication.management import get_rank
+from mindspore.common.parameter import Parameter
+from mindspore import Tensor
+import mindspore.ops.operations as P
+import numpy as np
+# define network
+class DataParallelNet(Cell):
+    def __init__(self, test_size, transpose_a=False, transpose_b=False, strategy=None, layerwise_parallel=True):
+        super().__init__()
+        weight_np = np.full(test_size, 0.1, dtype=np.float32)
+        self.weight = Parameter(Tensor(weight_np), name="fc_weight", layerwise_parallel=layerwise_parallel)
+        self.relu = ReLU()
+        self.fc = P.MatMul(transpose_a=transpose_a, transpose_b=transpose_b)
+        if strategy is not None:
+            self.fc.shard(strategy)
+
+    def construct(self, inputs, label):
+        x = self.relu(inputs)
+        x = self.fc(x, self.weight)
+        return x
+```
+
+假设在一台8P机器上使用数据并行模式进行训练和保存模型,首先需要获取数据,设置并行策略和并行模式,代码如下:
+
+```python
+# create data sets
+parallel_dataset = CreateData()
+# set parallel strategy
+strategy = ((1, 1), (1, 8))
+# create network model
+net = DataParallelNet(strategy=strategy)
+# reset parallel mode
+context.reset_auto_parallel_context()
+# set parallel mode, data parallel mode is selected for training and model saving. If you want to choose auto parallel
+# mode, you can simply change the value of parallel_mode parameter to ParallelMode.AUTO_PARALLEL.
+context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, device_num=8)
+```
+
+然后根据需要设置checkpoint保存策略,以及设置优化器和损失函数等,代码如下:
+
+```python
+# config checkpoint
+ckpt_config = CheckpointConfig(keep_checkpoint_max=1)
+# define checkpoint save path
+ckpt_path = './rank_{}_ckpt'.format(get_rank())
+# create a ModelCheckpoint object
+ckpt_callback = ModelCheckpoint(prefix='data_parallel', directory=ckpt_path, config=ckpt_config)
+# set optimizer and loss function
+opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
+loss = SoftmaxCrossEntropyExpand()
+model = Model(net, loss_fn=loss, optimizer=opt)
+# After training, the system will automatically save the checkpoint file.
+model.train(train_dataset=parallel_dataset, callbacks=[ckpt_callback])
+# After training, reset the parallel mode to avoid unnecessary trouble when retraining.
+context.reset_auto_parallel_context()
+```
+
+保存好checkpoint文件后,用户同样可以使用`load_checkpoint`、`load_param_into_net`来加载模型参数。
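+例如,下面是一段示意代码(`net`沿用上文定义的网络;假设每张卡保存的checkpoint文件名形如`data_parallel-10_156.ckpt`,实际名称由`ModelCheckpoint`的`prefix`、epoch数和step数决定),演示再训练前如何从本卡目录中加载最新的checkpoint:
+
+```python
+import os
+from mindspore.communication.management import get_rank
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+
+# The per-rank checkpoint directory, consistent with the save path used above.
+ckpt_dir = './rank_{}_ckpt'.format(get_rank())
+# Pick the most recently written checkpoint file in this rank's directory.
+ckpt_files = [f for f in os.listdir(ckpt_dir) if f.endswith('.ckpt')]
+latest_ckpt = max(ckpt_files, key=lambda f: os.path.getmtime(os.path.join(ckpt_dir, f)))
+# Load the parameters back into the network for retraining or inference.
+param_dict = load_checkpoint(os.path.join(ckpt_dir, latest_ckpt))
+load_param_into_net(net, param_dict)
+```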
+
+### 半自动并行模式
+
+半自动并行模式(Semi Auto Parallel)下checkpoint使用方法的完整流程,同样从定义一个网络模型开始:
+
+```python
+class SemiAutoParallelNet(Cell):
+    def __init__(self, mul_size, test_size, strategy=None, strategy2=None):
+        super().__init__()
+        mul_np = np.full(mul_size, 0.5, dtype=np.float32)
+        equal_np = np.full(test_size, 0.1, dtype=np.float32)
+        self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
+        self.equal_weight = Parameter(Tensor(equal_np), name="equal_weight")
+        self.mul = P.Mul()
+        self.equal = P.Equal()
+        if strategy is not None:
+            self.mul.shard(strategy)
+            self.equal.shard(strategy2)
+
+    def construct(self, inputs, label):
+        x = self.mul(inputs, self.mul_weight)
+        x = self.equal(x, self.equal_weight)
+        return x
+```
+
+假设半自动并行模式也是在一台8P机器上进行训练和保存模型。获取数据,设置并行策略和并行模式的代码如下:
+
+```python
+# create data sets
+parallel_dataset = CreateData()
+# set parallel strategy
+strategy = ((1, 1), (1, 8))
+# create network model
+net = SemiAutoParallelNet(strategy=strategy, strategy2=strategy)
+# reset parallel mode
+context.reset_auto_parallel_context()
+# set parallel mode, semi auto parallel mode is selected for training and model saving. If you want to choose auto
+# parallel mode, you can simply change the value of parallel_mode parameter to ParallelMode.AUTO_PARALLEL.
+context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
+                                  strategy_ckpt_save_file='./rank_{}_ckpt/strategy.txt'.format(get_rank()))
+```
+
+然后根据需要设置checkpoint保存策略,以及设置优化器和损失函数等,代码如下:
+
+```python
+# config checkpoint
+ckpt_config = CheckpointConfig(keep_checkpoint_max=1)
+# define checkpoint save path
+ckpt_path = './rank_{}_ckpt'.format(get_rank())
+# create a ModelCheckpoint object
+ckpt_callback = ModelCheckpoint(prefix='semi_auto_parallel', directory=ckpt_path, config=ckpt_config)
+# set optimizer and loss function
+opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
+loss = SoftmaxCrossEntropyExpand()
+model = Model(net, loss_fn=loss, optimizer=opt)
+# After you've trained your network, the system will automatically save the checkpoint file.
+model.train(train_dataset=parallel_dataset, callbacks=[ckpt_callback])
+# After training, reset the parallel mode to avoid unnecessary trouble when retraining.
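+# Note: under SEMI_AUTO_PARALLEL the sharding strategy is also written to the file given by
+# strategy_ckpt_save_file above, and can be reused when restoring the sliced checkpoints.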
+context.reset_auto_parallel_context()
+```
+
+保存好checkpoint文件后,用户同样可以使用`load_checkpoint`、`load_param_into_net`来加载模型参数。
+
+以上介绍的三种并行训练模式,默认每张卡上均保存完整的checkpoint文件;用户还可以选择每张卡上只保存本卡的checkpoint文件,下面以半自动并行模式(Semi Auto Parallel)为例进行说明。
+
+只需要改动设置checkpoint保存策略的代码,将`CheckpointConfig`中的`integrated_save`参数设置为`False`,便可实现每张卡上只保存本卡的checkpoint文件,具体改动如下:
+
+将checkpoint配置策略由
+```python
+# config checkpoint
+ckpt_config = CheckpointConfig(keep_checkpoint_max=1)
+```
+
+改为
+```python
+# config checkpoint
+ckpt_config = CheckpointConfig(keep_checkpoint_max=1, integrated_save=False)
+```
+
+需要注意的是,如果用户选择了这种checkpoint保存方式,那么就需要用户自己对切分的checkpoint进行保存和加载,以便进行后续的推理或再训练。具体用法可参考[对保存的checkpoint文件做合并处理](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/checkpoint_for_hybrid_parallel.html#checkpoint)。
+
+### 手动混合并行模式
+
+手动混合并行模式(Hybrid Parallel)的模型参数保存和加载请参考[手动设置并行场景模型参数的保存和加载](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/checkpoint_for_hybrid_parallel.html)。
\ No newline at end of file
diff --git a/tutorials/source_zh_cn/advanced_use/distributed_training_gpu.md b/tutorials/source_zh_cn/advanced_use/distributed_training_gpu.md
index 13cb619328c7d2121b9d557e9b4294647922734f..d0888857a354c1d82c11dc8fa7d67beaa9c3160f 100644
--- a/tutorials/source_zh_cn/advanced_use/distributed_training_gpu.md
+++ b/tutorials/source_zh_cn/advanced_use/distributed_training_gpu.md
@@ -10,7 +10,6 @@
     - [下载数据集](#下载数据集)
     - [配置分布式环境](#配置分布式环境)
     - [调用集合通信库](#调用集合通信库)
-    - [数据并行模式加载数据集](#数据并行模式加载数据集)
     - [定义网络](#定义网络)
     - [运行脚本](#运行脚本)
    - [运行多机脚本](#运行多机脚本)
@@ -146,3 +145,4 @@ echo "start training"
 mpirun -n 16 --hostfile $HOSTFILE -x DATA_PATH=$DATA_PATH -x PATH -mca pml ob1 pytest -s -v ./resnet50_distributed_training.py > train.log 2>&1 &
 ```
+在GPU上进行分布式训练时,模型参数的保存和加载可参考[分布式训练模型参数保存和加载](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/distributed_training_ascend.html#id12)。
\ No newline at end of file
diff --git a/tutorials/source_zh_cn/advanced_use/fuzzer.md b/tutorials/source_zh_cn/advanced_use/fuzzer.md
index 516b9a7467383ff7e36ea4f712dcb30cf495bcd0..af801ee1da5991d900e2ad6f6266c94d5b8ee06a 100644
--- a/tutorials/source_zh_cn/advanced_use/fuzzer.md
+++ b/tutorials/source_zh_cn/advanced_use/fuzzer.md
@@ -9,7 +9,7 @@
 - [实现阶段](#实现阶段)
         - [导入需要的库文件](#引入相关包)
         - [参数配置](#参数配置)
-        - [运用Fuzzer](#运用Fuzzer)
+        - [运用Fuzz Testing](#运用fuzz-testing)
 
   &nbsp;&nbsp;
 
@@ -18,33 +18,33 @@
 
 传统软件的决策逻辑由代码逻辑决定,传统软件通过代码行覆盖率来判断当前测试是否充分,理想情况下覆盖率越高,代码测试越充分。然而,对于深度神经网络而言,程序的决策逻辑由训练数据、网络模型结构和参数通过某种黑盒机制决定,代码行覆盖率已不足以评估测试的充分性。需要根据深度网络的特点选择更为适合的测试评价准则,指导神经网络进行更为充分的测试,发现更多的边缘错误用例,从而确保模型的通用性、鲁棒性。
 
-MindArmour的Fuzzer模块以神经元覆盖率作为测试评价准则。神经元覆盖率,是指通过一组输入观察到的、激活的神经元数量和神经元输出值的范围。我们通过神经元覆盖率来指导输入变异,让输入能够激活更多的神经元,神经元值的分布范围更广,从而探索不同类型的模型输出结果、错误行为。
+MindArmour的fuzz_testing模块以神经元覆盖率作为测试评价准则。神经元覆盖率,是指通过一组输入观察到的、激活的神经元数量和神经元输出值的范围。我们通过神经元覆盖率来指导输入变异,让输入能够激活更多的神经元,神经元值的分布范围更广,从而探索不同类型的模型输出结果、错误行为。
 
 这里以LeNet模型,MNIST数据集为例,说明如何使用Fuzzer。
 
-> 本例面向CPU、GPU、Ascend 910 AI处理器,你可以在这里下载完整的样例代码:
+> 本例面向CPU、GPU、Ascend 910 AI处理器,你可以在这里下载完整的样例代码:
 
 ## 实现阶段
 
 ### 导入需要的库文件
 
-下列是我们需要的公共模块、MindSpore相关模块和Fuzzer特性模块,以及配置日志标签和日志等级。
+下列是我们需要的公共模块、MindSpore相关模块和fuzz_testing特性模块,以及配置日志标签和日志等级。
 
 ```python
-import sys
-
 import numpy as np
 from mindspore import Model
 from mindspore import context
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 
-from lenet5_net import LeNet5
-from mindarmour.fuzzing.fuzzing import Fuzzer
-from mindarmour.fuzzing.model_coverage_metrics import ModelCoverageMetrics
+from mindarmour.fuzz_testing import Fuzzer
+from 
mindarmour.fuzz_testing import ModelCoverageMetrics from mindarmour.utils.logger import LogUtil +from examples.common.dataset.data_processing import generate_mnist_dataset +from examples.common.networks.lenet5.lenet5_net import LeNet5 + LOGGER = LogUtil.get_instance() -TAG = 'Fuzz_test' +TAG = 'Fuzz_testing' LOGGER.set_level('INFO') ``` @@ -53,12 +53,12 @@ LOGGER.set_level('INFO') 配置必要的信息,包括环境信息、执行的模式。 ```python -context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target) +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") ``` 详细的接口配置信息,请参见`context.set_context`接口说明。 -### 运用Fuzzer +### 运用Fuzz Testing 1. 建立LeNet模型,加载MNIST数据集,操作同[模型安全]() @@ -67,24 +67,24 @@ context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target) # Lenet model model = Model(net) # get training data - data_list = "./MNIST_unzip/train" + data_list = "../common/dataset/MNIST/train" batch_size = 32 ds = generate_mnist_dataset(data_list, batch_size, sparse=False) train_images = [] for data in ds.create_tuple_iterator(): - images = data[0].astype(np.float32) + images = data[0].asnumpy().astype(np.float32) train_images.append(images) - train_images = np.concatenate(train_images, axis=0) + train_images = np.concatenate(train_images, axis=0) # get test data - data_list = "./MNIST_unzip/test" + data_list = "../common/dataset/MNIST/test" batch_size = 32 ds = generate_mnist_dataset(data_list, batch_size, sparse=False) test_images = [] test_labels = [] for data in ds.create_tuple_iterator(): - images = data[0].astype(np.float32) - labels = data[1] + images = data[0].asnumpy().astype(np.float32) + labels = data[1].asnumpy() test_images.append(images) test_labels.append(labels) test_images = np.concatenate(test_images, axis=0) @@ -93,36 +93,40 @@ context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target) 2. Fuzzer参数配置。 - 设置数据变异方法及参数。目前支持的数据变异方法包含三类: + 设置数据变异方法及参数。支持同时配置多种方法,目前支持的数据变异方法包含三类: - 图像仿射变换方法:Translate、Scale、Shear、Rotate。 - 基于图像像素值变化的方法: Contrast、Brightness、Blur、Noise。 - 基于对抗攻击的白盒、黑盒对抗样本生成方法:FGSM、PGD、MDIIM。 - 数据变异方法一定要包含基于图像像素值变化的方法。 + 数据变异方法中一定要包含基于图像像素值变化的方法。 - 前两种图像变化方法的可配置参数,以及推荐参数范围请参考:对应的类方法,也可以均设置为`'auto_param': True`,变异参数将在推荐范围内随机生成。 + 前两种类型的图像变化方法,支持用户自定义配置参数,也支持算法随机选择参数。用于自定义参数配置范围请参考: + 中对应的类方法。算法随机选择参数,则`params`设置为`'auto_param': [True]`,参数将在推荐范围内随机生成。 基于对抗攻击方法的参数配置请参考对应的攻击方法类。 + + 下面时变异方法及其参数配置的一个例子: ```python mutate_config = [{'method': 'Blur', - 'params': {'auto_param': True}}, + 'params': {'radius': [0.1, 0.2, 0.3], + 'auto_param': [True, False]}}, {'method': 'Contrast', - 'params': {'auto_param': True}}, + 'params': {'auto_param': [True]}}, {'method': 'Translate', - 'params': {'auto_param': True}}, + 'params': {'auto_param': [True]}}, {'method': 'Brightness', - 'params': {'auto_param': True}}, + 'params': {'auto_param': [True]}}, {'method': 'Noise', - 'params': {'auto_param': True}}, + 'params': {'auto_param': [True]}}, {'method': 'Scale', - 'params': {'auto_param': True}}, + 'params': {'auto_param': [True]}}, {'method': 'Shear', - 'params': {'auto_param': True}}, + 'params': {'auto_param': [True]}}, {'method': 'FGSM', - 'params': {'eps': 0.3, 'alpha': 0.1}} - ] + 'params': {'eps': [0.3, 0.2, 0.4], 'alpha': [0.1]}} + ] ``` 设置评价指标,目前支持5种评价指标,包括: @@ -135,13 +139,13 @@ context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target) eval_metrics =['accuracy', 'kmnc', 'attack_success_rate'] ``` -3. 初始化种子队列,种子队列中的每个种子,包含3个值:原始图片、图片标签。 +3. 
初始化种子队列,种子队列中的每个种子,包含2个值:原始图片、图片标签。这里取100个样本作为初始种子队列。 ```python # make initial seeds initial_seeds = [] for img, label in zip(test_images, test_labels): - initial_seeds.append([img, label]) + initial_seeds.append([img, label]) initial_seeds = initial_seeds[:100] ``` @@ -164,11 +168,14 @@ context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target) 5. Fuzz测试。 ```python + eval_metrics = 'auto' model_fuzz_test = Fuzzer(model, train_images, neuron_num, segmented_num) _, _, _, _, metrics = model_fuzz_test.fuzzing(mutate_config, initial_seeds, eval_metrics=eval_metrics) ``` 6. 实验结果。 + + fuzzing的返回结果中包含了5个数据:fuzz生成的样本fuzz_samples、生成样本的真实标签true_labels、被测模型对于生成样本的预测值fuzz_preds、 生成样本使用的变异方法fuzz_strategies、fuzz testing的评估报告metrics_report。用户可使用这些返回结果进一步的分析模型的鲁棒性。这里只展开metrics_report,查看fuzz testing后的各个评估指标。 ```python if metrics: @@ -184,7 +191,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target) Neural_coverage_KMNC: 0.4797 ``` - Fuzz测试前种子的KMNC神经元覆盖率为8.5%,Fuzz后,KMNC神经元覆盖率为47.97%,神经元覆盖率提升,样本的多样性提升。Fuzz后,模型对于Fuzz生成样本的准确率为79.29%,使用了对抗攻击方法的样本,攻击成功率为47.97%。由于初始化种子、变异方法和相应的参数均为随机选择的,结果有一定的浮动是正常的。 + Fuzz测试前种子的KMNC神经元覆盖率为8.5%,Fuzz后,KMNC神经元覆盖率为47.97%,神经元覆盖率提升,样本的多样性提升。Fuzz后,模型对于Fuzz生成样本的准确率为79.29%,使用了对抗攻击方法的样本,攻击成功率为39.39%。由于初始化种子、变异方法和相应的参数均为随机选择的,结果有一定的浮动是正常的。 原始图片: diff --git a/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md b/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md index 8a232a39b320b1edef473ad28a5a3bc54b2b610f..3b4ef2bd87c18e87350504367b1ce09169e1fbec 100644 --- a/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md +++ b/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md @@ -1,6 +1,6 @@ # 梯度累积 -`Linux` `Ascend` `GPU` `模型调优` `中级` `高级` +`Linux` `GPU` `模型调优` `中级` `高级` @@ -30,7 +30,7 @@ 最终目的是为了达到跟直接用N*Mini-batch数据训练几乎同样的效果。 -> 本教程用于GPU、Ascend 910 AI处理器, 你可以在这里下载主要的训练样例代码: +> 本教程用于GPU, 你可以在这里下载主要的训练样例代码: ## 创建梯度累积模型 @@ -59,11 +59,11 @@ from model_zoo.official.cv.lenet.src.lenet import LeNet5 ### 加载数据集 -利用MindSpore的dataset提供的`MnistDataset`接口加载MNIST数据集,此部分代码由model_zoo中lenet目录下的[dataset.py]()导入。 +利用MindSpore的`dataset`提供的`MnistDataset`接口加载MNIST数据集,此部分代码由`model_zoo`中`lenet`目录下的[dataset.py]()导入。 ### 定义网络 -这里以LeNet网络为例进行介绍,当然也可以使用其它的网络,如ResNet-50、BERT等, 此部分代码由model_zoo中lenet目录下的[lenet.py]()导入。 +这里以LeNet网络为例进行介绍,当然也可以使用其它的网络,如ResNet-50、BERT等, 此部分代码由`model_zoo`中`lenet`目录下的[lenet.py]()导入。 ### 定义训练模型 将训练流程拆分为正向反向训练、参数更新和累积梯度清理三个部分: @@ -130,8 +130,8 @@ class TrainClear(Cell): self.hyper_map = C.HyperMap() def construct(self): - seccess = self.hyper_map(F.partial(_clear_op), self.grad_sum, self.zeros) - return seccess + success = self.hyper_map(F.partial(_clear_op), self.grad_sum, self.zeros) + return success ``` ### 定义训练过程 @@ -208,8 +208,8 @@ class GradientAccumulation: ```python if __name__ == "__main__": parser = argparse.ArgumentParser(description='MindSpore Gard Cumulative Example') - parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'], - help='device where the code will be implemented (default: Ascend)') + parser.add_argument('--device_target', type=str, default="GPU", choices=['GPU'], + help='device where the code will be implemented (default: GPU)') parser.add_argument('--data_path', type=str, default="./Data", help='path where the dataset is saved') args = parser.parse_args() @@ -231,9 +231,11 @@ if __name__ == "__main__": **执行训练** 1. 
运行训练代码,查看运行结果。 + ```shell $ python train.py --data_path=./MNIST_Data ``` + 输出如下,可以看到loss值随着训练逐步降低: ```shell @@ -246,17 +248,17 @@ if __name__ == "__main__": epoch: 10 step: 448 loss is 0.06443884 epoch: 10 step: 449 loss is 0.0067842817 ``` - + 2. 查看保存的CheckPoint文件。 训练过程中保存了CheckPoint文件`gradient_accumulation.ckpt`,即模型文件。 **验证模型** -通过model_zoo中lenet目录下的[eval.py](),使用保存的CheckPoint文件,加载验证数据集,进行验证。 +通过`model_zoo`中`lenet`目录下的[eval.py](),使用保存的CheckPoint文件,加载验证数据集,进行验证。 ```shell -$ python eval.py --data_path=./MNIST_Data --ckpt_path=./gradient_accumulation.ckpt +$ python eval.py --data_path=./MNIST_Data --ckpt_path=./gradient_accumulation.ckpt --device_target=GPU ``` 输出如下,可以看到使用验证的数据集,正确率在96.31%左右,与batch_size为32的验证结果一致。 diff --git a/tutorials/source_zh_cn/advanced_use/hub_tutorial.md b/tutorials/source_zh_cn/advanced_use/hub_tutorial.md index 5969c90d637fe8ac26124ea16dfff036b2316f8a..4447bd33731dba4309f4021701be1c75b8895ee9 100644 --- a/tutorials/source_zh_cn/advanced_use/hub_tutorial.md +++ b/tutorials/source_zh_cn/advanced_use/hub_tutorial.md @@ -1,58 +1,93 @@ -## 使用MindSpore Hub提交、加载和微调模型 +# 使用MindSpore Hub提交、加载和微调模型 -`Ascend` `GPU` `MindSpore Hub` `模型上传` `模型加载` `模型微调` `初级` `中级` `高级` +`Linux` `Ascend` `GPU` `MindSpore Hub` `模型上传` `模型加载` `模型微调` `初级` `中级` `高级` -- [使用MindSpore Hub提交、加载和微调模型](#使用MindSporeHub提交加载和微调模型) - - [概述](#概述) - - [模型上传](#模型上传) - - [步骤](#步骤) - - [模型加载](#模型加载) - - [模型微调](#模型微调) +- [使用MindSpore Hub提交、加载和微调模型](#使用mindspore-hub提交加载和微调模型) + - [概述](#概述) + - [模型上传](#模型上传) + - [步骤](#步骤) + - [模型加载](#模型加载) + - [模型微调](#模型微调) -### 概述 +## 概述 -本教程以Googlenet为例,对想要将模型发布到MindSpore Hub的算法开发者介绍了模型上传步骤,也对想要使用MindSpore Hub模型进行推理或者微调的开发应用者描述了具体操作流程。总之,本教程可以帮助算法开发者有效地提交模型,并使得应用开发者利用MindSpore Hub的接口快速实现模型推理或微调。 +MindSpore Hub是MindSpore生态的预训练模型应用工具,作为模型开发者和应用开发者的管道,它不仅向模型开发者提供了方便快捷的模型发布通道,而且向应用开发者提供了简单易用的模型加载和微调API。本教程以GoogleNet为例,对想要将模型发布到MindSpore Hub的模型开发者介绍了模型上传步骤,也对想要使用MindSpore Hub模型进行推理或者微调的应用开发者描述了具体操作流程。总之,本教程可以帮助模型开发者有效地提交模型,并使得应用开发者利用MindSpore Hub的接口快速实现模型推理或微调。 -### 模型上传 +## 模型上传 -我们接收用户通过向`hub`仓提交PR的方式向MindSpore Hub发布模型。这里我们用Googlenet为例,列出将模型提交到MindSpore Hub的步骤。 +我们接收用户通过向 [hub](https://gitee.com/mindspore/hub) 仓提交PR的方式向MindSpore Hub发布模型。这里我们以GoogleNet为例,列出模型提交到MindSpore Hub的步骤。 -#### 步骤 +### 步骤 -1. 将你的预训练模型托管在我们可以访问的存储位置。 +1. 将你的预训练模型托管在可以访问的存储位置。 -2. 按照 [模板](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/googlenet/mindspore_hub_conf.py) 在你自己的代码仓中添加模型生成文件 `mindspore_hub_conf.py`。 +2. 按照 [模板](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/googlenet/mindspore_hub_conf.py) 在你自己的代码仓中添加模型生成文件 `mindspore_hub_conf.py`,文件放置的位置如下: -3. 按照 [模板](https://gitee.com/mindspore/hub/blob/master/mshub_res/assets/mindspore/gpu/0.6/alexnet_v1_cifar10.md) 在 `hub/mshub_res/assets` 中创建`{model_name}_{model_version}_{dataset}.md` 文件。对于每个预训练模型,执行以下命令,用来获得`.md`文件`asset-sha256` 处所需的哈希值: + ```shell script + googlenet + ├── src + │   ├── googlenet.py + ├── script + │   ├── run_train.sh + ├── train.py + ├── test.py + ├── mindspore_hub_conf.py + ``` + +3. 
按照 [模板](https://gitee.com/mindspore/hub/blob/master/mshub_res/assets/mindspore/ascend/0.7/googlenet_v1_cifar10.md) 在 `hub/mshub_res/assets/mindspore/ascend/0.7` 文件夹下创建`{model_name}_{model_version}_{dataset}.md` 文件,其中 `ascend` 为模型运行的硬件平台,`0.7` 为MindSpore的版本号,`hub/mshub_res`的目录结构为: + + ```shell script + hub + ├── mshub_res + │   ├── assets + │   ├── mindspore + | ├── gpu + | ├── 0.7 + | ├── ascend + | ├── 0.7 + | ├── googlenet_v1_cifar10.md + │   ├── tools + | ├── md_validator.py + | └── md_validator.py + ``` + 注意,`{model_name}_{model_version}_{dataset}.md` 文件中需要补充如下所示的 `file-format`、`asset-link` 和 `asset-sha256` 信息,它们分别表示模型文件格式、模型存储位置(步骤1所得)和模型哈希值,其中MindSpore Hub支持的模型文件格式有 [MindSpore CKPT](https://www.mindspore.cn/tutorial/zh-CN/master/use/saving_and_loading_model_parameters.html#checkpoint-configuration-policies),[AIR](https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html),[MindIR](https://www.mindspore.cn/tutorial/zh-CN/master/use/saving_and_loading_model_parameters.html#export-mindir-model),[ONNX](https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html) 和 [MSLite](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/converter_tool.html)。 + + ```shell script + file-format: ckpt + asset-link: https://download.mindspore.cn/model_zoo/official/cv/googlenet/goolenet_ascend_0.2.0_cifar10_official_classification_20200713/googlenet.ckpt + asset-sha256: 114e5acc31dad444fa8ed2aafa02ca34734419f602b9299f3b53013dfc71b0f7 + ``` + + 对于每个预训练模型,执行以下命令,用来获得`.md` 文件 `asset-sha256` 处所需的哈希值,其中 `googlenet.ckpt` 是从步骤1的存储位置处下载并保存到 `tools` 文件夹的预训练模型,运行后输出的哈希值为 `114e5acc31dad444fa8ed2aafa02ca34734419f602b9299f3b53013dfc71b0f7`。 ```python - cd ../tools + cd /hub/mshub_res/tools python get_sha256.py ../googlenet.ckpt ``` -4. 使用 `hub/mshub_res/tools/md_validator.py` 在本地核对`.md`文件的格式,执行的命令如下: +4. 使用 `hub/mshub_res/tools/md_validator.py` 在本地核对`.md`文件的格式,执行以下命令,输出结果为 `All Passed`,表示 `.md` 文件的格式和内容均符合要求。 ```python python md_validator.py ../assets/mindspore/ascend/0.7/googlenet_v1_cifar10.md ``` -5. 在 `mindspore/hub` 仓创建PR。 +5. 
在 `mindspore/hub` 仓创建PR,详细创建方式可以参考[贡献者Wiki](https://gitee.com/mindspore/mindspore/blob/master/CONTRIBUTING.md)。
 
-一旦你的PR合并到 `mindspore/hub` 的master分支,你的模型将于24小时内在 [MindSpore Hub 网站](https://hub.mindspore.com/mindspore) 上显示。更多详细信息,请参考 [README](https://gitee.com/mindspore/hub/blob/master/mshub_res/README.md) 。
+一旦你的PR合并到 `mindspore/hub` 的master分支,你的模型将于24小时内在 [MindSpore Hub 网站](https://hub.mindspore.com/mindspore) 上显示。有关模型上传的更多详细信息,请参考 [README](https://gitee.com/mindspore/hub/blob/master/mshub_res/README.md) 。
 
-### 模型加载
+## 模型加载
 
 `mindspore_hub.load` API用于加载预训练模型,可以实现一行代码加载模型。主要的模型加载流程如下:
 
 - 在MindSpore Hub官网上搜索感兴趣的模型。
 
-  例如,想使用Googlenet对CIFAR-10数据集进行分类,可以在MindSpore Hub官网上使用关键词`GoogleNet`进行搜索。页面将会返回与Googlenet相关的所有模型。进入相关模型页面之后,获得详情页 `url`。
+  例如,想使用GoogleNet对CIFAR-10数据集进行分类,可以在MindSpore Hub官网上使用关键词`GoogleNet`进行搜索。页面将会返回与GoogleNet相关的所有模型。进入相关模型页面之后,获得详情页 `url`。
 
 - 使用`url`完成模型的加载,示例代码如下:
 
@@ -62,9 +97,7 @@
     from mindspore import context, Tensor, nn
     from mindspore.train.model import Model
     from mindspore.common import dtype as mstype
-    from mindspore.dataset.transforms import py_transforms
-    from PIL import Image
-    import cv2
+    import mindspore.dataset.vision.py_transforms as py_transforms
 
     context.set_context(mode=context.GRAPH_MODE,
                         device_target="Ascend",
@@ -72,18 +105,17 @@
 
     model = "mindspore/ascend/0.7/googlenet_v1_cifar10"
 
-    image = Image.open('cifar10/a.jpg')
-    transforms = py_transforms.ComposeOp([py_transforms.ToTensor()])
-
     # Initialize the number of classes based on the pre-trained model.
     network = mshub.load(model, num_classes=10)
     network.set_train(False)
-    out = network(transforms(image))
+
+    # ...
+    ```
 
+- 完成模型加载后,可以使用MindSpore进行推理,参考[这里](https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html)。
 
+## 模型微调
 
-### 模型微调
-
-在使用 `mindspore_hub.load` 进行模型加载时,可以增加一个额外的参数项只加载神经网络的特征提取部分。这样我们就能很容易地在之后增加一些新的层进行迁移学习。*当算法工程师将额外的参数(例如 include_top)添加到模型构造中时,可以在模型的详情页中找到这个功能。*
+在使用 `mindspore_hub.load` 进行模型加载时,可以增加一个额外的参数项只加载神经网络的特征提取部分。这样我们就能很容易地在之后增加一些新的层进行迁移学习。*当模型开发者将额外的参数(例如 include_top)添加到模型构造中时,可以在模型的详情页中找到这个功能。`include_top` 取值为True或者False,表示是否保留顶层的全连接网络。*
 
 下面我们以GoogleNet为例,说明如何加载一个基于ImageNet的预训练模型,并在特定的子任务数据集上进行迁移学习(重训练)。主要的步骤如下:
 
@@ -93,54 +125,83 @@
 
     ```python
     import mindspore
-    from mindspore import nn
-    from mindspore import context
+    from mindspore import nn, context, Tensor
+    from mindspore.train.serialization import save_checkpoint
+    from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
+    from mindspore.ops import operations as P
+    from mindspore.nn import Momentum
+
+    import math
+    import numpy as np
+
+    import mindspore_hub as mshub
+    from src.dataset import create_dataset
 
     context.set_context(mode=context.GRAPH_MODE,
                         device_target="Ascend",
                         save_graphs=False)
-
-    network = mshub.load('mindspore/ascend/0.7/googlenet_v1_cifar10', include_top=False)
+    model_url = "mindspore/ascend/0.7/googlenet_v1_cifar10"
+    network = mshub.load(model_url, include_top=False, num_classes=1000)
     network.set_train(False)
     ```
 
3. 在现有模型结构基础上增加一个与新任务相关的分类层。
 
    ```python
+    class ReduceMeanFlatten(nn.Cell):
+        def __init__(self):
+            super(ReduceMeanFlatten, self).__init__()
+            self.mean = P.ReduceMean(keep_dims=True)
+            self.flatten = nn.Flatten()
+
+        def construct(self, x):
+            x = self.mean(x, (2, 3))
+            x = self.flatten(x)
+            return x
+
     # Check MindSpore Hub website to confirm that the last output shape is 1024.
     last_channel = 1024
 
     # The number of classes in target task is 26.
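+    # ReduceMeanFlatten above averages the feature map over its H and W axes and flattens it,
+    # so the backbone's 1024-channel feature output can feed the new Dense classification layer.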
num_classes = 26 + + reducemean_flatten = ReduceMeanFlatten() + classification_layer = nn.Dense(last_channel, num_classes) classification_layer.set_train(True) - train_network = nn.SequentialCell([network, classification_layer]) + train_network = nn.SequentialCell([network, reducemean_flatten, classification_layer]) ``` 4. 为模型训练选择损失函数和优化器。 ```python - from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits + epoch_size = 60 # Wrap the backbone network with loss. - loss_fn = SoftmaxCrossEntropyWithLogits() + loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") loss_net = nn.WithLossCell(train_network, loss_fn) + lr = get_lr(global_step=0, + lr_init=0, + lr_max=0.05, + lr_end=0.001, + warmup_epochs=5, + total_epochs=epoch_size) + # Create an optimizer. - optim = opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), Tensor(lr), config.momentum, config.weight_decay) + optim = Momentum(filter(lambda x: x.requires_grad, loss_net.get_parameters()), Tensor(lr), 0.9, 4e-5) train_net = nn.TrainOneStepCell(loss_net, optim) ``` -5. 构建数据集,开始重训练。 +5. 构建数据集,开始重训练。如下所示,进行微调任务的数据集为垃圾分类数据集,存储位置为 `/ssd/data/garbage/train`。 ```python - from src.dataset import create_dataset - from mindspore.train.serialization import _exec_save_checkpoint - - dataset = create_dataset("/ssd/data/garbage/train", do_train=True, batch_size=32) - - epoch_size = 15 + dataset = create_dataset("/ssd/data/garbage/train", + do_train=True, + batch_size=32, + platform="Ascend", + repeat_num=1) + for epoch in range(epoch_size): for i, items in enumerate(dataset): data, label = items @@ -148,10 +209,10 @@ label = mindspore.Tensor(label) loss = train_net(data, label) - print(f"epoch: {epoch}, loss: {loss}") + print(f"epoch: {epoch}/{epoch_size}, loss: {loss}") # Save the ckpt file for each epoch. ckpt_path = f"./ckpt/garbage_finetune_epoch{epoch}.ckpt" - _exec_save_checkpoint(train_network, ckpt_path) + save_checkpoint(train_network, ckpt_path) ``` 6. 在测试集上测试模型精度。 @@ -159,21 +220,31 @@ ```python from mindspore.train.serialization import load_checkpoint, load_param_into_net - network = mshub.load('mindspore/ascend/0.7/googlenet_v1_cifar10', include_top=False) - train_network = nn.SequentialCell([network, nn.Dense(last_channel, num_classes)]) + network = mshub.load('mindspore/ascend/0.7/googlenet_v1_cifar10', pretrained=False, + include_top=False, num_classes=1000) + + reducemean_flatten = ReduceMeanFlatten() + + classification_layer = nn.Dense(last_channel, num_classes) + classification_layer.set_train(False) + softmax = nn.Softmax() + network = nn.SequentialCell([network, reducemean_flatten, + classification_layer, softmax]) # Load a pre-trained ckpt file. - ckpt_path = "./ckpt/garbage_finetune_epoch15.ckpt" + ckpt_path = "./ckpt/garbage_finetune_epoch59.ckpt" trained_ckpt = load_checkpoint(ckpt_path) - load_param_into_net(train_network, trained_ckpt) + load_param_into_net(network, trained_ckpt) # Define loss and create model. 
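    # 注:评估阶段不需要再定义损失函数,模型精度由 metrics={'acc'} 直接计算。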
- loss_fn = SoftmaxCrossEntropyWithLogits() - model = Model(network, loss_fn=loss, metrics={'acc'}) + model = Model(network, metrics={'acc'}, eval_network=network) - eval_dataset = create_dataset("/ssd/data/garbage/train", do_train=False, - batch_size=32) + eval_dataset = create_dataset("/ssd/data/garbage/test", + do_train=True, + batch_size=32, + platform="Ascend", + repeat_num=1) res = model.eval(eval_dataset) print("result:", res, "ckpt=", ckpt_path) - ``` + ``` \ No newline at end of file diff --git a/tutorials/source_zh_cn/advanced_use/images/compose.png b/tutorials/source_zh_cn/advanced_use/images/compose.png index a1dcbf92d4ce37bd9b794b6c04bc38b131cc3a40..fd0dc71e07f632adcc8b63e1b350cff8817c3c01 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/compose.png and b/tutorials/source_zh_cn/advanced_use/images/compose.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/data_chart.png b/tutorials/source_zh_cn/advanced_use/images/data_chart.png index f698c682119efc886b46a911d3c61f50ab017879..017e09898f12a48f4db201bfa0b526ab1d2bac23 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/data_chart.png and b/tutorials/source_zh_cn/advanced_use/images/data_chart.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/data_conversion_concept.png b/tutorials/source_zh_cn/advanced_use/images/data_conversion_concept.png index 33e838ae3005814c97e308df12209cc163db6e5d..0bd540cac3e3570773171d8c1c336053d5d629d0 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/data_conversion_concept.png and b/tutorials/source_zh_cn/advanced_use/images/data_conversion_concept.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/data_enhancement_performance_scheme.png b/tutorials/source_zh_cn/advanced_use/images/data_enhancement_performance_scheme.png index a21caea16f4ee0852be47c3e56b32d184f06a7de..dc5807abb608b7856e5ec5997b66a88092d17552 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/data_enhancement_performance_scheme.png and b/tutorials/source_zh_cn/advanced_use/images/data_enhancement_performance_scheme.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/data_label.png b/tutorials/source_zh_cn/advanced_use/images/data_label.png index c761a9008c5b814da1913c84d2b113174d3f1947..4b7601ea6feb5218dcc91907260fb81bd4d89efd 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/data_label.png and b/tutorials/source_zh_cn/advanced_use/images/data_label.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/data_loading_performance_scheme.png b/tutorials/source_zh_cn/advanced_use/images/data_loading_performance_scheme.png index fd32feee9d720141fc1bfcf3bb03cd40363316e6..9b604bc42b07cd5577b34b4e1e329693e99898ff 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/data_loading_performance_scheme.png and b/tutorials/source_zh_cn/advanced_use/images/data_loading_performance_scheme.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/data_table.png b/tutorials/source_zh_cn/advanced_use/images/data_table.png index e368080648f1da89696efdd3fe280a371d5909c4..58d7ebfde31d83bc7ee3789f9ded9318f83486ab 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/data_table.png and b/tutorials/source_zh_cn/advanced_use/images/data_table.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/debugger_init_page.png b/tutorials/source_zh_cn/advanced_use/images/debugger_init_page.png new file mode 100644 index 
0000000000000000000000000000000000000000..2b090691153fea4d70165c48f9664b2a34b37b2c Binary files /dev/null and b/tutorials/source_zh_cn/advanced_use/images/debugger_init_page.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/debugger_set_watch_point.png b/tutorials/source_zh_cn/advanced_use/images/debugger_set_watch_point.png new file mode 100644 index 0000000000000000000000000000000000000000..7a6365428108ca96dabf3df983ec428c989323fa Binary files /dev/null and b/tutorials/source_zh_cn/advanced_use/images/debugger_set_watch_point.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_compare.png b/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_compare.png new file mode 100644 index 0000000000000000000000000000000000000000..c85bf9631ea355440629d8498991e8c1734f61d5 Binary files /dev/null and b/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_compare.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_info.png b/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_info.png new file mode 100644 index 0000000000000000000000000000000000000000..60b7c50fb9aa6121852de7113df5ef78407e115c Binary files /dev/null and b/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_info.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_value.png b/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_value.png new file mode 100644 index 0000000000000000000000000000000000000000..ddb299155369b81ebbcf9839480477acb3d01776 Binary files /dev/null and b/tutorials/source_zh_cn/advanced_use/images/debugger_tensor_value.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/debugger_waiting.png b/tutorials/source_zh_cn/advanced_use/images/debugger_waiting.png new file mode 100644 index 0000000000000000000000000000000000000000..ca7b86cc56ca6a98558cd80a8a23322a63409771 Binary files /dev/null and b/tutorials/source_zh_cn/advanced_use/images/debugger_waiting.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/debugger_watch_point_hit.png b/tutorials/source_zh_cn/advanced_use/images/debugger_watch_point_hit.png new file mode 100644 index 0000000000000000000000000000000000000000..799e7f44ecde547f4fcab9b47ae9849290658d69 Binary files /dev/null and b/tutorials/source_zh_cn/advanced_use/images/debugger_watch_point_hit.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/lineage_label.png b/tutorials/source_zh_cn/advanced_use/images/lineage_label.png index eabd2dae20664cda83cc46d3d958a07e941a03f6..5b06ae43bdfc0a1488d0065644f541609713dcec 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/lineage_label.png and b/tutorials/source_zh_cn/advanced_use/images/lineage_label.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/lineage_model_chart.png b/tutorials/source_zh_cn/advanced_use/images/lineage_model_chart.png index 3c31840c8c2c89e849e71314b87ada0ba019eb44..dd6bbcfc698dd38ec7fba3f2939972fcfefdc662 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/lineage_model_chart.png and b/tutorials/source_zh_cn/advanced_use/images/lineage_model_chart.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/lineage_model_table.png b/tutorials/source_zh_cn/advanced_use/images/lineage_model_table.png index 4103eee6ee25a9aa602addc616b7d200f082bbca..3ebdb9f480b5b04e1d11a98dcac299192dd1c578 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/lineage_model_table.png and 
b/tutorials/source_zh_cn/advanced_use/images/lineage_model_table.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/mindrecord.png b/tutorials/source_zh_cn/advanced_use/images/mindrecord.png index d1d426776605f26cd1d8972b77989c223cf67807..b06cad25025a7d45c826e37cc7490fa8b05980e9 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/mindrecord.png and b/tutorials/source_zh_cn/advanced_use/images/mindrecord.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/operator_fusion.png b/tutorials/source_zh_cn/advanced_use/images/operator_fusion.png index bd3a88cfb04825f7469e76bcd48988a596ce222d..c16a91f5af557377dc8c978c2f6667818ccf2b1c 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/operator_fusion.png and b/tutorials/source_zh_cn/advanced_use/images/operator_fusion.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/pipeline.png b/tutorials/source_zh_cn/advanced_use/images/pipeline.png index 5fb3f3defd20eb700c0e16d6dff5d57a1d2007c9..f146b1ad88b8052eed0b358f09f8fa3d933c0873 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/pipeline.png and b/tutorials/source_zh_cn/advanced_use/images/pipeline.png differ diff --git a/tutorials/source_zh_cn/advanced_use/images/shuffle_performance_scheme.png b/tutorials/source_zh_cn/advanced_use/images/shuffle_performance_scheme.png index d09ca3dda379502827d58c1269599fa4381cbf76..a09c9b2222034fd419f671a7d2683acbdc315fc2 100644 Binary files a/tutorials/source_zh_cn/advanced_use/images/shuffle_performance_scheme.png and b/tutorials/source_zh_cn/advanced_use/images/shuffle_performance_scheme.png differ diff --git a/tutorials/source_zh_cn/advanced_use/lineage_and_scalars_comparision.md b/tutorials/source_zh_cn/advanced_use/lineage_and_scalars_comparision.md index eca4a19f995063e9419649d78e74a9da1187c3c0..a77e1edca60967289cf5f65cb1ead3084f1c7083 100644 --- a/tutorials/source_zh_cn/advanced_use/lineage_and_scalars_comparision.md +++ b/tutorials/source_zh_cn/advanced_use/lineage_and_scalars_comparision.md @@ -48,7 +48,7 @@ MindInsight中的模型溯源、数据溯源和对比看板同训练看板一样 图4:概览页 -图4展示的是优化目标分布、参数重要性和散点图。 +图4展示的是优化目标分布、参数重要性和散点图。用户可以选择优化目标来查看参数重要性,再通过点击柱状图来查看参数和优化目标的散点图。 ## 数据溯源 diff --git a/tutorials/source_zh_cn/advanced_use/membership_inference.md b/tutorials/source_zh_cn/advanced_use/membership_inference.md index c54935a650369fca03a1599add9bf628beae3014..50ca3b0c83b059f03e3de5021534dfd2e50a9b99 100644 --- a/tutorials/source_zh_cn/advanced_use/membership_inference.md +++ b/tutorials/source_zh_cn/advanced_use/membership_inference.md @@ -1,30 +1,31 @@ -# 成员推理攻击 +# 使用成员推理测试模型安全性 -- [成员推理攻击](#成员推理攻击) +- [使用成员推理测试模型安全性](#使用成员推理测试模型安全性) - [概述](#概述) - [实现阶段](#实现阶段) - [导入需要的库文件](#导入需要的库文件) - [加载数据集](#加载数据集) - [建立模型](#建立模型) - - [运用MembershipInference](#运用membershipinference) + - [运用MembershipInference进行隐私安全评估](#运用membershipinference进行隐私安全评估) - [参考文献](#参考文献) - + +    ## 概述 -成员推理攻击是一种窃取用户数据隐私的方法。隐私指的是单个用户的某些属性,一旦泄露可能会造成人身损害、名誉损害等后果。通常情况下,用户的隐私数据会作保密处理,但我们可以利用非敏感信息来进行推测。例如:”抽烟的人更容易得肺癌“,这个信息不属于隐私信息,但如果知道“张三抽烟”,就可以推断“张三”更容易得肺癌,这就是成员推理。 +成员推理是一种推测用户隐私数据的方法。隐私指的是单个用户的某些属性,一旦泄露可能会造成人身损害、名誉损害等后果。通常情况下,用户的隐私数据会作保密处理,但我们可以利用非敏感信息来进行推测。如果我们知道了某个私人俱乐部的成员都喜欢戴紫色墨镜、穿红色皮鞋,那么我们遇到一个戴紫色墨镜且穿红色皮鞋(非敏感信息)的人,就可以推断他/她很可能是这个私人俱乐部的成员(敏感信息)。这就是成员推理。 -机器学习/深度学习的成员推理攻击(Membership Inference),指的是攻击者拥有模型的部分访问权限(黑盒、灰盒或白盒),能够获取到模型的输出、结构或参数等部分或全部信息,并基于这些信息推断某个样本是否属于模型的训练集。 
+机器学习/深度学习的成员推理(MembershipInference),指的是攻击者拥有模型的部分访问权限(黑盒、灰盒或白盒),能够获取到模型的输出、结构或参数等部分或全部信息,并基于这些信息推断某个样本是否属于模型的训练集。利用成员推理,我们可以评估机器学习/深度学习模型的隐私数据安全。如果在成员推理下能正确识别出60%+的样本,那么我们认为该模型存在隐私数据泄露风险。 -这里以VGG16模型,CIFAR-100数据集为例,说明如何使用MembershipInference。本教程使用预训练的模型参数进行演示,这里仅给出模型结构、参数设置和数据集预处理方式。 +这里以VGG16模型、CIFAR-100数据集为例,说明如何使用MembershipInference进行模型隐私安全评估。本教程使用预训练的模型参数进行演示,这里仅给出模型结构、参数设置和数据集预处理方式。 >本例面向Ascend 910处理器,您可以在这里下载完整的样例代码: > -> +> ## 实现阶段 @@ -49,7 +50,7 @@ from mindspore.common.initializer import initializer import mindspore.dataset as de import mindspore.dataset.transforms.c_transforms as C import mindspore.dataset.vision.c_transforms as vision -from mindarmour.diff_privacy.evaluation.membership_inference import MembershipInference +from mindarmour import MembershipInference from mindarmour.utils import LogUtil LOGGER = LogUtil.get_instance() @@ -176,7 +177,7 @@ def vgg16(num_classes=1000, args=None, phase="train"): return net ``` -### 运用MembershipInference +### 运用MembershipInference进行隐私安全评估 1. 构建VGG16模型并加载参数文件。 这里直接加载预训练完成的VGG16参数配置,您也可以使用如上的网络自行训练。 ```python @@ -194,37 +195,34 @@ def vgg16(num_classes=1000, args=None, phase="train"): args.padding = 0 args.pad_mode = "same" args.weight_decay = 5e-4 - args.loss_scale = 1.0 - - data_path = "./cifar-100-binary" # Replace your data path here. - pre_trained = "./VGG16-100_781.ckpt" # Replace your pre trained checkpoint file here. + args.loss_scale = 1.0 # Load the pretrained model. net = vgg16(num_classes=100, args=args) - loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) + loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True) opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9, weight_decay=args.weight_decay, loss_scale=args.loss_scale) load_param_into_net(net, load_checkpoint(args.pre_trained)) model = Model(network=net, loss_fn=loss, optimizer=opt) ``` -2. 加载CIFAR-100数据集,按8:2分割为成员推理攻击模型的训练集和测试集。 +2. 加载CIFAR-100数据集,按8:2分割为成员推理模型的训练集和测试集。 ```python # Load and split dataset. train_dataset = vgg_create_dataset100(data_home=args.data_path, image_size=(224, 224), - batch_size=64, num_samples=10000, shuffle=False) + batch_size=64, num_samples=5000, shuffle=False) test_dataset = vgg_create_dataset100(data_home=args.data_path, image_size=(224, 224), - batch_size=64, num_samples=10000, shuffle=False, training=False) + batch_size=64, num_samples=5000, shuffle=False, training=False) train_train, eval_train = train_dataset.split([0.8, 0.2]) train_test, eval_test = test_dataset.split([0.8, 0.2]) msg = "Data loading completed." LOGGER.info(TAG, msg) ``` -3. 配置攻击参数和评估参数 +3. 配置推理参数和评估参数 - 设置用于成员推理评估的方法和参数。目前支持的推理方法有:KNN、LR、MLPClassifier和RandomForest Classifier。 + 设置用于成员推理的方法和参数。目前支持的推理方法有:KNN、LR、MLPClassifier和RandomForestClassifier。推理参数数据类型使用list,各个方法使用key为"method"和"params"的字典表示。 ```python config = [ @@ -261,35 +259,41 @@ def vgg16(num_classes=1000, args=None, phase="train"): ] ``` - 设置评价指标,目前支持3种评价指标。包括: - * 准确率:accuracy。 - * 精确率:precision。 - * 召回率:recall。 - + 我们约定标签为训练集的是正类,标签为测试集的是负类。设置评价指标,目前支持3种评价指标。包括: + * 准确率:accuracy,正确推理的数量占全体样本中的比例。 + * 精确率:precision,正确推理的正类样本占所有推理为正类中的比例。 + * 召回率:recall,正确推理的正类样本占全体正类样本的比例。 + 在样本数量足够大时,如果上述指标均大于0.6,我们认为目标模型就存在隐私泄露的风险。 + ```python - metrics = ["precision", "accuracy", "recall"] + metrics = ["precision", "accuracy", "recall"] ```
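为便于理解上述三个指标,下面用一组随意构造的混淆矩阵数值演示它们的计算方式(示意代码,tp、fp、tn、fn 均为假设值,正类代表训练集样本):

```python
# tp:训练集样本被正确判为正类的数量    fp:测试集样本被误判为正类的数量
# tn:测试集样本被正确判为负类的数量    fn:训练集样本被误判为负类的数量
tp, fp, tn, fn = 60, 20, 55, 15

accuracy = (tp + tn) / (tp + fp + tn + fn)  # 正确推理占全体样本的比例
precision = tp / (tp + fp)                  # 推理为正类中真正来自训练集的比例
recall = tp / (tp + fn)                     # 全体正类样本中被正确识别的比例
print(accuracy, precision, recall)          # 约 0.767, 0.75, 0.8
```

- -4. 训练成员推理攻击模型,并给出评估结果。 + +4. 训练成员推理模型,并给出评估结果。 ```python - attacker = MembershipInference(model) # Get attack model.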
- attacker.train(train_train, train_test, config) # Train attack model. + inference.train(train_train, train_test, config) # Train inference model. msg = "Membership inference model training completed." LOGGER.info(TAG, msg) - result = attacker.eval(eval_train, eval_test, metrics) # Eval metrics. + result = inference.eval(eval_train, eval_test, metrics) # Eval metrics. count = len(config) for i in range(count): print("Method: {}, {}".format(config[i]["method"], result[i])) ``` 5. 实验结果。 + 执行如下指令,开始成员推理训练和评估: + + ``` + python membership_inference_example.py --data_path ./cifar-100-binary/ --pre_trained ./VGG16-100_781.ckpt + ``` 成员推理的指标如下所示,各数值均保留至小数点后四位。 - 以第一行结果为例:在使用lr(逻辑回归分类)进行成员推理时,推理的准确率(accuracy)为0.7132,推理精确率(precision)为0.6596,正类样本召回率为0.8810。在二分类任务下,指标表明我们的成员推理是有效的。 + 以第一行结果为例:在使用lr(逻辑回归分类)进行成员推理时,推理的准确率(accuracy)为0.7132,推理精确率(precision)为0.6596,正类样本召回率为0.8810,说明lr有71.32%的概率能正确分辨一个数据样本是否属于目标模型的训练数据集。在二分类任务下,指标表明成员推理是有效的,即该模型存在隐私泄露的风险。 ``` Method: lr, {'recall': 0.8810,'precision': 0.6596,'accuracy': 0.7132} diff --git a/tutorials/source_zh_cn/advanced_use/mindinsight_commands.md b/tutorials/source_zh_cn/advanced_use/mindinsight_commands.md index 0383711471a70801b5d341f5bd3556ee1b5557cb..957ab8624f81c2d82969b958ab541b77f6c355a1 100644 --- a/tutorials/source_zh_cn/advanced_use/mindinsight_commands.md +++ b/tutorials/source_zh_cn/advanced_use/mindinsight_commands.md @@ -30,10 +30,12 @@ mindinsight --version ## 启动服务 ```shell -mindinsight start [-h] [--config ] [--workspace ] - [--port ] [--url-path-prefix ] - [--reload-interval ] - [--summary-base-dir ] +mindinsight start [-h] [--config {CONFIG}] [--workspace {WORKSPACE}] + [--port {PORT}] [--url-path-prefix {URL_PATH_PREFIX}] + [--reload-interval {RELOAD_INTERVAL}] + [--summary-base-dir {SUMMARY_BASE_DIR}] + [--enable-debugger {ENABLE_DEBUGGER}] + [--debugger-port {DEBUGGER_PORT}] ``` 参数含义如下: @@ -41,12 +43,14 @@ mindinsight start [-h] [--config ] [--workspace ] |参数名|属性|功能描述|参数类型|默认值|取值范围|规则限制| |---|---|---|---|---|---|---| |`-h, --help`|可选|显示启动命令的帮助信息。|-|-|-|-| -|`--config `|可选|指定配置文件或配置模块。|String|空|-|物理文件路径(file:/path/to/config.py)或Python可识别的模块路径(python:path.to.config.module)。| -|`--workspace `|可选|指定工作目录路径。|String|$HOME/mindinsight|-|-| -|`--port `|可选|指定Web可视化服务端口。|Integer|8080|1~65535|-| -|`--url-path-prefix `|可选|指定Web服务URL地址前缀。|String|空|-|URL地址前缀由斜杠(/)分隔成多个部分,各部分支持由字母/数字/下划线/连字符/点号组成的字符串,但不能是单点号(.)或双点号(..)。| -|`--reload-interval `|可选|指定加载数据的时间间隔(单位:秒)。|Integer|3|-|设置为0时表示只加载一次数据。| -|`--summary-base-dir `|可选|指定加载训练日志数据的根目录路径。|String|./|-|MindInsight将遍历此路径下的直属子目录。若某个直属子目录包含日志文件,则该子目录被识别为日志文件目录,若根目录包含日志文件,则根目录被识别为日志文件目录。| +|`--config {CONFIG}`|可选|指定配置文件或配置模块。|String|空|-|物理文件路径(file:/path/to/config.py)或Python可识别的模块路径(python:path.to.config.module)。| +|`--workspace {WORKSPACE}`|可选|指定工作目录路径。|String|$HOME/mindinsight|-|-| +|`--port {PORT}`|可选|指定Web可视化服务端口。|Integer|8080|1~65535|-| +|`--url-path-prefix {URL_PATH_PREFIX}`|可选|指定Web服务URL地址前缀。|String|空|-|URL地址前缀由斜杠(/)分隔成多个部分,各部分支持由字母/数字/下划线/连字符/点号组成的字符串,但不能是单点号(.)或双点号(..)。| +|`--reload-interval {RELOAD_INTERVAL}`|可选|指定加载数据的时间间隔(单位:秒)。|Integer|3|-|设置为0时表示只加载一次数据。| +|`--summary-base-dir {SUMMARY_BASE_DIR}`|可选|指定加载训练日志数据的根目录路径。|String|./|-|MindInsight将遍历此路径下的直属子目录。若某个直属子目录包含日志文件,则该子目录被识别为日志文件目录,若根目录包含日志文件,则根目录被识别为日志文件目录。| +|`--enable-debugger {ENABLE_DEBUGGER}`|可选|是否开启Debugger功能|Boolean|False|True/False|-| +|`--debugger-port {DEBUGGER_PORT}`|可选|指定Debugger Server服务端口。|Integer|50051|1~65535|-| > 服务启动时,命令行参数值将被保存为进程的环境变量,并以 `MINDINSIGHT_` 开头作为标识,如 
`MINDINSIGHT_CONFIG`,`MINDINSIGHT_WORKSPACE`,`MINDINSIGHT_PORT` 等。 diff --git a/tutorials/source_zh_cn/advanced_use/mixed_precision.md b/tutorials/source_zh_cn/advanced_use/mixed_precision.md index 6d31c1d743c6f6038bf7407aa0428078c97ec338..1bf82b8db27fe5d51d353bdeada7008f12c23f79 100644 --- a/tutorials/source_zh_cn/advanced_use/mixed_precision.md +++ b/tutorials/source_zh_cn/advanced_use/mixed_precision.md @@ -39,14 +39,14 @@ MindSpore混合精度典型的计算流程如下图所示: ## 自动混合精度 -使用自动混合精度,需要调用相应的接口,将待训练网络和优化器作为输入传进去;该接口会将整张网络的算子转换成FP16算子(除`BatchNorm`算子和Loss涉及到的算子外)。 +使用自动混合精度,需要调用相应的接口,将待训练网络和优化器作为输入传进去;该接口会将整张网络的算子转换成FP16算子(除`BatchNorm`算子和Loss涉及到的算子外)。可以使用`amp`接口和`Model`接口两种方式实现混合精度。 -具体的实现步骤为: +使用`amp`接口具体的实现步骤为: 1. 引入MindSpore的混合精度的接口`amp`; 2. 定义网络:该步骤和普通的网络定义没有区别(无需手动配置某个算子的精度); -3. 使用`amp.build_train_network`接口封装网络模型、优化器和损失函数,在该步骤中MindSpore会将有需要的算子自动进行类型转换。 +3. 使用`amp.build_train_network`接口封装网络模型、优化器和损失函数,设置level参数,参考。在该步骤中,MindSpore会将有需要的算子自动进行类型转换。 代码样例如下: @@ -92,6 +92,77 @@ train_network = amp.build_train_network(net, optimizer, loss, level="O3", loss_s output = train_network(predict, label) ``` +使用`Model`接口具体的实现步骤为: +1. 引入MindSpore的模型训练接口`Model`; + +2. 定义网络:该步骤和普通的网络定义没有区别(无需手动配置某个算子的精度); + +3. 创建数据集。该步骤可参考 ; + +4. 使用`Model`接口封装网络模型、优化器和损失函数,设置`amp_level`参数,参考。在该步骤中,MindSpore会将有需要的算子自动进行类型转换。 + +代码样例如下: + +```python +import numpy as np +import mindspore.nn as nn +from mindspore import context +from mindspore.common.initializer import Normal +from mindspore.train import Model +from src.dataset import create_dataset + +context.set_context(mode=context.GRAPH_MODE) +context.set_context(device_target="Ascend") + +# Define network +class LeNet5(nn.Cell): + """ + Lenet network + + Args: + num_class (int): Number of classes. Default: 10. + num_channel (int): Number of channels. Default: 1. 
+ + Returns: + Tensor, output tensor + Examples: + >>> LeNet5(num_class=10) + + """ + def __init__(self, num_class=10, num_channel=1): + super(LeNet5, self).__init__() + self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid') + self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid') + self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02)) + self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02)) + self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02)) + self.relu = nn.ReLU() + self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) + self.flatten = nn.Flatten() + + def construct(self, x): + x = self.max_pool2d(self.relu(self.conv1(x))) + x = self.max_pool2d(self.relu(self.conv2(x))) + x = self.flatten(x) + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return x + +# create dataset +ds_train = create_dataset("/dataset/train", 32) + +# Initialize network +network = LeNet5(10) + +# Define Loss and Optimizer +net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") +net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9) +model = Model(network, net_loss, net_opt, metrics={"Accuracy": nn.Accuracy()}, amp_level="O3") + +# Run training +model.train(epoch=10, train_dataset=ds_train) +``` ## 手动混合精度 diff --git a/tutorials/source_zh_cn/advanced_use/mobilenetv2_incremental_learning.md b/tutorials/source_zh_cn/advanced_use/mobilenetv2_incremental_learning.md index 38225518302fccb98afeb3861039024d12cb53ac..d846f387c3764724f11e5cb14998776eaf64433b 100644 --- a/tutorials/source_zh_cn/advanced_use/mobilenetv2_incremental_learning.md +++ b/tutorials/source_zh_cn/advanced_use/mobilenetv2_incremental_learning.md @@ -1,27 +1,27 @@ # MobileNetV2 增量学习 -`Linux` `CPU` `Ascend` `GPU` `模型开发` `中级` `高级` +`Linux` `Windows` `CPU` `Ascend` `GPU` `模型开发` `中级` `高级` - [MobileNetV2 增量学习](#mobilenetv2-增量学习) - - [概述](#概述) - - [任务描述及准备](#任务描述及准备) - - [环境配置](#环境配置) - - [下载代码](#下载代码) - - [准备预训练模型](#准备预训练模型) - - [准备数据](#准备数据) - - [预训练模型加载代码详解](#预训练模型加载代码详解) - - [参数简介](#参数简介) - - [运行Python文件](#运行python文件) - - [运行Shell脚本](#运行shell脚本) - - [加载增量学习训练](#加载增量学习训练) - - [CPU加载训练](#cpu加载训练) - - [GPU加载训练](#gpu加载训练) - - [Ascend加载训练](#ascend加载训练) - - [增量学习训练结果](#增量学习训练结果) - - [验证增量学习训练模型](#验证增量学习训练模型) - - [验证模型](#验证模型) - - [验证结果](#验证结果) + - [概述](#概述) + - [任务描述及准备](#任务描述及准备) + - [环境配置](#环境配置) + - [下载代码](#下载代码) + - [准备预训练模型](#准备预训练模型) + - [准备数据](#准备数据) + - [预训练模型加载代码详解](#预训练模型加载代码详解) + - [参数简介](#参数简介) + - [运行Python文件](#运行python文件) + - [运行Shell脚本](#运行shell脚本) + - [加载增量学习训练](#加载增量学习训练) + - [CPU加载训练](#cpu加载训练) + - [GPU加载训练](#gpu加载训练) + - [Ascend加载训练](#ascend加载训练) + - [增量学习训练结果](#增量学习训练结果) + - [验证增量学习训练模型](#验证增量学习训练模型) + - [验证模型](#验证模型) + - [验证结果](#验证结果) @@ -33,7 +33,7 @@ MindSpore是一个多元化的机器学习框架。既可以在手机等端侧和PC等设备上运行,也可以在云上的服务器集群上运行。目前MobileNetV2支持在Windows系统中使用单核CPU做增量学习,在EulerOS、Ubuntu系统中使用单个或者多个Ascend AI处理器或GPU中做增量学习,本教程将会介绍如何在不同系统与处理器下的MindSpore框架中做增量学习的训练与验证。 -目前,Window上暂只支持支持CPU,Ubuntu与EulerOS上支持CPU、GPU与Ascend AI处理器三种处理器。 +目前,Windows上暂只支持CPU,Ubuntu与EulerOS上支持CPU、GPU与Ascend AI处理器三种处理器。 >你可以在这里找到完整可运行的样例代码: @@ -43,6 +43,8 @@ MindSpore是一个多元化的机器学习框架。既可以在手机等端侧 若在本地环境运行,需要安装MindSpore框架,配置CPU、GPU或Ascend AI处理器。若在华为云环境上运行,不需要安装MindSpore框架,不需要配置Ascend AI处理器、CPU与GPU,可以跳过本小节。 +Windows操作系统中使用`\`,Linux操作系统中使用`/`分割路径地址中不同层级目录,下文中默认使用`/`,若用户使用Windows操作系统,路径地址中`/`需自行更改为`\`。 + 1.
安装MindSpore框架 在EulerOS、Ubuntu或者Windows等系统上需要根据系统和处理器架构[安装对应版本MindSpore框架](https://www.mindspore.cn/install)。 @@ -120,7 +122,7 @@ cd ./mindspore/model_zoo/official/cv/mobilenetv2 代码结构如下: -``` +```bash ├─MobileNetV2 ├─README.md # descriptions about MobileNetV2 ├─scripts @@ -132,6 +134,7 @@ cd ./mindspore/model_zoo/official/cv/mobilenetv2 │ launch.py # start Python script │ lr_generator.py # learning rate config │ mobilenetV2.py # MobileNetV2 architecture + │ mobilenetV2_fusion.py # MobileNetV2 fusion architecture │ models.py # net utils to load ckpt_file, define_net... │ utils.py # net utils to switch precision, set_context and so on ├─train.py # training script @@ -144,13 +147,22 @@ cd ./mindspore/model_zoo/official/cv/mobilenetv2 ### 准备预训练模型 -[下载预训练模型](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2.ckpt)到以下目录: -`./pretrain_checkpoint/[pretrain_checkpoint_file]` +用户需要根据不同处理器种类[下载CPU/GPU预训练模型](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2_cpu_gpu.ckpt)或[下载Ascend预训练模型](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2_ascend.ckpt)到以下目录: +`./pretrain_checkpoint/` -```Python -mkdir pretrain_checkpoint -wget -P ./pretrain_checkpoint https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2.ckpt -``` +- CPU/GPU 处理器 + + ```bash + mkdir pretrain_checkpoint + wget -P ./pretrain_checkpoint https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2_cpu_gpu.ckpt + ``` + +- Ascend AI处理器 + + ```bash + mkdir pretrain_checkpoint + wget -P ./pretrain_checkpoint https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2_ascend.ckpt + ``` ### 准备数据 @@ -158,7 +170,7 @@ wget -P ./pretrain_checkpoint https://download.mindspore.cn/model_zoo/official/l 数据集结构如下: -``` +```bash └─ImageFolder ├─train │ class1Folder @@ -203,17 +215,20 @@ wget -P ./pretrain_checkpoint https://download.mindspore.cn/model_zoo/official/l 24: param.requires_grad = False ``` -## 参数简介 +## 参数简介 + +每个参数需要用户根据自己本地的处理器类型、数据地址与预训练模型地址等修改为相应的值。 ### 运行Python文件 + 在Windows与Linux系统上训练时,运行`train.py`时需要传入`dataset_path`、`platform`、`train_method`与`pretrain_ckpt`四个参数。验证时,运行`eval.py`并且传入`dataset_path`、`platform`、`pretrain_ckpt`与`head_ckpt`四个参数。 -```Shell +```bash # Windows/Linux train with Python file -python train.py --dataset_path [dataset_path] --platform [platform] --pretrain_ckpt [pretrain_checkpoint_path] --train_method[("train", "fine_tune", "incremental_learn")] +python train.py --platform [PLATFORM] --dataset_path [DATASET_PATH] --train_method[("train", "fine_tune", "incremental_learn")] --pretrain_ckpt [PRETRAIN_CHECKPOINT_PATH] # Windows/Linux eval with Python file -python eval.py --dataset_path [dataset_path] --platform [platform] --pretrain_ckpt [pretrain_checkpoint_path] --head_ckpt [head_ckpt_path] +python eval.py --platform [PLATFORM] --dataset_path [DATASET_PATH] --pretrain_ckpt [PRETRAIN_CHECKPOINT_PATH] --head_ckpt [HEAD_CHECKPOINT_PATH] ``` - `--dataset_path`:训练与验证数据集地址,无默认值,用户训练/验证时必须输入。 @@ -222,13 +237,13 @@ python eval.py --dataset_path [dataset_path] --platform [platform] --pretrain_ck - `--pretrain_ckpt`:增量训练或调优时,需要传入pretrain_checkpoint文件路径以加载预训练好的模型参数权重。 - `--head_ckpt`:增量训练模型验证时,需要传入head_net预训练模型路径以加载预训练好的模型参数权重。 - ### 运行Shell脚本 + 在Linux系统上时,可以选择运行Shell脚本文件`./scripts/run_train.sh`与`./scripts/run_eval.sh`。运行时需要在交互界面中同时传入参数。 -```Shell +```bash # Windows doesn't support Shell -# Linux train with Shell 
script +# Linux train with Shell script sh run_train.sh [PLATFORM] [DEVICE_NUM] [VISIABLE_DEVICES(0,1,2,3,4,5,6,7)] [RANK_TABLE_FILE] [DATASET_PATH] [TRAIN_METHOD] [CKPT_PATH] # Linux eval with Shell script for incremental learn sh run_eval.sh [PLATFORM] [DATASET_PATH] [PRETRAIN_CKPT_PATH] [HEAD_CKPT_PATH] @@ -249,7 +264,7 @@ sh run_eval.sh [PLATFORM] [DATASET_PATH] [PRETRAIN_CKPT_PATH] [HEAD_CKPT_PATH] Windows系统上,MobileNetV2做增量学习训练时,只能运行`train.py`。Linux系统上,使用MobileNetV2做增量学习训练时,可以选择运行`run_train.sh`, 并在运行Shell脚本文件时传入[参数](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/mobilenetv2_incremental_learning.html#id8)。 -Windows系统输出信息到交互式命令行,Linux系统环境下运行`run_train.sh`时,命令行结尾使用`&> [log_file_path]`将标准输出与错误输出写入log文件。 增量学习成功开始训练,`./train/device*/log*.log`中会持续写入每一个epoch的训练时间与Loss等信息。若未成功,上述log文件会写入失败报错信息。 +Windows系统输出信息到交互式命令行,Linux系统环境下运行`run_train.sh`时,命令行结尾使用`&> [log_file_path]`将标准输出与错误输出写入log文件。 增量学习成功开始训练,`./train/rank*/log*.log`中会持续写入每一个epoch的训练时间与Loss等信息。若未成功,上述log文件会写入失败报错信息。 ### CPU加载训练 @@ -261,16 +276,16 @@ Windows系统输出信息到交互式命令行,Linux系统环境下运行`run_ 使用样例1:通过Python文件调用1个CPU处理器。 - ```Shell + ```bash # Windows or Linux with Python - python train.py --platform CPU --dataset_path /store/dataset/OpenImage/train/ -- train_method incremental_learn --pretrain_ckpt ./pretrain_checkpoint/mobilenetV2.ckpt + python train.py --platform CPU --dataset_path [TRAIN_DATASET_PATH] --train_method incremental_learn --pretrain_ckpt ./pretrain_checkpoint/mobilenetv2.ckpt ``` 使用样例2:通过Shell文件调用1个CPU处理器。 - ```Shell + ```bash # Linux with Shell - sh run_train.sh CPU /store/dataset/OpenImage/train/ incremental_learn ../pretrain_checkpoint/mobilenetV2.ckpt + sh run_train.sh CPU [TRAIN_DATASET_PATH] incremental_learn ../pretrain_checkpoint/mobilenetv2.ckpt ``` ### GPU加载训练 @@ -283,23 +298,23 @@ - 使用样例1:通过Python文件调用1个GPU处理器。 - ```Shell + ```bash # Windows or Linux with Python - python train.py --platform GPU --dataset_path /store/dataset/OpenImage/train/ --pretrain_ckpt ./pretrain_checkpoint/mobilenetV2.ckpt --train_method incremental_learn + python train.py --platform GPU --dataset_path [TRAIN_DATASET_PATH] --train_method incremental_learn --pretrain_ckpt ./pretrain_checkpoint/mobilenetv2.ckpt ``` - 使用样例2:通过Shell脚本调用1个GPU处理器,设备ID为`“0”`。 - ```Shell + ```bash # Linux with Shell - sh run_train.sh GPU 1 0 /store/dataset/OpenImage/train/ incremental_learn ../pretrain_checkpoint/mobilenetV2.ckpt + sh run_train.sh GPU 1 0 [TRAIN_DATASET_PATH] incremental_learn ../pretrain_checkpoint/mobilenetv2.ckpt ``` - 使用样例3:通过Shell脚本调用8个GPU处理器,设备ID为`“0,1,2,3,4,5,6,7”`。 - ```Shell + ```bash # Linux with Shell - sh run_train.sh GPU 8 0,1,2,3,4,5,6,7 /store/dataset/OpenImage/train/ incremental_learn ../pretrain_checkpoint/mobilenetV2.ckpt + sh run_train.sh GPU 8 0,1,2,3,4,5,6,7 [TRAIN_DATASET_PATH] incremental_learn ../pretrain_checkpoint/mobilenetv2.ckpt ``` ### Ascend加载训练 @@ -312,23 +327,23 @@ - 使用样例1:通过Python文件调用1个Ascend处理器。 - ```Shell + ```bash # Windows or Linux with Python - python train.py --platform Ascend --dataset_path /store/dataset/OpenImage/train/ --train_method incremental_learn --pretrain_ckpt ./pretrain_checkpoint/mobilenetV2.ckpt + python train.py --platform Ascend --dataset_path [TRAIN_DATASET_PATH] --train_method incremental_learn --pretrain_ckpt ./pretrain_checkpoint/mobilenetv2.ckpt ``` - 使用样例2:通过Shell脚本调用1个Ascend AI处理器,设备ID为“0”。 - ```Shell + ```bash # Linux with Shell - sh run_train.sh Ascend 1 0 ~/rank_table.json /store/dataset/OpenImage/train/ incremental_learn ../pretrain_checkpoint/mobilenetV2.ckpt + sh
run_train.sh Ascend 1 0 ~/rank_table.json [TRAIN_DATASET_PATH] incremental_learn ../pretrain_checkpoint/mobilenetv2.ckpt ``` - 使用样例3:通过Shell脚本调用8个Ascend AI处理器,设备ID为”0,1,2,3,4,5,6,7“。 - ```Shell + ```bash # Linux with Shell - sh run_train.sh Ascend 8 0,1,2,3,4,5,6,7 ~/rank_table.json /store/dataset/OpenImage/train/ incremental_learn ../pretrain_checkpoint/mobilenetV2.ckpt + sh run_train.sh Ascend 8 0,1,2,3,4,5,6,7 ~/rank_table.json [TRAIN_DATASET_PATH] incremental_learn ../pretrain_checkpoint/mobilenetv2.ckpt ``` ### 增量学习训练结果 @@ -337,23 +352,23 @@ Windows系统输出信息到交互式命令行,Linux系统环境下运行`run_ - 运行Python文件时在交互式命令行中查看打印信息,`Linux`上运行Shell脚本运行后使用`cat ./train/device0/log0.log`中查看打印信息,输出结果如下: - ``` - train args: Namespace(dataset_path='.\\dataset\\train', platform='CPU', \ - pretrain_ckpt='.\\pretrain_checkpoint\\mobilenetV2.ckpt', train_method='incremental_learn') + ```bash + train args: Namespace(dataset_path='./dataset/train', platform='CPU', \ + pretrain_ckpt='./pretrain_checkpoint/mobilenetv2.ckpt', train_method='incremental_learn') cfg: {'num_classes': 26, 'image_height': 224, 'image_width': 224, 'batch_size': 150, \ - 'epoch_size': 15, 'warmup_epochs': 0, 'lr_max': 0.03, 'lr_end': 0.03, 'momentum': 0.9, \ + 'epoch_size': 200, 'warmup_epochs': 0, 'lr_max': 0.03, 'lr_end': 0.03, 'momentum': 0.9, \ 'weight_decay': 4e-05, 'label_smooth': 0.1, 'loss_scale': 1024, 'save_checkpoint': True, \ 'save_checkpoint_epochs': 1, 'keep_checkpoint_max': 20, 'save_checkpoint_path': './checkpoint', \ 'platform': 'CPU'} Processing batch: 16: 100%|███████████████████████████████████████████ █████████████████████| 16/16 [00:00 @@ -8,6 +8,7 @@ - [概述](#概述) - [安装](#安装) - [用法](#用法) + - [使用场景](#使用场景) - [使用示例](#使用示例) - [基于AST的脚本转换示例](#基于AST的脚本转换示例) - [基于图结构的脚本生成示例](#基于图结构的脚本生成示例) @@ -34,35 +35,41 @@ MindConverter是一款将PyTorch模型脚本转换至MindSpore的脚本迁移工 MindConverter提供命令行(Command-line interface, CLI)的使用方式,命令如下。 ```bash -mindconverter [-h] [--version] --in_file IN_FILE [--output OUTPUT] - [--report REPORT] +usage: mindconverter [-h] [--version] [--in_file IN_FILE] + [--model_file MODEL_FILE] [--shape SHAPE] + [--output OUTPUT] [--report REPORT] + [--project_path PROJECT_PATH] optional arguments: - -h, --help Show this help message and exit. - --version Show program's version number and exit. - --in_file IN_FILE Specify path for script file to use AST schema to - do script conversation. - --model_file MODEL_FILE PyTorch .pth model file path to use graph - based schema to do script generation. When - `--in_file` and `--model_path` are both provided, - use AST schema as default. - --shape SHAPE Optional, excepted input tensor shape of - `--model_file`. It's required when use graph based - schema. - --output OUTPUT Optional, specify path for converted script file - directory. Default is output directory in the - current working directory. - --report REPORT Optional, specify report directory. Default is - the current working directorys. - --project_path PROJECT Optional, pytorch scripts project path. If pytorch - project is not in PYTHONPATH, please assign - `--project_path` when use graph based schema. + -h, --help show this help message and exit + --version show program version number and exit + --in_file IN_FILE Specify path for script file to use AST schema to do + script conversation. + --model_file MODEL_FILE + PyTorch .pth model file path to use graph based schema + to do script generation. When `--in_file` and + `--model_file` are both provided, use AST schema as + default. 
+ --shape SHAPE Optional, expected input tensor shape of + `--model_file`. It is required when using graph based + schema. Usage: --shape 3,224,224 + --output OUTPUT Optional, specify path for converted script file + directory. Default output directory is `output` folder + in the current working directory. + --report REPORT Optional, specify report directory. Default is + converted script directory. + --project_path PROJECT_PATH + Optional, PyTorch scripts project path. If PyTorch + project is not in PYTHONPATH, please assign + `--project_path` when using graph based schema. Usage: + --project_path ~/script_file/ + ``` **MindConverter提供两种模型脚本迁移方案:** -1. 基于抽象语法树(Abstract syntax tree, AST)的脚本转换:指定`--in_file`的值,将使用基于AST的脚本转换方案; -2. 基于图结构的脚本生成:指定`--model_file`与`--shape`将使用基于图结构的脚本生成方案。 +1. **基于抽象语法树(Abstract syntax tree, AST)的脚本转换**:指定`--in_file`的值,将使用基于AST的脚本转换方案; +2. **基于图结构的脚本生成**:指定`--model_file`与`--shape`将使用基于图结构的脚本生成方案。 > 若同时指定了`--in_file`,`--model_file`将默认使用AST方案进行脚本迁移。 @@ -70,8 +77,10 @@ 其中,`--output`与`--report`参数可省略。若省略,MindConverter将在当前工作目录(Working directory)下自动创建`output`目录,将生成的脚本、转换报告输出至该目录。 -另外,当使用基于图结构的脚本生成方案时,请确保原PyTorch项目已在PYTHONPATH中,可通过CLI进入Python交互式命令行,通过import的方式判断是否已满足;若未加入,可通过`--project_path`命令手动将项目路径传入,以确保MindConverter可引用到原PyTorch脚本。 +另外,当使用基于图结构的脚本生成方案时,请确保原PyTorch项目已在Python包搜索路径中,可通过CLI进入Python交互式命令行,通过import的方式判断是否已满足;若未加入,可通过`--project_path`命令手动将项目路径传入,以确保MindConverter可引用到原PyTorch脚本。 + +> 假设用户项目目录为`/home/user/project/model_training`,用户可通过如下命令手动将项目添加至包搜索路径中:`export PYTHONPATH=/home/user/project/model_training:$PYTHONPATH` > 此处MindConverter需要引用原PyTorch脚本,是因为PyTorch模型反向序列化过程中会引用原脚本。 @@ -86,6 +95,12 @@ MindConverter提供两种技术方案,以应对不同脚本迁移场景: 对于上述第二种场景,推荐用户使用基于图结构的脚本生成方案,计算图作为一种标准的模型描述语言,可以消除用户代码风格多样导致的脚本转换率不稳定的问题。在已支持算子的情况下,该方案可提供优于AST方案的转换率。 +目前已基于典型图像分类网络(ResNet、VGG)对图结构的脚本转换方案进行测试。 + +> 1. 基于图结构的脚本生成方案,目前仅支持单输入、单输出模型,对于多输入模型暂不支持; +> 2. 基于图结构的脚本生成方案,由于要基于推理模式加载PyTorch模型,会导致转换后网络中Dropout算子丢失,需要用户手动补齐; +> 3. 基于图结构的脚本生成方案持续优化中。 + ## 使用示例 @@ -94,7 +109,9 @@ 若用户希望使用基于AST的方案进行脚本迁移,假设原PyTorch脚本路径为`/home/user/model.py`,希望将脚本输出至`/home/user/output`,转换报告输出至`/home/user/output/report`,则脚本转换命令为: ```bash -mindconverter --in_file /home/user/model.py --output /home/user/output --report /home/user/output/report +mindconverter --in_file /home/user/model.py \ + --output /home/user/output \ + --report /home/user/output/report ``` 转换报告中,对于未转换的代码行形式为如下,其中x, y指明的是原PyTorch脚本中代码的行、列号。对于未成功转换的算子,可参考[MindSporeAPI映射查询功能](https://www.mindspore.cn/docs/zh-CN/master/index.html#operator_api) 手动对代码进行迁移。对于工具无法迁移的算子,会保留原脚本中的代码。 @@ -114,7 +131,7 @@ line x:y: [UnConvert] 'operator' didn't convert. ... [Convert Over] ``` -对于部分未成功转换的算子,报告中会提供修改建议,如`line 157:23`,会建议将`torch.nn.AdaptiveAvgPool2d`替换为`mindspore.ops.operations.ReduceMean`。 +对于部分未成功转换的算子,报告中会提供修改建议,如`line 157:23`,MindConverter建议将`torch.nn.AdaptiveAvgPool2d`替换为`mindspore.ops.operations.ReduceMean`。
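以该条建议为例,手工迁移时可以参考如下写法(示意代码,特征图shape仅为举例;仅当目标输出尺寸为 (1, 1) 即全局平均池化时,两者才等价):

```python
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

# PyTorch 原写法:torch.nn.AdaptiveAvgPool2d((1, 1))
# MindSpore 等价写法:在 H、W 两个维度(axis 2、3)上求均值,keep_dims=True 保留维度
x = Tensor(np.ones((1, 1280, 7, 7)).astype(np.float32))
reduce_mean = P.ReduceMean(keep_dims=True)
out = reduce_mean(x, (2, 3))  # shape 由 (1, 1280, 7, 7) 变为 (1, 1280, 1, 1)
```

### 基于图结构的脚本生成示例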
若用户已将PyTorch模型保存为.pth格式,假设模型绝对路径为`/home/user/model.pth`,该模型期望的输入样本shape为(3, 224, 224),原PyTorch脚本位于`/home/user/project/model_training`,希望将脚本输出至`/home/user/output`,转换报告输出至`/home/user/output/report`,则脚本生成命令为: ```bash -mindconverter --model_file /home/user/model.pth --shape 3,224,224 --output /home/user/output --report /home/user/output/report --project_path /home/user/project/model_training +mindconverter --model_file /home/user/model.pth --shape 3,224,224 \ + --output /home/user/output \ + --report /home/user/output/report \ + --project_path /home/user/project/model_training ``` 执行该命令,MindSpore代码文件、转换报告生成至相应目录。 @@ -131,13 +151,53 @@ mindconverter --model_file /home/user/model.pth --shape 3,224,224 --output /home 基于图结构的脚本生成方案产生的转换报告格式与AST方案相同。然而,由于基于图结构方案属于生成式方法,转换过程中未参考原PyTorch脚本,因此生成的转换报告中涉及的代码行、列号均指生成后脚本。 -另外对于未成功转换的算子,在代码中会相应的标识该节点输入、输出Tensor的shape(以input_shape, output_shape标识),便于用户手动修改。 +另外对于未成功转换的算子,在代码中会相应的标识该节点输入、输出Tensor的shape(以`input_shape`, `output_shape`标识),便于用户手动修改。以Reshape算子为例(暂不支持Reshape),将生成如下代码: + +```python +class Classifier(nn.Cell): + + def __init__(self): + super(Classifier, self).__init__() + ... + self.reshape = onnx.Reshape(input_shape=(1, 1280, 1, 1), + output_shape=(1, 1280)) + ... + + def construct(self, x): + ... + # Suppose input of `reshape` is x. + reshape_output = self.reshape(x) + ... + +``` + +借助代码中标注的`input_shape`、`output_shape`信息,用户可以十分便捷地完成算子替换;注意`P.Reshape`的目标shape不在算子初始化时设置,而是在调用时传入,替换结果如下: + +```python +from mindspore.ops import operations as P +... + +class Classifier(nn.Cell): + + def __init__(self): + super(Classifier, self).__init__() + ... + # input_shape=(1, 1280, 1, 1), output_shape=(1, 1280) + self.reshape = P.Reshape() + ... + + def construct(self, x): + ... + # Suppose input of `reshape` is x. + reshape_output = self.reshape(x, (1, 1280)) + ... + +``` > 注意:其中`--output`与`--report`参数可省略,若省略,该命令将在当前工作目录(Working directory)下自动创建`output`目录,将生成的脚本、转换报告输出至该目录。 ## 注意事项 -1. PyTorch不作为MindInsight明确声明的依赖库,但若想使用基于图结构的脚本生成工具,需要用户手动安装与生成PyTorch模型版本一致的PyTorch库; -2. MindConverter目前仅支持单输入模型,对于多输入模型暂不支持; -3. 脚本转换工具本质上为算子驱动,对于MindConverter未维护的PyTorch或ONNX算子与MindSpore算子映射,将会出现相应的算子无法转换的问题,对于该类算子,用户可手动修改,或基于MindConverter实现映射关系,向MindInsight仓库贡献。 \ No newline at end of file +1. PyTorch不作为MindInsight明确声明的依赖库。若想使用基于图结构的脚本生成工具,需要用户手动安装与生成PyTorch模型版本一致的PyTorch库(MindConverter推荐使用PyTorch 1.4.0或PyTorch 1.6.0进行脚本生成); +2.
脚本转换工具本质上为算子驱动,对于MindConverter未维护的PyTorch或ONNX算子与MindSpore算子映射,将会出现相应的算子无法转换的问题,对于该类算子,用户可手动修改,或基于MindConverter实现映射关系,向MindInsight仓库贡献。 diff --git a/tutorials/source_zh_cn/advanced_use/model_security.md b/tutorials/source_zh_cn/advanced_use/model_security.md index c5b97a4bae0c2216f655d1fc30739bf22804ce34..efcbe431a580bed18d1e9dffd93549b375d0266d 100644 --- a/tutorials/source_zh_cn/advanced_use/model_security.md +++ b/tutorials/source_zh_cn/advanced_use/model_security.md @@ -50,7 +50,7 @@ from mindspore import dataset as ds import mindspore.common.dtype as mstype import mindspore.dataset.vision.c_transforms as CV import mindspore.dataset.transforms.c_transforms as C -from mindspore.dataset.vision.import Inter +from mindspore.dataset.vision import Inter import mindspore.nn as nn from mindspore.nn import SoftmaxCrossEntropyWithLogits from mindspore.common.initializer import TruncatedNormal @@ -59,9 +59,9 @@ from mindspore import Tensor from mindspore import context from mindspore.train.callback import LossMonitor -from mindarmour.attacks.gradient_method import FastGradientSignMethod +from mindarmour.adv_robustness.attacks import FastGradientSignMethod from mindarmour.utils.logger import LogUtil -from mindarmour.evaluations.attack_evaluation import AttackEvaluate +from mindarmour.adv_robustness.evaluations import AttackEvaluate context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") @@ -178,7 +178,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1, 2. 训练LeNet模型。利用上面定义的数据加载函数`generate_mnist_dataset`载入数据。 ```python - mnist_path = "./MNIST_unzip/" + mnist_path = "./MNIST/" batch_size = 32 # train original model ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"), @@ -198,8 +198,8 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1, inputs = [] labels = [] for data in ds_test.create_tuple_iterator(): - inputs.append(data[0].astype(np.float32)) - labels.append(data[1]) + inputs.append(data[0].asnumpy().astype(np.float32)) + labels.append(data[1].asnumpy()) test_inputs = np.concatenate(inputs) test_labels = np.concatenate(labels) ``` @@ -296,7 +296,7 @@ NaturalAdversarialDefense(NAD)是一种简单有效的对抗样本防御方 调用MindArmour提供的NAD防御接口(NaturalAdversarialDefense)。 ```python -from mindarmour.defenses import NaturalAdversarialDefense +from mindarmour.adv_robustness.defenses import NaturalAdversarialDefense # defense diff --git a/tutorials/source_zh_cn/advanced_use/network_migration.md b/tutorials/source_zh_cn/advanced_use/network_migration.md index 663c24616ea67b38cdb546b29f276a077da6f9c1..a1d48cb6c9ff4073b0b2b0bd91d60571921184ae 100644 --- a/tutorials/source_zh_cn/advanced_use/network_migration.md +++ b/tutorials/source_zh_cn/advanced_use/network_migration.md @@ -67,7 +67,7 @@ MindSpore与TensorFlow、PyTorch在网络结构组织方式上,存在一定差 2. 加载数据集和预处理。 - 使用MindSpore构造你需要使用的数据集。目前MindSpore已支持常见数据集,你可以通过原始格式、`MindRecord`、`TFRecord`等多种接口调用,同时还支持数据处理以及数据增强等相关功能,具体用法可参考[准备数据教程](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/data_preparation.html)。 + 使用MindSpore构造你需要使用的数据集。目前MindSpore已支持常见数据集,你可以通过原始格式、`MindRecord`、`TFRecord`等多种接口调用,同时还支持数据处理以及数据增强等相关功能,具体用法可参考[准备数据教程](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation.html)。 本例中加载了Cifar-10数据集,可同时支持单卡和多卡的场景。 @@ -269,6 +269,6 @@ MindSpore与TensorFlow、PyTorch在网络结构组织方式上,存在一定差 ## 样例参考 -1. [常用数据集读取样例](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/loading_the_datasets.html) +1. 
[常用数据集读取样例](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_loading.html) 2. [Model Zoo](https://gitee.com/mindspore/mindspore/tree/master/model_zoo) diff --git a/tutorials/source_zh_cn/advanced_use/optimize_the_performance_of_data_preparation.md b/tutorials/source_zh_cn/advanced_use/optimize_the_performance_of_data_preparation.md index 83125c64b7c61030d38724ac266df4ab31ac5747..ab7a90055056ec65f20122a0192e549ecf2a23f4 100644 --- a/tutorials/source_zh_cn/advanced_use/optimize_the_performance_of_data_preparation.md +++ b/tutorials/source_zh_cn/advanced_use/optimize_the_performance_of_data_preparation.md @@ -1,50 +1,32 @@ # 优化数据准备的性能 -`Linux` `Ascend` `GPU` `CPU` `数据准备` `初级` `中级` `高级` +`Linux` `Ascend` `GPU` `CPU` `数据准备` `中级` `高级` - [优化数据准备的性能](#优化数据准备的性能) - [概述](#概述) - - [整体流程](#整体流程) - [准备环节](#准备环节) - - [导入模块](#导入模块) - - [下载所需数据集](#下载所需数据集) - [数据加载性能优化](#数据加载性能优化) - - [性能优化方案](#性能优化方案) - - [代码示例](#代码示例) - [shuffle性能优化](#shuffle性能优化) - - [性能优化方案](#性能优化方案-1) - - [代码示例](#代码示例-1) - [数据增强性能优化](#数据增强性能优化) - - [性能优化方案](#性能优化方案-2) - - [代码示例](#代码示例-2) + - [操作系统性能优化](#操作系统性能优化) - [性能优化方案总结](#性能优化方案总结) - - [多线程优化方案](#多线程优化方案) - - [多进程优化方案](#多进程优化方案) - - [Compose优化方案](#compose优化方案) - - [算子融合优化方案](#算子融合优化方案)    - ## 概述 -数据是整个深度学习中最重要的一环,因为数据的好坏决定了最终结果的上限,模型的好坏只是去无限逼近这个上限,所以高质量的数据输入,会在整个深度神经网络中起到积极作用,数据在整个数据处理和数据增强的过程像经过pipeline管道的水一样,源源不断地流向训练系统,如图所示: +数据是整个深度学习中最重要的一环,数据的好坏决定了最终结果的上限,模型的训练只是去无限逼近这个上限。所以高质量的数据输入,会在整个深度神经网络中起到积极作用。数据在整个数据处理和数据增强的过程中像经过pipeline管道的水一样,源源不断地流向训练系统。 ![title](./images/pipeline.png) -MindSpore为用户提供了数据处理以及数据增强的功能,在数据的整个pipeline过程中,其中的每一步骤,如果都能够进行合理的运用,那么数据的性能会得到很大的优化和提升。本次体验将基于CIFAR-10数据集来为大家展示如何在数据加载、数据处理和数据增强的过程中进行性能的优化。 +MindSpore为用户提供了数据处理和数据增强的功能,在整个pipeline过程中的每一步骤,如果都能够进行合理的运用,那么数据的性能会得到很大的优化和提升。下面将基于CIFAR-10数据集来为大家展示如何在数据加载、数据处理和数据增强的过程中进行性能的优化。 -## 整体流程 -- 准备环节。 -- 数据加载性能优化。 -- shuffle性能优化。 -- 数据增强性能优化。 -- 性能优化方案总结。 +此外,操作系统的存储、架构和计算资源也会一定程度上影响数据处理的性能。 ## 准备环节 @@ -52,14 +34,12 @@ MindSpore为用户提供了数据处理以及数据增强的功能,在数据 `dataset`模块提供API用来加载和处理数据集。 - ```python import mindspore.dataset as ds ``` `numpy`模块用于生成ndarray数组。 - ```python import numpy as np ``` @@ -73,26 +53,27 @@ import numpy as np 目录结构如下所示: - - dataset/Cifar10Data - ├── cifar-10-batches-bin - │   ├── batches.meta.txt - │   ├── data_batch_1.bin - │   ├── data_batch_2.bin - │   ├── data_batch_3.bin - │   ├── data_batch_4.bin - │   ├── data_batch_5.bin - │   ├── readme.html - │   └── test_batch.bin - └── cifar-10-batches-py - ├── batches.meta - ├── data_batch_1 - ├── data_batch_2 - ├── data_batch_3 - ├── data_batch_4 - ├── data_batch_5 - ├── readme.html - └── test_batch +``` +dataset/Cifar10Data +├── cifar-10-batches-bin +│   ├── batches.meta.txt +│   ├── data_batch_1.bin +│   ├── data_batch_2.bin +│   ├── data_batch_3.bin +│   ├── data_batch_4.bin +│   ├── data_batch_5.bin +│   ├── readme.html +│   └── test_batch.bin +└── cifar-10-batches-py + ├── batches.meta + ├── data_batch_1 + ├── data_batch_2 + ├── data_batch_3 + ├── data_batch_4 + ├── data_batch_5 + ├── readme.html + └── test_batch +``` 其中: - `cifar-10-batches-bin`目录为CIFAR-10二进制格式数据集目录。 @@ -100,7 +81,7 @@ import numpy as np ## 数据加载性能优化 -MindSpore为用户提供了多种数据加载方式,其中包括常用数据集加载、用户自定义数据集加载、MindSpore数据格式加载,详情内容请参考[加载数据集](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/loading_the_datasets.html)。对于数据集加载,底层实现方式的不同,会导致数据集加载的性能存在差异,如下所示: +MindSpore为用户提供了多种数据加载方式,其中包括常用数据集加载、用户自定义数据集加载、MindSpore数据格式加载,详情内容请参考[数据集加载](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_loading.html)。对于数据集加载,底层实现方式的不同,会导致数据集加载的性能存在差异,如下所示: | 
| 常用数据集 | 用户自定义 | MindRecord | | :----: | :----: | :----: | :----: | @@ -112,9 +93,9 @@ MindSpore为用户提供了多种数据加载方式,其中包括常用数据 ![title](./images/data_loading_performance_scheme.png) 数据加载性能优化建议如下: -- 已经支持的数据集格式优选内置加载算子,具体内容请参考[内置加载算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html),如果性能仍无法满足需求,则可采取多线程并发方案,请参考本文[多线程优化方案](#id16)。 -- 不支持的数据集格式,优选转换为MindSpore数据格式后再使用`MindDataset`类进行加载,具体内容请参考[将数据集转换为MindSpore数据格式](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/converting_datasets.html),如果性能仍无法满足需求,则可采取多线程并发方案,请参考本文[多线程优化方案](#id16)。 -- 不支持的数据集格式,算法快速验证场景,优选用户自定义`GeneratorDataset`类实现,如果性能仍无法满足需求,则可采取多进程并发方案,请参考本文[多进程优化方案](#id17)。 +- 已经支持的数据集格式优选内置加载算子,具体内容请参考[内置加载算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html),如果性能仍无法满足需求,则可采取多线程并发方案,请参考本文[多线程优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id16)。 +- 不支持的数据集格式,优选转换为MindSpore数据格式后再使用`MindDataset`类进行加载,具体内容请参考[MindSpore数据格式转换](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_conversion.html),如果性能仍无法满足需求,则可采取多线程并发方案,请参考本文[多线程优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id16)。 +- 不支持的数据集格式,算法快速验证场景,优选用户自定义`GeneratorDataset`类实现,如果性能仍无法满足需求,则可采取多进程并发方案,请参考本文[多进程优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id17)。 ### 代码示例 @@ -122,19 +103,20 @@ MindSpore为用户提供了多种数据加载方式,其中包括常用数据 1. 使用内置算子`Cifar10Dataset`类加载CIFAR-10数据集,这里使用的是CIFAR-10二进制格式的数据集,加载数据时采取多线程优化方案,开启了4个线程并发完成任务,最后对数据创建了字典迭代器,并通过迭代器读取了一条数据记录。 - ```python cifar10_path = "./dataset/Cifar10Data/cifar-10-batches-bin/" # create Cifar10Dataset for reading data - cifar10_dataset = ds.Cifar10Dataset(cifar10_path,num_parallel_workers=4) + cifar10_dataset = ds.Cifar10Dataset(cifar10_path, num_parallel_workers=4) # create a dictionary iterator and read a data record through the iterator print(next(cifar10_dataset.create_dict_iterator())) ``` 输出: + ``` - {'image': array([[[235, 235, 235], + {'image': Tensor(shape=[32, 32, 3], dtype=UInt8, value= + [[[235, 235, 235], [230, 230, 230], [234, 234, 234], ..., @@ -144,55 +126,55 @@ MindSpore为用户提供了多种数据加载方式,其中包括常用数据 ..., [120, 120, 119], [146, 146, 146], - [177, 174, 190]]], dtype=uint8), 'label': array(9, dtype=uint32)} + [177, 174, 190]]]), 'label': Tensor(shape=[], dtype=UInt32, value= 9)} ``` 2. 
使用`Cifar10ToMR`这个类将CIFAR-10数据集转换为MindSpore数据格式,这里使用的是CIFAR-10 python文件格式的数据集,然后使用`MindDataset`类加载MindSpore数据格式数据集,加载数据采取多线程优化方案,开启了4个线程并发完成任务,最后对数据创建了字典迭代器,并通过迭代器读取了一条数据记录。 - ```python from mindspore.mindrecord import Cifar10ToMR cifar10_path = './dataset/Cifar10Data/cifar-10-batches-py/' cifar10_mindrecord_path = './transform/cifar10.record' - cifar10_transformer = Cifar10ToMR(cifar10_path,cifar10_mindrecord_path) + cifar10_transformer = Cifar10ToMR(cifar10_path, cifar10_mindrecord_path) # executes transformation from Cifar10 to MindRecord cifar10_transformer.transform(['label']) # create MindDataset for reading data - cifar10_mind_dataset = ds.MindDataset(dataset_file=cifar10_mindrecord_path,num_parallel_workers=4) + cifar10_mind_dataset = ds.MindDataset(dataset_file=cifar10_mindrecord_path, num_parallel_workers=4) # create a dictionary iterator and read a data record through the iterator print(next(cifar10_mind_dataset.create_dict_iterator())) ``` 输出: + ``` - {'data': array([255, 216, 255, ..., 63, 255, 217], dtype=uint8), 'id': array(30474, dtype=int64), 'label': array(2, dtype=int64)} + {'data': Tensor(shape=[1431], dtype=UInt8, value= [255, 216, 255, ..., 63, 255, 217]), 'id': Tensor(shape=[], dtype=Int64, value= 30474), 'label': Tensor(shape=[], dtype=Int64, value= 2)} ``` 3. 使用`GeneratorDataset`类加载自定义数据集,并且采取多进程优化方案,开启了4个进程并发完成任务,最后对数据创建了字典迭代器,并通过迭代器读取了一条数据记录。 - ```python def generator_func(num): for i in range(num): yield (np.array([i]),) # create GeneratorDataset for reading data - dataset = ds.GeneratorDataset(source=generator_func(5),column_names=["data"],num_parallel_workers=4) + dataset = ds.GeneratorDataset(source=generator_func(5), column_names=["data"], num_parallel_workers=4) # create a dictionary iterator and read a data record through the iterator print(next(dataset.create_dict_iterator())) ``` 输出: + ``` - {'data': array([0], dtype=int64)} + {'data': Tensor(shape=[1], dtype=Int64, value= [0])} ``` ## shuffle性能优化 -shuffle操作主要是对有序的数据集或者进行过repeat的数据集进行混洗,MindSpore专门为用户提供了`shuffle`函数,其中设定的`buffer_size`参数越大,混洗程度越大,但时间、计算资源消耗也会大。该接口支持用户在整个pipeline的任何时候都可以对数据进行混洗,具体内容请参考[shuffle处理](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/data_processing_and_augmentation.html#shuffle)。但是因为底层的实现方式不同,该方式的性能不如直接在[内置加载算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html)中设置`shuffle`参数直接对数据进行混洗。 +shuffle操作主要是对有序的数据集或者进行过repeat的数据集进行混洗,MindSpore专门为用户提供了`shuffle`函数,其中设定的`buffer_size`参数越大,混洗程度越大,但时间、计算资源消耗也会大。该接口支持用户在整个pipeline的任何时候都可以对数据进行混洗,具体内容请参考[shuffle处理](https://www.mindspore.cn/api/zh-CN/master/programming_guide/pipeline.html#shuffle)。但是因为底层的实现方式不同,该方式的性能不如直接在[内置加载算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.html)中设置`shuffle`参数直接对数据进行混洗。 ### 性能优化方案 @@ -200,7 +182,7 @@ shuffle操作主要是对有序的数据集或者进行过repeat的数据集进 shuffle性能优化建议如下: - 直接使用内置加载算子的`shuffle`参数进行数据的混洗。 -- 如果使用的是`shuffle`函数,当性能仍无法满足需求,可通过调大`buffer_size`参数的值来优化提升性能。 +- 如果使用的是`shuffle`函数,当性能仍无法满足需求,可通过调整`buffer_size`参数的值来优化提升性能。 ### 代码示例 @@ -208,19 +190,20 @@ shuffle性能优化建议如下: 1. 
使用内置算子`Cifar10Dataset`类加载CIFAR-10数据集,这里使用的是CIFAR-10二进制格式的数据集,并且设置`shuffle`参数为True来进行数据混洗,最后对数据创建了字典迭代器,并通过迭代器读取了一条数据记录。 - ```python cifar10_path = "./dataset/Cifar10Data/cifar-10-batches-bin/" # create Cifar10Dataset for reading data - cifar10_dataset = ds.Cifar10Dataset(cifar10_path,shuffle=True) + cifar10_dataset = ds.Cifar10Dataset(cifar10_path, shuffle=True) # create a dictionary iterator and read a data record through the iterator print(next(cifar10_dataset.create_dict_iterator())) ``` 输出: + ``` - {'image': array([[[254, 254, 254], + {'image': Tensor(shape=[32, 32, 3], dtype=UInt8, value= + [[[254, 254, 254], [255, 255, 254], [255, 255, 254], ..., @@ -230,18 +213,17 @@ shuffle性能优化建议如下: ..., [ 64, 61, 63], [ 63, 58, 60], - [ 61, 56, 58]]], dtype=uint8), 'label': array(9, dtype=uint32)} + [ 61, 56, 58]]]), 'label': Tensor(shape=[], dtype=UInt32, value= 9)} ``` 2. 使用`shuffle`函数进行数据混洗,参数`buffer_size`设置为3,数据采用`GeneratorDataset`类自定义生成。 - ```python def generator_func(): for i in range(5): - yield (np.array([i,i+1,i+2,i+3,i+4]),) + yield (np.array([i, i+1, i+2, i+3, i+4]),) - ds1 = ds.GeneratorDataset(source=generator_func,column_names=["data"]) + ds1 = ds.GeneratorDataset(source=generator_func, column_names=["data"]) print("before shuffle:") for data in ds1.create_dict_iterator(): print(data["data"]) @@ -253,6 +235,7 @@ shuffle性能优化建议如下: ``` 输出: + ``` before shuffle: [0 1 2 3 4] @@ -275,7 +258,7 @@ shuffle性能优化建议如下: - 使用内置Python算子(`py_transforms`模块)进行数据增强。 - 用户可根据自己的需求,自定义Python函数进行数据增强。 -具体的内容请参考[数据增强](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/data_processing_and_augmentation.html#id3)。因为底层的实现方式不同,所以性能还是有一定的差异,如下所示: +具体的内容请参考[数据增强](https://www.mindspore.cn/api/zh-CN/master/programming_guide/augmentation.html)。因为底层的实现方式不同,所以性能还是有一定的差异,如下所示: | 模块 | 底层接口 | 说明 | | :----: | :----: | :----: | @@ -289,10 +272,10 @@ shuffle性能优化建议如下: 数据增强性能优化建议如下: -- 优先使用`c_transforms`模块进行数据增强,因为性能最高,如果性能仍无法满足需求,可采取[多线程优化方案](#id16)、[Compose优化方案](#compose)或者[算子融合优化方案](#id18)。 -- 如果使用了`py_transforms`模块进行数据增强,当性能仍无法满足需求,可采取[多线程优化方案](#id16)、[多进程优化方案](#id17)、[Compose优化方案](#compose)或者[算子融合优化方案](#id18)。 +- 优先使用`c_transforms`模块进行数据增强,因为性能最高,如果性能仍无法满足需求,可采取[多线程优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id16)、[Compose优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#compose)或者[算子融合优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id18)。 +- 如果使用了`py_transforms`模块进行数据增强,当性能仍无法满足需求,可采取[多线程优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id16)、[多进程优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id17)、[Compose优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#compose)或者[算子融合优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id18)。 - `c_transforms`模块是在C++内维护buffer管理,`py_transforms`模块是在Python内维护buffer管理。因为Python和C++切换的性能成本,建议不要混用算子。 -- 如果用户使用了自定义Python函数进行数据增强,当性能仍无法满足需求,可采取[多线程优化方案](#id16)或者[多进程优化方案](#id17),如果还是无法提升性能,就需要对自定义的Python代码进行优化。 +- 
如果用户使用了自定义Python函数进行数据增强,当性能仍无法满足需求,可采取[多线程优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id16)或者[多进程优化方案](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/optimize_the_performance_of_data_preparation.html#id17),如果还是无法提升性能,就需要对自定义的Python代码进行优化。 ### 代码示例 @@ -300,7 +283,6 @@ shuffle性能优化建议如下: 1. 使用`c_transforms`模块进行数据增强,数据增强时采用多线程优化方案,开启了4个线程并发完成任务,并且采用了算子融合优化方案,使用`RandomResizedCrop`融合类替代`RandomResize`类和`RandomCrop`类。 - ```python import mindspore.dataset.transforms.c_transforms as c_transforms import mindspore.dataset.vision.c_transforms as C @@ -308,13 +290,13 @@ shuffle性能优化建议如下: cifar10_path = "./dataset/Cifar10Data/cifar-10-batches-bin/" # create Cifar10Dataset for reading data - cifar10_dataset = ds.Cifar10Dataset(cifar10_path,num_parallel_workers=4) + cifar10_dataset = ds.Cifar10Dataset(cifar10_path, num_parallel_workers=4) transforms = C.RandomResizedCrop((800,800)) # apply the transform to the dataset through dataset.map() cifar10_dataset = cifar10_dataset.map(operations=transforms, input_columns="image", num_parallel_workers=4) data = next(cifar10_dataset.create_dict_iterator()) - plt.imshow(data["image"]) + plt.imshow(data["image"].asnumpy()) plt.show() ``` @@ -325,25 +307,25 @@ shuffle性能优化建议如下: 2. 使用自定义Python函数进行数据增强,数据增强时采用多进程优化方案,开启了4个进程并发完成任务。 - ```python def generator_func(): for i in range(5): - yield (np.array([i,i+1,i+2,i+3,i+4]),) + yield (np.array([i, i+1, i+2, i+3, i+4]),) - ds3 = ds.GeneratorDataset(source=generator_func,column_names=["data"]) + ds3 = ds.GeneratorDataset(source=generator_func, column_names=["data"]) print("before map:") for data in ds3.create_dict_iterator(): print(data["data"]) func = lambda x:x**2 - ds4 = ds3.map(operations=func, input_columns="data", python_multiprocessing=True,num_parallel_workers=4) + ds4 = ds3.map(operations=func, input_columns="data", python_multiprocessing=True, num_parallel_workers=4) print("after map:") for data in ds4.create_dict_iterator(): print(data["data"]) ``` 输出: + ``` before map: [0 1 2 3 4] @@ -359,6 +341,50 @@ shuffle性能优化建议如下: [16 25 36 49 64] ``` +## 操作系统性能优化 + +由于数据处理是在host端进行,那么机器或者操作系统本身的一些配置会对数据处理存在影响,主要有存储、NUMA架构、CPU(计算资源)几个方面。 + +1. 存储 + + 当数据集较大时,推荐使用固态硬盘对数据进行存储,能够减少存储I/O对数据处理的影响。 + + > 一般地,当数据集被加载之后,就会缓存在操作系统的page cache中,在一定程度上降低了存储开销,加快了后续epoch的数据读取。 + +2. NUMA架构 + + 非一致性内存架构(Non-uniform Memory Architecture)是为了解决传统的对称多处理(Symmetric Multi-processor)系统中的可扩展性问题而诞生的。NUMA系统拥有多条内存总线,于是将几个处理器通过内存总线与一块内存相连构成一个组,这样整个庞大的系统就可以被分为若干个组,这个组的概念在NUMA系统中被称为节点(node)。处于该节点中的内存被称为本地内存(local memory),处于其他节点中的内存对于该组而言被称为外部内存(foreign memory)。因此每个节点访问本地内存和访问其他节点的外部内存的延迟是不相同的,在数据处理的过程中需要尽可能避免这一情况的发生。一般我们可以使用以下命令进行进程与node节点的绑定: + + ```shell + numactl --cpubind=0 --membind=0 python train.py + ``` + + 上述例子表示将此次运行的`train.py`的进程绑定到`numa node` 0上。 + +3. 
CPU(计算资源)
+
+    CPU对于数据处理的影响主要是计算资源的分配和CPU频率的设置两个方面。
+
+    - 计算资源的分配
+
+      当我们进行分布式训练时,一台设备机器上会启动多个训练进程,而这些训练进程会通过操作系统本身的策略进行计算资源的分配与抢占。当进程较多时,可能会由于计算资源的竞争而导致数据处理性能下降,因此这时需要人工分配计算资源,避免各个进程间的计算资源竞争。
+
+      ```shell
+      numactl --cpubind=0 python train.py
+      # 或者
+      taskset -c 0-15 python train.py
+      ```
+
+      > `numactl`的方式较为粗粒度,直接指定`numa node id`;而`taskset`的方式是细粒度的,它能够直接指定`numa node`上的`cpu core`,其中0-15表示`core id`从0到15。
+
+    - CPU频率设置
+
+      要想充分发挥host端CPU的最大算力,CPU频率的设置至关重要。一般地,Linux内核支持调节CPU主频,降低功耗,以达到节能的效果。通过选择系统空闲状态下不同的电源管理策略,可以实现不同程度地降低服务器功耗。但是,更低的功耗策略意味着CPU唤醒更慢,对性能的影响也更大。因此,如果发现CPU模式为conservative或者powersave,可以使用cpupower将CPU设置为performance模式,对数据处理的性能提升有非常大的效果。
+
+      ```shell
+      cpupower frequency-set -g performance
+      ```
+
 ## 性能优化方案总结
 
 ### 多线程优化方案
@@ -387,3 +413,10 @@ Map算子可以接收Tensor算子列表,并将按照顺序应用所有的这
 提供某些融合算子,这些算子将两个或多个算子的功能聚合到一个算子中。具体内容请参考[数据增强算子](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.dataset.vision.html),与它们各自组件的流水线相比,这种融合算子提供了更好的性能。如图所示:
 
 ![title](./images/operator_fusion.png)
+
+### 操作系统优化方案
+
+- 使用固态硬盘进行数据存储。
+- 将进程与node节点绑定。
+- 人工分配更多的计算资源。
+- 提高CPU运算频率。
diff --git a/tutorials/source_zh_cn/advanced_use/parameter_server_training.md b/tutorials/source_zh_cn/advanced_use/parameter_server_training.md
index 934e6bbeb260a2d07acd56512db016e0342382b1..7871066cad3ddc2e9fbf2a7b48e619e56506d36c 100644
--- a/tutorials/source_zh_cn/advanced_use/parameter_server_training.md
+++ b/tutorials/source_zh_cn/advanced_use/parameter_server_training.md
@@ -25,7 +25,7 @@ Parameter Server(参数服务器)是分布式训练中一种广泛使用的架
 
 - Server:保存模型的权重和反向计算的梯度值,并使用优化器通过Worker上传的梯度值对模型进行更新。
 
-- Worker:执行网络的正反向计算,正向计算的梯度值通过Push接口上传至Server中,通过Pull接口把Server更新好的模型下载到Worker本地。
+- Worker:执行网络的正反向计算,反向计算的梯度值通过Push接口上传至Server中,通过Pull接口把Server更新好的模型下载到Worker本地。
 
 - Scheduler:用于建立Server和Worker的通信关系。
 
@@ -40,21 +40,21 @@ Parameter Server(参数服务器)是分布式训练中一种广泛使用的架
 
 1. 首先调用`mindspore.context.set_ps_context(enable_ps=True)`开启Parameter Server训练模式。
 
-- 此接口需在`mindspore.communication.management.init()`之前调用。
-- 若没有调用此接口,下面的[环境变量设置](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/parameter_server_training.html#id5)则不会生效。
-- 调用`mindspore.context.reset_ps_context()`可以关闭Parameter Server训练模式。
+    - 此接口需在`mindspore.communication.management.init()`之前调用。
+    - 若没有调用此接口,下面的[环境变量设置](https://www.mindspore.cn/tutorial/zh-CN/master/advanced_use/parameter_server_training.html#id5)则不会生效。
+    - 调用`mindspore.context.reset_ps_context()`可以关闭Parameter Server训练模式。
 
2. 在本训练模式下,有以下两种调用接口方式以控制训练参数是否通过Parameter Server进行更新:
 
-- 通过`mindspore.nn.Cell.set_param_ps()`对`nn.Cell`中所有权重递归设置。
-- 通过`mindspore.common.Parameter.set_param_ps()`对此权重进行设置。
+    - 通过`mindspore.nn.Cell.set_param_ps()`对`nn.Cell`中所有权重递归设置。
+    - 通过`mindspore.common.Parameter.set_param_ps()`对此权重进行设置。
 
3. 
在[原训练脚本](https://gitee.com/mindspore/mindspore/blob/master/model_zoo/official/cv/lenet/train.py)基础上,设置LeNet模型所有权重通过Parameter Server训练:
 
-```python
-context.set_ps_context(enable_ps=True)
-network = LeNet5(cfg.num_classes)
-network.set_param_ps()
-```
+    ```python
+    context.set_ps_context(enable_ps=True)
+    network = LeNet5(cfg.num_classes)
+    network.set_param_ps()
+    ```
 
### 环境变量设置
 
diff --git a/tutorials/source_zh_cn/advanced_use/performance_profiling.md b/tutorials/source_zh_cn/advanced_use/performance_profiling.md
index c94106f8f77840d3be27cc14dfba85d918f6bd92..7d6791da9d01db802654417e9d74473cbc47b537 100644
--- a/tutorials/source_zh_cn/advanced_use/performance_profiling.md
+++ b/tutorials/source_zh_cn/advanced_use/performance_profiling.md
@@ -30,7 +30,7 @@
 - 在训练列表找到对应训练,点击性能分析,即可在页面中查看训练性能数据。
 
 ## 环境准备
-在使用性能分析工具之前,要确保后台工具进程(ada)正确启动,要求用户使用root启动ada进程,启动命令为:`/usr/local/Ascend/driver/tools/ada`。
+在使用性能分析工具之前,要确保后台工具进程(ada)正确启动,要求用户使用HwHiAiUser用户组的用户或root启动ada进程,并使用同用户跑训练脚本,启动命令为:`/usr/local/Ascend/driver/tools/ada`。
 
 ## 准备训练脚本
 
@@ -194,6 +194,6 @@ Timeline主要包含如下几个部分:
 
 > 如何控制step数目请参考数据准备教程:
 >
-> 
+> 
 
 - Timeline数据的解析比较耗时,且一般几个step的数据即足够分析出结果。出于数据解析和UI展示性能的考虑,Profiler最多展示20M数据(对大型网络20M可以显示10+条step的信息)。
diff --git a/tutorials/source_zh_cn/advanced_use/performance_profiling_gpu.md b/tutorials/source_zh_cn/advanced_use/performance_profiling_gpu.md
index 68a7ef86cb87e5c2e7fee8d6178850b34a168718..dbe2a38043fc8be919e477b8f4f44dfdb60c5c77 100644
--- a/tutorials/source_zh_cn/advanced_use/performance_profiling_gpu.md
+++ b/tutorials/source_zh_cn/advanced_use/performance_profiling_gpu.md
@@ -26,6 +26,10 @@
 >
 >
+> 普通用户默认情况下无权访问目标设备上的NVIDIA GPU性能计数器。如果普通用户需要在训练脚本中使用profiler性能统计能力,则需参考以下网址的说明进行权限配置。
+>
+>
+
 ## 准备训练脚本
 
 为了收集神经网络的性能数据,需要在训练脚本中添加MindSpore Profiler相关接口。
 
@@ -79,8 +83,9 @@ class StopAtStep(Callback):
 
 图1:性能数据总览
 
-图1展示了性能数据总览页面,包含了迭代轨迹(Step Trace)、算子性能、MindData性能和Timeline等组件的数据总体呈现。目前GPU场景下只支持算子性能统计功能:
+图1展示了性能数据总览页面,包含了迭代轨迹(Step Trace)、算子性能、MindData性能和Timeline等组件的数据总体呈现:
 - 算子性能:统计单算子以及各算子类型的执行时间,进行排序展示;总览页中展示了各算子类型平均执行时间占比的饼状图。
+- Timeline:统计了算子以及CUDA activity,在时间轴排列展示;总览页展示了Timeline中执行情况汇总。
 
 用户可以点击查看详情链接,进入组件页面进行详细分析。
 
diff --git a/tutorials/source_zh_cn/advanced_use/quantization_aware.md b/tutorials/source_zh_cn/advanced_use/quantization_aware.md
index 4d72f7f6a71f0d47d3f4ff79d7585fb3a0d99f1b..d8ba1412f560384da7c4da147ff51b962f623cbf 100644
--- a/tutorials/source_zh_cn/advanced_use/quantization_aware.md
+++ b/tutorials/source_zh_cn/advanced_use/quantization_aware.md
@@ -60,21 +60,19 @@ MindSpore的感知量化训练是在训练基础上,使用低精度数据替
 
 感知量化训练模型与一般训练步骤一致,在定义网络和最后生成模型阶段后,需要进行额外的操作,完整流程如下:
 
1. 数据处理加载数据集。
-2. 定义网络。
-3. 定义融合网络。在完成定义网络后,替换指定的算子,完成融合网络的定义。
+2. 定义原始非量化网络。
+3. 定义融合网络。在完成定义原始非量化网络后,替换指定的算子,完成融合网络的定义。
4. 定义优化器和损失函数。
-5. 进行模型训练。基于融合网络训练生成融合模型。
-6. 转化量化网络。基于融合网络训练后得到的融合模型,使用转化接口在融合模型中插入伪量化节点,生成的量化网络。
-7. 进行量化训练。基于量化网络训练,生成量化模型。
+5. 转化量化网络。基于融合网络,使用转化接口在融合网络中插入伪量化节点,生成量化网络。
+6. 
进行量化训练。基于量化网络训练,生成量化模型。 -在上面流程中,第3、6、7步是感知量化训练区别普通训练需要额外进行的步骤。 +在上面流程中,第3、5、6步是感知量化训练区别普通训练需要额外进行的步骤。 > - 融合网络:使用指定算子(`nn.Conv2dBnAct`、`nn.DenseBnAct`)替换后的网络。 -> - 融合模型:使用融合网络训练生成的checkpoint格式的模型。 > - 量化网络:融合模型使用转换接口(`convert_quant_network`)插入伪量化节点后得到的网络。 > - 量化模型:量化网络训练后得到的checkpoint格式的模型。 -接下来,以LeNet网络为例,展开叙述3、6两个步骤。 +接下来,以LeNet网络为例,展开叙述2、3两个步骤。 > 你可以在这里找到完整可运行的样例代码: 。 @@ -132,8 +130,8 @@ class LeNet5(nn.Cell): super(LeNet5, self).__init__() self.num_class = num_class - self.conv1 = nn.Conv2dBnAct(1, 6, kernel_size=5, batchnorm=True, activation='relu') - self.conv2 = nn.Conv2dBnAct(6, 16, kernel_size=5, batchnorm=True, activation='relu') + self.conv1 = nn.Conv2dBnAct(1, 6, kernel_size=5, activation='relu') + self.conv2 = nn.Conv2dBnAct(6, 16, kernel_size=5, activation='relu') self.fc1 = nn.DenseBnAct(16 * 5 * 5, 120, activation='relu') self.fc2 = nn.DenseBnAct(120, 84, activation='relu') @@ -155,9 +153,9 @@ class LeNet5(nn.Cell): 使用`convert_quant_network`接口自动在融合模型中插入伪量化节点,将融合模型转化为量化网络。 ```python -from mindspore.train.quant import quant as qat +from mindspore.train.quant import quant -net = qat.convert_quant_network(net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8) +net = quant.convert_quant_network(network, quant_delay=900, bn_fold=False, per_channel=[True, False], symmetric=[False, False]) ``` ## 重训和推理 @@ -167,16 +165,16 @@ net = qat.convert_quant_network(net, quant_delay=0, bn_fold=False, freeze_bn=100 上面介绍了从零开始进行感知量化训练。更常见情况是已有一个模型文件,希望生成量化模型,这时已有正常网络模型训练得到的模型文件及训练脚本,进行感知量化训练。这里使用checkpoint文件重新训练的功能,详细步骤为: 1. 数据处理加载数据集。 - 2. 定义网络。 - 3. 定义融合网络。 - 4. 定义优化器和损失函数。 - 5. 加载模型文件模型重训。加载已有模型文件,基于融合网络重新训练生成融合模型。详细模型重载训练,请参见 - 6. 转化量化网络。 - 7. 进行量化训练。 + 2. 定义原始非量化网络。 + 3. 训练原始网络生成非量化模型。 + 4. 定义融合网络。 + 5. 定义优化器和损失函数。 + 6. 基于融合网络转化生成量化网络。 + 7. 加载模型文件重训。加载已有非量化模型文件,基于量化网络重新训练生成量化模型。详细模型重载训练,请参见。 ### 进行推理 -使用量化模型进行推理,与普通模型推理一致,分为直接checkpoint文件推理及转化为通用模型格式(ONNX、AIR等)进行推理。 +使用量化模型进行推理,与普通模型推理一致,分为直接checkpoint文件推理及转化为通用模型格式(AIR、MINDIR等)进行推理。 > 推理详细说明请参见。 diff --git a/tutorials/source_zh_cn/advanced_use/second_order_optimizer_for_resnet50_application.md b/tutorials/source_zh_cn/advanced_use/second_order_optimizer_for_resnet50_application.md index b63c08ab952bac78d5067b479dbd5a3aea66ad01..048c5ab101b2e8cbedf42c026b66d5b34d6e730c 100644 --- a/tutorials/source_zh_cn/advanced_use/second_order_optimizer_for_resnet50_application.md +++ b/tutorials/source_zh_cn/advanced_use/second_order_optimizer_for_resnet50_application.md @@ -9,16 +9,24 @@ - [准备环节](#准备环节) - [准备数据集](#准备数据集) - [配置分布式环境变量](#配置分布式环境变量) + - [Ascend 910](#ascend-910) + - [GPU](#gpu) - [加载处理数据集](#加载处理数据集) - [定义网络](#定义网络) - [定义损失函数及THOR优化器](#定义损失函数及thor优化器) - [定义损失函数](#定义损失函数) - [定义优化器](#定义优化器) - [训练网络](#训练网络) - - [配置模型保存](#配置模型保存) + - [配置模型保存](#配置模型保存) - [配置训练网络](#配置训练网络) - - [运行脚本](#运行脚本) + - [运行脚本](#运行脚本) + - [Ascend 910](#ascend-910-1) + - [GPU](#gpu-1) - [模型推理](#模型推理) + - [定义推理网络](#定义推理网络) + - [执行推理](#执行推理) + - [Ascend 910](#ascend-910-2) + - [GPU](#gpu-2)    @@ -34,7 +42,7 @@ MindSpore开发团队在现有的自然梯度算法的基础上,对FIM矩阵 > 你可以在这里下载完整的示例代码: 。 -### 示例代码目录结构 +示例代码目录结构 ```shell ├── resnet_thor @@ -80,12 +88,12 @@ MindSpore开发团队在现有的自然梯度算法的基础上,对FIM矩阵 ``` └─ImageNet2012 ├─ilsvrc - │ n03676483/ - │ n04067472/ - │ n01622779/ + │ n03676483 + │ n04067472 + │ n01622779 │ ...... └─ilsvrc_eval - │ n03018349/ + │ n03018349 │ n02504013 │ n07871810 │ ...... 
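按照上述目录结构放置数据之后,可以先用一小段脚本验证数据集能否正常构建。下面是一个补充的示意片段(非教程原有代码):假设数据集构建函数`create_dataset`定义在示例代码的`src/dataset.py`中(其函数签名见下一个diff片段),其中的数据路径均为假设值:

```python
# 示意代码:按上述ImageNet2012目录结构构建训练集与推理集
# 假设create_dataset来自本示例的src/dataset.py
from src.dataset import create_dataset

train_path = "/path/to/ImageNet2012/ilsvrc"        # 训练数据目录(假设路径)
eval_path = "/path/to/ImageNet2012/ilsvrc_eval"    # 推理数据目录(假设路径)

# do_train用于区分训练与推理时的数据处理流程
train_dataset = create_dataset(train_path, do_train=True, repeat_num=1, batch_size=32)
eval_dataset = create_dataset(eval_path, do_train=False, batch_size=32)
print("train dataset size:", train_dataset.get_dataset_size())
```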
@@ -155,7 +163,7 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target=" return ds ``` -> MindSpore支持进行多种数据处理和增强的操作,各种操作往往组合使用,具体可以参考[数据处理与数据增强](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/data_processing_and_augmentation.html)章节。 +> MindSpore支持进行多种数据处理和增强的操作,各种操作往往组合使用,具体可以参考[数据处理与数据增强](https://www.mindspore.cn/api/zh-CN/master/programming_guide/data_pipeline.html)章节。 ## 定义网络 @@ -172,7 +180,7 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target=" ... from src.resnet_thor import resnet50 ... -f __name__ == "__main__": +if __name__ == "__main__": ... # define the net net = resnet50(class_num=config.class_num, damping=damping, loss_scale=config.loss_scale, @@ -331,7 +339,7 @@ epoch: 2 step: 5004, loss is 3.740064 epoch: 3 step: 5004, loss is 4.0546017 epoch: 4 step: 5004, loss is 3.7598825 epoch: 5 step: 5004, loss is 3.3744206 -...... +... epoch: 40 step: 5004, loss is 1.6907625 epoch: 41 step: 5004, loss is 1.8217756 epoch: 42 step: 5004, loss is 1.6453942 @@ -346,6 +354,7 @@ epoch: 42 step: 5004, loss is 1.6453942 ├─resnet-2_5004.ckpt │ ...... ├─resnet-42_5004.ckpt + │ ...... ``` 其中, @@ -370,7 +379,7 @@ epoch: 2 step: 5004, loss is 4.0819564 epoch: 3 step: 5004, loss is 3.7005644 epoch: 4 step: 5004, loss is 3.2668946 epoch: 5 step: 5004, loss is 3.023509 -...... +... epoch: 36 step: 5004, loss is 1.645802 ... ``` @@ -384,12 +393,14 @@ epoch: 36 step: 5004, loss is 1.645802 ├─resnet-2_5004.ckpt │ ...... ├─resnet-36_5004.ckpt - ...... + │ ...... + ...... ├─ckpt_7 ├─resnet-1_5004.ckpt ├─resnet-2_5004.ckpt │ ...... ├─resnet-36_5004.ckpt + │ ...... ``` diff --git a/tutorials/source_zh_cn/advanced_use/summary_record.md b/tutorials/source_zh_cn/advanced_use/summary_record.md index 65913760f539fa01f49aca6b54babb8dd29a1955..096638cb03834c63ab88070f4bab2476fdbe7bc0 100644 --- a/tutorials/source_zh_cn/advanced_use/summary_record.md +++ b/tutorials/source_zh_cn/advanced_use/summary_record.md @@ -11,7 +11,7 @@ - [方式一:通过SummaryCollector自动收集](#方式一通过summarycollector自动收集) - [方式二:结合Summary算子和SummaryCollector,自定义收集网络中的数据](#方式二结合summary算子和summarycollector自定义收集网络中的数据) - [方式三:自定义Callback记录数据](#方式三自定义callback记录数据) - - [运行MindInsight](#运行MindInsight) + - [运行MindInsight](#运行mindinsight) - [注意事项](#注意事项) @@ -129,10 +129,10 @@ model.eval(ds_eval, callbacks=[summary_collector]) MindSpore除了提供 `SummaryCollector` 能够自动收集一些常见数据,还提供了Summary算子,支持在网络中自定义收集其他的数据,比如每一个卷积层的输入,或在损失函数中的损失值等。 当前支持的Summary算子: -- [ScalarSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html?highlight=scalarsummary#mindspore.ops.operations.ScalarSummary): 记录标量数据 -- [TensorSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html?highlight=tensorsummary#mindspore.ops.operations.TensorSummary): 记录张量数据 -- [ImageSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html?highlight=imagesummary#mindspore.ops.operations.ImageSummary): 记录图片数据 -- [HistogramSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html?highlight=histogramsummar#mindspore.ops.operations.HistogramSummary): 将张量数据转为直方图数据记录 +- [ScalarSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html?highlight=scalarsummary#mindspore.ops.ScalarSummary): 记录标量数据 +- 
[TensorSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html?highlight=tensorsummary#mindspore.ops.TensorSummary): 记录张量数据 +- [ImageSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html?highlight=imagesummary#mindspore.ops.ImageSummary): 记录图片数据 +- [HistogramSummary](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.html?highlight=histogramsummar#mindspore.ops.HistogramSummary): 将张量数据转为直方图数据记录 记录方式如下面的步骤所示。 @@ -372,4 +372,6 @@ mindinsight stop model.train(epoch=2, train_dataset, callbacks=[confusion_callback, summary_collector]) ``` -3. 每个summary日志文件目录中,应该只放置一次训练的数据。一个summary日志目录中如果存放了多次训练的summary数据,MindInsight在可视化数据时会将这些训练的summary数据进行叠加展示,可能会与预期可视化效果不相符。 \ No newline at end of file +3. 每个summary日志文件目录中,应该只放置一次训练的数据。一个summary日志目录中如果存放了多次训练的summary数据,MindInsight在可视化数据时会将这些训练的summary数据进行叠加展示,可能会与预期可视化效果不相符。 + +4. 当前 `SummaryCollector` 和 `SummaryRecord` 不支持GPU多卡运行的场景。 \ No newline at end of file diff --git a/tutorials/source_zh_cn/advanced_use/visualization_tutorials.rst b/tutorials/source_zh_cn/advanced_use/visualization_tutorials.rst index aeebef69432ad889e5d45fa62f5be3eeb10cad97..65f7cb553c2d1f604c2e5435b58e84cbdf85a091 100644 --- a/tutorials/source_zh_cn/advanced_use/visualization_tutorials.rst +++ b/tutorials/source_zh_cn/advanced_use/visualization_tutorials.rst @@ -9,4 +9,5 @@ lineage_and_scalars_comparision performance_profiling performance_profiling_gpu + debugger mindinsight_commands diff --git a/tutorials/source_zh_cn/index.rst b/tutorials/source_zh_cn/index.rst index d382445417c30c6e4160546301131025cbdc7ee2..725993ce9a895bacdbd177039d3577f6ccbcf47a 100644 --- a/tutorials/source_zh_cn/index.rst +++ b/tutorials/source_zh_cn/index.rst @@ -11,8 +11,8 @@ MindSpore教程 :maxdepth: 1 :caption: 快速入门 - quick_start/linear_regression quick_start/quick_start + quick_start/linear_regression quick_start/quick_video .. toctree:: @@ -59,7 +59,6 @@ MindSpore教程 advanced_use/gradient_accumulation advanced_use/dataset_conversion advanced_use/auto_augmentation - advanced_use/auto_data_acceleration .. 
toctree:: :glob: diff --git a/tutorials/source_zh_cn/quick_start/images/linear_regression.gif b/tutorials/source_zh_cn/quick_start/images/linear_regression.gif index 41cc751e9d5d0d1205de31187bfa4e6103958795..ff616e2782ba2fecb54064ffb867d944d5b29f10 100644 Binary files a/tutorials/source_zh_cn/quick_start/images/linear_regression.gif and b/tutorials/source_zh_cn/quick_start/images/linear_regression.gif differ diff --git a/tutorials/source_zh_cn/quick_start/images/linear_regression_eval_datasets.png b/tutorials/source_zh_cn/quick_start/images/linear_regression_eval_datasets.png index 13fe0b3774b4c0e6b2b9375bc3582089f1452da7..7dc474508bf5241a038fee6d9b5c093199d93691 100644 Binary files a/tutorials/source_zh_cn/quick_start/images/linear_regression_eval_datasets.png and b/tutorials/source_zh_cn/quick_start/images/linear_regression_eval_datasets.png differ diff --git a/tutorials/source_zh_cn/quick_start/images/model_net_and_eval_datasets.png b/tutorials/source_zh_cn/quick_start/images/model_net_and_eval_datasets.png index f465ac789f03ca726f18c8694c90923741e24130..c99e0bd2155c4c42befeab1ead6820f9edf7c059 100644 Binary files a/tutorials/source_zh_cn/quick_start/images/model_net_and_eval_datasets.png and b/tutorials/source_zh_cn/quick_start/images/model_net_and_eval_datasets.png differ diff --git a/tutorials/source_zh_cn/quick_start/linear_regression.md b/tutorials/source_zh_cn/quick_start/linear_regression.md index 4d6a041baa2665e4752acb568135ae5d799bc006..69e968f03595e62038f04f54d4ca2d06d45ea78d 100644 --- a/tutorials/source_zh_cn/quick_start/linear_regression.md +++ b/tutorials/source_zh_cn/quick_start/linear_regression.md @@ -1,28 +1,24 @@ # 实现简单线性函数拟合 -`Linux` `GPU` `全流程` `初级` `中级` `高级` - -作者:[杨奕](https://github.com/helloyesterday)    编辑:[吕明赋](https://gitee.com/lvmingfu) +`Linux` `Windows` `Ascend` `CPU` `GPU` `全流程` `初级` `中级` `高级` -- [实现简单线性函数拟合](#实现简单线性函数拟合) - - [概述](#概述) - - [环境准备](#环境准备) - - [生成数据集](#生成数据集) - - [定义数据集生成函数](#定义数据集生成函数) - - [生成测试数据](#生成测试数据) +- [概述](#概述) +- [环境准备](#环境准备) +- [生成数据集](#生成数据集) + - [定义数据集生成函数](#定义数据集生成函数) + - [定义数据增强函数](#定义数据增强函数) +- [定义训练网络](#定义训练网络) +- [定义前向传播网络与反向传播网络并关联](#定义前向传播网络与反向传播网络并关联) - [定义前向传播网络](#定义前向传播网络) - - [初始化网络模型](#初始化网络模型) - - [查看初始化的网络模型](#查看初始化的网络模型) - - [定义损失函数](#定义损失函数) - - [损失函数与网络结合](#损失函数与网络结合) - [定义反向传播网络](#定义反向传播网络) - - [实现梯度函数](#实现梯度函数) - - [反向传播更新权重](#反向传播更新权重) - - [定义模型拟合过程可视化函数](#定义模型拟合过程可视化函数) - - [执行训练](#执行训练) - - [总结](#总结) + - [关联前向和反向传播网络](#关联前向和反向传播网络) +- [拟合过程可视化准备](#拟合过程可视化准备) + - [定义绘图函数](#定义绘图函数) + - [定义回调函数](#定义回调函数) +- [执行训练](#执行训练) +- [总结](#总结) @@ -30,374 +26,331 @@    + ## 概述 回归问题算法通常是利用一系列属性来预测一个值,预测的值是连续的。例如给出一套房子的一些特征数据,如面积、卧室数等等来预测房价,利用最近一周的气温变化和卫星云图来预测未来的气温情况等。如果一套房子实际价格为500万元,通过回归分析的预测值为499万元,则认为这是一个比较好的回归分析。在机器学习问题中,常见的回归分析有线性回归、多项式回归、逻辑回归等。本例子介绍线性回归算法,并通过MindSpore进行线性回归AI训练体验。 -主要流程如下: +整体流程如下: 1. 生成数据集 -2. 定义前向传播网络 -3. 定义反向传播网络 -4. 定义线性拟合过程的可视化函数 +2. 定义训练网络 +3. 定义前向传播网络与反向传播网络并关联 +4. 拟合过程可视化准备 5. 
执行训练
 
-本次样例源代码请参考:。
+本例的源代码地址:。
 
## 环境准备
 
-系统:Ubuntu18.04
-
-MindSpore版本:GPU
-
 设置MindSpore运行配置
 
-第三方支持包:`matplotlib`,未安装此包的,可使用命令`pip install matplotlib`预先安装。
 
```python
from mindspore import context
 
-context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
```
 
-`PYNATIVE_MODE`:自定义调试模式。
+`GRAPH_MODE`:静态图模式。
+
+`device_target`:设置MindSpore的训练硬件为CPU。
 
-`device_target`:设置MindSpore的训练硬件为GPU。
 
+> 本教程代码依赖`matplotlib`第三方支持包,可使用命令`pip install matplotlib`安装。
 
## 生成数据集
 
### 定义数据集生成函数
 
-`get_data`用于生成训练数据集和测试数据集。由于拟合的是线性数据,假定要拟合的目标函数为:$y=2x+3$,那么我们需要的训练数据集应随机分布于函数周边,这里采用了$y=2x+3+noise$的方式生成,其中`noise`为遵循标准正态分布规律的随机数值。
+`get_data`用于生成训练数据集和测试数据集。由于拟合的是线性数据,假定要拟合的目标函数为:$f(x)=2x+3$,那么我们需要的训练数据集应随机分布于函数周边,这里采用了$f(x)=2x+3+noise$的方式生成,其中`noise`为遵循标准正态分布规律的随机数值。
 
```python
import numpy as np
-import mindspore as ms
-from mindspore import Tensor
-
-def get_data(num,w=2.0, b=3.0):
-    np_x = np.ones([num, 1])
-    np_y = np.ones([num, 1])
+
+def get_data(num, w=2.0, b=3.0):
     for i in range(num):
         x = np.random.uniform(-10.0, 10.0)
-        np_x[i] = x
         noise = np.random.normal(0, 1)
         y = x * w + b + noise
-        np_y[i] = y
-    return Tensor(np_x,ms.float32), Tensor(np_y,ms.float32)
+        yield np.array([x]).astype(np.float32), np.array([y]).astype(np.float32)
```
 
-数据生成函数将有以下两个作用。
-
-1. 生成训练数据,对模型函数进行训练。
-2. 生成验证数据,在训练结束后,对模型函数进行精度验证。
-
-### 生成测试数据
-
-使用数据生成函数`get_data`随机生成50组验证数据,并可视化展示。
+使用`get_data`生成50组测试数据,可视化展示。
 
```python
import matplotlib.pyplot as plt
 
-eval_x, eval_label = get_data(50)
-x1, y1 = eval_x.asnumpy(), eval_label.asnumpy()
-plt.scatter(x1, y1, color="red", s=5)
-plt.title("Eval_data")
+eval_data = list(get_data(50))
+x_target_label = np.arange(-10, 10, 0.1)
+y_target_label = x_target_label * 2 + 3
+x_eval_label, y_eval_label = zip(*eval_data)
+
+plt.scatter(x_eval_label, y_eval_label, color="red", s=5)
+plt.plot(x_target_label, y_target_label, color="green")
+plt.title("Eval data")
 plt.show()
```
 
输出结果:
 
-![png](./images/linear_regression_eval_datasets.png)
+![png](./images/linear_regression_eval_datasets.png)
+
+上图中绿色线条部分为目标函数,红点部分为验证数据`eval_data`。
 
-## 定义前向传播网络
-
-### 初始化网络模型
-
-使用`nn.Dense`定义了网络模型,即为线性模型,
-
-$$y=wx+b\tag{1}$$
-
-其中,权重值$w$对应`weight`,$b$对应`bias`,并将其打印出来。
+### 定义数据增强函数
+
+先使用MindSpore的数据转换函数`GeneratorDataset`转换成适应MindSpore训练的数据类型,然后再使用`batch`、`repeat`对数据进行增强操作,操作解释如下:
+
+- `ds.GeneratorDataset`:将生成的数据转换为MindSpore的数据集,并且将生成的数据的x,y值存入到`data`和`label`的数组中。
+- `batch`:将`batch_size`个数据组合成一个batch。
+- `repeat`:将数据集数量倍增。
 
```python
-from mindspore.common.initializer import TruncatedNormal
-from mindspore import nn
-
-net = nn.Dense(1,1,TruncatedNormal(0.02),TruncatedNormal(0.02))
-print("weight:", net.weight.set_data([0][0]), "bias:", net.bias.set_data([0]))
+from mindspore import dataset as ds
+
+def create_dataset(num_data, batch_size=16, repeat_size=1):
+    input_data = ds.GeneratorDataset(list(get_data(num_data)), column_names=['data', 'label'])
+    input_data = input_data.batch(batch_size)
+    input_data = input_data.repeat(repeat_size)
+    return input_data
```
 
-输出结果:
-
-    weight: -0.00034249047 bias: -0.019308656
-
-### 查看初始化的网络模型
-
-我们将验证数据集和初始化的模型函数可视化。
+使用数据集增强函数生成训练数据,并查看训练数据的格式。
 
```python
-x = np.arange(-10, 10, 0.1)
-y = x * (net.weight.set_data([0][0]).asnumpy()) + (net.bias.set_data([0]).asnumpy())
-plt.scatter(x1, y1, color="red", s=5)
-plt.plot(x, y, "blue")
-plt.title("Eval data and net")
-plt.show()
+num_data = 1600
+batch_size = 16
+repeat_size = 1
+
+ds_train = create_dataset(num_data, batch_size=batch_size, repeat_size=repeat_size)
+print("The dataset size of ds_train:", ds_train.get_dataset_size())
+dict_datasets = ds_train.create_dict_iterator().get_next()
+
+print(dict_datasets.keys())
+print("The x label value shape:", dict_datasets["data"].shape)
+print("The y label value shape:", dict_datasets["label"].shape)
```
 
输出结果:
 
-![png](./images/model_net_and_eval_datasets.png)
-
-红色的点:为之前生成的50组验证数据集。
-
-蓝色的线:初始化的模型网络。
+    The dataset size of ds_train: 100
+    dict_keys(['data', 'label'])
+    The x label value shape: (16, 1)
+    The y label value shape: (16, 1)
+
+通过定义的`create_dataset`将生成的1600个数据增强为了100组shape为16x1的数据集。
 
-### 定义损失函数
+## 定义训练网络
 
-我们的网络模型表达式为:
-
-$$h(x)=wx+b\tag{2}$$
-
-一般地,数学上对线性回归模型采用均方差的方式来判断模型是否拟合得很好,即均方差的值$J(w)$值越小,函数模型便拟合得越好,验证数据代入后,预测得到的y值就越准确。公式2对应m个数据的均方差公式为:
-
-$$J(w)=\frac{1}{m}\sum_{i=1}^m(h(x_i)-y^{(i)})^2\tag{3}$$
-
-为了方便后续的计算,我们采用0.5倍的均方差的表达式来进行计算,均方差值整体缩小至0.5倍的计算方式对判断模型拟合的好坏没有影响。
-
-$$J(w)=\frac{1}{2m}\sum_{i=1}^m(h(x_i)-y^{(i)})^2\tag{4}$$
-
-公式4即为网络训练中的损失函数,其中参数:
-
-- $J(w)$为均方差。
-
-- $m$为样本数据的数量。
-
-- $h(x_i)$为第$i$个数据的$x_i$值代入模型网络(公式2)后的预测值。
-
-- $y^{(i)}$为第$i$个数据中的$y$值(label值)。
-
-在MindSpore中定义损失函数的方法如下。
+在MindSpore中使用`nn.Dense`生成单个数据输入,单个数据输出的线性函数模型:
+
+$$f(x)=wx+b\tag{1}$$
+
+并使用Normal算子随机初始化权重$w$和$b$。
 
```python
-from mindspore.ops import operations as P
-
-class MyLoss(nn.loss.loss._Loss):
-    def __init__(self,reduction='mean'):
-        super().__init__(reduction)
-        self.square = P.Square()
-    def construct(self, data, label):
-        x = self.square(data-label) * 0.5
-        return self.get_loss(x)
+from mindspore.common.initializer import Normal
+from mindspore import nn
+
+class LinearNet(nn.Cell):
+    def __init__(self):
+        super(LinearNet, self).__init__()
+        self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))
+
+    def construct(self, x):
+        x = self.fc(x)
+        return x
```
 
-其中:
-
-- `nn.loss.loss._Loss`:是MindSpore自定义loss算子的一个基类。
-
-- `P.Square`:MindSpore训练的框架中的平方算子,算子需要注册过才能在框架的计算图中使用。
-
-### 损失函数与网络结合
-
-接下来我们需要将loss函数的表达式和网络net关联在一起,在MindSpore中需要`nn.WithLossCell`,实现方法如下:
+调用网络查看初始化的模型参数。
 
```python
-criterion = MyLoss()
-loss_opeartion = nn.WithLossCell(net, criterion)
+net = LinearNet()
+model_params = net.trainable_params()
+print(model_params)
```
 
-其中:
-
-- `net`:网络模型。
-
-- `criterion`:即为实例化的loss函数。
-
-上述从数据代入到计算出loss值的过程为AI训练中的前向传播过程。
-
-## 定义反向传播网络
-
-有了损失函数后,我们如何使得损失函数最小呢?我们可以将公式1代入到损失函数公式4中展开:
-
-$$J(w,b)=\frac{1}{2m}\sum_{i=1}^m(wx_i+b-y^{(i)})^2\tag{5}$$
-
-公式5可以将$J(w)$看作为凹函数,对权重值$w$微分可求得:
-
-$$\frac{\partial{J(w)}}{\partial{w}}=\frac{1}{m}\sum_{i=1}^mx_i(wx_i+b-y^{(i)})\tag{6}$$
-
-由凹函数的特性可以知道,当公式6等于0时,损失函数有最小值:
-
-$$\sum_{i=1}^mx_i(wx_i+b-y^{(i)})=0\tag{7}$$
+输出结果:
+
+    [Parameter (name=fc.weight, value=Tensor(shape=[1, 1], dtype=Float32,
+    [[-7.35660456e-003]])), Parameter (name=fc.bias, value=Tensor(shape=[1], dtype=Float32, [-7.35660456e-003]))]
+
+初始化网络模型后,接下来将初始化的网络函数和训练数据集进行可视化,了解拟合前的模型函数情况。
+
+```python
+from mindspore import Tensor
+
+x_model_label = np.arange(-10, 10, 0.1)
+y_model_label = (x_model_label * Tensor(model_params[0]).asnumpy()[0][0] +
+                 Tensor(model_params[1]).asnumpy()[0])
+
+plt.scatter(x_eval_label, y_eval_label, color="red", s=5)
+plt.plot(x_model_label, y_model_label, color="blue")
+plt.plot(x_target_label, y_target_label, color="green")
+plt.show()
+```
+
+输出结果:
 
-假设有一个$w_{min}$使得公式7成立。我们如何将初始的权重$w_{s}$逐步的变成$w_{min}$,在这里采取迭代法,也就是梯度下降方法。
-
-当权重$w_{s}>w_{min}$,权重值需要左移即权重值变小接近$w_{min}$,才能使得损失函数逐步的变小,由凹函数的性质可知,在$w_{s}$处的导数为正(损失函数在$w_{min}$右边单调上升),公式8的值为正。其权重的更新公式为:
+- $h(x_i)$为第$i$个数据的$x_i$值代入模型网络(公式1)后的预测值。
 
-$$w_{ud}=w_{s}-\alpha\frac{\partial{J(w_{s})}}{\partial{w}}\tag{10}$$
+- 
$y^{(i)}$为第$i$个数据中的$y^{(i)}$值(label值)。 +### 定义前向传播网络 -当$w_{s}=w_{min}$时,到$\frac{\partial{J(w_{s})}}{\partial{w}}$=0,即梯度消失,其表达式也可写为公式9的样式。 +前向传播网络包含两个部分,其中: -在考虑了全区间的情况后,可以得出权重$w$的更新公式即为: +1. 将参数带入到模型网络中得出预测值。 +2. 使用预测值和训练数据计算出loss值。 -$$w_{ud}=w_{s}-\alpha\frac{\partial{J(w_{s})}}{\partial{w}}\tag{11}$$ +在MindSpore中使用如下方式实现。 -当权重$w$在更新的过程中假如临近$w_{min}$在增加或者减少一个$\Delta{w}$,从左边或者右边越过了$w_{min}$,公式11都会使权重往反的方向移动,那么最终$w_{s}$的值会在$w_{min}$附近来回迭代,在实际训练中我们也是这样采用迭代的方式取得最优权重$w$,使得损失函数无限逼近局部最小值。 -同理:对于公式5中的另一个权重$b$容易得出其更新公式为: +```python +net = LinearNet() +net_loss = nn.loss.MSELoss() +``` -$$b_{ud}=b_{s}-\alpha\frac{\partial{J(b_{s})}}{\partial{b}}\tag{12}$$ +### 定义反向传播网络 +反向传播网络的目标是不断变换权重值,使得loss值取得最小值,一般的在线性网络中采用权重更新公式: -当所有的权重更新完成后,将新的权重赋值给初始权重:即$w_{s}$=$w_{ud}$,$b_{s}$=$b_{ud}$。将新的初始权重传递回到模型函数中,这样就完成了反向传播的过程。 +$$w_{t}=w_{t-1}-\alpha\frac{\partial{J(w_{t-1})}}{\partial{w}}\tag{3}$$ -> 当遇到多项式的回归模型时,上述梯度方法也适用,由于权重数量的增加,需要将权重的名称更新为$w_0,w_1,w_2,...,w_n$,引入矩阵的表达方式,公式将会更加简洁,这里就不多介绍了。 +公式3参数解释: -### 实现梯度函数 +- $w_{t}$为迭代后的权重值。 +- $w_{t-1}$为迭代前的权重值。 +- $\alpha$为学习率。 +- $\frac{\partial{J(w_{t-1}\ )}}{\partial{w}}$为损失函数对权重$w_{t-1}$的微分。 -在MindSpore中的所有要编入计算图的类都需要继承`nn.Cell`算子。MindSpore的梯度计算函数采用如下方式。 +函数中所有的权重值更新完成后,将值传入到模型函数中,这个过程就是反向传播过程,实现此过程需要使用MindSpore中的优化器函数,如下: ```python -from mindspore.ops import composite as C - -class GradWrap(nn.Cell): - """ GradWrap definition """ - def __init__(self, network): - super().__init__(auto_prefix=False) - self.network = network - self.weights = ms.ParameterTuple(filter(lambda x: x.requires_grad, - network.get_parameters())) - - def construct(self, data, label): - weights = self.weights - return C.GradOperation(get_by_list=True) \ - (self.network, weights)(data, label) - +opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9) ``` -上述代码中`GradWrap`实现的是对各个权重的微分$\frac{\partial{J(w)}}{\partial{w}}$,其展开式子参考公式6。 - -### 反向传播更新权重 +### 关联前向和反向传播网络 -`nn.RMSProp`为完成权重更新的函数,更新方式大致为公式11,但是考虑的因素更多,具体信息请参考[官网说明](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html?highlight=rmsprop#mindspore.nn.RMSProp)。 +定义完成前向传播和反向传播后,在MindSpore中需要调用`Model`函数,将前面定义的网络,损失函数,优化器函数关联起来,使之变成完整的计算网络。 ```python -train_network = GradWrap(loss_opeartion) -train_network.set_train() -optim = nn.RMSProp(params=net.trainable_params(),learning_rate=0.02) +from mindspore.train import Model + +model = Model(net, net_loss, opt) ``` -通过以上操作,我们就完成了前向传播网络和反向传播网络的定义,接下来可以加载训练数据进行线性拟合了。 +## 拟合过程可视化准备 -## 定义模型拟合过程可视化函数 +### 定义绘图函数 -定义一个可视化函数`plot_model_and_datasets`,将模型函数和验证数据集打印出来,观察其变化。 +为了使得整个训练过程更容易理解,需要将训练过程的测试数据、目标函数和模型网络进行可视化,这里定义了可视化函数,将在每个step训练结束后调用,展示模型网络的拟合过程。 ```python -import time +import matplotlib.pyplot as plt +import time -def plot_model_and_datasets(weight, bias, data_x, data_y): +def plot_model_and_datasets(net, eval_data): + weight = net.trainable_params()[0] + bias = net.trainable_params()[1] x = np.arange(-10, 10, 0.1) - y = x * ((weight[0][0]).asnumpy()) + ((bias[0]).asnumpy()) - plt.scatter(x1,y1,color="red",s=5) - plt.scatter(data_x.asnumpy(), data_y.asnumpy(), color="black", s=5) - plt.plot(x, y, "blue") + y = x * Tensor(weight).asnumpy()[0][0] + Tensor(bias).asnumpy()[0] + x1, y1 = zip(*eval_data) + x_target = x + y_target = x_target * 2 + 3 + plt.axis([-11, 11, -20, 25]) + plt.scatter(x1, y1, color="red", s=5) + plt.plot(x, y, color="blue") + plt.plot(x_target, y_target, color="green") plt.show() time.sleep(0.02) ``` -上述函数的参数: - -- `weight`:模型函数的权重,即$w$。 +### 定义回调函数 -- `bias`:模型函数的权重,即$b$。 
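在进入回调函数的定义之前,可以先单独调用一次上面定义的`plot_model_and_datasets`函数,预览未训练时模型网络与验证数据、目标函数的相对位置(补充示意用法,非教程原有代码,`net`和`eval_data`均为前文已创建的对象):

```python
# 示意:训练开始前先绘制一次初始模型的拟合情况
plot_model_and_datasets(net, eval_data)
```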
+MindSpore提供的工具,可对模型训练过程进行自定义控制,这里在`step_end`中调用可视化函数,展示拟合过程。更多的使用可参考[官网说明]()。 -- `data_x`:训练数据的$x$值。 -- `data_y`:训练数据的$y$值。 - -> 可视化过程中,红色的点是验证数据集,黑色的点是单个batch的训练数据,蓝色的线条是正在训练的回归模型。 +```python +from IPython import display +from mindspore.train.callback import Callback + +class ImageShowCallback(Callback): + def __init__(self, net, eval_data): + self.net = net + self.eval_data = eval_data + + def step_end(self, run_context): + plot_model_and_datasets(self.net, self.eval_data) + display.clear_output(wait=True) +``` ## 执行训练 -其训练过程如下: - -1. 设置训练的迭代次数`step_size`。 -2. 设置单次迭代的训练数据量`batch_size`。 -3. 正向传播训练`grads`。 -4. 反向传播训练`optim`。 -5. 图形展示模型函数和数据集。 -6. 清除本轮迭代的输出`display.clear_output`,起到动态可视化效果。 +完成以上过程后,可以使用训练数`ds_train`对模型训练,这里调用`model.train`进行,其中参数解释: -迭代完成后,输出网络模型的权重值$w$和$b$。 +- `epoch`:训练迭代的整个数据集的次数。 +- `ds_train`:训练数据集。 +- `callbacks`:训练过程中需要调用的回调函数。 +- `dataset_sink_model`:数据集下沉模式,支持Ascend、GPU计算平台,本例为CPU计算平台设置为False。 ```python -from IPython import display -step_size = 200 -batch_size = 16 +from mindspore.train.callback import LossMonitor -for i in range(step_size): - data_x,data_y = get_data(batch_size) - grads = train_network(data_x,data_y) - optim(grads) - plot_model_and_datasets(net.weight.data, - net.bias.data, data_x, data_y) - display.clear_output(wait=True) - -output = net(eval_x) -loss_output = criterion(output, eval_label) -print("loss_value:", loss_output.asnumpy()) -plot_model_and_datasets(net.weight.data, net.bias.data, data_x,data_y) -print("weight:", net.weight.set_data([0][0]), "bias:", net.bias.set_data([0])) +epoch = 1 +imageshow_cb = ImageShowCallback(net, eval_data) +model.train(epoch, ds_train, callbacks=[imageshow_cb], dataset_sink_mode=False) + +plot_model_and_datasets(net,eval_data) +print(net.trainable_params()[0], "\n%s" % net.trainable_params()[1]) ``` 输出结果: - loss_value: 0.42879593 - - - ![gif](./images/linear_regression.gif) - weight: 1.9990227 bias: 2.9115517 + Parameter (name=fc.weight, value=[[2.0065749]]) + Parameter (name=fc.bias, value=[3.0089042]) -可以看到最终得到的线性拟合的权重值非常接近目标函数权重weight=2、bias=3。 +训练完成后打印出最终模型的权重参数,其中weight接近于2.0,bias接近于3.0,模型训练完成,符合预期。 ## 总结 diff --git a/tutorials/source_zh_cn/quick_start/quick_start.md b/tutorials/source_zh_cn/quick_start/quick_start.md index 0a2b025632145062550dbc8c5bda515dcf80e768..94a00478e5929155dfe6f51856fd61935a9f7277 100644 --- a/tutorials/source_zh_cn/quick_start/quick_start.md +++ b/tutorials/source_zh_cn/quick_start/quick_start.md @@ -28,6 +28,9 @@    +   + + ## 概述 @@ -183,7 +186,7 @@ def create_dataset(data_path, batch_size=32, repeat_size=1, 先进行shuffle、batch操作,再进行repeat操作,这样能保证1个epoch内数据不重复。 -> MindSpore支持进行多种数据处理和增强的操作,各种操作往往组合使用,具体可以参考[数据处理与数据增强](https://www.mindspore.cn/tutorial/zh-CN/master/use/data_preparation/data_processing_and_augmentation.html)章节。 +> MindSpore支持进行多种数据处理和增强的操作,各种操作往往组合使用,具体可以参考[数据处理与数据增强](https://www.mindspore.cn/api/zh-CN/master/programming_guide/pipeline.html)章节。 ## 定义网络 diff --git a/tutorials/source_zh_cn/quick_start/quick_video.md b/tutorials/source_zh_cn/quick_start/quick_video.md index 115ef7d67eb12823b13bf53497ec534d318aeefd..cd6af62eb490445c8fdd6a866376538205ba1d10 100644 --- a/tutorials/source_zh_cn/quick_start/quick_video.md +++ b/tutorials/source_zh_cn/quick_start/quick_video.md @@ -337,6 +337,30 @@ + diff --git a/tutorials/source_zh_cn/quick_start/quick_video/ascend910_operator_development.md b/tutorials/source_zh_cn/quick_start/quick_video/ascend910_operator_development.md new file mode 100644 index 
0000000000000000000000000000000000000000..1e7d45170d5401b2e0ce348af3631fedd8581d67 --- /dev/null +++ b/tutorials/source_zh_cn/quick_start/quick_video/ascend910_operator_development.md @@ -0,0 +1,7 @@ +# Ascend 910算子开发 + +[comment]: <> (本文档中包含手把手系列视频,码云Gitee不支持展示,请于官方网站对应教程中查看) + + diff --git a/tutorials/source_zh_cn/quick_start/quick_video/loading_the_dataset_and_converting_data_format.md b/tutorials/source_zh_cn/quick_start/quick_video/loading_the_dataset_and_converting_data_format.md index ae48c7ba8e0c378dc3fb93ff7959320d53e8e309..535522cecd1fc52fc7be3b55fd8f2f03df4736a7 100644 --- a/tutorials/source_zh_cn/quick_start/quick_video/loading_the_dataset_and_converting_data_format.md +++ b/tutorials/source_zh_cn/quick_start/quick_video/loading_the_dataset_and_converting_data_format.md @@ -8,6 +8,6 @@ **查看更多内容**: - + - \ No newline at end of file + \ No newline at end of file diff --git a/tutorials/source_zh_cn/quick_start/quick_video/quick_start_video.md b/tutorials/source_zh_cn/quick_start/quick_video/quick_start_video.md index 6fe8e0142e72689c414c7c7ac0ab1f8d40cc7835..74c16621e12be2baf670316ea0a98d26553d63ea 100644 --- a/tutorials/source_zh_cn/quick_start/quick_video/quick_start_video.md +++ b/tutorials/source_zh_cn/quick_start/quick_video/quick_start_video.md @@ -6,6 +6,6 @@ -**查看代码**: +**查看代码**: **查看完整教程**: \ No newline at end of file diff --git a/tutorials/source_zh_cn/use/image_loading.md b/tutorials/source_zh_cn/use/image_loading.md index 0c51bec4a3265c60ee2db6e397aed9b0535c24c1..4a6e227593e6d936576d28ae11fda5ee70037993 100644 --- a/tutorials/source_zh_cn/use/image_loading.md +++ b/tutorials/source_zh_cn/use/image_loading.md @@ -1,6 +1,6 @@ # 加载图像 -`Ascend` `GPU` `CPU` `数据准备` `初级` `中级` `高级` +`Linux` `Ascend` `GPU` `CPU` `数据准备` `初级` `中级` `高级` @@ -19,12 +19,6 @@ 在计算机视觉任务中,图像数据往往因为容量限制难以直接全部读入内存。MindSpore提供的`mindspore.dataset`库可以帮助用户构建数据集对象,分批次地读取图像数据。同时,在各个数据集类中还内置了数据处理和数据增强算子,使得数据在训练过程中能够像经过pipeline管道的水一样源源不断地流向训练系统,提升数据训练效果。此外,MindSpore还支持分布式场景数据加载。 -MindSpore目前支持加载图像领域常用的经典数据集和多种数据存储格式下的数据集,用户也可以通过构建自定义数据集类实现自定义方式的数据加载。各种数据集的详细加载方法,可参考编程指南中[数据集加载](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_loading.html)章节。 - -MindSpore目前支持的数据处理和数据增强算子及其详细使用方法,可参考编程指南中[数据处理](https://www.mindspore.cn/api/zh-CN/master/programming_guide/pipeline.html)与[数据增强](https://www.mindspore.cn/api/zh-CN/master/programming_guide/augmentation.html)章节。 - -MindSpore目前支持的数据采样器及其详细使用方法,可参考编程指南中[采样器](https://www.mindspore.cn/api/zh-CN/master/programming_guide/sampler.html)章节。 - 下面,本教程将以加载MNIST数据集为例,演示如何使用MindSpore加载和处理图像数据。 ## 准备 @@ -45,13 +39,14 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 ## 加载数据集 -使用`mindspore.dataset`库中的`MnistDataset`类加载MNIST数据集。 +MindSpore目前支持加载图像领域常用的数据集和多种数据存储格式下的数据集,用户也可以通过构建自定义数据集类实现自定义方式的数据加载。各种数据集的详细加载方法,可参考编程指南中[数据集加载](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_loading.html)章节。 + +下面演示使用`mindspore.dataset`库中的`MnistDataset`类加载MNIST数据集。 1. 
配置数据集目录,创建MNIST数据集对象。 ```python DATA_DIR = "./MNIST" - # 为方便展示,指定num_samples只获取6个样本 mnist_dataset = ds.MnistDataset(DATA_DIR, num_samples=6, shuffle=False) ``` @@ -60,26 +55,32 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 ```python import matplotlib.pyplot as plt - # 创建迭代器并展示样本及标签 mnist_it = mnist_dataset.create_dict_iterator() data = mnist_it.get_next() - plt.imshow(data['image'].squeeze(), cmap=plt.cm.gray) - plt.title(data['label'], fontsize=20) + plt.imshow(data['image'].asnumpy().squeeze(), cmap=plt.cm.gray) + plt.title(data['label'].asnumpy(), fontsize=20) plt.show() ``` + 图片展示如下: + ![mnist_5](./images/mnist_5.png) +此外,用户还可以在数据集加载时传入sampler指定数据采样方式。MindSpore目前支持的数据采样器及其详细使用方法,可参考编程指南中[采样器](https://www.mindspore.cn/api/zh-CN/master/programming_guide/sampler.html)章节。 + ## 数据处理 -构建pipeline,对MNIST数据集进行`shuffle`、`batch`、`repeat`等操作。 +MindSpore目前支持的数据处理算子及其详细使用方法,可参考编程指南中[数据处理](https://www.mindspore.cn/api/zh-CN/master/programming_guide/pipeline.html)章节。 + +下面演示构建pipeline,对MNIST数据集进行`shuffle`、`batch`、`repeat`等操作。 ```python -# 查看原始数据label for data in mnist_dataset.create_dict_iterator(): print(data['label']) ``` +输出结果如下: + ```python 5 0 @@ -92,15 +93,15 @@ for data in mnist_dataset.create_dict_iterator(): 1. 对数据集进行混洗。 ```python - # 固定随机种子便于展示混洗结果 ds.config.set_seed(58) - ds1 = mnist_dataset.shuffle(buffer_size=6) - # 查看混洗后数据label + for data in ds1.create_dict_iterator(): print(data['label']) ``` + 输出结果如下: + ```python 4 2 @@ -114,11 +115,13 @@ for data in mnist_dataset.create_dict_iterator(): ```python ds2 = ds1.batch(batch_size=2) - # 查看分批后数据label + for data in ds2.create_dict_iterator(): print(data['label']) ``` + 输出结果如下: + ```python [4 2] [1 0] @@ -129,11 +132,13 @@ for data in mnist_dataset.create_dict_iterator(): ```python ds3 = ds2.repeat(count=2) - # 查看复制后数据label + for data in ds3.create_dict_iterator(): print(data['label']) ``` + 输出结果如下: + ```python [4 2] [1 0] @@ -149,25 +154,26 @@ for data in mnist_dataset.create_dict_iterator(): ## 数据增强 -使用`c_transforms`模块对MNIST数据集进行数据增强。 +MindSpore目前支持的数据增强算子及其详细使用方法,可参考编程指南中[数据增强](https://www.mindspore.cn/api/zh-CN/master/programming_guide/augmentation.html)章节。 + +下面演示使用`c_transforms`模块对MNIST数据集进行数据增强。 1. 导入相关模块,重新加载数据集。 ```python - from mindspore.dataset.transforms.vision import Inter - import mindspore.dataset.transforms.vision.c_transforms as transforms + from mindspore.dataset.vision import Inter + import mindspore.dataset.vision.c_transforms as transforms - # 重新加载数据集 mnist_dataset = ds.MnistDataset(DATA_DIR, num_samples=6, shuffle=False) ``` -2. 定义数据增强算子,对数据集执行`Resize`操作。 +2. 定义数据增强算子,对数据集执行`Resize`和`RandomCrop`操作。 ```python resize_op = transforms.Resize(size=(200,200), interpolation=Inter.LINEAR) crop_op = transforms.RandomCrop(150) transforms_list = [resize_op, crop_op] - ds4 = mnist_dataset.map(input_columns="image", operations=transforms_list) + ds4 = mnist_dataset.map(operations=transforms_list, input_columns="image") ``` 3. 
查看数据增强效果。 @@ -175,8 +181,8 @@ for data in mnist_dataset.create_dict_iterator(): ```python mnist_it = ds4.create_dict_iterator() data = mnist_it.get_next() - plt.imshow(data['image'].squeeze(), cmap=plt.cm.gray) - plt.title(data['label'], fontsize=20) + plt.imshow(data['image'].asnumpy().squeeze(), cmap=plt.cm.gray) + plt.title(data['label'].asnumpy(), fontsize=20) plt.show() ``` diff --git a/tutorials/source_zh_cn/use/multi_platform_inference.md b/tutorials/source_zh_cn/use/multi_platform_inference.md index 5628bdc5c1a2269ce5eee3254ca36efa3cf57951..7aaac69064a83c5e133791fe5077ee4b8203d0c2 100644 --- a/tutorials/source_zh_cn/use/multi_platform_inference.md +++ b/tutorials/source_zh_cn/use/multi_platform_inference.md @@ -79,18 +79,17 @@ CPU | ONNX格式 | 支持ONNX推理的runtime/SDK,如TensorRT。 `model.eval`为模型验证接口,对应接口说明:。 > 推理样例代码:。 - 1.2 模型保存在华为云 + 1.2 使用MindSpore Hub从华为云加载模型 - 首先构建模型,然后使用`hub.load_weights`从云端加载模型参数,传入验证数据集后即可进行推理,验证数据集的处理方式与训练数据集相同。 + 首先构建模型,然后使用`mindspore_hub.load`从云端加载模型参数,传入验证数据集后即可进行推理,验证数据集的处理方式与训练数据集相同。 ```python - network = LeNet5(cfg.num_classes) + model_uid = "mindspore/ascend/0.7/googlenet_v1_cifar10" # using GoogleNet as an example. + network = mindspore_hub.load(model_uid, num_classes=10) net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum) model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}) print("============== Starting Testing ==============") - hub.load_weights(network, network_name="lenet", **{"device_target": - "ascend", "dataset":"mnist", "version": "0.5.0"}) dataset = create_dataset(os.path.join(args.data_path, "test"), cfg.batch_size, 1) @@ -98,8 +97,8 @@ CPU | ONNX格式 | 支持ONNX推理的runtime/SDK,如TensorRT。 print("============== {} ==============".format(acc)) ``` 其中, - `hub.load_weights`为加载模型参数接口,对应接口说明:。 - + `mindspore_hub.load`为加载模型参数接口,对应接口说明:。 + 2. 
使用`model.predict`接口来进行推理操作。 ```python model.predict(input_data) diff --git a/tutorials/source_zh_cn/use/saving_and_loading_model_parameters.md b/tutorials/source_zh_cn/use/saving_and_loading_model_parameters.md index f624b8712ce5633cb5ec7973a3811d34acf0525b..d4bca323b355602d8756b56dac06d2949f2e941f 100644 --- a/tutorials/source_zh_cn/use/saving_and_loading_model_parameters.md +++ b/tutorials/source_zh_cn/use/saving_and_loading_model_parameters.md @@ -118,7 +118,7 @@ resnet = ResNet50() load_checkpoint("resnet50-2_32.ckpt", net=resnet) dateset_eval = create_dataset(os.path.join(mnist_path, "test"), 32, 1) # define the test dataset loss = CrossEntropyLoss() -model = Model(resnet, loss) +model = Model(resnet, loss, metrics={"accuracy"}) acc = model.eval(dataset_eval) ``` @@ -150,6 +150,9 @@ model.train(epoch, dataset) 当有了CheckPoint文件后,如果想继续做推理,需要通过网络和CheckPoint生成对应的模型。`export`接口支持导出多种类型的模型文件格式,用于不同硬件平台的推理。 +> `input`为`export`方法的入参,代表网络的输入,如果网络有多个输入,需要一同传进`export`方法。 +> 例如:`export(network, Tensor(input1), Tensor(input2), file_name='network.mindir', file_format='MINDIR')` + ### 导出AIR格式文件 AIR格式文件仅支持昇腾AI处理器,导出该格式文件的代码样例如下: @@ -162,8 +165,8 @@ resnet = ResNet50() param_dict = load_checkpoint("resnet50-2_32.ckpt") # load the parameter into net load_param_into_net(resnet, param_dict) -input = np.random.uniform(0.0, 1.0, size = [32, 3, 224, 224]).astype(np.float32) -export(resnet, Tensor(input), file_name = 'resnet50-2_32.air', file_format = 'AIR') +input = np.random.uniform(0.0, 1.0, size=[32, 3, 224, 224]).astype(np.float32) +export(resnet, Tensor(input), file_name='resnet50-2_32.air', file_format='AIR') ``` 使用`export`接口之前,需要先导入`mindspore.train.serialization`。 @@ -184,8 +187,8 @@ resnet = ResNet50() param_dict = load_checkpoint("resnet50-2_32.ckpt") # load the parameter into net load_param_into_net(resnet, param_dict) -input = np.random.uniform(0.0, 1.0, size = [32, 3, 224, 224]).astype(np.float32) -export(resnet, Tensor(input), file_name = 'resnet50-2_32.onnx', file_format = 'ONNX') +input = np.random.uniform(0.0, 1.0, size=[32, 3, 224, 224]).astype(np.float32) +export(resnet, Tensor(input), file_name='resnet50-2_32.onnx', file_format='ONNX') ``` 建议使用`.onnx`作为ONNX格式文件的后缀名。 @@ -202,8 +205,8 @@ resnet = ResNet50() param_dict = load_checkpoint("resnet50-2_32.ckpt") # load the parameter into net load_param_into_net(resnet, param_dict) -input = np.random.uniform(0.0, 1.0, size = [32, 3, 224, 224]).astype(np.float32) -export(resnet, Tensor(input), file_name = 'resnet50-2_32.mindir', file_format = 'MINDIR') +input = np.random.uniform(0.0, 1.0, size=[32, 3, 224, 224]).astype(np.float32) +export(resnet, Tensor(input), file_name='resnet50-2_32.mindir', file_format='MINDIR') ``` -建议使用`.mindir`作为MINDIR格式文件的后缀名。 +建议使用`.mindir`作为MINDIR格式文件的后缀名。 \ No newline at end of file diff --git a/tutorials/source_zh_cn/use/text_loading.md b/tutorials/source_zh_cn/use/text_loading.md index 7150cfd67df497d52dfa35da8bdb7988a632a0ce..7eeff51e796e0056fa490acea676f64b006014d7 100644 --- a/tutorials/source_zh_cn/use/text_loading.md +++ b/tutorials/source_zh_cn/use/text_loading.md @@ -1,6 +1,6 @@ # 加载文本 -`Ascend` `GPU` `CPU` `数据准备` `初级` `中级` `高级` +`Linux` `Ascend` `GPU` `CPU` `数据准备` `初级` `中级` `高级` @@ -19,12 +19,6 @@ MindSpore提供的`mindspore.dataset`库可以帮助用户构建数据集对象,分批次地读取文本数据。同时,在各个数据集类中还内置了数据处理和数据分词算子,使得数据在训练过程中能够像经过pipeline管道的水一样源源不断地流向训练系统,提升数据训练效果。此外,MindSpore还支持分布式场景数据加载。 
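针对上面提到的分布式场景,通常的做法是在构建数据集对象时通过分片参数将数据划分给各个进程。下面是一个补充的示意片段(非教程原有代码),其中`num_shards`、`shard_id`以及文件路径均为假设值:

```python
import mindspore.dataset as ds

# 示意:数据总共划分为4个分片,当前进程只读取第0个分片
dataset = ds.TextFileDataset("./test.txt", num_shards=4, shard_id=0)
```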
-MindSpore目前支持加载文本领域常用的经典数据集和多种数据存储格式下的数据集,用户也可以通过构建自定义数据集类实现自定义方式的数据加载。各种数据集的详细加载方法,可参考编程指南中[数据集加载](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_loading.html)章节。 - -MindSpore目前支持的数据处理和数据分词算子及其详细使用方法,可参考编程指南中[数据处理](https://www.mindspore.cn/api/zh-CN/master/programming_guide/pipeline.html)与[分词器](https://www.mindspore.cn/api/zh-CN/master/programming_guide/tokenizer.html)章节。 - -MindSpore目前支持的数据采样器及其详细使用方法,可参考编程指南中[采样器](https://www.mindspore.cn/api/zh-CN/master/programming_guide/sampler.html)章节。 - 下面,本教程将简要演示如何使用MindSpore加载和处理文本数据。 ## 准备 @@ -44,15 +38,18 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 └─tokenizer.txt ``` -3. 导入`mindspore.dataset`库。 +3. 导入`mindspore.dataset`和`mindspore.dataset.text`库。 ```python import mindspore.dataset as ds + import mindspore.dataset.text as text ``` ## 加载数据集 -使用`mindspore.dataset`中的`TextFileDataset`类加载数据集。 +MindSpore目前支持加载文本领域常用的数据集和多种数据存储格式下的数据集,用户也可以通过构建自定义数据集类实现自定义方式的数据加载。各种数据集的详细加载方法,可参考编程指南中[数据集加载](https://www.mindspore.cn/api/zh-CN/master/programming_guide/dataset_loading.html)章节。 + +下面演示使用`mindspore.dataset`中的`TextFileDataset`类加载数据集。 1. 配置数据集目录,创建数据集对象。 @@ -64,7 +61,7 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 2. 创建迭代器,通过迭代器获取数据。 ```python - for data in dataset.create_dict_iterator(): + for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text'])) ``` @@ -78,11 +75,13 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 ## 数据处理 -在生成`dataset`对象后可对其进行数据处理操作,比如`SlidingWindow`、`shuffle`等。 +MindSpore目前支持的数据处理算子及其详细使用方法,可参考编程指南中[数据处理](https://www.mindspore.cn/api/zh-CN/master/programming_guide/pipeline.html)章节。 + +下面演示在生成`dataset`对象后进行数据处理操作,比如`SlidingWindow`、`shuffle`等。 - **SlidingWindow** - `TensorOp`从数据(现在仅是1-D)构造张量,其中尺寸轴上的每个元素都是从指定位置开始并具有指定宽度的数据切片。 + 下面演示使用`SlidingWindow`对文本数据进行切片操作。 1. 加载数据集。 @@ -94,10 +93,12 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 2. 原始数据输出效果。 ```python - for data in dataset.create_dict_iterator(): + for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text']).tolist()) ``` + 输出结果如下: + ``` ['大', '家', '早', '上', '好'] ``` @@ -105,16 +106,18 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 3. 执行操作。 ```python - dataset = dataset.map(input_columns=["text"], operations=text.SlidingWindow(2, 0)) + dataset = dataset.map(operations=text.SlidingWindow(2, 0), input_columns=["text"]) ``` 4. 执行之后输出效果。 ```python - for data in dataset.create_dict_iterator(): + for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text']).tolist()) ``` + 输出结果如下: + ``` [['大', '家'], ['家', '早'], @@ -124,23 +127,23 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 - **shuffle** - 当`shuffle=True`时,对数据集进行随机输出。 + 下面演示在加载数据集时使用`shuffle`对文本数据进行混洗操作。 1. 加载数据集。 ```python inputs = ["a", "b", "c", "d"] - dataset = ds.NumpySlicesDataset(inputs, column_names=["text"], shuffle=False) + dataset = ds.NumpySlicesDataset(inputs, column_names=["text"], shuffle=True) ``` 2. 数据输出效果。 ```python - for data in dataset.create_dict_iterator(): + for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text']).tolist()) ``` - 第一次输出: + 输出结果如下: ``` c @@ -149,18 +152,11 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 b ``` - 第二次输出: - - ``` - b - a - c - d - ``` - ## 数据分词 -使用`WhitespaceTokenizer`分词器来分词,该分词是按照空格来进行分词。 +MindSpore目前支持的数据分词算子及其详细使用方法,可参考编程指南中[分词器](https://www.mindspore.cn/api/zh-CN/master/programming_guide/tokenizer.html)章节。 + +下面演示使用`WhitespaceTokenizer`分词器来分词,该分词是按照空格来进行分词。 1. 创建`tokenizer`。 @@ -177,7 +173,7 @@ MindSpore目前支持的数据采样器及其详细使用方法,可参考编 3. 
创建迭代器,通过迭代器获取数据。 ```python - for i in dataset.create_dict_iterator(num_epochs=1): + for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True): token = text.to_str(i['text']).tolist() print(token) ``` diff --git a/tutorials/tutorial_code/distributed_training/resnet50_distributed_training.py b/tutorials/tutorial_code/distributed_training/resnet50_distributed_training.py index f7bde7ec41d7e4d3eea7c409bf99a4e384b10c51..99ca5892f0b62559341ba892e067f76daf987033 100644 --- a/tutorials/tutorial_code/distributed_training/resnet50_distributed_training.py +++ b/tutorials/tutorial_code/distributed_training/resnet50_distributed_training.py @@ -67,8 +67,8 @@ def create_dataset(data_path, repeat_num=1, batch_size=32, rank_id=0, rank_size= c_trans += [resize_op, rescale_op, normalize_op, changeswap_op] # apply map operations on images - data_set = data_set.map(input_columns="label", operations=type_cast_op) - data_set = data_set.map(input_columns="image", operations=c_trans) + data_set = data_set.map(operations=type_cast_op, input_columns="label") + data_set = data_set.map(operations=c_trans, input_columns="image") # apply shuffle operations data_set = data_set.shuffle(buffer_size=10) diff --git a/tutorials/tutorial_code/gradient_accumulation/train.py b/tutorials/tutorial_code/gradient_accumulation/train.py index c52fd0d63fbb62bde920a77413e9c73198a8464e..a0de464e8974e69a8154a97d1e6b89379e02c531 100644 --- a/tutorials/tutorial_code/gradient_accumulation/train.py +++ b/tutorials/tutorial_code/gradient_accumulation/train.py @@ -129,8 +129,8 @@ class GradientAccumulation: if __name__ == "__main__": parser = argparse.ArgumentParser(description='MindSpore Gard Cumulative Example') - parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'], - help='device where the code will be implemented (default: Ascend)') + parser.add_argument('--device_target', type=str, default="GPU", choices=['GPU'], + help='device where the code will be implemented (default: GPU)') parser.add_argument('--data_path', type=str, default="./Data", help='path where the dataset is saved') args = parser.parse_args() diff --git a/tutorials/tutorial_code/lenet/lenet.py b/tutorials/tutorial_code/lenet/lenet.py index 3a25515233f080ad97e10a86eaabd9986fc95512..0bfca4e9185854ee60b15b06c7e1b0de439f491c 100644 --- a/tutorials/tutorial_code/lenet/lenet.py +++ b/tutorials/tutorial_code/lenet/lenet.py @@ -59,11 +59,11 @@ def create_dataset(data_path, batch_size=32, repeat_size=1, type_cast_op = C.TypeCast(mstype.int32) # change data type of label to int32 to fit network # apply map operations on images - mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers) - mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers) - mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers) - mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers) - mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", 
num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers) # apply DatasetOps buffer_size = 10000 diff --git a/tutorials/tutorial_code/linear_regression.py b/tutorials/tutorial_code/linear_regression.py index 6e53c6ae0936ed4b9b66f9393b624b0490b893ca..35003f67b2fb6635047ae8c4f1bd7125305b7378 100644 --- a/tutorials/tutorial_code/linear_regression.py +++ b/tutorials/tutorial_code/linear_regression.py @@ -1,74 +1,53 @@ import numpy as np -import mindspore as ms -from mindspore.ops import composite as C -from mindspore.ops import operations as P -from mindspore import Tensor -from mindspore import context -from mindspore.common.initializer import TruncatedNormal +from mindspore import dataset as ds +from mindspore.common.initializer import Normal from mindspore import nn - -context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") +from mindspore.train import Model +from mindspore.train.callback import LossMonitor +from mindspore import context + +context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + -# Generating training data sets def get_data(num, w=2.0, b=3.0): - np_x = np.ones([num, 1]) - np_y = np.ones([num, 1]) for i in range(num): - x = np.random.uniform(-10.0, 10.0) - np_x[i] = x + x = np.random.uniform(-10.0, 10.0) noise = np.random.normal(0, 1) - y = x * w + b + noise - np_y[i]=y - return Tensor(np_x,ms.float32), Tensor(np_y,ms.float32) - -# Define the form of loss function: 1/2 * (y - y')^2 -class MyLoss(nn.loss.loss._Loss): - def __init__(self, reduction='mean'): - super().__init__(reduction) - self.square = P.Square() - def construct(self, data, label): - x = self.square(data - label) * 0.5 - return self.get_loss(x) - -# Gradient function -class GradWrap(nn.Cell): - """ GradWrap definition """ - def __init__(self, network): - super().__init__(auto_prefix=False) - self.network = network - self.weights = ms.ParameterTuple(filter(lambda x: x.requires_grad, - network.get_parameters())) - - def construct(self, data, label): - weights = self.weights - return C.GradOperation(get_by_list=True) \ - (self.network, weights)(data, label) - -# Initializing model functions -net = nn.Dense(1, 1, TruncatedNormal(0.02), TruncatedNormal(0.02)) - -# Loss function -criterion = MyLoss() -loss_opeartion = nn.WithLossCell(net, criterion) -train_network = GradWrap(loss_opeartion) -train_network.set_train() - -# Defining optimization -optim = nn.RMSProp(params=net.trainable_params(), learning_rate=0.02) - -# Executive Training -step_size = 200 -batch_size = 16 -for i in range(step_size): - data_x, data_y = get_data(batch_size) - grads = train_network(data_x, data_y) - optim(grads) - - # Print loss value per 10 step - if i%10 == 0: - output = net(data_x) - loss_output = criterion(output, data_y) - print(loss_output.asnumpy()) - -# Print final weight parameters -print("weight:", net.weight.set_data([0][0]), "bias:", net.bias.set_data([0])) \ No newline at end of file + y = x * w + b + noise + yield np.array([x]).astype(np.float32), np.array([y]).astype(np.float32) + + +def create_dataset(num_data, batch_size=16, repeat_size=1): + input_data = ds.GeneratorDataset(list(get_data(num_data)), column_names=['data','label']) + input_data = input_data.batch(batch_size) + input_data = input_data.repeat(repeat_size) + return input_data + + +class LinearNet(nn.Cell): + def 
__init__(self): + super(LinearNet, self).__init__() + self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02)) + + def construct(self, x): + x = self.fc(x) + return x + + +if __name__ == "__main__": + + num_data = 1600 + batch_size = 16 + repeat_size = 1 + lr = 0.005 + momentum = 0.9 + + net = LinearNet() + net_loss = nn.loss.MSELoss() + opt = nn.Momentum(net.trainable_params(), lr, momentum) + model = Model(net, net_loss, opt) + + ds_train = create_dataset(num_data, batch_size=batch_size, repeat_size=repeat_size) + model.train(1, ds_train, callbacks=LossMonitor(), dataset_sink_mode=False) + + print(net.trainable_params()[0], "\n%s" % net.trainable_params()[1]) \ No newline at end of file diff --git a/tutorials/tutorial_code/model_safety/mnist_attack_fgsm.py b/tutorials/tutorial_code/model_safety/mnist_attack_fgsm.py index 9561b5cebdd5883aff2e1b0ddfec85a361c81b38..7c8517dd07d6596e4dd06a829cd3ae22d58442ca 100644 --- a/tutorials/tutorial_code/model_safety/mnist_attack_fgsm.py +++ b/tutorials/tutorial_code/model_safety/mnist_attack_fgsm.py @@ -11,57 +11,42 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" -mnist_attack_fgsm -The sample can be run on Ascend 910 AI processor. -""" -import sys import time import numpy as np -import pytest from scipy.special import softmax from mindspore import Model from mindspore import Tensor from mindspore import context from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.nn import SoftmaxCrossEntropyWithLogits -from mindarmour.attacks.gradient_method import FastGradientSignMethod - +from mindarmour.adv_robustness.attacks import FastGradientSignMethod +from mindarmour.adv_robustness.evaluations import AttackEvaluate from mindarmour.utils.logger import LogUtil -from mindarmour.evaluations.attack_evaluation import AttackEvaluate - -from lenet5_net import LeNet5 - -context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") - -sys.path.append("..") -from data_processing import generate_mnist_dataset +from examples.common.networks.lenet5.lenet5_net import LeNet5 +from examples.common.dataset.data_processing import generate_mnist_dataset LOGGER = LogUtil.get_instance() +LOGGER.set_level('INFO') TAG = 'FGSM_Test' -@pytest.mark.level1 -@pytest.mark.platform_arm_ascend_training -@pytest.mark.platform_x86_ascend_training -@pytest.mark.env_card -@pytest.mark.component_mindarmour def test_fast_gradient_sign_method(): """ - FGSM-Attack test + FGSM-Attack test for CPU device. 
""" # upload trained network - ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + ckpt_path = '../../../common/networks/lenet5/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' net = LeNet5() - load_dict = load_checkpoint(ckpt_name) + load_dict = load_checkpoint(ckpt_path) load_param_into_net(net, load_dict) # get test data - data_list = "./MNIST_unzip/test" + data_list = "../../../common/dataset/MNIST/test" batch_size = 32 - ds = generate_mnist_dataset(data_list, batch_size, sparse=False) + ds = generate_mnist_dataset(data_list, batch_size) # prediction accuracy before attack model = Model(net) @@ -72,8 +57,8 @@ def test_fast_gradient_sign_method(): i = 0 for data in ds.create_tuple_iterator(): i += 1 - images = data[0].astype(np.float32) - labels = data[1] + images = data[0].asnumpy().astype(np.float32) + labels = data[1].asnumpy() test_images.append(images) test_labels.append(labels) pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(), @@ -82,15 +67,16 @@ def test_fast_gradient_sign_method(): if i >= batch_num: break predict_labels = np.concatenate(predict_labels) - true_labels = np.argmax(np.concatenate(test_labels), axis=1) + true_labels = np.concatenate(test_labels) accuracy = np.mean(np.equal(predict_labels, true_labels)) LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy) # attacking - attack = FastGradientSignMethod(net, eps=0.3) + loss = SoftmaxCrossEntropyWithLogits(sparse=True) + attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss) start_time = time.clock() adv_data = attack.batch_generate(np.concatenate(test_images), - np.concatenate(test_labels), batch_size=32) + true_labels, batch_size=32) stop_time = time.clock() np.save('./adv_data', adv_data) pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy() @@ -100,7 +86,7 @@ def test_fast_gradient_sign_method(): accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels)) LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv) attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1), - np.concatenate(test_labels), + np.eye(10)[true_labels], adv_data.transpose(0, 2, 3, 1), pred_logits_adv) LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s', @@ -120,4 +106,6 @@ def test_fast_gradient_sign_method(): if __name__ == '__main__': + # device_target can be "CPU", "GPU" or "Ascend" + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") test_fast_gradient_sign_method() diff --git a/tutorials/tutorial_code/model_safety/mnist_defense_nad.py b/tutorials/tutorial_code/model_safety/mnist_defense_nad.py index d587f960acefeaf494c48964fe344d39b209fa15..3871e0f9fcfeddf56223a268812815031a7e152c 100644 --- a/tutorials/tutorial_code/model_safety/mnist_defense_nad.py +++ b/tutorials/tutorial_code/model_safety/mnist_defense_nad.py @@ -11,68 +11,54 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Defense example using nad -The sample can be run on CPU, GPU and Ascend 910 AI processor. 
-""" -import sys - -import logging +"""defense example using nad""" +import os import numpy as np -import pytest - from mindspore import Tensor from mindspore import context from mindspore import nn from mindspore.nn import SoftmaxCrossEntropyWithLogits -from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.train import Model +from mindspore.train.callback import LossMonitor -from mindarmour.attacks import FastGradientSignMethod -from mindarmour.defenses import NaturalAdversarialDefense +from mindarmour.adv_robustness.attacks import FastGradientSignMethod +from mindarmour.adv_robustness.defenses import NaturalAdversarialDefense from mindarmour.utils.logger import LogUtil -from lenet5_net import LeNet5 - -sys.path.append("..") -from data_processing import generate_mnist_dataset +from examples.common.networks.lenet5.lenet5_net import LeNet5 +from examples.common.dataset.data_processing import generate_mnist_dataset -context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") LOGGER = LogUtil.get_instance() +LOGGER.set_level("INFO") TAG = 'Nad_Example' -@pytest.mark.level1 -@pytest.mark.platform_arm_ascend_training -@pytest.mark.platform_x86_ascend_training -@pytest.mark.env_card -@pytest.mark.component_mindarmour def test_nad_method(): """ NAD-Defense test. """ - # 1. load trained network - ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt' + mnist_path = "../../common/dataset/MNIST" + batch_size = 32 + # 1. train original model + ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"), + batch_size=batch_size, repeat_size=1) net = LeNet5() - load_dict = load_checkpoint(ckpt_name) - load_param_into_net(net, load_dict) - - loss = SoftmaxCrossEntropyWithLogits(sparse=False) + loss = SoftmaxCrossEntropyWithLogits(sparse=True) opt = nn.Momentum(net.trainable_params(), 0.01, 0.09) - - nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt, - bounds=(0.0, 1.0), eps=0.3) + model = Model(net, loss, opt, metrics=None) + model.train(10, ds_train, callbacks=[LossMonitor()], + dataset_sink_mode=False) # 2. get test data - data_list = "./MNIST_unzip/test" - batch_size = 32 - ds_test = generate_mnist_dataset(data_list, batch_size=batch_size, - sparse=False) + ds_test = generate_mnist_dataset(os.path.join(mnist_path, "test"), + batch_size=batch_size, repeat_size=1) inputs = [] labels = [] for data in ds_test.create_tuple_iterator(): - inputs.append(data[0].astype(np.float32)) - labels.append(data[1]) + inputs.append(data[0].asnumpy().astype(np.float32)) + labels.append(data[1].asnumpy()) inputs = np.concatenate(inputs) labels = np.concatenate(labels) @@ -82,36 +68,46 @@ def test_nad_method(): batchs = inputs.shape[0] // batch_size for i in range(batchs): batch_inputs = inputs[i*batch_size : (i + 1)*batch_size] - batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1) + batch_labels = labels[i*batch_size : (i + 1)*batch_size] logits = net(Tensor(batch_inputs)).asnumpy() label_pred = np.argmax(logits, axis=1) acc_list.append(np.mean(batch_labels == label_pred)) - LOGGER.debug(TAG, 'accuracy of TEST data on original model is : %s', - np.mean(acc_list)) + LOGGER.info(TAG, 'accuracy of TEST data on original model is : %s', + np.mean(acc_list)) # 4. 
get adv of test data - attack = FastGradientSignMethod(net, eps=0.3) + attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss) adv_data = attack.batch_generate(inputs, labels) - LOGGER.debug(TAG, 'adv_data.shape is : %s', adv_data.shape) + LOGGER.info(TAG, 'adv_data.shape is : %s', adv_data.shape) # 5. get accuracy of adv data on original model - net.set_train(False) acc_list = [] batchs = adv_data.shape[0] // batch_size for i in range(batchs): batch_inputs = adv_data[i*batch_size : (i + 1)*batch_size] - batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1) + batch_labels = labels[i*batch_size : (i + 1)*batch_size] logits = net(Tensor(batch_inputs)).asnumpy() label_pred = np.argmax(logits, axis=1) acc_list.append(np.mean(batch_labels == label_pred)) - LOGGER.debug(TAG, 'accuracy of adv data on original model is : %s', - np.mean(acc_list)) + LOGGER.info(TAG, 'accuracy of adv data on original model is : %s', + np.mean(acc_list)) # 6. defense + ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"), + batch_size=batch_size, repeat_size=1) + inputs_train = [] + labels_train = [] + for data in ds_train.create_tuple_iterator(): + inputs_train.append(data[0].asnumpy().astype(np.float32)) + labels_train.append(data[1].asnumpy()) + inputs_train = np.concatenate(inputs_train) + labels_train = np.concatenate(labels_train) net.set_train() - nad.batch_defense(inputs, labels, batch_size=32, epochs=10) + nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt, + bounds=(0.0, 1.0), eps=0.3) + nad.batch_defense(inputs_train, labels_train, batch_size=32, epochs=10) # 7. get accuracy of test data on defensed model net.set_train(False) @@ -119,28 +115,29 @@ def test_nad_method(): batchs = inputs.shape[0] // batch_size for i in range(batchs): batch_inputs = inputs[i*batch_size : (i + 1)*batch_size] - batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1) + batch_labels = labels[i*batch_size : (i + 1)*batch_size] logits = net(Tensor(batch_inputs)).asnumpy() label_pred = np.argmax(logits, axis=1) acc_list.append(np.mean(batch_labels == label_pred)) - LOGGER.debug(TAG, 'accuracy of TEST data on defensed model is : %s', - np.mean(acc_list)) + LOGGER.info(TAG, 'accuracy of TEST data on defensed model is : %s', + np.mean(acc_list)) # 8. 
get accuracy of adv data on defensed model acc_list = [] batchs = adv_data.shape[0] // batch_size for i in range(batchs): batch_inputs = adv_data[i*batch_size : (i + 1)*batch_size] - batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1) + batch_labels = labels[i*batch_size : (i + 1)*batch_size] logits = net(Tensor(batch_inputs)).asnumpy() label_pred = np.argmax(logits, axis=1) acc_list.append(np.mean(batch_labels == label_pred)) - LOGGER.debug(TAG, 'accuracy of adv data on defensed model is : %s', - np.mean(acc_list)) + LOGGER.info(TAG, 'accuracy of adv data on defensed model is : %s', + np.mean(acc_list)) if __name__ == '__main__': - LOGGER.set_level(logging.DEBUG) + # device_target can be "CPU", "GPU" or "Ascend" + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") test_nad_method() diff --git a/tutorials/tutorial_code/resnet/cifar_resnet50.py b/tutorials/tutorial_code/resnet/cifar_resnet50.py index edf4e34bd8436ea1f4d482b01643c94999047fb1..1aa84ada4cd8199cca45c0268490b662433b1d60 100644 --- a/tutorials/tutorial_code/resnet/cifar_resnet50.py +++ b/tutorials/tutorial_code/resnet/cifar_resnet50.py @@ -41,6 +41,7 @@ random.seed(1) parser = argparse.ArgumentParser(description='Image classification') parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute.') parser.add_argument('--device_num', type=int, default=1, help='Device num.') +parser.add_argument('--device_target', type=str, default="Ascend", help='Device choice Ascend or GPU') parser.add_argument('--do_train', type=bool, default=True, help='Do train or not.') parser.add_argument('--do_eval', type=bool, default=False, help='Do eval or not.') parser.add_argument('--epoch_size', type=int, default=1, help='Epoch size.') @@ -50,12 +51,13 @@ parser.add_argument('--checkpoint_path', type=str, default=None, help='CheckPoin parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path.') args_opt = parser.parse_args() -device_id = int(os.getenv('DEVICE_ID')) - data_home = args_opt.dataset_path -context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -context.set_context(device_id=device_id) +context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target) + +if args_opt.device_target == "Ascend": + device_id = int(os.getenv('DEVICE_ID')) + context.set_context(device_id=device_id) def create_dataset(repeat_num=1, training=True): """ @@ -89,8 +91,8 @@ def create_dataset(repeat_num=1, training=True): changeswap_op] # apply map operations on images - cifar_ds = cifar_ds.map(input_columns="label", operations=type_cast_op) - cifar_ds = cifar_ds.map(input_columns="image", operations=c_trans) + cifar_ds = cifar_ds.map(operations=type_cast_op, input_columns="label") + cifar_ds = cifar_ds.map(operations=c_trans, input_columns="image") # apply shuffle operations cifar_ds = cifar_ds.shuffle(buffer_size=10) diff --git a/tutorials/tutorial_code/sample_for_cloud/dataset.py b/tutorials/tutorial_code/sample_for_cloud/dataset.py index 4f0546724d44bedc48f0009da92203212bd13262..7d8dfe93378e651813fa7c5d4e65cc68cfdb7d59 100644 --- a/tutorials/tutorial_code/sample_for_cloud/dataset.py +++ b/tutorials/tutorial_code/sample_for_cloud/dataset.py @@ -74,8 +74,8 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32): type_cast_op = C2.TypeCast(mstype.int32) - ds = ds.map(input_columns="label", num_parallel_workers=8, operations=type_cast_op) - ds = ds.map(input_columns="image", num_parallel_workers=8, operations=trans) + ds = 
ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8) + ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8) # apply batch operations ds = ds.batch(batch_size, drop_remainder=True)
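The change running through all of these files is the same `map()` migration: `operations` now leads the argument list and `input_columns` is passed as a keyword. A minimal, self-contained sketch of the new calling convention follows; the toy generator, column names, and batch size are illustrative only and are not taken from the tutorial code (assumes MindSpore 1.0-era dataset APIs):

import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C2
import mindspore.common.dtype as mstype

def toy_source():
    # Two columns per row: a float feature and an integer label (illustrative only).
    for i in range(8):
        yield np.array([i], dtype=np.float32), np.array(i % 2, dtype=np.int64)

data = ds.GeneratorDataset(list(toy_source()), column_names=["image", "label"])
# New-style map: `operations` comes first, `input_columns` follows as a keyword.
data = data.map(operations=C2.TypeCast(mstype.int32), input_columns="label")
data = data.batch(4, drop_remainder=True)
for image, label in data.create_tuple_iterator():
    print(image.shape, label)

The same keyword order carries over unchanged when `num_parallel_workers` is also passed, as in the cifar_resnet50.py and sample_for_cloud/dataset.py hunks above.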