diff --git a/tutorials/source_en/compile/python_builtin_functions.md b/tutorials/source_en/compile/python_builtin_functions.md
index d155cc0af33146f34a591d4b608c5330c7ee12ed..58ced234669814125d2ceefc53af3cd13f916a26 100644
--- a/tutorials/source_en/compile/python_builtin_functions.md
+++ b/tutorials/source_en/compile/python_builtin_functions.md
@@ -415,7 +415,7 @@ import mindspore
 
 z = mindspore.tensor(np.ones((6, 4, 5)))
 
-@mindspore.jit()
+@mindspore.jit
 def test(w):
     x = (2, 3, 4)
     y = [2, 3, 4]
@@ -474,7 +474,7 @@ import numpy as np
 
 z = mindspore.tensor(np.ones((6, 4, 5)))
 
-@mindspore.jit()
+@mindspore.jit
 def test(w):
     x = (2, 3, 4)
     y = [2, 3, 4]
@@ -881,7 +881,7 @@ import mindspore
 def add(x, y):
     return x + y
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     elements_a = (1, 2, 3)
     elements_b = (4, 5, 6)
@@ -918,7 +918,7 @@ For example:
 ```python
 import mindspore
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     elements_a = (1, 2, 3)
     elements_b = (4, 5, 6)
@@ -962,7 +962,7 @@ For example:
 ```python
 import mindspore
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     x = range(0, 6, 2)
     y = range(0, 5)
@@ -1009,7 +1009,7 @@ import numpy as np
 
 y = mindspore.tensor(np.array([[1, 2], [3, 4], [5, 6]]))
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     x = (100, 200, 300, 400)
     m = enumerate(x, 3)
@@ -1112,7 +1112,7 @@ import numpy as np
 x = mindspore.tensor(np.array([1, 2, 3]))
 y = mindspore.tensor(np.array([1, 2, 3]))
 
-@mindspore.jit()
+@mindspore.jit
 def test(x, y):
     return pow(x, y)
 
@@ -1146,7 +1146,7 @@ import numpy as np
 x = mindspore.tensor(np.array([1, 2, 3]), mindspore.int32)
 y = mindspore.tensor(3, mindspore.int32)
 
-@mindspore.jit()
+@mindspore.jit
 def test(x, y):
     print(x)
     print(y)
@@ -1186,7 +1186,7 @@ def is_odd(x):
         return True
     return False
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     elements1 = (1, 2, 3, 4, 5)
     ret1 = filter(is_odd, elements1)
diff --git a/tutorials/source_en/compile/statements.md b/tutorials/source_en/compile/statements.md
index 995784071089526679832c481936849cba3bcfde..9b3582d442484b375a53750ddfa0f86fdf96fa42 100644
--- a/tutorials/source_en/compile/statements.md
+++ b/tutorials/source_en/compile/statements.md
@@ -222,7 +222,7 @@ y = mindspore.tensor([0, 3], mindspore.int32)
 m = 1
 n = 2
 
-@mindspore.jit()
+@mindspore.jit
 def test_cond(x, y):
     if (x > y).any():
         return m
@@ -251,7 +251,7 @@ y = mindspore.tensor([0, 3], mindspore.int32)
 m = 1
 n = 2
 
-@mindspore.jit()
+@mindspore.jit
 def test_cond(x, y):
     out = 3
     if (x > y).any():
@@ -281,7 +281,7 @@ x = mindspore.tensor([1, 4], mindspore.int32)
 y = mindspore.tensor([0, 3], mindspore.int32)
 m = 1
 
-@mindspore.jit()
+@mindspore.jit
 def test_cond(x, y):
     out = 2
     if (x > y).any():
@@ -328,7 +328,7 @@ import mindspore
 
 z = mindspore.tensor(np.ones((2, 3)))
 
-@mindspore.jit()
+@mindspore.jit
 def test_cond():
     x = (1, 2, 3)
     for i in x:
@@ -372,7 +372,7 @@ import mindspore
 m = 1
 n = 2
 
-@mindspore.jit()
+@mindspore.jit
 def test_cond(x, y):
     while x < y:
         x += 1
@@ -402,7 +402,7 @@ n = 2
 def ops1(a, b):
     return a + b
 
-@mindspore.jit()
+@mindspore.jit
 def test_cond(x, y):
     out = m
     while x < y:
@@ -437,7 +437,7 @@ import mindspore
 def number_add(x, y):
     return x + y
 
-@mindspore.jit()
+@mindspore.jit
 def test(x, y):
     return number_add(x, y)
 
@@ -466,7 +466,7 @@ For example:
 ```python
 import mindspore
 
-@mindspore.jit()
+@mindspore.jit
 def test(x, y):
     number_add = lambda x, y: x + y
     return number_add(x, y)
@@ -502,7 +502,7 @@ from mindspore import ops
 def add(x, y):
     return x + y
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     add_ = ops.partial(add, x=2)
     m = add_(y=3)
@@ -541,7 +541,7 @@ The example is as follows:
 ```python
 import mindspore
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     l = [x * x for x in range(1, 11) if x % 2 == 0]
     return l
@@ -581,7 +581,7 @@ The example is as follows:
 ```python
 import mindspore
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     x = [('a', 1), ('b', 2), ('c', 3)]
     res = {k: v for (k, v) in x if v > 1}
@@ -623,7 +623,7 @@ For example:
 ```python
 import mindspore
 
-@mindspore.jit()
+@mindspore.jit
 def test():
     l = (x * x for x in range(1, 11) if x % 2 == 0)
     return l
diff --git a/tutorials/source_en/compile/static_graph.md b/tutorials/source_en/compile/static_graph.md
index 29d4b60fd6cdf7db03b2dd2b85c3197b3f76688f..535649e4920cc61bc4c590ebcf1813434eda5279 100644
--- a/tutorials/source_en/compile/static_graph.md
+++ b/tutorials/source_en/compile/static_graph.md
@@ -1510,6 +1510,7 @@ compilation problems can be found in [Network compilation](https://www.mindspore
 
 ``` python
 import mindspore
+from mindspore import nn
 
 class Net(nn.Cell):
     @classmethod
@@ -1746,6 +1747,7 @@ not use the \'@jit_class\' decoration and does not inherit \'nn. Cell\`.
 
 ``` python
 import mindspore
+from mindspore import nn
 
 class GetattrClass():
     def __init__(self):
@@ -2252,7 +2254,7 @@ class Net(nn.Cell):
     @mindspore.jit
     def construct(self, a):
         x = {'a': a, 'b': 2}
-        return a, (x, (1, 2))
+        return a, x
 
 net = Net()
 out = mindspore.grad(net)(mindspore.tensor([1]))
diff --git a/tutorials/source_en/compile/static_graph_expert_programming.md b/tutorials/source_en/compile/static_graph_expert_programming.md
index de0a0b2ae7c52d185e1f1e551b6e18fb911e5e5e..5d06e65b08bdc376a5e48004b53ccd3a3559a051 100644
--- a/tutorials/source_en/compile/static_graph_expert_programming.md
+++ b/tutorials/source_en/compile/static_graph_expert_programming.md
@@ -40,8 +40,7 @@ If we think of the loop body as a subgraph that is called frequently, and tell t
 Taking the Pangu_alpha network as an example, the `network` handled in the `PipelineCell` function body is an instance of the `PanGUAlphaWithLoss` class. In order to implement a delayed inline, a `@lazy_inline` decorator is added to the `__init__` function of the `PanGUAlphaWithLoss` class to mark that the subgraph structure of the `PanGUAlphaWithLoss` class needs to be preserved without inlining or with delayed inlining. As shown below:
 
 ```python
-from mindspore import nn
-from mindspore import lazy_inline
+from mindspore import nn, lazy_inline
 
 class PanGUAlphaWithLoss(nn.Cell):
     @lazy_inline
@@ -67,8 +66,7 @@ def construct(self, x)
 With the introduction of reusable computation graphs, Cell instances with the same `cell_init_args` only need to be compiled and resolved once. So for more generalized scenarios of calling different instances of the same Cell class, as long as the `cell_init_args` are the same, we can add the `@lazy_inline` decorator to speed up compilation. For example, GPT networks:
 
 ```python
-from mindspore import nn
-from mindspore import lazy_inline
+from mindspore import nn, lazy_inline
 
 class Block(nn.Cell):
     @lazy_inline
@@ -108,8 +106,7 @@ As in the example above, add the `@lazy_inline` decorator to the `__init__` func
 1. Cell generates Cell instance identifiers based on the class name of the Cell and the value of the `__init__` parameter. This is based on the assumption that the `__init__` parameter determines all the attributes of the Cell, and that the Cell attributes at the start of the `construct` composition are the same as the attributes at the end of the `__init__` execution, therefore the composition-dependent attributes of Cell cannot be changed after `__init__` is executed. For example:
 
    ```python
-   from mindspore import nn
-   from mindspore import lazy_inline
+   from mindspore import nn, lazy_inline
 
    class Block(nn.Cell):
        @lazy_inline
@@ -145,8 +142,7 @@ As in the example above, add the `@lazy_inline` decorator to the `__init__` func
 2. In a scenario where the network structure of a Cell class contains multiple instances of the Cell_X class, and the network structure of each Cell_X class contains multiple instances of the Cell_Y class, if you add `@lazy_inline` to the `__init__` functions of both the Cell_X and Cell_Y classes, only the outermost Cell_X instances will be compiled into a reusable computation graph and delayed inline. The computation graph of the inner Cell_Y instance will still be inline. e.g.:
 
    ```python
-   from mindspore import nn
-   from mindspore import lazy_inline
+   from mindspore import nn, lazy_inline
 
    class InnerBlock(nn.Cell):
        @lazy_inline # InnerBlock does not get delayed inline
@@ -207,21 +203,24 @@ A code sample that optimizes compilation performance by enabling compilation cac
 ```python
 import os
 import time
-from mindspore import dtype
-import mindspore as ms
+import mindspore
+from mindspore import nn
+
+mindspore.set_context(mode=mindspore.GRAPH_MODE)
 
-@ms.jit
-def func(input_x, input_y):
-    output = input_x
-    for _ in range(200):
-        output = input_x + input_x * input_y + output
-    return output
+class Model(nn.Cell):
+    def construct(self, input_x, input_y):
+        output = input_x
+        for _ in range(200):
+            output = input_x + input_x * input_y + output
+        return output
 
 os.environ['MS_COMPILER_CACHE_ENABLE'] = '0'
-x = ms.Tensor([1], dtype.float32)
-y = ms.Tensor([2], dtype.float32)
+x = mindspore.tensor([1], mindspore.float32)
+y = mindspore.tensor([2], mindspore.float32)
+model = Model()
 start_time = time.time()
-out = func(x, y)
+out = model(x, y)
 end_time = time.time()
 print("Disable compile_cache cost time:", end_time - start_time)
 ```
@@ -238,22 +237,25 @@ It can be seen that when the compilation cache is turned off, the 2nd execution
 ```python
 import os
 import time
-from mindspore import dtype
-import mindspore as ms
+import mindspore
+from mindspore import nn
+
+mindspore.set_context(mode=mindspore.GRAPH_MODE)
 
-@ms.jit
-def func(input_x, input_y):
-    output = input_x
-    for _ in range(200):
-        output = input_x + input_x * input_y + output
-    return output
+class Model(nn.Cell):
+    def construct(self, input_x, input_y):
+        output = input_x
+        for _ in range(200):
+            output = input_x + input_x * input_y + output
+        return output
 
 os.environ['MS_COMPILER_CACHE_ENABLE'] = '1'
 os.environ['MS_COMPILER_CACHE_PATH'] = 'my_compile_cache'
-x = ms.Tensor([1], dtype.float32)
-y = ms.Tensor([2], dtype.float32)
+x = mindspore.tensor([1], mindspore.float32)
+y = mindspore.tensor([2], mindspore.float32)
+model = Model()
 start_time = time.time()
-out = func(x, y)
+out = model(x, y)
 end_time = time.time()
 os.environ['MS_COMPILER_CACHE_ENABLE'] = '0'
 print("Enable compile_cache cost time:", end_time - start_time)
@@ -304,18 +306,18 @@ The jit_class decorator only supports modifying custom classes, not classes that
 
 ```python
 import numpy as np
-import mindspore.nn as nn
-import mindspore as ms
+import mindspore
+from mindspore import nn
 
-@ms.jit_class
+@mindspore.jit_class
 class InnerNet:
-    value = ms.Tensor(np.array([1, 2, 3]))
+    value = mindspore.tensor(np.array([1, 2, 3]))
 
 class Net(nn.Cell):
+    @mindspore.jit
     def construct(self):
         return InnerNet().value
 
-ms.set_context(mode=ms.GRAPH_MODE)
 net = Net()
 out = net()
 print(out)
@@ -328,16 +330,16 @@
 If jit_class modifies a class that inherits from `Cell`, an error will be reported.
 
 ```python
-import mindspore.nn as nn
-import mindspore as ms
+import mindspore
+from mindspore import nn
 
-@ms.jit_class
+@mindspore.jit_class
 class Net(nn.Cell):
+    @mindspore.jit
     def construct(self, x):
         return x
 
-ms.set_context(mode=ms.GRAPH_MODE)
-x = ms.Tensor(1)
+x = mindspore.tensor(1)
 net = Net()
 net(x)
 ```
@@ -352,15 +354,15 @@ jit_class supports scenarios where custom classes are used nested, and custom cl
 
 ```python
 import numpy as np
-import mindspore.nn as nn
-import mindspore as ms
+import mindspore
+from mindspore import nn
 
-@ms.jit_class
+@mindspore.jit_class
 class Inner:
     def __init__(self):
-        self.value = ms.Tensor(np.array([1, 2, 3]))
+        self.value = mindspore.tensor(np.array([1, 2, 3]))
 
-@ms.jit_class
+@mindspore.jit_class
 class InnerNet:
     def __init__(self):
         self.inner = Inner()
@@ -370,11 +372,11 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.inner_net = InnerNet()
 
+    @mindspore.jit
     def construct(self):
         out = self.inner_net.inner.value
         return out
 
-ms.set_context(mode=ms.GRAPH_MODE)
 net = Net()
 out = net()
 print(out)
@@ -389,10 +391,10 @@ print(out)
 Supports calling attributes and methods by class name or class instance.
 
 ```python
-import mindspore.nn as nn
-import mindspore as ms
+import mindspore
+from mindspore import nn
 
-@ms.jit_class
+@mindspore.jit_class
 class InnerNet:
     def __init__(self, val):
         self.number = val
@@ -405,12 +407,12 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.inner_net = InnerNet(2)
 
+    @mindspore.jit
     def construct(self, x, y):
         return self.inner_net.number + self.inner_net.act(x, y)
 
-ms.set_context(mode=ms.GRAPH_MODE)
-x = ms.Tensor(2, dtype=ms.int32)
-y = ms.Tensor(3, dtype=ms.int32)
+x = mindspore.tensor(2, dtype=mindspore.int32)
+y = mindspore.tensor(3, dtype=mindspore.int32)
 net = Net()
 out = net(x, y)
 print(out)
@@ -426,20 +428,20 @@ For functions that will be compiled into static computational graphs, such as `C
 ```python
 import numpy as np
-import mindspore.nn as nn
-import mindspore as ms
+import mindspore
+from mindspore import nn
 
-@ms.jit_class
+@mindspore.jit_class
 class InnerNet:
     def __init__(self, val):
         self.number = val + 3
 
 class Net(nn.Cell):
+    @mindspore.jit
     def construct(self):
         net = InnerNet(2)
         return net.number
 
-ms.set_context(mode=ms.GRAPH_MODE)
 net = Net()
 out = net()
 print(out)
@@ -455,10 +457,10 @@ When calling an instance of a class modified by `@jit_class`, the `__call__` fun
 
 ```python
 import numpy as np
-import mindspore.nn as nn
-import mindspore as ms
+import mindspore
+from mindspore import nn
 
-@ms.jit_class
+@mindspore.jit_class
 class InnerNet:
     def __init__(self, number):
         self.number = number
@@ -467,14 +469,14 @@ class InnerNet:
         return self.number * (x + y)
 
 class Net(nn.Cell):
+    @mindspore.jit
     def construct(self, x, y):
         net = InnerNet(2)
         out = net(x, y)
         return out
 
-ms.set_context(mode=ms.GRAPH_MODE)
-x = ms.Tensor(2, dtype=ms.int32)
-y = ms.Tensor(3, dtype=ms.int32)
+x = mindspore.tensor(2, dtype=mindspore.int32)
+y = mindspore.tensor(3, dtype=mindspore.int32)
 net = Net()
 out = net(x, y)
 print(out)
@@ -488,23 +490,23 @@ If the class does not define a `__call__` function, an error will be reported.
 
 ```python
 import numpy as np
-import mindspore.nn as nn
-import mindspore as ms
+import mindspore
+from mindspore import nn
 
-@ms.jit_class
+@mindspore.jit_class
 class InnerNet:
     def __init__(self, number):
         self.number = number
 
 class Net(nn.Cell):
+    @mindspore.jit
     def construct(self, x, y):
         net = InnerNet(2)
         out = net(x, y)
         return out
 
-ms.set_context(mode=ms.GRAPH_MODE)
-x = ms.Tensor(2, dtype=ms.int32)
-y = ms.Tensor(3, dtype=ms.int32)
+x = mindspore.tensor(2, dtype=mindspore.int32)
+y = mindspore.tensor(3, dtype=mindspore.int32)
 net = Net()
 out = net(x, y)
 print(out)
@@ -528,10 +530,10 @@ A code sample that uses the `Select` operator instead of an if statement to opti
 ```python
 import time
+import mindspore
 from mindspore import ops
-import mindspore as ms
 
-@ms.jit
+@mindspore.jit
 def if_net(x, y):
     out = 0
     for _ in range(100):
@@ -543,11 +545,11 @@ def if_net(x, y):
     return out
 
 start_time = time.time()
-out = if_net(ms.Tensor([0]), ms.Tensor([1]))
+out = if_net(mindspore.tensor([0]), mindspore.tensor([1]))
 end_time = time.time()
 print("if net cost time:", end_time - start_time)
 
-@ms.jit
+@mindspore.jit
 def select_net(x, y):
     out = x
     for _ in range(100):
@@ -557,7 +559,7 @@ def select_net(x, y):
     return out
 
 start_time = time.time()
-out = select_net(ms.Tensor([0]), ms.Tensor([1]))
+out = select_net(mindspore.tensor([0]), mindspore.tensor([1]))
 end_time = time.time()
 print("select net cost time:", end_time - start_time)
 ```
@@ -580,13 +582,13 @@ The running results of the above code are as follows (the actual time consumptio
 ```python
 import numpy as np
 import time
+import mindspore
 from mindspore import ops, vmap
-import mindspore as ms
 
 def hswish_func(x):
     return ops.HSwish()(x)
 
-@ms.jit
+@mindspore.jit
 def manually_batched(xs):
     output = []
     for i in range(xs.shape[0]):
@@ -596,7 +598,7 @@ def manually_batched(xs):
 shape = (100, 2)
 prop = 100
 x_np = (np.random.randn(*shape) * prop).astype(np.float32)
-x = ms.Tensor(x_np)
+x = mindspore.tensor(x_np)
 x = ops.sub(x, 0)
 
 start_time = time.time()
@@ -642,13 +644,10 @@ It is worth stating that the particular set of operators used for floating-point
 
 ```python
 import numpy as np
-import mindspore as ms
-import mindspore.nn as nn
-from mindspore import ops, set_context, Tensor
-from mindspore import dtype as mstype
+import mindspore
+from mindspore import nn, ops, Tensor
 
-set_context(mode=ms.GRAPH_MODE)
-ms.set_device("Ascend")
+mindspore.set_device("Ascend")
 
 class Net(nn.Cell):
     def __init__(self):
@@ -657,6 +656,7 @@ class Net(nn.Cell):
         self.get_status = ops.NPUGetFloatStatus()
         self.clear_status = ops.NPUClearFloatStatus()
 
+    @mindspore.jit
     def construct(self, x):
         init = self.alloc_status()
         clear_status = self.clear_status(init)
@@ -669,7 +669,7 @@ class Net(nn.Cell):
 
 value = 5
 data = np.full((2, 3), value, dtype=np.float16)
-x = Tensor(data, dtype=mstype.float16)
+x = mindspore.tensor(data, dtype=mindspore.float16)
 net = Net()
 res = net(x)
 print(res)
@@ -697,16 +697,13 @@ We can further understand this through the use case and the generated intermedia
 
 ```python
 import numpy as np
-from mindspore.nn import Cell
-from mindspore import Tensor, Parameter, ops
-import mindspore as ms
-
-ms.set_context(mode=ms.GRAPH_MODE)
+import mindspore
+from mindspore import nn, ops, Tensor, Parameter
 
-class ForwardNet(Cell):
+class ForwardNet(nn.Cell):
     def __init__(self):
         super(ForwardNet, self).__init__()
-        self.weight = Parameter(Tensor(np.array(0), ms.int32), name="param")
+        self.weight = Parameter(mindspore.tensor(np.array(0), mindspore.int32), name="param")
 
     def construct(self, x):
         out = 0
@@ -717,22 +714,22 @@
         i = i + 1
     return out
 
-
-class BackwardNet(Cell):
+class BackwardNet(nn.Cell):
     def __init__(self, net):
         super(BackwardNet, self).__init__(auto_prefix=False)
         self.forward_net = net
         self.grad = ops.GradOperation(get_all=True)
 
+    @mindspore.jit
     def construct(self, *inputs):
         grads = self.grad(self.forward_net)(*inputs)
        return grads
 
-x = Tensor(np.array(1), ms.int32)
+x = mindspore.tensor(np.array(1), mindspore.int32)
 graph_forword_net = ForwardNet()
 graph_backword_net = BackwardNet(graph_forword_net)
 graph_mode_grads = graph_backword_net(x)
-output_except = (Tensor(np.array(3), ms.int32),)
+output_except = (mindspore.tensor(np.array(3), mindspore.int32),)
 assert np.all(graph_mode_grads == output_except)
 ```
 
diff --git a/tutorials/source_zh_cn/compile/python_builtin_functions.ipynb b/tutorials/source_zh_cn/compile/python_builtin_functions.ipynb
index a695fae6b97fca20e5bdcdcfb8a9baf00ce47317..0c59cd9017582150ac93a288b82f18fb9692f6b4 100644
--- a/tutorials/source_zh_cn/compile/python_builtin_functions.ipynb
+++ b/tutorials/source_zh_cn/compile/python_builtin_functions.ipynb
@@ -547,7 +547,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": null,
    "id": "8f0d5275",
    "metadata": {},
    "outputs": [
@@ -570,7 +570,7 @@
    "\n",
    "z = mindspore.tensor(np.ones((6, 4, 5)))\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test(w):\n",
    "    x = (2, 3, 4)\n",
    "    y = [2, 3, 4]\n",
@@ -641,7 +641,7 @@
    "\n",
    "z = mindspore.tensor(np.ones((6, 4, 5)))\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test(w):\n",
    "    x = (2, 3, 4)\n",
    "    y = [2, 3, 4]\n",
@@ -1139,7 +1139,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 32,
+   "execution_count": null,
    "id": "859aad65",
    "metadata": {},
    "outputs": [
@@ -1158,7 +1158,7 @@
    "def add(x, y):\n",
    "    return x + y\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    elements_a = (1, 2, 3)\n",
    "    elements_b = (4, 5, 6)\n",
@@ -1193,7 +1193,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 33,
+   "execution_count": null,
    "id": "14ebda94",
    "metadata": {},
    "outputs": [
@@ -1208,7 +1208,7 @@
    "import mindspore\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    elements_a = (1, 2, 3)\n",
    "    elements_b = (4, 5, 6)\n",
@@ -1251,7 +1251,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 34,
+   "execution_count": null,
    "id": "a1fbd498",
    "metadata": {},
    "outputs": [
@@ -1268,7 +1268,7 @@
    "import mindspore\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    x = range(0, 6, 2)\n",
    "    y = range(0, 5)\n",
@@ -1309,7 +1309,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 35,
+   "execution_count": null,
    "id": "5265694c",
    "metadata": {},
    "outputs": [
@@ -1328,7 +1328,7 @@
    "\n",
    "y = mindspore.tensor(np.array([[1, 2], [3, 4], [5, 6]]))\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    x = (100, 200, 300, 400)\n",
    "    m = enumerate(x, 3)\n",
@@ -1439,7 +1439,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 37,
+   "execution_count": null,
    "id": "3d67dadf",
    "metadata": {},
    "outputs": [
@@ -1458,7 +1458,7 @@
    "x = mindspore.tensor(np.array([1, 2, 3]))\n",
    "y = mindspore.tensor(np.array([1, 2, 3]))\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test(x, y):\n",
    "    return pow(x, y)\n",
    "\n",
@@ -1487,7 +1487,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 38,
+   "execution_count": null,
    "id": "9a834f00",
    "metadata": {},
    "outputs": [
@@ -1507,7 +1507,7 @@
    "x = mindspore.tensor(np.array([1, 2, 3]), mindspore.int32)\n",
    "y = mindspore.tensor(3, mindspore.int32)\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test(x, y):\n",
    "    print(x)\n",
    "    print(y)\n",
@@ -1540,7 +1540,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 39,
+   "execution_count": null,
    "id": "6f5dbb25",
    "metadata": {},
    "outputs": [
@@ -1561,7 +1561,7 @@
    "        return True\n",
    "    return False\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    elements1 = (1, 2, 3, 4, 5)\n",
    "    ret1 = filter(is_odd, elements1)\n",
diff --git a/tutorials/source_zh_cn/compile/statements.ipynb b/tutorials/source_zh_cn/compile/statements.ipynb
index 952f4f8e6a70bebd4ddb3d39f3c34d0443c22467..0cafb5bce58d1472becf259d47c1ffaa58ac1899 100644
--- a/tutorials/source_zh_cn/compile/statements.ipynb
+++ b/tutorials/source_zh_cn/compile/statements.ipynb
@@ -332,7 +332,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "id": "bed905ff",
    "metadata": {},
    "outputs": [
@@ -352,7 +352,7 @@
    "m = 1\n",
    "n = 2\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test_if_cond(x, y):\n",
    "    if (x > y).any():\n",
    "        return m\n",
@@ -381,7 +381,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": null,
    "id": "97c44982",
    "metadata": {},
    "outputs": [
@@ -401,7 +401,7 @@
    "m = 1\n",
    "n = 2\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test_if_cond(x, y):\n",
    "    out = 3\n",
    "    if (x > y).any():\n",
@@ -433,7 +433,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": null,
    "id": "e2c6cf0b",
    "metadata": {},
    "outputs": [
@@ -452,7 +452,7 @@
    "y = mindspore.tensor([0, 3], mindspore.int32)\n",
    "m = 1\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test_if_cond(x, y):\n",
    "    out = 2\n",
    "    if (x > y).any():\n",
@@ -502,7 +502,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
    "id": "72e9582e",
    "metadata": {},
    "outputs": [
@@ -521,7 +521,7 @@
    "\n",
    "z = mindspore.tensor(np.ones((2, 3)))\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test_cond():\n",
    "    x = (1, 2, 3)\n",
    "    for i in x:\n",
@@ -560,7 +560,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": null,
    "id": "76a689cf",
    "metadata": {},
    "outputs": [
@@ -578,7 +578,7 @@
    "m = 1\n",
    "n = 2\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test_cond(x, y):\n",
    "    while x < y:\n",
    "        x += 1\n",
@@ -608,7 +608,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": null,
    "id": "a70a6364",
    "metadata": {},
    "outputs": [
@@ -629,7 +629,7 @@
    "def ops1(a, b):\n",
    "    return a + b\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test_cond(x, y):\n",
    "    out = m\n",
    "    while x < y:\n",
@@ -667,7 +667,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": null,
    "id": "1839f820",
    "metadata": {},
    "outputs": [
@@ -685,7 +685,7 @@
    "def number_add(x, y):\n",
    "    return x + y\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test(x, y):\n",
    "    return number_add(x, y)\n",
    "\n",
@@ -713,7 +713,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": null,
    "id": "2bdffe97",
    "metadata": {},
    "outputs": [
@@ -728,7 +728,7 @@
    "import mindspore\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test(x, y):\n",
    "    number_add = lambda x, y: x + y\n",
    "    return number_add(x, y)\n",
@@ -759,7 +759,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": null,
    "id": "84ebe8bd",
    "metadata": {},
    "outputs": [
@@ -779,7 +779,7 @@
    "def add(x, y):\n",
    "    return x + y\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    add_ = ops.partial(add, x=2)\n",
    "    m = add_(y=3)\n",
@@ -816,7 +816,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 19,
+   "execution_count": null,
    "id": "a5e4a2d1",
    "metadata": {},
    "outputs": [
@@ -831,7 +831,7 @@
    "import mindspore\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    l = [x * x for x in range(1, 11) if x % 2 == 0]\n",
    "    return l\n",
@@ -880,7 +880,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": null,
    "id": "9c604b7f",
    "metadata": {},
    "outputs": [
@@ -895,7 +895,7 @@
    "import mindspore\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    x = [('a', 1), ('b', 2), ('c', 3)]\n",
    "    res = {k: v for (k, v) in x if v > 1}\n",
@@ -925,7 +925,7 @@
    "```python\n",
    "import mindspore\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    x = ({'a': 1, 'b': 2}, {'d': 1, 'e': 2}, {'g': 1, 'h': 2})\n",
    "    res = {k: v for y in x for (k, v) in y.items()}\n",
@@ -960,7 +960,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 23,
+   "execution_count": null,
    "id": "3c2e02d3",
    "metadata": {},
    "outputs": [
@@ -975,7 +975,7 @@
    "import mindspore\n",
    "\n",
-   "@mindspore.jit()\n",
+   "@mindspore.jit\n",
    "def test():\n",
    "    l = (x * x for x in range(1, 11) if x % 2 == 0)\n",
    "    return l\n",
diff --git a/tutorials/source_zh_cn/compile/static_graph.md b/tutorials/source_zh_cn/compile/static_graph.md
index eb035627f8722c2873d261e22d0025cdda9408b1..352409369bb6784e4666658655a582aad339efdb 100644
--- a/tutorials/source_zh_cn/compile/static_graph.md
+++ b/tutorials/source_zh_cn/compile/static_graph.md
@@ -1415,6 +1415,7 @@ in-place操作是指直接修改输入张量的内容,而不创建新的张量
 
 ``` python
 import mindspore
+from mindspore import nn
 
 class Net(nn.Cell):
     @classmethod
@@ -1629,6 +1630,7 @@ in-place操作是指直接修改输入张量的内容,而不创建新的张量
 
 ``` python
 import mindspore
+from mindspore import nn
 
 class GetattrClass():
     def __init__(self):
@@ -2127,7 +2129,7 @@ class Net(nn.Cell):
     @mindspore.jit
     def construct(self, a):
         x = {'a': a, 'b': 2}
-        return a, (x, (1, 2))
+        return a, x
 
 net = Net()
 out = mindspore.grad(net)(mindspore.tensor([1]))
diff --git a/tutorials/source_zh_cn/compile/static_graph_expert_programming.ipynb b/tutorials/source_zh_cn/compile/static_graph_expert_programming.ipynb
index 9849ea0d980e6cdf502a658dafe579091096acd9..f0e7bc3338b926283f678eb92e7f8d83b9561d90 100644
--- a/tutorials/source_zh_cn/compile/static_graph_expert_programming.ipynb
+++ b/tutorials/source_zh_cn/compile/static_graph_expert_programming.ipynb
@@ -67,8 +67,7 @@
   "metadata": {},
   "source": [
    "```python\n",
-   "from mindspore import nn\n",
-   "from mindspore import lazy_inline\n",
+   "from mindspore import nn, lazy_inline\n",
    "\n",
    "class PanGUAlphaWithLoss(nn.Cell):\n",
    "    @lazy_inline\n",
@@ -118,8 +117,7 @@
   "metadata": {},
   "source": [
    "```python\n",
-   "from mindspore import nn\n",
-   "from mindspore import lazy_inline\n",
+   "from mindspore import nn, lazy_inline\n",
    "\n",
    "class Block(nn.Cell):\n",
    "    @lazy_inline\n",
@@ -165,8 +163,7 @@
    "1. Cell 是以Cell的类名和`__init__`参数值生成Cell实例标识的,这是基于`__init__`的参数确定Cell 的所有属性,以及`construct`构图开始时的Cell属性和`__init__`执行完的属性一致为假设前提,因此Cell与构图有关的属性,在`__init__`执行完后不能进行更改。例如:\n",
    "\n",
    "   ```python\n",
-   "   from mindspore import nn\n",
-   "   from mindspore import lazy_inline\n",
+   "   from mindspore import nn, lazy_inline\n",
    "\n",
    "   class Block(nn.Cell):\n",
    "       @lazy_inline\n",
@@ -206,8 +203,7 @@
    "2. 一个Cell类的网络结构包含多个Cell_X类的实例,同时每个Cell_X类的网络结构又包含多个Cell_Y的实例的场景,如果往Cell_X和Cell_Y类的`__init__`函数上都加上`@lazy_inline`,那么只有最外层的Cell_X实例的网络结构被编译成可复用的计算图且被延迟inline,内层的Cell_Y实例的计算图还是会被inline。例如:\n",
    "\n",
    "   ```python\n",
-   "   from mindspore import nn\n",
-   "   from mindspore import lazy_inline\n",
+   "   from mindspore import nn, lazy_inline\n",
    "\n",
    "   class InnerBlock(nn.Cell):\n",
    "       @lazy_inline # InnerBlock不会被延迟inline\n",
@@ -275,7 +271,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "id": "8e98fb56",
    "metadata": {},
    "outputs": [
@@ -290,21 +286,24 @@
   "source": [
    "import os\n",
    "import time\n",
-   "from mindspore import dtype\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
+   "\n",
+   "mindspore.set_context(mode=mindspore.GRAPH_MODE)\n",
    "\n",
-   "@ms.jit\n",
-   "def func(input_x, input_y):\n",
-   "    output = input_x\n",
-   "    for _ in range(200):\n",
-   "        output = input_x + input_x * input_y + output\n",
-   "    return output\n",
+   "class Model(nn.Cell):\n",
+   "    def construct(self, input_x, input_y):\n",
+   "        output = input_x\n",
+   "        for _ in range(200):\n",
+   "            output = input_x + input_x * input_y + output\n",
+   "        return output\n",
    "\n",
    "os.environ['MS_COMPILER_CACHE_ENABLE'] = '0'\n",
-   "x = ms.Tensor([1], dtype.float32)\n",
-   "y = ms.Tensor([2], dtype.float32)\n",
+   "x = mindspore.tensor([1], mindspore.float32)\n",
+   "y = mindspore.tensor([2], mindspore.float32)\n",
+   "model = Model()\n",
    "start_time = time.time()\n",
-   "out = func(x, y)\n",
+   "out = model(x, y)\n",
    "end_time = time.time()\n",
    "print(\"Disable compile_cache cost time:\", end_time - start_time)"
   ]
@@ -332,7 +331,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "278e2ca1",
    "metadata": {},
    "outputs": [
@@ -347,22 +346,25 @@
   "source": [
    "import os\n",
    "import time\n",
-   "from mindspore import dtype\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
+   "\n",
+   "mindspore.set_context(mode=mindspore.GRAPH_MODE)\n",
    "\n",
-   "@ms.jit\n",
-   "def func(input_x, input_y):\n",
-   "    output = input_x\n",
-   "    for _ in range(200):\n",
-   "        output = input_x + input_x * input_y + output\n",
-   "    return output\n",
+   "class Model(nn.Cell):\n",
+   "    def construct(self, input_x, input_y):\n",
+   "        output = input_x\n",
+   "        for _ in range(200):\n",
+   "            output = input_x + input_x * input_y + output\n",
+   "        return output\n",
    "\n",
    "os.environ['MS_COMPILER_CACHE_ENABLE'] = '1'\n",
    "os.environ['MS_COMPILER_CACHE_PATH'] = 'my_compile_cache'\n",
-   "x = ms.Tensor([1], dtype.float32)\n",
-   "y = ms.Tensor([2], dtype.float32)\n",
+   "x = mindspore.tensor([1], mindspore.float32)\n",
+   "y = mindspore.tensor([2], mindspore.float32)\n",
+   "model = Model()\n",
    "start_time = time.time()\n",
-   "out = func(x, y)\n",
+   "out = model(x, y)\n",
    "end_time = time.time()\n",
    "os.environ['MS_COMPILER_CACHE_ENABLE'] = '0'\n",
    "print(\"Enable compile_cache cost time:\", end_time - start_time)"
@@ -445,18 +447,18 @@
    ],
   "source": [
    "import numpy as np\n",
-   "import mindspore.nn as nn\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
    "\n",
-   "@ms.jit_class\n",
+   "@mindspore.jit_class\n",
    "class InnerNet:\n",
-   "    value = ms.Tensor(np.array([1, 2, 3]))\n",
+   "    value = mindspore.tensor(np.array([1, 2, 3]))\n",
    "\n",
    "class Net(nn.Cell):\n",
+   "    @mindspore.jit\n",
    "    def construct(self):\n",
    "        return InnerNet().value\n",
    "\n",
-   "ms.set_context(mode=ms.GRAPH_MODE)\n",
    "net = Net()\n",
    "out = net()\n",
    "print(out)"
@@ -476,16 +478,16 @@
   "metadata": {},
   "source": [
    "```python\n",
-   "import mindspore.nn as nn\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
    "\n",
-   "@ms.jit_class\n",
+   "@mindspore.jit_class\n",
    "class Net(nn.Cell):\n",
+   "    @mindspore.jit\n",
    "    def construct(self, x):\n",
    "        return x\n",
    "\n",
-   "ms.set_context(mode=ms.GRAPH_MODE)\n",
-   "x = ms.Tensor(1)\n",
+   "x = mindspore.tensor(1)\n",
    "net = Net()\n",
    "net(x)\n",
    "```"
@@ -533,15 +535,15 @@
    ],
   "source": [
    "import numpy as np\n",
-   "import mindspore.nn as nn\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
    "\n",
-   "@ms.jit_class\n",
+   "@mindspore.jit_class\n",
    "class Inner:\n",
    "    def __init__(self):\n",
-   "        self.value = ms.Tensor(np.array([1, 2, 3]))\n",
+   "        self.value = mindspore.tensor(np.array([1, 2, 3]))\n",
    "\n",
-   "@ms.jit_class\n",
+   "@mindspore.jit_class\n",
    "class InnerNet:\n",
    "    def __init__(self):\n",
    "        self.inner = Inner()\n",
@@ -551,11 +553,11 @@
    "        super(Net, self).__init__()\n",
    "        self.inner_net = InnerNet()\n",
    "\n",
+   "    @mindspore.jit\n",
    "    def construct(self):\n",
    "        out = self.inner_net.inner.value\n",
    "        return out\n",
    "\n",
-   "ms.set_context(mode=ms.GRAPH_MODE)\n",
    "net = Net()\n",
    "out = net()\n",
    "print(out)"
@@ -586,10 +588,10 @@
    }
   ],
  "source": [
-   "import mindspore.nn as nn\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
    "\n",
-   "@ms.jit_class\n",
+   "@mindspore.jit_class\n",
    "class InnerNet:\n",
    "    def __init__(self, val):\n",
    "        self.number = val\n",
@@ -602,12 +604,12 @@
    "        super(Net, self).__init__()\n",
    "        self.inner_net = InnerNet(2)\n",
    "\n",
+   "    @mindspore.jit\n",
    "    def construct(self, x, y):\n",
    "        return self.inner_net.number + self.inner_net.act(x, y)\n",
    "\n",
-   "ms.set_context(mode=ms.GRAPH_MODE)\n",
-   "x = ms.Tensor(2, dtype=ms.int32)\n",
-   "y = ms.Tensor(3, dtype=ms.int32)\n",
+   "x = mindspore.tensor(2, dtype=mindspore.int32)\n",
+   "y = mindspore.tensor(3, dtype=mindspore.int32)\n",
    "net = Net()\n",
    "out = net(x, y)\n",
    "print(out)"
@@ -639,20 +641,20 @@
    ],
   "source": [
    "import numpy as np\n",
-   "import mindspore.nn as nn\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
    "\n",
-   "@ms.jit_class\n",
+   "@mindspore.jit_class\n",
    "class InnerNet:\n",
    "    def __init__(self, val):\n",
    "        self.number = val + 3\n",
    "\n",
    "class Net(nn.Cell):\n",
+   "    @mindspore.jit\n",
    "    def construct(self):\n",
    "        net = InnerNet(2)\n",
    "        return net.number\n",
    "\n",
-   "ms.set_context(mode=ms.GRAPH_MODE)\n",
    "net = Net()\n",
    "out = net()\n",
    "print(out)"
@@ -684,10 +686,10 @@
    ],
   "source": [
    "import numpy as np\n",
-   "import mindspore.nn as nn\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
    "\n",
-   "@ms.jit_class\n",
+   "@mindspore.jit_class\n",
    "class InnerNet:\n",
    "    def __init__(self, number):\n",
    "        self.number = number\n",
@@ -696,14 +698,14 @@
    "        return self.number * (x + y)\n",
    "\n",
    "class Net(nn.Cell):\n",
+   "    @mindspore.jit\n",
    "    def construct(self, x, y):\n",
    "        net = InnerNet(2)\n",
    "        out = net(x, y)\n",
    "        return out\n",
    "\n",
-   "ms.set_context(mode=ms.GRAPH_MODE)\n",
-   "x = ms.Tensor(2, dtype=ms.int32)\n",
-   "y = ms.Tensor(3, dtype=ms.int32)\n",
+   "x = mindspore.tensor(2, dtype=mindspore.int32)\n",
+   "y = mindspore.tensor(3, dtype=mindspore.int32)\n",
    "net = Net()\n",
    "out = net(x, y)\n",
    "print(out)"
@@ -724,23 +726,23 @@
   "source": [
    "```python\n",
    "import numpy as np\n",
-   "import mindspore.nn as nn\n",
-   "import mindspore as ms\n",
+   "import mindspore\n",
+   "from mindspore import nn\n",
    "\n",
-   "@ms.jit_class\n",
+   "@mindspore.jit_class\n",
    "class InnerNet:\n",
    "    def __init__(self, number):\n",
    "        self.number = number\n",
    "\n",
    "class Net(nn.Cell):\n",
+   "    @mindspore.jit\n",
    "    def construct(self, x, y):\n",
    "        net = InnerNet(2)\n",
    "        out = net(x, y)\n",
    "        return out\n",
    "\n",
-   "ms.set_context(mode=ms.GRAPH_MODE)\n",
-   "x = ms.Tensor(2, dtype=ms.int32)\n",
-   "y = ms.Tensor(3, dtype=ms.int32)\n",
+   "x = mindspore.tensor(2, dtype=mindspore.int32)\n",
+   "y = mindspore.tensor(3, dtype=mindspore.int32)\n",
    "net = Net()\n",
    "out = net(x, y)\n",
    "print(out)\n",
@@ -798,10 +800,10 @@
    ],
   "source": [
    "import time\n",
+   "import mindspore\n",
    "from mindspore import ops\n",
-   "import mindspore as ms\n",
    "\n",
-   "@ms.jit\n",
+   "@mindspore.jit\n",
    "def if_net(x, y):\n",
    "    out = 0\n",
    "    for _ in range(100):\n",
@@ -813,11 +815,11 @@
    "    return out\n",
    "\n",
    "start_time = time.time()\n",
-   "out = if_net(ms.Tensor([0]), ms.Tensor([1]))\n",
+   "out = if_net(mindspore.tensor([0]), mindspore.tensor([1]))\n",
    "end_time = time.time()\n",
    "print(\"if net cost time:\", end_time - start_time)\n",
    "\n",
-   "@ms.jit\n",
+   "@mindspore.jit\n",
    "def select_net(x, y):\n",
    "    out = x\n",
    "    for _ in range(100):\n",
@@ -827,7 +829,7 @@
    "    return out\n",
    "\n",
    "start_time = time.time()\n",
-   "out = select_net(ms.Tensor([0]), ms.Tensor([1]))\n",
+   "out = select_net(mindspore.tensor([0]), mindspore.tensor([1]))\n",
    "end_time = time.time()\n",
    "print(\"select net cost time:\", end_time - start_time)"
   ]
@@ -866,13 +868,13 @@
  "source": [
    "import numpy as np\n",
    "import time\n",
+   "import mindspore\n",
    "from mindspore import ops, vmap\n",
-   "import mindspore as ms\n",
    "\n",
    "def hswish_func(x):\n",
    "    return ops.HSwish()(x)\n",
    "\n",
-   "@ms.jit\n",
+   "@mindspore.jit\n",
    "def manually_batched(xs):\n",
    "    output = []\n",
    "    for i in range(xs.shape[0]):\n",
@@ -882,7 +884,7 @@
    "shape = (100, 2)\n",
    "prop = 100\n",
    "x_np = (np.random.randn(*shape) * prop).astype(np.float32)\n",
-   "x = ms.Tensor(x_np)\n",
+   "x = mindspore.tensor(x_np)\n",
    "x = ops.sub(x, 0)\n",
    "\n",
    "start_time = time.time()\n",
@@ -958,13 +960,10 @@
  "source": [
    "```python\n",
    "import numpy as np\n",
-   "import mindspore as ms\n",
-   "import mindspore.nn as nn\n",
-   "from mindspore import ops, set_context, Tensor\n",
-   "from mindspore import dtype as mstype\n",
+   "import mindspore\n",
+   "from mindspore import nn, ops, Tensor\n",
    "\n",
-   "set_context(mode=ms.GRAPH_MODE)\n",
-   "ms.set_device(\"Ascend\")\n",
+   "mindspore.set_device(\"Ascend\")\n",
    "\n",
    "class Net(nn.Cell):\n",
    "    def __init__(self):\n",
@@ -973,6 +972,7 @@
    "        self.get_status = ops.NPUGetFloatStatus()\n",
    "        self.clear_status = ops.NPUClearFloatStatus()\n",
    "\n",
+   "    @mindspore.jit\n",
    "    def construct(self, x):\n",
    "        init = self.alloc_status()\n",
    "        clear_status = self.clear_status(init)\n",
@@ -985,7 +985,7 @@
    "\n",
    "value = 5\n",
    "data = np.full((2, 3), value, dtype=np.float16)\n",
-   "x = Tensor(data, dtype=mstype.float16)\n",
+   "x = mindspore.tensor(data, dtype=mindspore.float16)\n",
    "net = Net()\n",
    "res = net(x)\n",
    "print(res)\n",
@@ -1025,16 +1025,13 @@
  "source": [
    "```python\n",
    "import numpy as np\n",
-   "from mindspore.nn import Cell\n",
-   "from mindspore import Tensor, Parameter, ops\n",
-   "import mindspore as ms\n",
-   "\n",
-   "ms.set_context(mode=ms.GRAPH_MODE)\n",
+   "import mindspore\n",
+   "from mindspore import nn, ops, Tensor, Parameter\n",
    "\n",
-   "class ForwardNet(Cell):\n",
+   "class ForwardNet(nn.Cell):\n",
    "    def __init__(self):\n",
    "        super(ForwardNet, self).__init__()\n",
-   "        self.weight = Parameter(Tensor(np.array(0), ms.int32), name=\"param\")\n",
+   "        self.weight = Parameter(mindspore.tensor(np.array(0), mindspore.int32), name=\"param\")\n",
    "\n",
    "    def construct(self, x):\n",
    "        out = 0\n",
@@ -1045,22 +1042,22 @@
    "        i = i + 1\n",
    "    return out\n",
    "\n",
-   "\n",
-   "class BackwardNet(Cell):\n",
+   "class BackwardNet(nn.Cell):\n",
    "    def __init__(self, net):\n",
    "        super(BackwardNet, self).__init__(auto_prefix=False)\n",
    "        self.forward_net = net\n",
    "        self.grad = ops.GradOperation(get_all=True)\n",
    "\n",
+   "    @mindspore.jit\n",
    "    def construct(self, *inputs):\n",
    "        grads = self.grad(self.forward_net)(*inputs)\n",
    "        return grads\n",
    "\n",
-   "x = Tensor(np.array(1), ms.int32)\n",
+   "x = mindspore.tensor(np.array(1), mindspore.int32)\n",
    "graph_forword_net = ForwardNet()\n",
    "graph_backword_net = BackwardNet(graph_forword_net)\n",
    "graph_mode_grads = graph_backword_net(x)\n",
-   "output_except = (Tensor(np.array(3), ms.int32),)\n",
+   "output_except = (mindspore.tensor(np.array(3), mindspore.int32),)\n",
    "assert np.all(graph_mode_grads == output_except)\n",
    "```"
   ]