diff --git a/docs/note/source_en/constraints_on_network_construction.md b/docs/note/source_en/constraints_on_network_construction.md
index 1fa7869dc0b565f7a237f8035d33122024117855..9086c60ed39886de95e9443fc892ec6d808d0ccc 100644
--- a/docs/note/source_en/constraints_on_network_construction.md
+++ b/docs/note/source_en/constraints_on_network_construction.md
@@ -239,7 +239,7 @@ Currently, the following syntax is not supported in network constructors:
 class ExpandDimsTest(Cell):
     def __init__(self):
         super(ExpandDimsTest, self).__init__()
-        self.expandDims = P.ExpandDims()
+        self.expandDims = ops.ExpandDims()
 
     def construct(self, input_x, input_axis):
         return self.expandDims(input_x, input_axis)
@@ -254,7 +254,7 @@ Currently, the following syntax is not supported in network constructors:
 class ExpandDimsTest(Cell):
     def __init__(self, axis):
         super(ExpandDimsTest, self).__init__()
-        self.expandDims = P.ExpandDims()
+        self.expandDims = ops.ExpandDims()
         self.axis = axis
 
     def construct(self, input_x):
diff --git a/docs/note/source_zh_cn/constraints_on_network_construction.md b/docs/note/source_zh_cn/constraints_on_network_construction.md
index f288d50bd59edaa097c41747888c31159a87a116..b55e0319a93ceabecca351f763444dd22f1fb117 100644
--- a/docs/note/source_zh_cn/constraints_on_network_construction.md
+++ b/docs/note/source_zh_cn/constraints_on_network_construction.md
@@ -254,8 +254,8 @@ tuple也支持切片取值操作, 但不支持切片类型为Tensor类型,支
 class ExpandDimsTest(Cell):
     def __init__(self):
         super(ExpandDimsTest, self).__init__()
-        self.expandDims = P.ExpandDims()
-    
+        self.expandDims = ops.ExpandDims()
+
     def construct(self, input_x, input_axis):
         return self.expandDims(input_x, input_axis)
 expand_dim = ExpandDimsTest()
@@ -271,7 +271,7 @@
 class ExpandDimsTest(Cell):
     def __init__(self, axis):
         super(ExpandDimsTest, self).__init__()
-        self.expandDims = P.ExpandDims()
+        self.expandDims = ops.ExpandDims()
         self.axis = axis
 
     def construct(self, input_x):
diff --git a/tutorials/notebook/computer_vision_application.ipynb b/tutorials/notebook/computer_vision_application.ipynb
index adfeec2c49d73e3eae85ae953290da05a5181b12..78ae171c0092deb6e6ad54ffa15654d63064c86e 100644
--- a/tutorials/notebook/computer_vision_application.ipynb
+++ b/tutorials/notebook/computer_vision_application.ipynb
@@ -144,7 +144,6 @@
     "import random\n",
     "import argparse\n",
     "from mindspore import Tensor\n",
-    "from mindspore.ops import operations as P\n",
     "\n",
     "# Set Training Parameters \n",
     "random.seed(1)\n",
@@ -212,7 +211,6 @@
    "source": [
     "import mindspore.nn as nn\n",
     "import mindspore.common.dtype as mstype\n",
-    "import mindspore.ops.functional as F\n",
     "import mindspore.dataset as ds\n",
     "import mindspore.dataset.vision.c_transforms as C\n",
     "import mindspore.dataset.transforms.c_transforms as C2\n",
diff --git a/tutorials/notebook/customized_debugging_information.ipynb b/tutorials/notebook/customized_debugging_information.ipynb
index 0ecf5695d2dca168a737a0163197c2bce56ec5c8..dafdfc455f29e014fa42b63d857c56e055bcd758 100644
--- a/tutorials/notebook/customized_debugging_information.ipynb
+++ b/tutorials/notebook/customized_debugging_information.ipynb
@@ -153,7 +153,6 @@
    "source": [
     "from mindspore.common.initializer import TruncatedNormal\n",
     "import mindspore.nn as nn\n",
-    "from mindspore.ops import operations as P\n",
     "\n",
     "def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):\n",
     "    \"\"\"Conv layer weight initial.\"\"\"\n",
diff --git a/tutorials/notebook/debugging_in_pynative_mode.ipynb b/tutorials/notebook/debugging_in_pynative_mode.ipynb
index c35d9049891cc5ec8dfd160a6d4b58521ad2146a..649cb059a819272f3e7941d6a3bac31f1f9293b7 100644
--- a/tutorials/notebook/debugging_in_pynative_mode.ipynb
+++ b/tutorials/notebook/debugging_in_pynative_mode.ipynb
@@ -253,8 +253,7 @@
    "outputs": [],
    "source": [
     "import mindspore.nn as nn\n",
-    "import mindspore.ops.operations as P\n",
-    "from mindspore.ops import composite as C\n",
+    "import mindspore.ops as ops\n",
     "from mindspore.common import dtype as mstype\n",
     "from mindspore.common.initializer import TruncatedNormal\n",
     "from mindspore.nn import Dense\n",
@@ -373,7 +372,7 @@
     "\n",
     "    def construct(self, x, label):\n",
     "        weights = self.weights\n",
-    "        return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)"
+    "        return ops.GradOperation(get_by_list=True)(self.network, weights)(x, label)"
    ]
   },
   {
diff --git a/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb b/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
index 95934934b7e06879f2d4945661715a92e8d6fe51..7f644a9642ac9614b01d9a709ddb5b441c3bbc5e 100644
--- a/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
+++ b/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
@@ -277,7 +277,7 @@
     "    \n",
     "    ```python\n",
     "    # Init ImageSummary\n",
-    "    self.image_summary = P.ImageSummary()\n",
+    "    self.image_summary = ops.ImageSummary()\n",
     "    ```\n",
     "    \n",
     " 2. 在 `construct` 方法中使用 `ImageSummary` 算子记录输入图像。其中 \"Image\" 为该数据的名称,MindInsight在展示时,会将该名称展示出来以方便识别是哪个数据。\n",
@@ -293,7 +293,7 @@
     "    \n",
     "    ```python\n",
     "    # Init TensorSummary\n",
-    "    self.tensor_summary = P.TensorSummary()\n",
+    "    self.tensor_summary = ops.TensorSummary()\n",
     "    ```\n",
     "    \n",
     " 2. 在`construct`方法中使用`TensorSummary`算子记录张量数据。其中\"Tensor\"为该数据的名称。\n",
@@ -319,7 +319,7 @@
    "source": [
     "import mindspore.nn as nn\n",
     "from mindspore.common.initializer import TruncatedNormal\n",
-    "from mindspore.ops import operations as P\n",
+    "import mindspore.ops as ops\n",
     "\n",
     "def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, pad_mode=\"valid\"):\n",
     "    weight = weight_variable()\n",
@@ -348,15 +348,15 @@
     "        self.conv4 = conv(384, 384, 3, pad_mode=\"same\")\n",
     "        self.conv5 = conv(384, 256, 3, pad_mode=\"same\")\n",
     "        self.relu = nn.ReLU()\n",
-    "        self.max_pool2d = P.MaxPool(ksize=3, strides=2)\n",
+    "        self.max_pool2d = ops.MaxPool(ksize=3, strides=2)\n",
     "        self.flatten = nn.Flatten()\n",
     "        self.fc1 = fc_with_initialize(6*6*256, 4096)\n",
     "        self.fc2 = fc_with_initialize(4096, 4096)\n",
     "        self.fc3 = fc_with_initialize(4096, num_classes)\n",
     "        # Init TensorSummary\n",
-    "        self.tensor_summary = P.TensorSummary()\n",
+    "        self.tensor_summary = ops.TensorSummary()\n",
     "        # Init ImageSummary\n",
-    "        self.image_summary = P.ImageSummary()\n",
+    "        self.image_summary = ops.ImageSummary()\n",
     "\n",
     "    def construct(self, x):\n",
     "        # Record image by Summary operator\n",
@@ -747,7 +747,7 @@
     "        self.conv4 = conv(384, 384, 3, pad_mode=\"same\")\n",
     "        self.conv5 = conv(384, 256, 3, pad_mode=\"same\")\n",
     "        self.relu = nn.ReLU()\n",
-    "        self.max_pool2d = P.MaxPool(ksize=3, strides=2)\n",
+    "        self.max_pool2d = ops.MaxPool(ksize=3, strides=2)\n",
     "        self.flatten = nn.Flatten()\n",
     "        self.fc1 = fc_with_initialize(6*6*256, 4096)\n",
     "        self.fc2 = fc_with_initialize(4096, 4096)\n",
@@ -983,13 +983,13 @@
     "        self.conv4 = conv(384, 384, 3, pad_mode=\"same\")\n",
     "        self.conv5 = conv(384, 256, 3, pad_mode=\"same\")\n",
     "        self.relu = nn.ReLU()\n",
-    "        self.max_pool2d = P.MaxPool(ksize=3, strides=2)\n",
+    "        self.max_pool2d = ops.MaxPool(ksize=3, strides=2)\n",
     "        self.flatten = nn.Flatten()\n",
     "        self.fc1 = fc_with_initialize(6*6*256, 4096)\n",
     "        self.fc2 = fc_with_initialize(4096, 4096)\n",
     "        self.fc3 = fc_with_initialize(4096, num_classes)\n",
     "        # Init TensorSummary\n",
-    "        self.tensor_summary = P.TensorSummary()\n",
+    "        self.tensor_summary = ops.TensorSummary()\n",
     "\n",
     "    def construct(self, x):\n",
     "        x = self.conv1(x)\n",
@@ -1119,13 +1119,13 @@
     "        self.conv4 = conv(384, 384, 3, pad_mode=\"same\")\n",
     "        self.conv5 = conv(384, 256, 3, pad_mode=\"same\")\n",
     "        self.relu = nn.ReLU()\n",
-    "        self.max_pool2d = P.MaxPool(ksize=3, strides=2)\n",
+    "        self.max_pool2d = ops.MaxPool(ksize=3, strides=2)\n",
     "        self.flatten = nn.Flatten()\n",
     "        self.fc1 = fc_with_initialize(6*6*256, 4096)\n",
     "        self.fc2 = fc_with_initialize(4096, 4096)\n",
     "        self.fc3 = fc_with_initialize(4096, num_classes)\n",
     "        # Init ImageSummary\n",
-    "        self.image_summary = P.ImageSummary()\n",
+    "        self.image_summary = ops.ImageSummary()\n",
     "\n",
     "    def construct(self, x):\n",
     "        # Record image by Summary operator\n",
diff --git a/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb b/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
index d7b558c8599d8659862a430559acb695184ca65f..432e48ebbbc0cd4646029174c361da8f6a06e1a0 100644
--- a/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
+++ b/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
@@ -229,7 +229,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from mindspore.ops import operations as P\n",
     "import mindspore.nn as nn\n",
     "from mindspore.common.initializer import TruncatedNormal\n",
     "\n",
diff --git a/tutorials/notebook/mixed_precision.ipynb b/tutorials/notebook/mixed_precision.ipynb
index d74e6e11f9161be89b1788fcd7365fbb2048a5b1..74c1f6c12c320fec830b5b17ac7f8195b0ab8d16 100644
--- a/tutorials/notebook/mixed_precision.ipynb
+++ b/tutorials/notebook/mixed_precision.ipynb
@@ -404,22 +404,21 @@
    "outputs": [],
    "source": [
     "from mindspore.nn.loss.loss import _Loss\n",
-    "from mindspore.ops import operations as P\n",
-    "from mindspore.ops import functional as F\n",
+    "import mindspore.ops as ops\n",
     "from mindspore import Tensor\n",
     "import mindspore.nn as nn\n",
     "\n",
     "class CrossEntropy(_Loss):\n",
     "    def __init__(self, smooth_factor=0., num_classes=1001):\n",
     "        super(CrossEntropy, self).__init__()\n",
-    "        self.onehot = P.OneHot()\n",
+    "        self.onehot = ops.OneHot()\n",
     "        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)\n",
     "        self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)\n",
     "        self.ce = nn.SoftmaxCrossEntropyWithLogits()\n",
-    "        self.mean = P.ReduceMean(False)\n",
+    "        self.mean = ops.ReduceMean(False)\n",
     "\n",
     "    def construct(self, logit, label):\n",
-    "        one_hot_label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)\n",
+    "        one_hot_label = self.onehot(label, ops.shape(logit)[1], self.on_value, self.off_value)\n",
     "        loss = self.ce(logit, one_hot_label)\n",
     "        loss = self.mean(loss, 0)\n",
     "        return loss"
@@ -520,7 +519,7 @@
     "        if self.down_sample:\n",
     "            self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride),\n",
     "                                                        _bn(out_channel)])\n",
-    "        self.add = P.TensorAdd()\n",
+    "        self.add = ops.TensorAdd()\n",
     "\n",
     "    def construct(self, x):\n",
     "        identity = x\n",
@@ -560,7 +559,7 @@
     "\n",
     "        self.conv1 = _conv7x7(3, 64, stride=2)\n",
     "        self.bn1 = _bn(64)\n",
-    "        self.relu = P.ReLU()\n",
+    "        self.relu = ops.ReLU()\n",
     "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"same\")\n",
     "\n",
     "        self.layer1 = self._make_layer(block,\n",
@@ -584,7 +583,7 @@
     "                                       out_channel=out_channels[3],\n",
     "                                       stride=strides[3])\n",
     "\n",
-    "        self.mean = P.ReduceMean(keep_dims=True)\n",
+    "        self.mean = ops.ReduceMean(keep_dims=True)\n",
     "        self.flatten = nn.Flatten()\n",
     "        self.end_point = _fc(out_channels[3], num_classes)\n",
     "\n",
diff --git a/tutorials/notebook/nlp_application.ipynb b/tutorials/notebook/nlp_application.ipynb
index d2df6a718d19955f19505f1d7c82a8a91ce638fd..1155b7351e4e78e6d32ff2c9e68973751a553958 100644
--- a/tutorials/notebook/nlp_application.ipynb
+++ b/tutorials/notebook/nlp_application.ipynb
@@ -679,7 +679,7 @@
     "\n",
     "from mindspore import Tensor, nn, context, Parameter, ParameterTuple\n",
     "from mindspore.common.initializer import initializer\n",
-    "from mindspore.ops import operations as P"
+    "import mindspore.ops as ops"
    ]
   },
   {
@@ -787,7 +787,7 @@
     "        super(StackLSTM, self).__init__()\n",
     "        self.num_layers = num_layers\n",
     "        self.batch_first = batch_first\n",
-    "        self.transpose = P.Transpose()\n",
+    "        self.transpose = ops.Transpose()\n",
     "\n",
     "        # direction number\n",
     "        num_directions = 2 if bidirectional else 1\n",
@@ -883,7 +883,7 @@
     "                                      embed_size,\n",
     "                                      embedding_table=weight)\n",
     "        self.embedding.embedding_table.requires_grad = False\n",
-    "        self.trans = P.Transpose()\n",
+    "        self.trans = ops.Transpose()\n",
     "        self.perm = (1, 0, 2)\n",
     "\n",
     "        if context.get_context(\"device_target\") in STACK_LSTM_DEVICE:\n",
@@ -905,7 +905,7 @@
     "                                 dropout=0.0)\n",
     "            self.h, self.c = lstm_default_state(batch_size, num_hiddens, num_layers, bidirectional)\n",
     "\n",
-    "        self.concat = P.Concat(1)\n",
+    "        self.concat = ops.Concat(1)\n",
     "        if bidirectional:\n",
     "            self.decoder = nn.Dense(num_hiddens * 4, num_classes)\n",
     "        else:\n",
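
Note for reviewers: every hunk above applies the same migration, from the per-module aliases (`from mindspore.ops import operations as P`, `functional as F`, `composite as C`) to the unified `import mindspore.ops as ops` namespace. A minimal, self-contained sketch of the new-style usage follows; the `ExpandDimsNet` class name and the sample input are illustrative only and are not part of the patched files.

```python
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops          # unified namespace used throughout this patch
from mindspore import Tensor

class ExpandDimsNet(nn.Cell):
    """Wraps ops.ExpandDims with a fixed axis, mirroring the docs example above."""
    def __init__(self, axis):
        super(ExpandDimsNet, self).__init__()
        self.expand_dims = ops.ExpandDims()   # was: P.ExpandDims()
        self.axis = axis

    def construct(self, input_x):
        return self.expand_dims(input_x, self.axis)

net = ExpandDimsNet(axis=0)
out = net(Tensor(np.ones((2, 3), np.float32)))
print(out.shape)  # expected: (1, 2, 3)
```

Because primitives such as `ops.ExpandDims` and functional helpers such as `ops.shape` are both reached through this single import, each notebook can drop its separate `P`/`F`/`C` import lines, which is exactly what the hunks above do.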
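
The one non-operator change is the gradient wrapper in `debugging_in_pynative_mode.ipynb`, where `C.GradOperation` becomes `ops.GradOperation` with the same arguments. A rough sketch of that pattern, assuming a toy `nn.Dense` head rather than the notebook's actual network (all names below are placeholders, not from the notebook):

```python
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, ParameterTuple

class TinyNet(nn.Cell):
    """Toy linear head; gradients flow to its weight and bias."""
    def __init__(self):
        super(TinyNet, self).__init__()
        self.dense = nn.Dense(3, 1)

    def construct(self, x):
        return self.dense(x)

class GradWrtParams(nn.Cell):
    """Returns gradients of `network` with respect to its trainable parameters."""
    def __init__(self, network):
        super(GradWrtParams, self).__init__()
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.grad = ops.GradOperation(get_by_list=True)   # was: C.GradOperation(...)

    def construct(self, x):
        return self.grad(self.network, self.weights)(x)

net = TinyNet()
grads = GradWrtParams(net)(Tensor(np.ones((2, 3), np.float32)))
print(len(grads))  # one gradient tensor per trainable parameter
```

Only the namespace moves; the `get_by_list=True` call signature and the two-stage call `grad(network, weights)(inputs)` are unchanged from the old `composite` form.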