diff --git a/homework/class_2/lixiaoping/.keep b/homework/class_2/lixiaoping/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/homework/class_2/lixiaoping/common_device_type.py b/homework/class_2/lixiaoping/common_device_type.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5d33295706a210ab810464e1ba6063bdbd91c43
--- /dev/null
+++ b/homework/class_2/lixiaoping/common_device_type.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# Copyright (c) 2019, Facebook CORPORATION.
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+common_path = os.path.dirname("../common/")
+if common_path not in sys.path:
+    sys.path.append(common_path)
+from common_device_type_new import *
\ No newline at end of file
diff --git a/homework/class_2/lixiaoping/common_utils.py b/homework/class_2/lixiaoping/common_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fd21b1bd2447ccf898912537675cd6a04a93f66
--- /dev/null
+++ b/homework/class_2/lixiaoping/common_utils.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# Copyright (c) 2019, Facebook CORPORATION.
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+r"""Importing this file must **not** initialize CUDA context. test_distributed
+relies on this assumption to properly run. This means that when this is imported
+no CUDA calls shall be made, including torch.cuda.device_count(), etc.
+
+torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
+"""
+import os
+import sys
+common_path = os.path.dirname("../common/")
+if common_path not in sys.path:
+    sys.path.append(common_path)
+from common_utils_new import *
\ No newline at end of file
diff --git a/homework/class_2/lixiaoping/data.py b/homework/class_2/lixiaoping/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..29a4d1e1bb4dcc8b5048cc37f7a336af5c2a8553
--- /dev/null
+++ b/homework/class_2/lixiaoping/data.py
@@ -0,0 +1,65 @@
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def get_cifar_data(batch_size):
+    # compose data transforms
+    transform = transforms.Compose([
+        # pad to 36x36
+        transforms.Pad(4),
+        # random horizontal flip
+        transforms.RandomHorizontalFlip(),
+        # random crop back to 32x32
+        transforms.RandomCrop(32),
+        # to tensor
+        transforms.ToTensor(),
+        # normalization
+        transforms.Normalize(mean=(0.5, 0.5, 0.5),
+                             std=(0.5, 0.5, 0.5))
+    ])
+
+    # cifar10 path
+    cifar10Path = '/home/lxp/newascend/pytorch/test/test_npu/test_cifar10'
+
+    # train data
+    train_dataset = torchvision.datasets.CIFAR10(root=cifar10Path,
+                                                 train=True,
+                                                 transform=transform,
+                                                 download=True)
+
+    # test data
+    test_dataset = torchvision.datasets.CIFAR10(root=cifar10Path,
+                                                train=False,
+                                                transform=transform)
+
+    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
+                                               batch_size=batch_size,
+                                               shuffle=True)
+
+    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
+                                              batch_size=batch_size,
+                                              shuffle=False)
+
+    return train_loader, test_loader
+
+
+if __name__ == '__main__':
+    train_loader, test_loader = get_cifar_data(batch_size=32)
+
+    data_iter = iter(train_loader)
+    images, labels = next(data_iter)
+    idx = 31
+    image = images[idx].numpy()
+    image = np.transpose(image, (1, 2, 0))
+    plt.imshow(image)
+    plt.show()
+    classes = ('plane', 'car', 'bird', 'cat',
+               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
+    print(classes[labels[idx].numpy()])
+    print("1")
+
+
+    print(type(train_loader))
diff --git a/homework/class_2/lixiaoping/model.py b/homework/class_2/lixiaoping/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..c86885d1f98a2d35064e5ad1e6538633986c80c5
--- /dev/null
+++ b/homework/class_2/lixiaoping/model.py
@@ -0,0 +1,40 @@
+import torch
+import torch.nn as nn
+
+
+class ConvNet(nn.Module):
+    def __init__(self, num_classes=10):
+        super(ConvNet, self).__init__()
+        self.conv1 = nn.Sequential(
+            # convolution
+            nn.Conv2d(3, 16, kernel_size=5, stride=1, padding=2),
+            # batch normalization
+            nn.BatchNorm2d(16),
+            # ELU activation
+            nn.ELU(),
+            # max pool
+            nn.MaxPool2d(kernel_size=2, stride=2))
+
+        self.conv2 = nn.Sequential(
+            nn.Conv2d(16, 16, kernel_size=5, stride=1, padding=2),
+            nn.BatchNorm2d(16),
+            nn.ReLU(),
+            nn.AvgPool2d(kernel_size=2, stride=2)
+        )
+
+        self.conv3 = nn.Sequential(
+            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
+            nn.BatchNorm2d(32),
+            nn.Softplus(),
+            nn.MaxPool2d(kernel_size=2, stride=2))
+
+        self.fc = nn.Linear(4 * 4 * 32, num_classes)
+
+    # forward pass
+    def forward(self, x):
+        out = self.conv1(x)
+        out = self.conv2(out)
+        out = self.conv3(out)
+        out = out.reshape(out.size(0), -1)
+        out = self.fc(out)
+        return out
diff --git a/homework/class_2/lixiaoping/test.py b/homework/class_2/lixiaoping/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..479ba433918cd2d00e81b71cdf215da472c2595f
--- /dev/null
+++ b/homework/class_2/lixiaoping/test.py
@@ -0,0 +1,35 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import torch
+import data
+import model
+
+
+if __name__ == '__main__':
+    # cifar10 classes
+    classes = ('plane', 'car', 'bird', 'cat',
+               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
+
+    train_loader, test_loader = data.get_cifar_data(32)
+    data_iter = iter(test_loader)
+    images, labels = next(data_iter)
+
+    idx = 2
+    image = images[idx].numpy()
+    image = np.transpose(image, (1, 2, 0))
+    plt.imshow(image)
+    plt.show()
+    print(classes[labels[idx].numpy()])
+
+    image_batch = np.transpose(image, (2, 0, 1)).reshape(-1, 3, 32, 32)  # back to NCHW before feeding the model
+    image_tensor = torch.from_numpy(image_batch)
+
+    model = model.ConvNet(10)
+    model_info = torch.load("model_info.ckpt")
+    model.load_state_dict(model_info["model"])
+    model.eval()
+    output = model(image_tensor)
+    _, predicted = torch.max(output.data, 1)
+    pre = predicted.numpy()
+    print(pre)
+    print(classes[pre[0]])
diff --git a/homework/class_2/lixiaoping/train.py b/homework/class_2/lixiaoping/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d8e4993d6342c6e6a013d1195ab4db5d4a3dd2c
--- /dev/null
+++ b/homework/class_2/lixiaoping/train.py
@@ -0,0 +1,62 @@
+import torch
+import torch.nn as nn
+import data
+import model
+
+
+def train(train_loader, test_loader, model, learning_rate, num_epochs):
+    # loss and optimization
+    criterion = nn.CrossEntropyLoss()
+    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
+
+    # training
+    total_step = len(train_loader)
+    for epoch in range(num_epochs):
+        for i, (images, labels) in enumerate(train_loader):
+            # forward
+            outputs = model(images)
+            loss = criterion(outputs, labels)
+
+            # backward and optimization
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+            if (i + 1) % 100 == 0:
+                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
+                      .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
+
+    # evaluation and test
+    model.eval()
+    with torch.no_grad():
+        correct = 0
+        total = 0
+        for images, labels in test_loader:
+            outputs = model(images)
+            _, predicted = torch.max(outputs.data, 1)
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+
+        print('Test Accuracy of the model on the test images: {} %'.format(100 * correct / total))
+
+    # save model
+    model_info = {
+        "iter_num": total_step * num_epochs,
+        "optimizer": optimizer,
+        "model": model.state_dict()
+    }
+    torch.save(model_info, 'model_info.ckpt')
+
+
+if __name__ == '__main__':
+    # hyper parameters
+    num_epochs = 10
+    num_classes = 10
+    batch_size = 32
+    learning_rate = 0.01
+
+    train_loader, test_loader = data.get_cifar_data(batch_size)
+
+    model = model.ConvNet(num_classes)
+
+    train(train_loader, test_loader, model, learning_rate, num_epochs)
diff --git a/homework/class_2/lixiaoping/util_test.py b/homework/class_2/lixiaoping/util_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9818267c0e8a717d7fac4e3179d201329ba257a
--- /dev/null
+++ b/homework/class_2/lixiaoping/util_test.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# Copyright (c) 2019, Facebook CORPORATION.
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+common_path = os.path.dirname("../common/")
+if common_path not in sys.path:
+    sys.path.append(common_path)
+
+from util_test_new import *