test_modules.py
import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor

from mindcv.loss import create_loss
from mindcv.models import create_model
from mindcv.optim import create_optimizer
from mindcv.scheduler import create_scheduler
def test_loss():
    # TODO: check computation correctness over all combinations, e.g.
    #   for lt in ['bce', 'ce']:
    #       for ls in [0, 0.1]:
    # (a NumPy reference check is sketched below this function).

    # Cross-entropy with per-class weights and label smoothing.
    lt = 'cross_entropy'
    ls = 0.1
    weight = Tensor([0.3, 0.3, 0.1, 0.1, 0.2], ms.float32)
    loss = create_loss(loss_type=lt, weight=weight, reduction='mean', label_smoothing=ls, aux_factor=0.)
    logits = Tensor(np.random.randn(3, 5), ms.float32)
    labels = Tensor(np.array([1, 0, 4]), ms.int32)
    output = loss(logits, labels)
    print(output)

    # Binary cross-entropy with element-wise weights.
    lt = 'bce'
    ls = 0.1
    weight = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]), ms.float32)
    loss = create_loss(loss_type=lt, weight=weight, reduction='mean', label_smoothing=ls, aux_factor=0.)
    logits = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]), ms.float32)
    # NOTE: for BCE, the labels must have the same dtype as the logits.
    labels = Tensor(np.array([[0, 1, 0], [0, 0, 1]]), ms.float32)
    output = loss(logits, labels)
    print(output)
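
# A minimal NumPy reference for label-smoothed, class-weighted cross-entropy,
# intended only as a sanity check for the TODO in test_loss(). The exact
# normalization convention ('mean' reduction dividing by the summed weights
# of the true classes) is an assumption and may differ from what create_loss
# implements.
def numpy_smoothed_weighted_ce(logits, labels, weight, smoothing):
    logits = np.asarray(logits, dtype=np.float64)
    n, c = logits.shape
    # Numerically stable log-softmax.
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_prob = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # Smoothed targets: (1 - smoothing) on the true class plus smoothing / c everywhere.
    soft = np.eye(c)[labels] * (1.0 - smoothing) + smoothing / c
    # Class-weighted negative log-likelihood per sample.
    per_sample = -(weight[None, :] * soft * log_prob).sum(axis=1)
    return per_sample.sum() / weight[labels].sum()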
def test_optimizer():
    # Earlier manual-training sketches, kept below for reference:
    '''
    model = ms.nn.SequentialCell(ms.nn.Dense(2, 3),
                                 ms.nn.Sigmoid(),
                                 ms.nn.Dense(3, 1),
                                 ms.nn.Sigmoid())
    '''
    '''
    network = create_model('resnet18')
    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    opt = create_optimizer(network.trainable_params(), 'adam')
    # fixme: use simpler model
    # bs = 8
    # input_data = Tensor(np.ones([bs, 3, 224, 224]).astype(np.float32) * 0.01)
    label = Tensor(np.ones([bs]).astype(np.int32))
    model = ms.Model(network, loss_fn=loss, optimizer=opt, metrics={'acc'})
    model.train(10, , callbacks=[LossMonitor(learning_rate, 1875)])
    output = model(Tensor(input_data))
    loss_output = criterion(output, label)
    grads = train_network(input_data, label)
    success = optimizer(grads)
    loss = loss_output.asnumpy()
    '''
    # TODO: use a data tensor and an MLP with a loss-decreasing check to test
    # faster (a sketch of such a check follows this function).
    from mindvision.classification.dataset import Mnist
    from mindvision.classification.models import lenet
    from mindvision.engine.callback import LossMonitor

    download_train = Mnist(path="/data/mnist/mnist_mv_format", split="test", batch_size=32,
                           repeat_num=1, shuffle=True, resize=32, download=False)
    dataset_train = download_train.run()

    network = lenet(num_classes=10, pretrained=False)
    # net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    net_loss = create_loss('CE')
    # net_opt = create_optimizer(network.trainable_params(), 'sgd', lr=0.01, weight_decay=1e-5)
    net_opt = create_optimizer(network.trainable_params(), 'adam', lr=0.01, weight_decay=1e-5)
    # net_opt = create_optimizer(network.trainable_params(), 'lamb', lr=0.001, weight_decay=1e-5)
    model = ms.Model(network, loss_fn=net_loss, optimizer=net_opt, metrics={'acc'})
    model.train(10, dataset_train, callbacks=[LossMonitor(1e-3, 100)])
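
# A faster alternative for the TODO in test_optimizer(): fit a tiny MLP on a
# single synthetic batch and assert the loss decreases. This is a sketch; the
# layer sizes and step count are arbitrary choices, not values from the
# original test.
def test_optimizer_loss_decreases():
    net = nn.SequentialCell(nn.Dense(16, 32), nn.ReLU(), nn.Dense(32, 4))
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    opt = create_optimizer(net.trainable_params(), 'adam', lr=0.01, weight_decay=1e-5)
    # WithLossCell + TrainOneStepCell run forward, backward, and update in one call.
    train_step = nn.TrainOneStepCell(nn.WithLossCell(net, loss_fn), opt)
    train_step.set_train()
    data = Tensor(np.random.randn(8, 16).astype(np.float32))
    label = Tensor(np.random.randint(0, 4, size=(8,)).astype(np.int32))
    first = float(train_step(data, label).asnumpy())
    last = first
    for _ in range(20):
        last = float(train_step(data, label).asnumpy())
    assert last < first, f'loss did not decrease: {first:.4f} -> {last:.4f}'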
def test_scheduler():
    # learning_rate = 0.1
    # decay_rate = 0.9
    # decay_steps = 4
    num_steps_epoch = 4
    # Query the learning rate at the global step corresponding to epoch 5.
    global_step = Tensor(num_steps_epoch * 5, ms.int32)
    # exponential_decay_lr = nn.ExponentialDecayLR(learning_rate, decay_rate, decay_steps)
    # sched = create_scheduler(num_steps_epoch, 'step_decay', lr=0.1, decay_rate=0.9, decay_epochs=5)
    sched = create_scheduler(num_steps_epoch, 'warmup_cosine_decay', lr=0.1, min_lr=1e-5, decay_epochs=10)
    result = sched(global_step)
    print(result)
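
# Optional helper for eyeballing the schedule above: evaluate the learning
# rate at the start of each epoch. Like test_scheduler(), it assumes the
# object returned by create_scheduler is callable with a global-step Tensor;
# the default epoch count is an arbitrary choice.
def print_schedule(sched, num_steps_epoch, epochs=12):
    for ep in range(epochs):
        step = Tensor(num_steps_epoch * ep, ms.int32)
        # Expect warmup toward lr, then cosine decay toward min_lr.
        print(f'epoch {ep:2d}: lr = {sched(step)}')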
if __name__ == '__main__':
    ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target='GPU')
    np.random.seed(1)
    ms.set_seed(1)
    # test_loss()
    # test_optimizer()
    test_scheduler()