import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor

from mindcv.loss import create_loss
from mindcv.models import create_model
from mindcv.optim import create_optimizer
from mindcv.scheduler import create_scheduler
def test_loss():
    # TODO: check computation correctness, e.g.
    # for lt in ['bce', 'ce']:
    #     for ls in [0, 0.1]:

    # Cross-entropy with per-class weights and label smoothing.
    lt = 'cross_entropy'
    ls = 0.1
    weight = Tensor([0.3, 0.3, 0.1, 0.1, 0.2], ms.float32)
    loss = create_loss(loss_type=lt, weight=weight, reduction='mean', label_smoothing=ls, aux_factor=0.)
    logits = Tensor(np.random.randn(3, 5), ms.float32)
    labels = Tensor(np.array([1, 0, 4]), ms.int32)
    output = loss(logits, labels)
    print(output)

    # Binary cross-entropy with element-wise weights.
    lt = 'bce'
    ls = 0.1
    weight = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]), ms.float32)
    loss = create_loss(loss_type=lt, weight=weight, reduction='mean', label_smoothing=ls, aux_factor=0.)
    logits = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]), ms.float32)
    # Note: for BCE, the labels must have the same dtype (and shape) as the logits.
    labels = Tensor(np.array([[0, 1, 0], [0, 0, 1]]), ms.float32)
    output = loss(logits, labels)
    print(output)
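
# A minimal correctness sketch for the TODO in test_loss(): compare mindcv's
# cross-entropy against mindspore.nn.CrossEntropyLoss. This assumes the two
# share weighted-mean reduction and label-smoothing semantics, which is an
# assumption to be verified, not an established fact about mindcv.
def check_ce_against_builtin():
    weight = Tensor([0.3, 0.3, 0.1, 0.1, 0.2], ms.float32)
    logits = Tensor(np.random.randn(3, 5), ms.float32)
    labels = Tensor(np.array([1, 0, 4]), ms.int32)
    ours = create_loss(loss_type='cross_entropy', weight=weight, reduction='mean',
                       label_smoothing=0.1, aux_factor=0.)(logits, labels)
    ref = nn.CrossEntropyLoss(weight=weight, reduction='mean', label_smoothing=0.1)(logits, labels)
    np.testing.assert_allclose(ours.asnumpy(), ref.asnumpy(), rtol=1e-5)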
def test_optimizer():
    '''
    # Alternative: a tiny MLP instead of a full backbone, for a faster test.
    model = ms.nn.SequentialCell(ms.nn.Dense(2, 3),
                                 ms.nn.Sigmoid(),
                                 ms.nn.Dense(3, 1),
                                 ms.nn.Sigmoid())
    '''
    '''
    # fixme: use simpler model
    network = create_model('resnet18')
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    opt = create_optimizer(network.trainable_params(), 'adam')
    bs = 8
    input_data = Tensor(np.ones([bs, 3, 224, 224]).astype(np.float32) * 0.01)
    label = Tensor(np.ones([bs]).astype(np.int32))
    model = ms.Model(network, loss_fn=loss, optimizer=opt, metrics={'acc'})
    # model.train(10, dataset, callbacks=[LossMonitor(lr, 1875)])  # still needs a dataset
    # Or, at a lower level: run one forward pass and inspect the loss value.
    output = network(input_data)
    loss_output = loss(output, label)
    print(loss_output.asnumpy())
    '''
    # TODO: use a fixed data tensor and an MLP with a loss-decreasing check to
    # test faster (see the sketch after this function).
    from mindvision.classification.dataset import Mnist
    from mindvision.classification.models import lenet
    from mindvision.engine.callback import LossMonitor

    download_train = Mnist(path="/data/mnist/mnist_mv_format", split="test", batch_size=32,
                           repeat_num=1, shuffle=True, resize=32, download=False)
    dataset_train = download_train.run()

    network = lenet(num_classes=10, pretrained=False)
    # net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    net_loss = create_loss('CE')
    # net_opt = create_optimizer(network.trainable_params(), 'sgd', lr=0.01, weight_decay=1e-5)
    net_opt = create_optimizer(network.trainable_params(), 'adam', lr=0.01, weight_decay=1e-5)
    # net_opt = create_optimizer(network.trainable_params(), 'lamb', lr=0.001, weight_decay=1e-5)
    model = ms.Model(network, loss_fn=net_loss, optimizer=net_opt, metrics={'acc'})
    # LossMonitor's first argument is the learning rate to display; keep it in
    # sync with the optimizer (0.01 here, not 1e-3).
    model.train(10, dataset_train, callbacks=[LossMonitor(0.01, 100)])
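
# A minimal sketch of the TODO in test_optimizer(): train a tiny MLP on one
# fixed batch and assert that the loss decreases. nn.WithLossCell and
# nn.TrainOneStepCell are standard MindSpore cells; the layer sizes, step
# count, and data shapes here are illustrative choices, not values from this repo.
def check_loss_decreases():
    net = nn.SequentialCell(nn.Dense(10, 16), nn.ReLU(), nn.Dense(16, 3))
    loss_fn = create_loss('CE')
    opt = create_optimizer(net.trainable_params(), 'adam', lr=0.01)
    train_step = nn.TrainOneStepCell(nn.WithLossCell(net, loss_fn), opt)
    x = Tensor(np.random.randn(32, 10), ms.float32)
    y = Tensor(np.random.randint(0, 3, (32,)), ms.int32)
    first = float(train_step(x, y).asnumpy())
    for _ in range(50):
        last = float(train_step(x, y).asnumpy())
    assert last < first, f"loss did not decrease: {first:.4f} -> {last:.4f}"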
def test_scheduler():
    # learning_rate = 0.1
    # decay_rate = 0.9
    # decay_steps = 4
    num_steps_epoch = 4  # steps per epoch
    # Query the learning rate at global step = 5 epochs * 4 steps/epoch = 20.
    global_step = Tensor(num_steps_epoch * 5, ms.int32)
    # exponential_decay_lr = nn.ExponentialDecayLR(learning_rate, decay_rate, decay_steps)
    # sched = create_scheduler(num_steps_epoch, 'step_decay', lr=0.1, decay_rate=0.9, decay_epochs=5)
    sched = create_scheduler(num_steps_epoch, 'warmup_cosine_decay', lr=0.1, min_lr=1e-5, decay_epochs=10)
    result = sched(global_step)
    print(result)
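
# A quick visualization sketch: sample the warmup-cosine schedule over the
# full decay span to see the ramp followed by the cosine decay. The
# create_scheduler arguments mirror the call in test_scheduler(); the exact
# warmup behavior depends on mindcv's defaults, which are assumed here.
def print_lr_curve(steps_per_epoch=4, epochs=10):
    sched = create_scheduler(steps_per_epoch, 'warmup_cosine_decay', lr=0.1,
                             min_lr=1e-5, decay_epochs=epochs)
    for step in range(steps_per_epoch * epochs):
        lr = sched(Tensor(step, ms.int32))
        print(f"step {step:3d}: lr = {float(lr.asnumpy()):.6f}")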
if __name__ == '__main__':
    ms.set_context(mode=ms.PYNATIVE_MODE, device_target='GPU')
    np.random.seed(1)
    ms.set_seed(1)
    # test_loss()
    # test_optimizer()
    test_scheduler()