Pytorch入门之mnist分类实例
本文实例为大家分享了Pytorch入门之mnist分类的具体代码，供大家参考，具体内容如下
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""MNIST classification with a small CNN in PyTorch.

Trains a three-conv-block network on MNIST for 10 epochs, printing
train/test loss and accuracy after each epoch.
"""
__author__ = 'denny'

import torch


class Net(torch.nn.Module):
    """Three conv blocks (Conv2d -> ReLU -> 2x2 MaxPool) plus a 2-layer MLP.

    Input:  (N, 1, 28, 28) MNIST images.
    Output: (N, 10) unnormalized class logits (CrossEntropyLoss applies
            log-softmax itself).
    """

    def __init__(self):
        super().__init__()
        # 28x28 -> 14x14 after pooling
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
        )
        # 14x14 -> 7x7
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
        )
        # 7x7 -> 3x3 (MaxPool2d floors odd sizes)
        self.conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 64, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
        )
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(64 * 3 * 3, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 10),
        )

    def forward(self, x):
        """Return (N, 10) class logits for a batch of 1x28x28 images."""
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        out = out.view(out.size(0), -1)  # flatten to (N, 64*3*3)
        return self.dense(out)


def main():
    """Download MNIST, train for 10 epochs, and report metrics."""
    # Imported lazily so that `Net` can be imported (and unit-tested)
    # without torchvision installed.
    import torchvision
    import torch.utils.data.dataloader as Data

    train_data = torchvision.datasets.MNIST(
        './mnist', train=True,
        transform=torchvision.transforms.ToTensor(), download=True,
    )
    test_data = torchvision.datasets.MNIST(
        './mnist', train=False,
        transform=torchvision.transforms.ToTensor(),
    )
    # len() is the supported way to size a dataset; the old
    # .train_data/.train_labels attributes are deprecated.
    print("train samples:", len(train_data))
    print("test samples:", len(test_data))

    train_loader = Data.DataLoader(dataset=train_data, batch_size=64, shuffle=True)
    test_loader = Data.DataLoader(dataset=test_data, batch_size=64)

    model = Net()
    print(model)

    optimizer = torch.optim.Adam(model.parameters())
    loss_func = torch.nn.CrossEntropyLoss()

    for epoch in range(10):
        print('epoch {}'.format(epoch + 1))

        # ---- training ----
        # Restore train mode explicitly: eval() set below would otherwise
        # persist into every epoch after the first.
        model.train()
        train_loss = 0.0
        train_acc = 0.0
        for batch_x, batch_y in train_loader:
            out = model(batch_x)
            loss = loss_func(out, batch_y)
            # .item() replaces the removed `loss.data[0]` indexing.
            # Weight by batch size so dividing by len(train_data) yields a
            # true per-sample average (the last batch may be smaller).
            train_loss += loss.item() * batch_x.size(0)
            pred = torch.max(out, 1)[1]
            train_acc += (pred == batch_y).sum().item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
            train_loss / len(train_data), train_acc / len(train_data)))

        # ---- evaluation ----
        model.eval()
        eval_loss = 0.0
        eval_acc = 0.0
        # no_grad() replaces the removed Variable(..., volatile=True) idiom.
        with torch.no_grad():
            for batch_x, batch_y in test_loader:
                out = model(batch_x)
                loss = loss_func(out, batch_y)
                eval_loss += loss.item() * batch_x.size(0)
                pred = torch.max(out, 1)[1]
                eval_acc += (pred == batch_y).sum().item()
        print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
            eval_loss / len(test_data), eval_acc / len(test_data)))


if __name__ == "__main__":
    main()
以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持毛票票。