CNN解决MNIST问题在Pytorch上没有学习

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets,transforms
from torch.autograd import Variable
from random import randint
from matplotlib import pyplot as plt


train = datasets.MNIST("",train=True,download=True,transform = transforms.Compose([transforms.ToTensor()]))
test = datasets.MNIST("",train=False,transform = transforms.Compose([transforms.ToTensor()]))

bs = 64
trainset = torch.utils.data.DataLoader(train,batch_size = bs,shuffle = True)
testset = torch.utils.data.DataLoader(train,batch_size = 1,shuffle = True)


def MSE(a,b):
   t1 = (a-b)**2
   return t1.mean()

class Net(nn.Module):

    def __init__(self):
        super(Net,self).__init__()
        self.conv1 = nn.Conv2d(1,10,kernel_size=5)
        self.conv2 = nn.Conv2d(10,20,kernel_size=5)
        self.mp = nn.MaxPool2d(2)
        self.fc = nn.Linear(320,1)

    def forward(self,x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = F.relu(self.mp(self.conv2(x)))
        x = x.view(in_size,-1)  # flatten the tensor
        x = self.fc(x).relu()
        return x

net = Net().cuda()     
optimizer = optim.Adam(net.parameters(),lr=(1.0e-3))
print('net created')
losses=[]
for epoch in range(20):
   net.train()#training mode
   for data in trainset:
      t1 = t.time()
      x,y = data
      x = x.cuda()
      y = y.cuda()
      optimizer.zero_grad()
      output = net(x)
      loss = MSE(output,y)
      #print(round(float(output),2),float(y),round(float(loss),2))
      print(float(loss))
      loss.backward()
      optimizer.step()
      losses.append(float(loss))

cnn返回一个o / p,用于根据MSE计算误差。 要注意的一件事是,当我将网络更改为10个神经元o / p层时,我得到的结果要好得多。在这种情况下,我将 nll_loss 用作损失函数,并且还将softmax应用于o / p层。从理论上讲,我认为这应该可行。但这是我在cnn上的第一个裂缝。 对于我来说,错误保持恒定在7-9。即使经过200批次的培训,我也没有学到任何东西。有关如何解决此问题的任何信息都很棒!

greattable 回答:CNN解决MNIST问题在Pytorch上没有学习

暂时没有好的解决方案,如果你有好的解决方案,请发邮件至:iooj@foxmail.com
本文链接:https://www.f2er.com/2627387.html

大家都在问