最近由于工作原因需要学习PyTorch,我将PyTorch入门代码看了一遍,记录下来代码解释,帮助我更好地理解代码。
1.运行环境
建议使用Google的Colaboratory进行实验,该环境可以免费使用一块Tesla K80 GPU,而且不需要配置环境[https://colab.research.google.com/]。不过首先你需要一个梯子,整个页面和jupyter notebook一致。
85FEA6B0-75DC-4BB5-9C7A-0885A416F2E8.png
2.加载数据集
这里使用CIFAR10这个数据集,torchvision中已经内置了该数据集的下载与加载接口
import torch
import torchvision
import torchvision.transforms as transforms

# transforms.ToTensor() converts a PIL image to a torch tensor in [0, 1];
# transforms.Normalize(mean, std) then applies (x - mean) / std per channel
# (images are 3-D CHW tensors with three RGB color channels).
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# Download and normalize the data; batch_size=4, and the training data is
# reshuffled before every epoch.
trainset = torchvision.datasets.CIFAR10(root='/content/sample_data/data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
# BUG FIX: the original duplicated the trainloader line and then used an
# undefined `testset`; the test split must be created first (train=False).
testset = torchvision.datasets.CIFAR10(root='/content/sample_data/data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)

# CIFAR10 has exactly ten classes.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
3.先查看一下数据长什么样子
import matplotlib.pyplot as plt
import numpy as np

def imshow(img):
    """Undo the (x - mean) / std normalization (mean = std = 0.5) and display
    a CHW image tensor with matplotlib."""
    restored = img * 0.5 + 0.5  # inverse of Normalize: data * std + mean
    arr = restored.numpy()
    # Tensors are channel-first (C, H, W); matplotlib wants (H, W, C).
    plt.imshow(np.transpose(arr, (1, 2, 0)))
    plt.show()
# Turn the loader into an iterator and take one batch of data.
dataiter = iter(trainloader)
# BUG FIX: DataLoader iterators no longer expose a .next() method in
# current PyTorch; use the builtin next() instead.
images, labels = next(dataiter)
# Show the batch as an image grid and print its class labels.
imshow(torchvision.utils.make_grid(images))
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
111.png
cat frog truck horse
4.建立一个简单的CNN网络
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small LeNet-style CNN classifying 3x32x32 CIFAR10 images into 10 classes."""

    def __init__(self):
        super().__init__()
        # conv1: in_channels=3, out_channels=6, kernel_size=5, stride=1, padding=0.
        # Output size per the formula (height - kernel_size + 2*padding)/stride + 1:
        # (3, 32, 32) -> (6, 28, 28).
        self.conv1 = nn.Conv2d(3, 6, 5)
        # 2x2 max-pooling halves each spatial dimension.
        self.pool = nn.MaxPool2d(2, 2)
        # (6, 14, 14) -> (16, 10, 10), pooled down to (16, 5, 5).
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)  # flatten to a batch of 400-dim vectors
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
5.定义优化器和损失函数
import torch.optim as optim

# BUG FIX: `net` was never instantiated before being handed to the optimizer,
# which raised a NameError; create the model first.
net = Net()
# Cross-entropy loss for 10-way classification.
criterion = nn.CrossEntropyLoss()
# Plain SGD with momentum over every parameter of the network.
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
6.训练数据
# Train the network: 100 passes over the whole training set.
for epoch in range(100):
    # Running total of the loss, reported and reset every 2000 mini-batches.
    running_loss = 0.0
    for step, batch in enumerate(trainloader, 0):  # batches of 4 shuffled samples
        # Each batch is (input images, class labels).
        inputs, labels = batch
        # backward() ACCUMULATES gradients instead of replacing them, so the
        # optimizer's gradients must be cleared once per mini-batch.
        # (Deliberately clearing only every N batches is a trick that
        # emulates a larger batch size on limited hardware — useful in
        # domains such as object detection — but here we reset every batch.)
        optimizer.zero_grad()
        # forward + backward + optimize
        loss = criterion(net(inputs), labels)
        loss.backward()    # compute fresh gradients
        optimizer.step()   # apply the update; weights only change on this call
        # Report progress.
        running_loss += loss.item()
        if step % 2000 == 1999:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, step + 1, running_loss / 2000))
            running_loss = 0.0
print('训练结束')
7.测试数据
dataiter = iter(testloader)
# BUG FIX: DataLoader iterators have no .next() method in current PyTorch;
# use the builtin next().
images, labels = next(dataiter)
# Visualize one test batch (4 images) alongside its true labels.
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
cat ship ship plane
cat ship ship plane
# 完整代码
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Preprocessing: to tensor, then normalize each RGB channel by (x - 0.5) / 0.5.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# Download both CIFAR10 splits and wrap them in loaders (batch size 4;
# only the training split is shuffled).
trainset = torchvision.datasets.CIFAR10(root='/content/sample_data/data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='/content/sample_data/data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
class Net(nn.Module):
    """LeNet-style CNN: two conv+pool stages followed by three fully-connected layers."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)       # (3, 32, 32) -> (6, 28, 28)
        self.pool = nn.MaxPool2d(2, 2)        # halves H and W
        self.conv2 = nn.Conv2d(6, 16, 5)      # (6, 14, 14) -> (16, 10, 10)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)          # 10 CIFAR10 classes

    def forward(self, x):
        h = self.pool(F.relu(self.conv1(x)))
        h = self.pool(F.relu(self.conv2(h)))
        h = h.view(-1, 16 * 5 * 5)            # flatten
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)
# BUG FIX: the original bound the model to `new`, but every later line
# (optimizer construction and the forward pass) uses `net` — a NameError.
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(10):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients (backward() accumulates otherwise)
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics every 2000 mini-batches
        running_loss += loss.item()
        if i % 2000 == 1999:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')







网友评论