# 5.1 项目实战-猫狗分类(引入vgg16模型,同时训练卷积层和全连接层)
# 同时训练卷积层和全连接层
import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
# Data preprocessing.
# Training uses random augmentation; evaluation must be deterministic, so the
# test set gets a plain Resize + CenterCrop instead of random crops/flips.
# Normalize uses the ImageNet statistics that the pretrained VGG16 expects.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),       # random crop, then resize to 224x224
    transforms.RandomRotation(20),           # random rotation up to +/-20 degrees
    transforms.RandomHorizontalFlip(p=0.5),  # flip half of the images
    transforms.ToTensor(),
    normalize,
])
test_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
# Load the datasets; ImageFolder expects root/<split>/<class>/<image>.
root = '项目实战/猫狗识别/image'
train_dataset = datasets.ImageFolder(root + '/train', transform)
test_dataset = datasets.ImageFolder(root + '/test', test_transform)
# Wrap in DataLoaders; shuffling the evaluation set is unnecessary.
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)
classes = train_dataset.classes
classes_index = train_dataset.class_to_idx
print(classes)
print(classes_index)
## ['cat', 'dog']
## {'cat': 0, 'dog': 1}
# Start from the torchvision-provided VGG16 with ImageNet-pretrained weights.
model = models.vgg16(pretrained = True)
# Inspect the architecture: a long conv/pool feature extractor followed by a
# fully connected classifier head — it was built exactly for image recognition.
print(model)
# Printed structure (abridged):
##   (features):   13 x [Conv2d(3x3, padding=1) + ReLU], with 5 MaxPool2d(2)
##                 stages; channels grow 3 -> 64 -> 128 -> 256 -> 512 -> 512
##   (avgpool):    AdaptiveAvgPool2d(output_size=(7, 7))
##   (classifier): Linear(25088, 4096) -> ReLU -> Dropout(0.5)
##                 -> Linear(4096, 4096) -> ReLU -> Dropout(0.5)
##                 -> Linear(4096, 1000)
# To train only the fully connected head on top of the pretrained features,
# freeze all parameters first (the freshly created classifier below would
# still require grad). Leaving everything trainable, as we do here, takes
# longer but usually gives somewhat better accuracy.
# for param in model.parameters():
#     param.requires_grad = False
# The pretrained convolutional stack already extracts good features, but the
# original classifier head predicts 1000 ImageNet classes; replace it with a
# small head for our 2 classes (cat / dog).
model.classifier = nn.Sequential(
    nn.Linear(25088, 100),
    nn.ReLU(),
    nn.Dropout(p=0.5),
    nn.Linear(100, 2),
)
LR = 0.0001
# Cross-entropy loss for the 2-class problem.
entropy_loss = nn.CrossEntropyLoss()
# SGD with momentum over all model parameters (conv layers + new head).
optimizer = optim.SGD(model.parameters(), LR, momentum=0.9)
def train():
    """Run one epoch over train_loader, updating the model's weights."""
    model.train()
    for images, targets in train_loader:
        # Forward pass: logits of shape (batch, num_classes).
        logits = model(images)
        # CrossEntropyLoss expects logits (batch, C) and integer targets (batch,).
        loss = entropy_loss(logits, targets)
        # Standard backprop step: clear old gradients, compute new ones, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def test():
    """Print the model's accuracy on the test set and on the training set.

    Evaluation runs under ``torch.no_grad()`` so no autograd graph is built,
    which saves memory and time compared with the plain forward pass.
    """
    model.eval()

    def _accuracy(loader, total):
        # Fraction of samples in `loader` that the model classifies correctly.
        correct = 0
        with torch.no_grad():
            for images, targets in loader:
                logits = model(images)
                # argmax over the class dimension gives the predicted label.
                _, predicted = torch.max(logits, 1)
                # Accumulate as a Python int so an empty loader still works.
                correct += (predicted == targets).sum().item()
        return correct / total

    print("Test acc: {0}".format(_accuracy(test_loader, len(test_dataset))))
    print("Train acc: {0}".format(_accuracy(train_loader, len(train_dataset))))
# Fine-tuning all of VGG16 is slow, so train for only 2 epochs here.
for epoch in range(0, 2):
    print('epoch:', epoch)
    train()
    test()
## epoch: 0
## Test acc: 0.715
## Train acc: 0.7525
## epoch: 1
## Test acc: 0.82
## Train acc: 0.79
# Persist the trained weights for later inference. Create the target
# directory first — torch.save raises if 'model/' does not exist.
os.makedirs('model', exist_ok=True)
torch.save(model.state_dict(), 'model/猫狗分类全训练')
# Comments NOTHING