
import os
import json
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import torchvision
from torchvision import models
from torch.utils.data import Dataset
from torchvision import transforms
from torch.utils.data import DataLoader
import visdom
# from tensorboardX import SummaryWriter
from torch.utils.tensorboard import SummaryWriter

Experiment Overview

This experiment uses the CIFAR10 dataset, which contains ten classes: 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'. Each CIFAR-10 image has shape 3 × 32 × 32 (3 color channels, 32 × 32 pixels).

Loading the CIFAR10 Dataset

torchvision.datasets.CIFAR10(root, train=True, transform=None, 
							target_transform=None, download=False)
  • root (string) – Root directory of dataset where directory cifar-10-batches-py exists or will be saved to if download is set to True.
  • train (bool, optional) – If True, creates dataset from training set, otherwise creates from test set.
  • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop
  • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.
  • download (bool, optional) – If true, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again.
  • __getitem__(index) – Returns (image, target), where target is the index of the target class (see the quick example below).
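A quick illustration of dataset indexing, reusing the imports from the top of this post. This is only a sketch: with no transform, __getitem__ returns a raw PIL image together with an integer class index (the name raw_trainset is used only for this example).

# Sketch: index the raw dataset directly; __getitem__ returns (PIL image, class index).
raw_trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True)

img, target = raw_trainset[0]     # same as raw_trainset.__getitem__(0)
print(type(img), img.size)        # <class 'PIL.Image.Image'> (32, 32)
print(target)                     # an integer class index in [0, 9]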
transform = transforms.Compose([
                    # transforms.Resize(32), # resize the shorter side to 32, keeping the aspect ratio
                    transforms.RandomHorizontalFlip(), # flip the image left-right with probability 0.5
                    transforms.ToTensor(), # convert the PIL image to a Tensor with values scaled to [0, 1]
                    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) # normalize each channel with mean=0.5 and std=0.5
                ])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
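Normalizing with mean 0.5 and std 0.5 simply maps ToTensor's [0, 1] range to [-1, 1]; those values are a convenient approximation rather than the dataset's true statistics. If you want the actual per-channel mean and std, a minimal sketch (stat_set and stat_loader are names used only for this example):

# Sketch: per-channel mean/std of the CIFAR10 training images (ToTensor only, no Normalize).
stat_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
                                        transform=transforms.ToTensor())
stat_loader = torch.utils.data.DataLoader(stat_set, batch_size=1000, num_workers=2)

channel_sum = torch.zeros(3)
channel_sq_sum = torch.zeros(3)
n_pixels = 0
for imgs, _ in stat_loader:
    channel_sum += imgs.sum(dim=[0, 2, 3])
    channel_sq_sum += (imgs ** 2).sum(dim=[0, 2, 3])
    n_pixels += imgs.shape[0] * imgs.shape[2] * imgs.shape[3]

mean = channel_sum / n_pixels
std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
print(mean, std)  # roughly (0.49, 0.48, 0.45) and (0.25, 0.24, 0.26)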

Visualizing Images

# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)  # .next() was removed from DataLoader iterators in recent PyTorch; use the built-in next()

# show images
vis = visdom.Visdom(env='test')
images = images / 2 + 0.5     # unnormalize
vis.images(images, nrow=images.shape[0], opts=dict(title='CIFAR10 images'))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

Output:
(Visdom window showing the batch of four CIFAR10 training images)

car plane horse  frog
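If no Visdom server is running, the same batch can also be shown with matplotlib (imported at the top but otherwise unused); a small sketch reusing the unnormalized images and labels from above:

# Sketch: display the same batch with matplotlib instead of Visdom.
grid = torchvision.utils.make_grid(images)          # images were already unnormalized above
plt.imshow(np.transpose(grid.numpy(), (1, 2, 0)))   # CHW -> HWC for imshow
plt.title(' '.join(classes[labels[j]] for j in range(4)))
plt.axis('off')
plt.show()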

Building the Network

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 6, 5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(6, 16, 5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(inplace=True),
            nn.Linear(120, 84),
            nn.ReLU(inplace=True),
            nn.Linear(84, 10),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.shape[0], -1)
        x = self.classifier(x)
        
        return x
    
net = Net()
net = net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999))
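The classifier's input size 16 * 5 * 5 follows from the feature extractor: a 32×32 input shrinks to 28×28 after the first 5×5 convolution, 14×14 after pooling, 10×10 after the second convolution, and 5×5 after the second pooling, with 16 output channels. A quick sanity check (the dummy tensor below is only for illustration):

# Sketch: verify the flattened feature size that feeds the classifier.
with torch.no_grad():
    dummy = torch.rand(1, 3, 32, 32).to(device)
    feats = net.features(dummy)
    print(feats.shape)  # torch.Size([1, 16, 5, 5]) -> 16 * 5 * 5 = 400 features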

Visualizing the Network Structure

dummy_input = torch.rand(13, 3, 32, 32).to(device)  # keep the dummy input on the same device as net
with SummaryWriter('runs/exp-1') as w:
    w.add_graph(net, (dummy_input,))
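The logged graph can then be inspected by running tensorboard --logdir runs in a terminal and opening the printed URL (GRAPHS tab) in a browser.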

(TensorBoard GRAPHS view of the Net model)

Training the Network

for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    epoch_loss = 0.0
    for i, data in enumerate(trainloader):
        # get the inputs and move them to the same device as the network
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        epoch_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch, i + 1, running_loss / 2000))
            
            with SummaryWriter('runs/exp-1') as w:
                w.add_scalar('TrainLoss/epoch' + str(epoch), running_loss / 2000, i // 2000)             
            running_loss = 0.0
            
    with SummaryWriter('runs/exp-1') as w:
        w.add_scalar('TrainLoss/all', epoch_loss / len(trainloader), epoch)
        epoch_loss = 0.0
    
print('Finished Training')
[0,  2000] loss: 1.872
[0,  4000] loss: 1.593
[0,  6000] loss: 1.499
[0,  8000] loss: 1.434
[0, 10000] loss: 1.391
[0, 12000] loss: 1.354
[1,  2000] loss: 1.302
[1,  4000] loss: 1.298
[1,  6000] loss: 1.295
[1,  8000] loss: 1.244
[1, 10000] loss: 1.256
[1, 12000] loss: 1.240
Finished Training

(TensorBoard scalar plots of the logged training loss)
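Before moving on to evaluation, it can be handy to persist the trained weights; a minimal sketch, where the filename cifar_net.pth is just an example and not from the original post:

# Sketch: save the trained weights and restore them into a fresh model instance.
PATH = './cifar_net.pth'  # example path
torch.save(net.state_dict(), PATH)

net_restored = Net().to(device)
net_restored.load_state_dict(torch.load(PATH, map_location=device))
net_restored.eval()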

Testing the Network

correct = 0
total = 0

net.eval()  # switch the network (built as `net` above) to evaluation mode

with torch.no_grad():
    for data in testloader:
        images, labels = data
        images, labels = images.to(device), labels.to(device)  # same device as the network
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
Accuracy of the network on the 10000 test images: 55 %
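The overall number hides which classes the network actually gets right; a minimal sketch of per-class accuracy, reusing net, testloader, classes and device from above:

# Sketch: accuracy broken down by class.
class_correct = [0] * 10
class_total = [0] * 10

with torch.no_grad():
    for images, labels in testloader:
        images, labels = images.to(device), labels.to(device)
        _, predicted = torch.max(net(images), 1)
        for label, pred in zip(labels, predicted):
            class_total[label] += 1
            class_correct[label] += int(label == pred)

for i in range(10):
    print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))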