AlexNet Practice Exercise

Code

File Preprocessing

import os
from shutil import copy, rmtree
import random


def mk_file(file_path: str):
    if os.path.exists(file_path):
        # if the folder already exists, delete it first, then recreate it
        rmtree(file_path)
    os.makedirs(file_path)


def main():
    # make the random split reproducible
    random.seed(0)

    # move 10% of the dataset into the validation set
    split_rate = 0.1

    # points to your extracted flower_photos folder
    cwd = os.getcwd()
    data_root = os.path.join(cwd, "flower_data")
    origin_flower_path = os.path.join(data_root, "flower_photos")
    assert os.path.exists(origin_flower_path), "path '{}' does not exist.".format(origin_flower_path)

    flower_class = [cla for cla in os.listdir(origin_flower_path)
                    if os.path.isdir(os.path.join(origin_flower_path, cla))]

    # create the folder that holds the training set
    train_root = os.path.join(data_root, "train")
    mk_file(train_root)
    for cla in flower_class:
        # create a folder for each class
        mk_file(os.path.join(train_root, cla))

    # create the folder that holds the validation set
    val_root = os.path.join(data_root, "val")
    mk_file(val_root)
    for cla in flower_class:
        # create a folder for each class
        mk_file(os.path.join(val_root, cla))

    for cla in flower_class:
        cla_path = os.path.join(origin_flower_path, cla)
        images = os.listdir(cla_path)
        num = len(images)
        # randomly sample the images that go into the validation set
        eval_index = random.sample(images, k=int(num * split_rate))
        for index, image in enumerate(images):
            if image in eval_index:
                # copy files assigned to the validation set into the matching directory
                image_path = os.path.join(cla_path, image)
                new_path = os.path.join(val_root, cla)
                copy(image_path, new_path)
            else:
                # copy files assigned to the training set into the matching directory
                image_path = os.path.join(cla_path, image)
                new_path = os.path.join(train_root, cla)
                copy(image_path, new_path)
            print("\r[{}] processing [{}/{}]".format(cla, index + 1, num), end="")  # progress bar
        print()

    print("processing done!")


if __name__ == '__main__':
    main()
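
After the script finishes, the directory layout should look roughly like this (a sketch; it assumes flower_data sits in the working directory and uses this dataset's class names):

flower_data/
├── flower_photos/   # original images, left untouched (files are copied, not moved)
├── train/
│   ├── daisy/
│   ├── dandelion/
│   └── ...
└── val/
    ├── daisy/
    ├── dandelion/
    └── ...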

model.py

# imports
import torch.nn as nn
import torch


# define the network
class AlexNet(nn.Module):
    def __init__(self, num_classes=1000, init_weights=False):
        super(AlexNet, self).__init__()
        # nn.Sequential() packs the layers into a single module and keeps the code compact
        # To speed up training, this code uses only half of the original network's
        # parameters, i.e. one half of the structure in the paper (the paper used two
        # GPUs; my machine only has one). I later trained the full network as well,
        # and the accuracy was almost the same as with half the parameters.
        self.features = nn.Sequential(  # convolutional layers extract image features
            nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),  # input[3, 224, 224]  output[48, 55, 55]
            # padding=2 pads two rows/columns of zeros on each of the four sides;
            # padding=(a, b): a pads rows of zeros top/bottom, b pads columns left/right.
            # To reproduce the paper's exact padding, use nn.ZeroPad2d((left, right, top, bottom)).
            nn.ReLU(inplace=True),  # modify the input in place to save memory
            nn.MaxPool2d(kernel_size=3, stride=2),          # output[48, 27, 27]
            nn.Conv2d(48, 128, kernel_size=5, padding=2),   # output[128, 27, 27]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),          # output[128, 13, 13]
            nn.Conv2d(128, 192, kernel_size=3, padding=1),  # output[192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, padding=1),  # output[192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, padding=1),  # output[128, 13, 13]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),          # output[128, 6, 6]
        )
        self.classifier = nn.Sequential(  # fully connected layers classify the image
            nn.Dropout(p=0.5),  # Dropout randomly deactivates neurons; the default rate is 0.5
            nn.Linear(128 * 6 * 6, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),  # num_classes: number of output classes
        )
        # conv/pooling layers extract features and fully connected layers classify;
        # writing them as two modules makes each easy to call separately
        if init_weights:  # initialize the weights
            self._initialize_weights()

    # forward pass
    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)  # flatten before the fully connected layers
        x = self.classifier(x)
        return x  # return the predictions

    # weight initialization; PyTorch actually initializes weights automatically when building a network
    def _initialize_weights(self):
        for m in self.modules():  # from the parent class nn.Module; iterates over all submodules
            if isinstance(m, nn.Conv2d):  # convolutional layer
                nn.init.kaiming_normal_(m.weight, mode='fan_out',  # Kaiming (He) normal initialization
                                        nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)  # initialize the bias to 0
            elif isinstance(m, nn.Linear):  # fully connected layer
                nn.init.normal_(m.weight, 0, 0.01)  # normal-distribution initialization, std=0.01
                nn.init.constant_(m.bias, 0)  # initialize the bias to 0
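
A quick way to verify the feature-map sizes annotated in the comments above is a shape check (a minimal sketch; the random input and batch size of 1 are arbitrary):

import torch
from model import AlexNet

net = AlexNet(num_classes=5)
x = torch.randn(1, 3, 224, 224)  # dummy batch: one 3x224x224 image
print(net.features(x).shape)     # expected: torch.Size([1, 128, 6, 6])
print(net(x).shape)              # expected: torch.Size([1, 5])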

train.py

# imports
import os
import sys
import json

import torch
import torch.nn as nn
from torchvision import transforms, datasets, utils
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim
from tqdm import tqdm

from model import AlexNet


def main():
    # device selection: use the first GPU if one is available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    # data preprocessing
    data_transform = {
        # training set pipeline
        "train": transforms.Compose([transforms.RandomResizedCrop(224),  # random crop, then resize to 224x224
                                     transforms.RandomHorizontalFlip(),  # horizontal flip with probability 0.5
                                     transforms.ToTensor(),  # converts H*W*C in [0, 255] to C*H*W float in [0, 1]
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),  # normalize
        # validation set pipeline
        "val": transforms.Compose([transforms.Resize((224, 224)),  # must be (224, 224), cannot be 224
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # dataset root; "../.." goes up two directory levels
    image_path = os.path.join(data_root, "data_set", "flower_data")  # flower data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(image_path)  # raise an error if missing

    # load the training set and apply its preprocessing
    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),  # training set path
                                         transform=data_transform["train"])  # preprocessing pipeline
    train_num = len(train_dataset)  # number of training images

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx  # class-name-to-index mapping
    cla_dict = dict((val, key) for key, val in flower_list.items())  # invert the key/value pairs
    # now {0:'daisy', 1:'dandelion', 2:'roses', 3:'sunflower', 4:'tulips'}

    # save the index dictionary as JSON
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    # batch size
    batch_size = 32
    # number of worker processes used for data loading
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    # training set loader
    train_loader = torch.utils.data.DataLoader(train_dataset,  # dataset to load
                                               batch_size=batch_size,
                                               shuffle=True,  # shuffle the data
                                               num_workers=nw)

    # load the validation set and apply its preprocessing
    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),  # validation set path
                                            transform=data_transform["val"])  # preprocessing pipeline
    val_num = len(validate_dataset)  # number of validation images

    # validation set loader
    validate_loader = torch.utils.data.DataLoader(validate_dataset,  # the loaded validation set
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(train_num,
                                                                            val_num))
    # # quick visual sanity check of one batch
    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.__next__()  # .next() fails in some versions; use __next__()
    #
    # def imshow(img):
    #     img = img / 2 + 0.5  # unnormalize
    #     npimg = img.numpy()
    #     plt.imshow(np.transpose(npimg, (1, 2, 0)))
    #     plt.show()
    #
    # print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
    # imshow(utils.make_grid(test_image))

    net = AlexNet(num_classes=5, init_weights=True)  # instantiate the network (5 output classes, initialize weights)

    net.to(device)  # move the network to the selected device (GPU/CPU)
    loss_function = nn.CrossEntropyLoss()  # cross-entropy loss
    # pata = list(net.parameters())  # inspect the model parameters
    optimizer = optim.Adam(net.parameters(), lr=0.0002)  # Adam optimizer, learning rate 0.0002

    epochs = 10  # number of epochs
    save_path = './AlexNet.pth'  # where the weights are saved
    best_acc = 0.0  # best validation accuracy so far
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # training
        net.train()  # enable dropout during training (it is disabled at evaluation time)
        running_loss = 0.0  # accumulate the loss for averaging
        train_bar = tqdm(train_loader, file=sys.stdout)  # progress bar
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()  # clear the gradients
            outputs = net(images.to(device))  # forward pass
            loss = loss_function(outputs, labels.to(device))  # compute the loss
            loss.backward()  # backward pass
            optimizer.step()  # the optimizer updates the parameters

            # show training progress
            running_loss += loss.item()
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # validation
        net.eval()  # disable Dropout during validation
        acc = 0.0  # accumulate the number of correct predictions per epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]  # the index (label) of the max output is the prediction
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()  # count correct predictions

        val_accurate = acc / val_num  # validation accuracy
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))
        # save the weights of the epoch with the highest validation accuracy
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')


if __name__ == '__main__':
    main()
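
Note that nn.CrossEntropyLoss already combines log-softmax with negative log-likelihood, which is why the network outputs raw logits during training and softmax is only applied at inference time in predict.py. A minimal sketch of that equivalence (the logits below are made-up numbers):

import torch
import torch.nn as nn

logits = torch.tensor([[2.0, 0.5, -1.0]])  # raw network output for one sample
target = torch.tensor([0])                 # ground-truth class index
loss = nn.CrossEntropyLoss()(logits, target)
manual = -torch.log_softmax(logits, dim=1)[0, 0]  # log-softmax + NLL by hand
print(loss.item(), manual.item())  # both print ~0.2415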

predict.py

import torch
from model import AlexNet
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
import json

# preprocessing
data_transform = transforms.Compose(
    [transforms.Resize((224, 224)),
     transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# load image
img = Image.open("totest.jpg")
plt.imshow(img)  # show the test image
# [N, C, H, W]
img = data_transform(img)
# expand batch dimension
img = torch.unsqueeze(img, dim=0)

# read class_indict
try:
    json_file = open('./class_indices.json', 'r')
    class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

# create model
model = AlexNet(num_classes=5)
# load the trained model weights
model_weight_path = "./AlexNet.pth"
model.load_state_dict(torch.load(model_weight_path))

model.eval()  # disable Dropout
with torch.no_grad():
    # predict class
    output = torch.squeeze(model(img))  # squeeze out the batch dimension
    predict = torch.softmax(output, dim=0)
    predict_cla = torch.argmax(predict).numpy()
print(class_indict[str(predict_cla)], predict[predict_cla].item())
plt.show()
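
To inspect the whole probability distribution instead of only the top-1 class, a small loop can be appended inside the no_grad block (a sketch that reuses the predict and class_indict variables above):

for i in range(len(predict)):
    print("class: {:10}  prob: {:.3f}".format(class_indict[str(i)],
                                              predict[i].item()))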

Problems Encountered and Solutions

Dataset Download and Loading

  1. Dataset download.

  2. Dataset splitting: some datasets require you to split out the training, validation, and test sets yourself.

  3. Loading path (see the example after this list):

    data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # dataset root; "../.." goes up two directory levels
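
For example (a hypothetical layout): if the script runs from /home/user/project/scripts/alexnet, then "../.." resolves to /home/user/project, so the dataset is expected at /home/user/project/data_set/flower_data:

import os

data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))
print(os.path.join(data_root, "data_set", "flower_data"))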

A Strange Error

Solving: AttributeError: '_MultiProcessingDataLoaderIter' object has no attribute 'next'

This error means the method cannot be found.

Cause:

test_image, test_label = test_data_iter.next()

In this line, **.next does not exist; changing it to .__next__** fixes it.
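
The fixed line, as a sketch (the builtin next() is equivalent and slightly more idiomatic):

test_image, test_label = test_data_iter.__next__()
# or equivalently:
test_image, test_label = next(test_data_iter)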