AlexNet Neural Network (PyTorch)

An AlexNet neural network implemented in PyTorch.

The AlexNet class contains the convolutional part of the original AlexNet architecture (features) as well as the fully connected part (classifier). In the forward function, features are first extracted by the convolutional and max-pooling layers, then flattened and passed through the fully connected layers for classification. With the standard 224×224 input, the feature extractor produces 256 feature maps of size 6×6, which is why the first linear layer expects 256 * 6 * 6 = 9216 inputs. In a real application, you would also load pretrained weights, define a loss function and an optimizer, and write training and validation loops to complete the full deep learning workflow; sketches of these steps are given after the model output below.

import torch
import torch.nn as nn

class AlexNet(nn.Module):
    def __init__(self, num_classes):
        super(AlexNet, self).__init__()

        # Convolutional feature extractor
        self.features = nn.Sequential(
            # First convolutional layer + ReLU activation
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),

            # Second convolutional layer + activation
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),

            # Third convolutional layer + activation
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),

            # Fourth convolutional layer + activation
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),

            # Fifth convolutional layer + activation
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )

        # Fully connected classifier
        self.classifier = nn.Sequential(
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),

            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),

            # Output layer sized to the number of classes
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        # Flatten the 256x6x6 feature maps into one vector per sample
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
        return x

# Create an instance
model = AlexNet(num_classes=1000)

# Print a summary of the model structure
print(model)

Output:

AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace=True)
    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace=True)
    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=9216, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
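
Because the first linear layer hard-codes 256 * 6 * 6 inputs, the model expects 224×224 images. A quick dummy forward pass (a minimal sketch, not part of the original code) confirms the output shape:

import torch

# A random batch of 4 RGB images at the 224x224 size this AlexNet expects
dummy = torch.randn(4, 3, 224, 224)
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)  # torch.Size([4, 1000])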
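
For the pretrained-weights step mentioned earlier, one option is torchvision's built-in AlexNet (this sketch assumes torchvision >= 0.13 for the weights enum). Note that torchvision's classifier orders Dropout/Linear/ReLU differently from the class above, so the two state dicts are not directly interchangeable:

import torchvision.models as models

# Load torchvision's AlexNet with ImageNet-1k pretrained weights
# (requires torchvision >= 0.13; older versions use pretrained=True instead)
pretrained = models.alexnet(weights=models.AlexNet_Weights.IMAGENET1K_V1)
print(pretrained.features[0])  # Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))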
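
Finally, a minimal training-loop sketch to illustrate the loss/optimizer/loop steps mentioned earlier. The cross-entropy loss, SGD hyperparameters, and the random stand-in data are all illustrative assumptions; in practice you would build train_loader from a real dataset (e.g. torchvision.datasets.ImageFolder):

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AlexNet(num_classes=1000).to(device)

criterion = nn.CrossEntropyLoss()  # standard loss for multi-class classification
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

# Stand-in data so the loop runs end to end; replace with a real DataLoader
images = torch.randn(8, 3, 224, 224)
labels = torch.randint(0, 1000, (8,))
train_loader = DataLoader(TensorDataset(images, labels), batch_size=4)

num_epochs = 2  # assumed hyperparameter, purely illustrative
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for batch_images, batch_labels in train_loader:
        batch_images = batch_images.to(device)
        batch_labels = batch_labels.to(device)

        optimizer.zero_grad()                    # clear accumulated gradients
        outputs = model(batch_images)            # forward pass
        loss = criterion(outputs, batch_labels)  # compute loss
        loss.backward()                          # backpropagation
        optimizer.step()                         # parameter update
        running_loss += loss.item()
    print(f"epoch {epoch}: loss {running_loss / len(train_loader):.4f}")

A validation loop follows the same pattern with model.eval() and torch.no_grad(), computing accuracy instead of stepping the optimizer.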