# MNIST classifier: a two-layer fully connected network trained with SGD.
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Hyper-parameters for the MNIST fully connected classifier.
input_size = 784       # 28 x 28 grayscale images, flattened
hidden_size = 500
num_classes = 10       # digits 0-9
num_epochs = 5
batch_size = 100
# Fix: the original name was the typo `batch_sise`; keep it as an alias so
# existing references elsewhere in the file continue to work.
batch_sise = batch_size
learning_rate = 0.001
# Preprocessing pipeline: PIL image -> float tensor in [0, 1], then
# normalized with mean 0.5 / std 0.5 to roughly [-1, 1].
_preprocessing_steps = [
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
]
transform = transforms.Compose(_preprocessing_steps)
# MNIST train/test splits. Fix: download=True fetches the data on first
# run instead of raising RuntimeError when ./data does not exist yet.
train_dataset = datasets.MNIST(root='./data', train=True,
                               transform=transform, download=True)
test_dataset = datasets.MNIST(root='./data', train=False,
                              transform=transform, download=True)
# Batched iterators over the datasets.
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_sise, shuffle=True)
# Fix: the original name was the typo `tarin_loader`; keep it as an alias
# so existing references elsewhere in the file continue to work.
tarin_loader = train_loader
# Fix: evaluation is order-independent, so the test loader need not shuffle.
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=batch_sise, shuffle=False)
class CNN(nn.Module):
    """Two-layer fully connected classifier for MNIST digits.

    NOTE(review): despite the name, this is a plain MLP (no convolutions);
    the class name is kept unchanged for backward compatibility.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(CNN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Fix: the original misspelled this method as `forwar`, so
        # nn.Module.__call__ could never dispatch to it and model(x) raised.
        # Flatten (batch, 1, 28, 28) images to (batch, 784); already-flat
        # input passes through unchanged.
        x = x.reshape(x.size(0), -1)
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out
# Loss: cross-entropy over the 10 digit classes.
criterion = nn.CrossEntropyLoss()
# The classifier itself, plus plain SGD over its parameters.
model = CNN(input_size, hidden_size, num_classes)
optimizer = optim.SGD(params=model.parameters(), lr=learning_rate)
# Training loop: num_epochs full passes over the training set.
# (`tarin_loader` is the loader's original, misspelled name.)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(tarin_loader):
        # Fix: flatten (batch, 1, 28, 28) images to (batch, 784) — the
        # fully connected model cannot consume 4-D image tensors directly.
        images = images.reshape(-1, 28 * 28)

        # Forward pass.
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(tarin_loader)}], Loss: {loss.item():.4f}')
# Evaluation: accuracy on the held-out test set, with autograd disabled.
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        # Fix: flatten images the same way as in training.
        images = images.reshape(-1, 28 * 28)
        outputs = model(images)
        # `.data` is legacy; under no_grad() the tensor can be used directly.
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print(f'Accuracy of the network on the 10000 test images: {100 * correct / total}%')
# Fix: corrected the misspelled completion message ('finshied').
print('Training finished.')