import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Hyperparameters (MNIST images are 28x28; each image is treated as a
# sequence of 28 rows with 28 features per step, and there are 10 digit classes)
sequence_length = 28
input_size = 28
hidden_size = 20
num_layers = 1
num_classes = 10
num_epochs = 100
batch_size = 10
learning_rate = 0.01
# Simple recurrent network with a fully connected classification head
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)
    def forward(self, x):
        # Initial hidden state; nn.RNN takes a single hidden tensor
        # (a separate cell state c0 is only needed for LSTMs)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.rnn(x, h0)
        # Classify using the hidden state of the last time step
        out = self.fc(out[:, -1, :])
        return out
model = RNN(input_size, hidden_size, num_layers, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# MNIST, converted to tensors so the loaders yield (batch, 1, 28, 28) floats
train_dataset = datasets.MNIST(root='./data', train=True,
                               transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root='./data', train=False,
                              transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=batch_size, shuffle=False)
for epoch in range(num_epochs):
    for i, (inputs, labels) in enumerate(train_loader):
        # Reshape (batch, 1, 28, 28) images to (batch, seq_len, input_size)
        inputs = inputs.reshape(-1, sequence_length, input_size)
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, i + 1, len(train_loader), loss.item()))
print('Training finished.')
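
# --- Test-set evaluation: a minimal sketch, not part of the original script.
# It assumes the test_loader defined above and reports overall accuracy. ---
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for inputs, labels in test_loader:
        inputs = inputs.reshape(-1, sequence_length, input_size)
        outputs = model(inputs)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test accuracy: {:.2f}%'.format(100 * correct / total))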