| import torch import torch.nn as nn import pandas as pd from sklearn.preprocessing import MinMaxScaler from torch.utils.data import Dataset, DataLoader
class StockDataset(Dataset):
    """Sliding-window dataset over a 1-D (or column-vector) series.

    Sample ``i`` is the window of ``look_back`` consecutive values starting
    at ``i``; its target is the single value that immediately follows.
    """

    def __init__(self, p_data, look_back=10):
        self.data = p_data
        self.look_back = look_back

    def __len__(self):
        # One sample per position that still has a full window plus a target.
        return len(self.data) - self.look_back

    def __getitem__(self, idx):
        window = self.data[idx : idx + self.look_back]
        target = self.data[idx + self.look_back]
        return (
            torch.tensor(window, dtype=torch.float),
            torch.tensor(target, dtype=torch.float),
        )
# --- Data preparation -------------------------------------------------
df = pd.read_csv('stock_data.csv')
data = df['close'].values.reshape(-1, 1)

# BUG FIX (data leakage): the original fit the scaler on the FULL series
# before splitting, so the test period's min/max leaked into the training
# data's scaling. Split first, fit on the training slice only, and apply
# the same (train-derived) transform to the test slice.
train_size = int(len(data) * 0.8)
train_raw, test_raw = data[:train_size], data[train_size:]

scaler = MinMaxScaler()
train_data = scaler.fit_transform(train_raw)  # fit + scale on train only
test_data = scaler.transform(test_raw)        # reuse train min/max

dataset = StockDataset(train_data)
dataloader = DataLoader(dataset=dataset, batch_size=64, shuffle=True)
class LSTMModel(nn.Module):
    """Stacked LSTM with a linear head on the last time step.

    Expects input of shape ``(batch, seq_len, input_dim)`` and returns a
    tensor of shape ``(batch, output_dim)``.
    """

    def __init__(self, input_dim=1, hidden_dim=100, output_dim=1, n_layers=2):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        # BUG FIX: `batch_first` was set to `output_dim` (an unrelated
        # integer); it must be the boolean flag telling the LSTM that
        # dim 0 of the input is the batch dimension.
        self.lstm = nn.LSTM(input_dim, hidden_dim, n_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # BUG FIX: the original called `x.size0(0)` — an AttributeError
        # at runtime; the correct call is `x.size(0)`. Also create the
        # initial states on the input's device/dtype so the model works
        # off-CPU without modification.
        batch = x.size(0)
        h0 = torch.zeros(self.n_layers, batch, self.hidden_dim,
                         device=x.device, dtype=x.dtype)
        c0 = torch.zeros(self.n_layers, batch, self.hidden_dim,
                         device=x.device, dtype=x.dtype)
        out, _ = self.lstm(x, (h0, c0))
        # Predict from the hidden state of the final time step only.
        return self.fc(out[:, -1, :])


model = LSTMModel()
# --- Training ---------------------------------------------------------
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

num_epochs = 100
for epoch in range(num_epochs):
    for inputs, targets in dataloader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
    # BUG FIX: the original f-string contained `{epoch + 1/100}`, which by
    # operator precedence evaluates to `epoch + 0.01` and printed e.g.
    # "Epoch [0.01]" instead of the intended "Epoch [1/100]".
    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')