import torch
from torch import nn, optim
# Vocabulary: token -> integer id.
word_to_index = {'很棒': 0, '不': 1, '喜欢': 2, '电影': 3, '这部': 4}
# Reverse lookup (id -> token), derived so the two maps can never drift apart.
index_to_word = {index: word for word, index in word_to_index.items()}
class SentimentClassifier(nn.Module):
    """Bag-of-embeddings binary sentiment classifier.

    Embeds each token id, mean-pools the embeddings over the sequence,
    and runs the pooled vector through a two-layer MLP that emits a
    single raw logit (pair with BCEWithLogitsLoss / sigmoid).
    """

    def __init__(self, vocabulary_size, embedding_dim, hidden_dim, output_dim):
        super(SentimentClassifier, self).__init__()
        self.embedding = nn.Embedding(vocabulary_size, embedding_dim)
        self.fc1 = nn.Linear(embedding_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, text):
        # text: LongTensor of token ids, shape (batch, seq_len).
        # BUG FIX: the original called torch.embedding(text), which is
        # missing its required weight argument and ignores the trained
        # nn.Embedding layer entirely; use the module's own layer.
        embedded = self.embedding(text)        # (batch, seq_len, embedding_dim)
        # Mean-pool over the sequence so fc1 sees one vector per example;
        # feeding the 3-D tensor straight in would produce a per-token
        # output of the wrong shape for a per-review label.
        pooled = embedded.mean(dim=1)          # (batch, embedding_dim)
        hidden = torch.relu(self.fc1(pooled))
        output = self.fc2(hidden)              # (batch, output_dim) raw logits
        return output
# Model hyperparameters.
vocabulary_size = len(word_to_index)  # derived from the vocabulary above
embedding_dim = 10
hidden_dim = 50
output_dim = 1  # single logit for binary sentiment

model = SentimentClassifier(vocabulary_size, embedding_dim, hidden_dim, output_dim)
# BCE on raw logits (numerically safer than sigmoid + BCELoss);
# Adam with the usual small learning rate.
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.BCEWithLogitsLoss()
# Toy training data.  BUG FIX: the original loop iterated over `reviews`
# and `labels`, which were never defined anywhere in the file (NameError).
# Reviews are token lists — raw strings would be iterated character by
# character and never match multi-character vocabulary words like '很棒'.
reviews = [
    ['这部', '电影', '很棒'],
    ['不', '喜欢', '这部', '电影'],
]
labels = [1.0, 0.0]  # 1.0 = positive, 0.0 = negative

num_epoch = 10
for epoch in range(num_epoch):
    for review, label in zip(reviews, labels):
        # BUG FIX: the original wrote `[word_to_index for word in review]`,
        # producing a list of dicts; look up each token's id instead.
        review_indices = [word_to_index[word] for word in review]
        review_tensor = torch.tensor(review_indices, dtype=torch.long).unsqueeze(0)  # (1, seq_len)
        # Shape (1, 1) so the target matches the model's (batch, 1) logit
        # output, as BCEWithLogitsLoss requires.
        label_tensor = torch.tensor([[label]], dtype=torch.float32)
        output = model(review_tensor)
        loss = criterion(output, label_tensor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Report the loss of the last batch seen in this epoch.
    print(f'Epoch [{epoch + 1}/{num_epoch}], Loss: {loss.item()}')
def _tokenize(text):
    """Greedily segment *text* into known vocabulary words, longest match first.

    Raises KeyError when no vocabulary word matches at the current position.
    """
    lengths = sorted({len(word) for word in word_to_index}, reverse=True)
    tokens = []
    i = 0
    while i < len(text):
        for size in lengths:
            candidate = text[i:i + size]
            if candidate in word_to_index:
                tokens.append(candidate)
                i += size
                break
        else:
            raise KeyError(f'unknown token starting at {text[i]!r}')
    return tokens


def predict(review):
    """Classify a review as positive ('正面') or negative ('负面').

    Accepts either a pre-tokenized list of vocabulary words or a raw
    string.  BUG FIX: the original iterated the string character by
    character, which can never match multi-character vocabulary words
    such as '很棒' and always raised KeyError; raw strings are now
    segmented by greedy longest-prefix matching against the vocabulary.
    """
    tokens = _tokenize(review) if isinstance(review, str) else list(review)
    review_indices = [word_to_index[word] for word in tokens]
    review_tensor = torch.tensor(review_indices, dtype=torch.long).unsqueeze(0)  # (1, seq_len)
    output = model(review_tensor)
    # Threshold the sigmoid probability at 0.5 for the binary decision.
    prediction = torch.sigmoid(output) >= 0.5
    return '正面' if prediction.item() == 1 else '负面'
# Smoke test: classify one sample review and show the result.
test_review = '这部电影很棒'
result = predict(test_review)
print(result)