import numpy as np
import pandas as pd
data = pd.read_csv('/Users/liming/Downloads/review_star.csv')
print(data.shape)
data.head()
(10000, 3)
| | reviewid | reviewbody | star |
|---|---|---|---|
| 0 | 661655779 | 感谢大众点评的vip会员卷。很多好吃的都打折。去天河城吃饭顺便把喝的换了。两杯茶才9.9。简... | 40 |
| 1 | 661662037 | (9月26日就餐)这家的服务态度真是很奇葩了:在等餐时看到服务员在端豆浆上桌前,突然在碗里发... | 5 |
| 2 | 661662167 | 除了贵 没毛病 | 35 |
| 3 | 661674219 | 亲民,家门口,味道可以,主要是在家附近,吃点家常菜还是比较方便的,不做饭就在这里吃点,做的也... | 45 |
| 4 | 661677846 | 之前在会展中心店按过,觉得很不错,说上梅林也有一家店,刚好在附近办事,所以来了这里,装修风格... | 50 |
# Define a function that derives a sentiment label from the user's star rating
def make_label(star):
    if star >= 30:
        return 1
    else:
        return 0
# Use apply to create the new column
data["sentiment"] = data.star.apply(make_label)
data.head()
| | reviewid | reviewbody | star | sentiment |
|---|---|---|---|---|
| 0 | 661655779 | 感谢大众点评的vip会员卷。很多好吃的都打折。去天河城吃饭顺便把喝的换了。两杯茶才9.9。简... | 40 | 1 |
| 1 | 661662037 | (9月26日就餐)这家的服务态度真是很奇葩了:在等餐时看到服务员在端豆浆上桌前,突然在碗里发... | 5 | 0 |
| 2 | 661662167 | 除了贵 没毛病 | 35 | 1 |
| 3 | 661674219 | 亲民,家门口,味道可以,主要是在家附近,吃点家常菜还是比较方便的,不做饭就在这里吃点,做的也... | 45 | 1 |
| 4 | 661677846 | 之前在会展中心店按过,觉得很不错,说上梅林也有一家店,刚好在附近办事,所以来了这里,装修风格... | 50 | 1 |
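Before going further it is worth checking how balanced the derived labels are; a minimal sketch using only the `sentiment` column just created:
# Sketch: inspect the class balance of the derived labels
print(data["sentiment"].value_counts())                 # absolute counts per class
print(data["sentiment"].value_counts(normalize=True))  # class proportions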
data["reviewbody"] = data["reviewbody"].astype(str)
data.head()
| | reviewid | reviewbody | star | sentiment |
|---|---|---|---|---|
| 0 | 661655779 | 感谢大众点评的vip会员卷。很多好吃的都打折。去天河城吃饭顺便把喝的换了。两杯茶才9.9。简... | 40 | 1 |
| 1 | 661662037 | (9月26日就餐)这家的服务态度真是很奇葩了:在等餐时看到服务员在端豆浆上桌前,突然在碗里发... | 5 | 0 |
| 2 | 661662167 | 除了贵 没毛病 | 35 | 1 |
| 3 | 661674219 | 亲民,家门口,味道可以,主要是在家附近,吃点家常菜还是比较方便的,不做饭就在这里吃点,做的也... | 45 | 1 |
| 4 | 661677846 | 之前在会展中心店按过,觉得很不错,说上梅林也有一家店,刚好在附近办事,所以来了这里,装修风格... | 50 | 1 |
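A quick sanity check, ideally run before the cast, shows whether any review bodies were actually missing (a sketch, using only the columns above):
# Sketch: check for missing or empty review bodies
print(data["reviewbody"].isna().sum())               # true NaNs (always 0 after astype(str))
print((data["reviewbody"].str.strip() == "").sum())  # empty reviews, if any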
# First, segment the review text into words with jieba
import jieba
def chinese_word_cut(mytext):
    return " ".join(jieba.cut(mytext))
data['cut_comment'] = data["reviewbody"].apply(chinese_word_cut)
data.head()
Building prefix dict from the default dictionary ...
Loading model from cache /var/folders/zd/qhg48cw17_ncqf0rl48wz5rh0000gp/T/jieba.cache
Loading model cost 0.566 seconds.
Prefix dict has been built successfully.
| | reviewid | reviewbody | star | sentiment | cut_comment |
|---|---|---|---|---|---|
| 0 | 661655779 | 感谢大众点评的vip会员卷。很多好吃的都打折。去天河城吃饭顺便把喝的换了。两杯茶才9.9。简... | 40 | 1 | 感谢 大众 点评 的 vip 会员 卷 。 很多 好吃 的 都 打折 。 去 天河城 吃饭 ... |
| 1 | 661662037 | (9月26日就餐)这家的服务态度真是很奇葩了:在等餐时看到服务员在端豆浆上桌前,突然在碗里发... | 5 | 0 | ( 9 月 26 日 就餐 ) 这家 的 服务态度 真是 很 奇葩 了 : 在 等 餐时 看... |
| 2 | 661662167 | 除了贵 没毛病 | 35 | 1 | 除了 贵 没 毛病 |
| 3 | 661674219 | 亲民,家门口,味道可以,主要是在家附近,吃点家常菜还是比较方便的,不做饭就在这里吃点,做的也... | 45 | 1 | 亲民 , 家门口 , 味道 可以 , 主要 是 在家 附近 , 吃点 家常菜 还是 比较 方... |
| 4 | 661677846 | 之前在会展中心店按过,觉得很不错,说上梅林也有一家店,刚好在附近办事,所以来了这里,装修风格... | 50 | 1 | 之前 在 会展中心 店 按 过 , 觉得 很 不错 , 说 上 梅林 也 有 一家 店 , ... |
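jieba's segmentation can be tuned for this domain; as a hedged sketch (the dictionary file path is hypothetical), a user dictionary keeps domain-specific terms in one piece:
# Sketch: optional jieba customization (file path is hypothetical)
# jieba.load_userdict("userdict.txt")  # one entry per line: word [freq] [POS]
jieba.add_word("大众点评")             # or register individual terms in code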
maxlen = 100   # truncate each review to this many tokens
min_count = 5  # drop tokens seen fewer times than this; the simplest form of dimensionality reduction
content = []
for i in data['cut_comment']:
    content.extend(i.split())  # split on spaces to count words, not individual characters
abc = pd.Series(content).value_counts()
abc = abc[abc >= min_count]
abc[:] = range(1, len(abc)+1)
abc[''] = 0  # reserve index 0 for the empty-string padding token
word_set = set(abc.index)
def doc2num(s, maxlen):
    s = [i for i in s.split() if i in word_set]   # keep only in-vocabulary tokens
    s = s[:maxlen] + ['']*max(0, maxlen-len(s))   # truncate or pad to maxlen
    return list(abc[s])
data['doc2num'] = data['cut_comment'].apply(lambda s: doc2num(s, maxlen))
data.head()
| | reviewid | reviewbody | star | sentiment | cut_comment | doc2num |
|---|---|---|---|---|---|---|
| 0 | 661655779 | 感谢大众点评的vip会员卷。很多好吃的都打折。去天河城吃饭顺便把喝的换了。两杯茶才9.9。简... | 40 | 1 | 感谢 大众 点评 的 vip 会员 卷 。 很多 好吃 的 都 打折 。 去 天河城 吃饭 ... | [53, 401, 1, 32, 435, 1, 19, 202, 1, 3, 1, 887... |
| 1 | 661662037 | (9月26日就餐)这家的服务态度真是很奇葩了:在等餐时看到服务员在端豆浆上桌前,突然在碗里发... | 5 | 0 | ( 9 月 26 日 就餐 ) 这家 的 服务态度 真是 很 奇葩 了 : 在 等 餐时 看... | [470, 1, 402, 1, 495, 1, 199, 326, 1, 254, 1, ... |
| 2 | 661662167 | 除了贵 没毛病 | 35 | 1 | 除了 贵 没 毛病 | [675, 9, 1, 345, 1, 1, 1, 42, 1, 572, 1402, 0,... |
| 3 | 661674219 | 亲民,家门口,味道可以,主要是在家附近,吃点家常菜还是比较方便的,不做饭就在这里吃点,做的也... | 45 | 1 | 亲民 , 家门口 , 味道 可以 , 主要 是 在家 附近 , 吃点 家常菜 还是 比较 方... | [537, 779, 1, 2, 1, 27, 174, 54, 1, 2, 1, 22, ... |
| 4 | 661677846 | 之前在会展中心店按过,觉得很不错,说上梅林也有一家店,刚好在附近办事,所以来了这里,装修风格... | 50 | 1 | 之前 在 会展中心 店 按 过 , 觉得 很 不错 , 说 上 梅林 也 有 一家 店 , ... | [142, 113, 1, 28, 1, 56, 912, 77, 70, 1, 29, 1... |
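To confirm the encoding round-trips, the mapping can be inverted (a sketch built from the `abc` Series above):
# Sketch: decode the first encoded review back into tokens
id2word = {v: k for k, v in abc.items()}
print(" ".join(id2word[i] for i in data['doc2num'][0] if i != 0))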
# Pad every sequence to a fixed length of 200
seq_len = 200
from tensorflow.keras import preprocessing
# doc2num already pads/truncates each review to maxlen=100, so pad_sequences
# simply left-pads every row with a further 100 zeros to reach seq_len
features = preprocessing.sequence.pad_sequences(data['doc2num'], maxlen=seq_len)
features.shape
(10000, 200)
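A brief check (a sketch) that the padding step behaved as expected:
# Sketch: confirm dtype and how much of the matrix is padding
print(features.dtype)
print((features == 0).mean())  # overall fraction of zero (padding) entries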
split_frac = 0.8
## split data into training, validation, and test data (features and labels, x and y)
split_idx = int(len(features)*split_frac)
train_x, remaining_x = features[:split_idx], features[split_idx:]
train_y, remaining_y = data['sentiment'][:split_idx], data['sentiment'][split_idx:]
test_idx = int(len(remaining_x)*0.5)
val_x, test_x = remaining_x[:test_idx], remaining_x[test_idx:]
val_y, test_y = remaining_y[:test_idx], remaining_y[test_idx:]
## print out the shapes of your resultant feature data
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
Feature Shapes:
Train set: (8000, 200)
Validation set: (1000, 200)
Test set: (1000, 200)
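The slice-based split assumes the rows are already in random order; a hedged alternative sketch with scikit-learn preserves the label ratio in every split (not what was run above):
# Sketch: stratified 80/10/10 split with scikit-learn (alternative approach)
from sklearn.model_selection import train_test_split
labels = data['sentiment'].values
tr_x, rem_x, tr_y, rem_y = train_test_split(features, labels, test_size=0.2,
                                            stratify=labels, random_state=42)
va_x, te_x, va_y, te_y = train_test_split(rem_x, rem_y, test_size=0.5,
                                          stratify=rem_y, random_state=42)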
import torch
from torch.utils.data import TensorDataset, DataLoader
# create Tensor datasets
train_data = TensorDataset(torch.from_numpy(np.asarray(train_x)), torch.from_numpy(np.asarray(train_y)))
valid_data = TensorDataset(torch.from_numpy(np.asarray(val_x)), torch.from_numpy(np.asarray(val_y)))
test_data = TensorDataset(torch.from_numpy(np.asarray(test_x)), torch.from_numpy(np.asarray(test_y)))
# dataloaders
batch_size = 50
# make sure to SHUFFLE your training data; evaluation data can keep its order
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
valid_loader = DataLoader(valid_data, shuffle=False, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=False, batch_size=batch_size)
# obtain one batch of training data
dataiter = iter(train_loader)
sample_x, sample_y = next(dataiter)  # dataiter.next() was removed in recent PyTorch
print('Sample input size: ', sample_x.size()) # batch_size, seq_length
print('Sample input: \n', sample_x)
print()
print('Sample label size: ', sample_y.size()) # batch_size
print('Sample label: \n', sample_y)
Sample input size: torch.Size([50, 200])
Sample input:
tensor([[ 0, 0, 0, ..., 0, 0, 0],
[ 0, 0, 0, ..., 2190, 1, 3],
[ 0, 0, 0, ..., 0, 0, 0],
...,
[ 0, 0, 0, ..., 515, 515, 1],
[ 0, 0, 0, ..., 0, 0, 0],
[ 0, 0, 0, ..., 0, 0, 0]], dtype=torch.int32)
Sample label size: torch.Size([50])
Sample label:
tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1,
1, 1])
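Everything below runs on CPU; as a hedged sketch, the usual device handling would look like this if a GPU were available:
# Sketch: optional GPU support (the training loop below stays on CPU)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# net.to(device); then move each batch: inputs, labels = inputs.to(device), labels.to(device)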
import torch.nn as nn
class SentimentRNN(nn.Module):  # network for sentiment classification
    """
    The RNN model that will be used to perform sentiment analysis.
    """
    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, bidirectional=True, drop_prob=0.3):
        """
        Initialize the model by setting up the layers.
        """
        super(SentimentRNN, self).__init__()
        # store layer hyperparameters
        self.output_size = output_size  # size of the output layer
        self.n_layers = n_layers        # number of stacked LSTM layers
        self.hidden_dim = hidden_dim    # hidden state dimensionality
        self.bidirectional = bidirectional
        # embedding and LSTM layers
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=drop_prob, batch_first=True,
                            bidirectional=bidirectional)
        # dropout layer, applied to the output of every layer except the last
        self.dropout = nn.Dropout(drop_prob)
        # linear and sigmoid output layers
        if bidirectional:
            self.fc = nn.Linear(hidden_dim*2, output_size)
        else:
            self.fc = nn.Linear(hidden_dim, output_size)
        self.sig = nn.Sigmoid()
    def forward(self, x, hidden):
        """
        Perform a forward pass of our model on some input and hidden state.
        """
        batch_size = x.size(0)
        # embeddings and lstm_out
        x = x.long()
        embeds = self.embedding(x)
        lstm_out, hidden = self.lstm(embeds, hidden)
        # dropout and fully-connected layer
        out = self.dropout(lstm_out)
        out = self.fc(out)
        # sigmoid function
        sig_out = self.sig(out)
        # reshape to be batch_size first
        sig_out = sig_out.view(batch_size, -1)
        sig_out = sig_out[:, -1]  # keep only the prediction at the last time step
        # return last sigmoid output and hidden state
        return sig_out, hidden
    def init_hidden(self, batch_size):
        ''' Initializes hidden state '''
        # Create two zero tensors of size (n_layers*num_directions, batch_size,
        # hidden_dim) for the hidden state and cell state of the LSTM
        weight = next(self.parameters()).data
        number = 2 if self.bidirectional else 1
        hidden = (weight.new(self.n_layers*number, batch_size, self.hidden_dim).zero_(),
                  weight.new(self.n_layers*number, batch_size, self.hidden_dim).zero_())
        return hidden
# Instantiate the model w/ hyperparams
vocab_size = len(abc)+1  # +1 for the 0 padding index plus our word tokens
output_size = 1          # a single sigmoid output
embedding_dim = 400      # embedding dimensionality
hidden_dim = 256         # hidden state dimensionality
n_layers = 2             # number of stacked LSTM layers
bidirectional = False    # set True for a bidirectional LSTM; unidirectional here
net = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, bidirectional)
print(net)  # embedding 3008x400, 2-layer LSTM with hidden size 256, linear 256->1, sigmoid output
SentimentRNN(
(embedding): Embedding(3008, 400)
(lstm): LSTM(400, 256, num_layers=2, batch_first=True, dropout=0.3)
(dropout): Dropout(p=0.3, inplace=False)
(fc): Linear(in_features=256, out_features=1, bias=True)
(sig): Sigmoid()
)
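A one-liner (a sketch) to see how large the model actually is:
# Sketch: count trainable parameters
print(sum(p.numel() for p in net.parameters() if p.requires_grad))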
# loss and optimization functions
lr = 0.001
criterion = nn.BCELoss()  # binary cross-entropy on the sigmoid output
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# training params
epochs = 3  # 3-4 is approx where I noticed the validation loss stop decreasing
print_every = 100
clip = 5  # gradient clipping threshold
net.train()  # put the model in training mode
# train for some number of epochs
for e in range(epochs):
    # initialize hidden state
    h = net.init_hidden(batch_size)
    counter = 0
    # batch loop
    for inputs, labels in train_loader:  # iterate one batch at a time
        counter += 1
        # Create new variables for the hidden state; otherwise
        # we'd backprop through the entire training history
        h = tuple([each.data for each in h])
        # zero accumulated gradients
        net.zero_grad()
        # get the output from the model
        output, h = net(inputs, h)
        # calculate the loss and perform backprop
        loss = criterion(output.squeeze(), labels.float())
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        nn.utils.clip_grad_norm_(net.parameters(), clip)
        optimizer.step()  # update the parameters
        # loss stats
        if counter % print_every == 0:  # log at the configured interval
            # Get validation loss
            val_h = net.init_hidden(batch_size)
            val_losses = []
            net.eval()
            with torch.no_grad():  # no gradients needed for validation
                for inputs, labels in valid_loader:
                    # detach the hidden state between batches here as well
                    val_h = tuple([each.data for each in val_h])
                    output, val_h = net(inputs, val_h)
                    val_loss = criterion(output.squeeze(), labels.float())
                    val_losses.append(val_loss.item())
            net.train()
            print("Epoch: {}/{}...".format(e+1, epochs),
                  "Step: {}...".format(counter),
                  "Loss: {:.6f}...".format(loss.item()),
                  "Val Loss: {:.6f}".format(np.mean(val_losses)))
Epoch: 1/3... Step: 100... Loss: 0.309615... Val Loss: 0.231731
Epoch: 2/3... Step: 100... Loss: 0.296311... Val Loss: 0.232263
Epoch: 3/3... Step: 100... Loss: 0.058527... Val Loss: 0.221560
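Once training is done, the weights are worth persisting; a minimal sketch (the filename is an arbitrary choice):
# Sketch: save and later restore the trained weights (filename is arbitrary)
torch.save(net.state_dict(), 'sentiment_rnn.pt')
# net.load_state_dict(torch.load('sentiment_rnn.pt')); net.eval()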
# Get test data loss and accuracy
test_losses = []  # track loss
num_correct = 0
# init hidden state
h = net.init_hidden(batch_size)
net.eval()  # put the model in evaluation mode
# iterate over test data
for inputs, labels in test_loader:  # one batch at a time
    # Create new variables for the hidden state; otherwise
    # we'd carry the graph through the entire history
    h = tuple([each.data for each in h])
    # get predicted outputs
    output, h = net(inputs, h)
    # calculate loss
    test_loss = criterion(output.squeeze(), labels.float())
    test_losses.append(test_loss.item())
    # convert output probabilities to predicted class (0 or 1)
    pred = torch.round(output.squeeze())  # rounds to the nearest integer
    # compare predictions to true labels
    correct_tensor = pred.eq(labels.float().view_as(pred))
    correct = np.squeeze(correct_tensor.numpy())
    num_correct += np.sum(correct)  # count correct predictions
# -- stats -- #
# avg test loss
print("Test loss: {:.3f}".format(np.mean(test_losses)))
# accuracy over all test data
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc))
Test loss: 0.205
Test accuracy: 0.945
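Finally, a hedged sketch of scoring one raw review end to end, wired from the pieces above (`chinese_word_cut`, `doc2num`, `seq_len`, the trained `net`); the `predict` helper is hypothetical, not part of the original pipeline:
# Sketch: score a single raw review ('predict' is a hypothetical helper)
def predict(net, text):
    tokens = doc2num(chinese_word_cut(text), maxlen)  # encode exactly like the training data
    x = preprocessing.sequence.pad_sequences([tokens], maxlen=seq_len)
    inputs = torch.from_numpy(x)
    h = net.init_hidden(1)  # hidden state for a batch of one
    net.eval()
    with torch.no_grad():
        prob, _ = net(inputs, h)
    return prob.item()  # probability that the review is positive
print(predict(net, "味道很好,服务也不错"))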