I'm running into problems implementing an LSTM. I'm not sure whether my implementation is wrong or whether this is just an overfitting problem. I'm using the LSTM for essay scoring, grading texts on a 0-10 scale (or other score ranges). I'm using the ASAP kaggle competition data as one of my training datasets.
However, the main goal is to get good performance on a private dataset of about 500 samples. Those 500 samples include both the training and validation sets. I had done some experiments earlier and got the model working, but after fiddling with a few things the model stopped fitting. It simply doesn't improve at all. I also reimplemented the code in a cleaner, more object-oriented way, but I still can't reproduce my previous results.
That said, the model does fit my data, just with huge overfitting. I'm not sure whether this is some kind of implementation problem or just overfitting, but I can't get the model to work properly. With an LSTM on essay set 1 of the ASAP data, the best I can get is 0.35 kappa. For some strange reason, a single-layer fully-connected model gets me 0.75 kappa. I suspect an implementation problem, but I'm not sure.
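For reference, the kappa numbers above come from scikit-learn's cohen_kappa_score, which my training script calls without weights (plain Cohen's kappa); the usual ASAP metric is the quadratic weighted variant, which the same function can compute. A minimal sketch with made-up labels:

from sklearn.metrics import cohen_kappa_score

# hypothetical gold scores and predictions, just to show the call
y_true = [3, 4, 5, 2, 4]
y_pred = [3, 5, 5, 1, 4]

# weights="quadratic" turns Cohen's kappa into quadratic weighted kappa (QWK)
print(cohen_kappa_score(y_true, y_pred, weights="quadratic"))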
Here is my old code:
train.py
import gensim
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import cohen_kappa_score
from torch import nn
import torch.utils.data as data_utils
from torch.optim import Adam
from dataset import AESDataset
from network import Network
from optimizer import Ranger
from qwk import quadratic_weighted_kappa,kappa
batch_size = 32
device = "cuda:0"
torch.manual_seed(1000)
# Load data from csv
file_name = "data/data_new.csv"
data = pd.read_csv(file_name)
arr = data.to_numpy()
text = arr[:,:2]
text = [str(line[0]) + str(line[1]) for line in text]
text = [gensim.utils.simple_preprocess(line) for line in text]
score = arr[:,2]
score = [sco*6 for sco in score]  # scale scores into the 0-60 integer class range (out_class = 61)
score = np.asarray(score,dtype=int)
train_dataset = AESDataset(text_arr=text[:400],scores=score[:400])
test_dataset = AESDataset(text_arr=text[400:],scores=score[400:])
score = torch.tensor(score).view(-1,1).long().to(device)
train_loader = data_utils.DataLoader(train_dataset,shuffle=True,batch_size=batch_size,drop_last=True)
test_loader = data_utils.DataLoader(test_dataset,batch_size=batch_size,drop_last=True)  # batch_size was missing (defaulted to 1), which broke score.view(batch_size) below
out_class = 61
epochs = 1000
model = Network(out_class).to(device)
model.load_state_dict(torch.load("model/best_model"))
y_onehot = torch.FloatTensor(batch_size,out_class).to(device)
optimizer = Adam(model.parameters())
criti = torch.nn.CrossEntropyLoss()
# model,optimizer = amp.initialize(model,optimizer,opt_level="O2")
step = 0
for i in range(epochs):
    # Testing
    if i % 1 == 0:  # evaluate every epoch
        total_loss = 0
        total_kappa = 0
        total_batches = 0
        model.eval()
        for (text, score) in test_loader:
            out = model(text)
            out_score = torch.argmax(out, 1)
            # one-hot buffer is filled but never actually used below
            y_onehot.zero_()
            y_onehot.scatter_(1, score, 1)
            kappa_l = cohen_kappa_score(score.view(-1).tolist(), out_score.view(-1).tolist())
            score = score.view(-1)
            loss = criti(out, score)
            total_loss += loss.item()  # .item() so eval losses don't keep computation graphs alive
            total_kappa += kappa_l
            total_batches += 1
        print(f"Epoch {i} Testing kappa {total_kappa/total_batches} loss {total_loss/total_batches}")
        with open(f"model/epoch_{i}", "wb") as f:
            torch.save(model.state_dict(), f)
        model.train()
    # Training
    for (text, score) in train_loader:
        optimizer.zero_grad()
        step += 1
        out = model(text)
        out_score = torch.argmax(out, 1)
        y_onehot.zero_()
        y_onehot.scatter_(1, score, 1)  # was scatter_(1, 1), which raises a TypeError
        kappa_l = cohen_kappa_score(score.view(-1).tolist(), out_score.view(-1).tolist())
        loss = criti(out, score.view(-1))
        print(f"Epoch {i} step {step} kappa {kappa_l} loss {loss}")
        loss.backward()
        optimizer.step()
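Separately from the script above, one check that distinguishes an implementation bug from plain overfitting is trying to memorize a tiny subset: a correct model/pipeline should drive the training loss to near zero. A sketch, reusing train_dataset, Network, out_class, and device from train.py; the subset size and epoch count are arbitrary:

import torch
import torch.utils.data as data_utils
from torch.optim import Adam

# 8 samples, chosen arbitrarily; if the loss plateaus even here,
# the problem is in the model or data pipeline, not regularization
tiny = torch.utils.data.Subset(train_dataset, range(8))
tiny_loader = data_utils.DataLoader(tiny, batch_size=8)
model = Network(out_class).to(device)
optimizer = Adam(model.parameters())
criti = torch.nn.CrossEntropyLoss()

for epoch in range(200):
    for text, score in tiny_loader:
        optimizer.zero_grad()
        loss = criti(model(text), score.view(-1))
        loss.backward()
        optimizer.step()
    if epoch % 50 == 0:
        print(f"epoch {epoch} loss {loss.item():.4f}")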
dataset.py
import gensim
import torch
import numpy as np
class AESDataset(torch.utils.data.Dataset):
    def __init__(self, text_arr, scores):
        self.data = text_arr
        self.scores = scores
        # was: self.w2v_model = ("w2vec_model_all"), which assigns a plain string,
        # so every lookup in __getitem__ failed and fell back to pad_vec; the
        # word2vec model presumably has to be loaded here instead:
        self.w2v_model = gensim.models.KeyedVectors.load("w2vec_model_all")
        self.max_len = 500

    def __getitem__(self, item):
        vector = []
        essay = self.data[item]
        pad_vec = [1 for i in range(300)]  # all-ones vector used for padding and OOV words
        # left-pad short essays up to max_len timesteps
        for i in range(self.max_len - len(essay)):
            vector.append(pad_vec)
        for word in essay:
            try:
                word_vec = self.w2v_model[word]
            except KeyError:  # word not in the word2vec vocabulary
                word_vec = pad_vec
            vector.append(word_vec)
        vector = np.stack(vector)
        tensor = torch.tensor(vector[:self.max_len]).float().to("cuda")
        score = self.scores[item]
        score = torch.tensor(score).long().to("cuda").view(1)
        return tensor, score

    def __len__(self):
        return len(self.scores)
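Because the except branch silently substitutes pad_vec, a broken lookup (like the plain-string assignment noted in __init__) makes every essay collapse into all-ones vectors without any visible error. A quick coverage check, a sketch assuming the same preprocessed text list from train.py and the same w2vec_model_all file:

import gensim

# assumption: the same word2vec model file used by AESDataset
w2v_model = gensim.models.KeyedVectors.load("w2vec_model_all")

hits, misses = 0, 0
for essay in text:  # `text` = the list of token lists built in train.py
    for word in essay:
        if word in w2v_model:
            hits += 1
        else:
            misses += 1
print(f"vocab coverage: {hits / (hits + misses):.1%} ({misses} OOV tokens)")

If coverage comes out near zero, the LSTM never sees real embeddings, which would explain it performing far below a simple fully-connected model.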
network.py
import torch.nn as nn
import torch
import torch.nn.functional as F
class Network(nn.Module):
    def __init__(self, output_size):
        super(Network, self).__init__()
        self.lstm = nn.LSTM(300, 500, 1, batch_first=True)
        self.dropout = nn.Dropout(p=0.5)
        #self.l2 = nn.L2
        self.linear = nn.Linear(500, output_size)

    def forward(self, x):
        x, _ = self.lstm(x)
        x = x[:, -1, :]  # last timestep; with left padding this is the final real token
        x = self.dropout(x)
        x = self.linear(x)
        return x
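Since the dataset left-pads, x[:, -1, :] in forward does pick up the last real token for essays up to max_len. An alternative I've seen recommended is right-padding plus pack_padded_sequence, so the LSTM skips padded timesteps entirely. A sketch; PackedNetwork is hypothetical and the dataset would have to return true sequence lengths:

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence


class PackedNetwork(nn.Module):
    # Hypothetical variant: expects right-padded batches plus a lengths tensor.
    def __init__(self, output_size):
        super().__init__()
        self.lstm = nn.LSTM(300, 500, 1, batch_first=True)
        self.dropout = nn.Dropout(p=0.5)
        self.linear = nn.Linear(500, output_size)

    def forward(self, x, lengths):
        # pack so the LSTM never reads padding
        packed = pack_padded_sequence(x, lengths.cpu(), batch_first=True,
                                      enforce_sorted=False)
        _, (h_n, _) = self.lstm(packed)
        h = self.dropout(h_n[-1])  # final hidden state of the (single) layer
        return self.linear(h)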