大模型基础--情感分析任务的演进(深度学习篇)

举报
剑指南天 发表于 2026/04/25 19:31:53 2026/04/25
【摘要】 基于神经网络构建一个文本情感分类模型

1.需求说明

本案例的目标是基于神经网络构建一个文本情感分类模型,对评论内容进行二分类判断(正面或负面)。

2.需求分析

数据来源:https://github.com/SophonPlus/ChineseNlpCorpus/blob/master/datasets/online_shopping_10_cats/online_shopping_10_cats.zip

模型结构设计:模型整体由Embedding层和三层神经网络构成

训练方案:损失函数使用 BCEWithLogitsLoss,结合了sigmoid激活和二分类交叉熵计算,数值稳定且适合二分类任务。优化器使用Adam优化器进行参数更新,提升训练效率。

评估方案:模型训练完毕后,使用测试集统计正确率

3.代码实现

3.1 原始训练数据的预处理

import jieba
import numpy as np
import pandas as pd
from gensim.models import Word2Vec
from sklearn.model_selection import train_test_split

from nlp_tutorial.ch03_tradtional_model.dl_project.src.d_config import *


def preprocess():
    """Preprocess the raw review CSV into padded index sequences.

    Steps: load the raw data, make a stratified train/test split, train a
    Word2Vec model on the jieba-tokenized training reviews only (no test-set
    leakage), then map every review to a fixed-length (SEQ_LEN) list of
    vocabulary indices and save both splits as JSON Lines files.
    """
    print("开始数据预处理")
    # Load only the needed columns; keep empty strings instead of NaN.
    df = pd.read_csv(RAW_DATA_DIR / RAW_DATA_FILE, usecols=['label', 'review'], encoding='utf-8',
                     keep_default_na=False)
    # Stratified split preserves the label distribution in both subsets.
    train_df, test_df = train_test_split(df, test_size=0.2, stratify=df['label'])
    # Work on copies so the .apply assignments below do not trigger
    # pandas SettingWithCopyWarning on slices of the original frame.
    train_df, test_df = train_df.copy(), test_df.copy()
    # Train Word2Vec on the training split only.
    sentences = [[word for word in jieba.lcut(line) if word.strip()] for line in train_df['review'] if line.strip()]
    word2vec_model = Word2Vec(
        sentences=sentences,
        window=5,
        vector_size=EMBEDDING_SIZE,
        min_count=5,
        workers=8,
        sg=1  # skip-gram
    )
    # Add an all-zero vector for the unknown/padding token so padded
    # positions contribute nothing to the downstream embedding output.
    word2vec_model.wv.add_vector(UNK_TOKEN, np.array([0.0] * EMBEDDING_SIZE))
    # Persist the word-vector table for the model to load later.
    word2vec_model.wv.save_word2vec_format(MODEL_DIR / WORD2VEC_MODEL)
    # word -> row index in the vector table
    word2index = word2vec_model.wv.key_to_index

    def padding_in_sentence(review):
        # BUG FIX: the review must be tokenized with jieba exactly as it was
        # for Word2Vec training. The original code iterated the raw string
        # (`review[0:SEQ_LEN]`), producing per-character lookups that almost
        # never matched the word-level vocabulary.
        tokens = [word for word in jieba.lcut(review) if word.strip()]
        # Truncate to SEQ_LEN tokens; unknown words map to the UNK index.
        ids = [word2index.get(word, word2index[UNK_TOKEN]) for word in tokens[:SEQ_LEN]]
        # Right-pad with the UNK index up to the fixed sequence length.
        ids.extend([word2index[UNK_TOKEN]] * (SEQ_LEN - len(ids)))
        return ids

    train_df['review'] = train_df['review'].apply(padding_in_sentence)
    test_df['review'] = test_df['review'].apply(padding_in_sentence)
    # Save both splits as JSON Lines for the Dataset class to consume.
    train_df.to_json(PROCESSED_DATA_DIR / TRAIN_DATA_FILE, orient="records", lines=True)
    test_df.to_json(PROCESSED_DATA_DIR / TEST_DATA_FILE, orient="records", lines=True)
    print("数据处理结束")


if __name__ == '__main__':
    # Run the preprocessing step as a standalone script.
    preprocess()

3.2 自定义DataLoader(DataLoader会自动分批,还可以在批次内处理数据)

import pandas as pd
import torch
from torch.utils.data import Dataset,DataLoader
from nlp_tutorial.ch03_tradtional_model.dl_project.src.d_config import *

# 自定义DataSet
class ReviewAnalysisDataset(Dataset):
    """Dataset over preprocessed review records stored as JSON Lines.

    Each record is a dict with keys 'review' (a list of vocabulary indices)
    and 'label' (0 or 1).
    """

    def __init__(self, path):
        # Materialize the JSONL file into a list of per-row dicts.
        frame = pd.read_json(path, lines=True, orient="records")
        self.data = frame.to_dict(orient='records')

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        record = self.data[index]
        # Indices feed nn.Embedding (long); labels feed BCE loss (float).
        features = torch.tensor(record['review'], dtype=torch.long)
        label = torch.tensor(record['label'], dtype=torch.float)
        return features, label
# 获取DataLoader
def get_dataloader(train=True):
    """Build a shuffled DataLoader over the train or test split.

    :param train: when True load TRAIN_DATA_FILE, otherwise TEST_DATA_FILE.
    :return: DataLoader yielding (indices, label) batches of BATCH_SIZE.
    """
    # Pick the split file, then wrap it in the custom Dataset.
    filename = TRAIN_DATA_FILE if train else TEST_DATA_FILE
    dataset = ReviewAnalysisDataset(PROCESSED_DATA_DIR / filename)
    # shuffle=True randomizes sample order each epoch.
    return DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

3.3 定义模型

class ReviewAnalysisModel(nn.Module):
    """MLP sentiment classifier on top of frozen Word2Vec embeddings.

    forward() returns raw logits of shape (batch,); pair it with
    BCEWithLogitsLoss during training and apply torch.sigmoid at inference.
    """

    def __init__(self):
        super().__init__()
        # Load the pre-trained word vectors saved by the preprocessing step.
        self.word2vec_wv = KeyedVectors.load_word2vec_format(MODEL_DIR / WORD2VEC_MODEL)
        # word -> row index into the embedding matrix
        self.word2id = self.word2vec_wv.key_to_index
        # Frozen embedding initialized from Word2Vec. padding_idx marks the
        # UNK/padding row (its vector is all zeros by construction) so padded
        # positions stay inert; freeze=True already disables gradients for
        # the whole table.
        self.embedding = nn.Embedding.from_pretrained(embeddings=torch.tensor(self.word2vec_wv.vectors), freeze=True,
                                                      padding_idx=self.word2id[UNK_TOKEN])
        self.flatten = nn.Flatten()
        # Normalize the flattened (SEQ_LEN * EMBEDDING_SIZE) feature vector.
        self.bn = nn.BatchNorm1d(SEQ_LEN * EMBEDDING_SIZE)
        self.dropout1 = nn.Dropout(0.1)
        self.linear1 = nn.Linear(SEQ_LEN * EMBEDDING_SIZE, 1024)
        self.relu1 = nn.ReLU()
        self.linear2 = nn.Linear(1024, 256)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(0.5)
        # Single logit output for binary classification.
        self.linear3 = nn.Linear(256, 1)
        # NOTE: the unused nn.Sigmoid() module was removed — forward() never
        # applied it; BCEWithLogitsLoss / torch.sigmoid own the activation.

    def forward(self, X):
        """Map a token-index batch (batch, SEQ_LEN) to logits (batch,)."""
        embed = self.embedding(X)
        flat = self.flatten(embed)
        normed = self.bn(flat)
        hidden = self.relu1(self.linear1(self.dropout1(normed)))
        hidden = self.relu2(self.linear2(hidden))
        logits = self.linear3(self.dropout2(hidden))
        # Drop the trailing size-1 dim so the shape matches the float targets.
        return logits.squeeze(dim=-1)

3.4 配置信息(按照配置文件创建对应文件夹)

from pathlib import Path

# Project directory layout: everything is resolved relative to the project
# root (two levels above this config module).
ROOT_DIR = Path(__file__).parent.parent
RAW_DATA_DIR = ROOT_DIR / 'data' / 'raw'  # downloaded CSV lives here
PROCESSED_DATA_DIR = ROOT_DIR / 'data' / 'processed'  # train/test JSONL output
MODEL_DIR = ROOT_DIR / 'models'  # word2vec table + best checkpoint
LOG_DIR = ROOT_DIR / 'logs'  # TensorBoard run directories

RAW_DATA_FILE = 'online_shopping_10_cats.csv'
TRAIN_DATA_FILE = 'train.jsonl'
TEST_DATA_FILE = 'test.jsonl'
BEST_MODEL = 'best_model.pt'  # checkpoint with the lowest training loss
WORD2VEC_MODEL = 'word2vec.wv'  # KeyedVectors saved in word2vec text format

UNK_TOKEN = '<UNK>'  # unknown word; this project also uses it for padding
PAD_TOKEN = '<PAD>'  # NOTE(review): defined but unused — padding uses UNK_TOKEN; confirm before removing

SEQ_LEN = 128  # fixed token-index sequence length per review
BATCH_SIZE = 64
EMBEDDING_SIZE = 16  # word2vec vector_size == embedding dimension
HIDDEN_SIZE = 256  # NOTE(review): not referenced anywhere in the visible code

LEARN_RATE = 1e-3  # Adam learning rate
EPOCHS = 50

3.5 定义训练过程

import time

import torch
from torch import nn, optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from nlp_tutorial.ch03_tradtional_model.dl_project.src.b_dataset import get_dataloader
from nlp_tutorial.ch03_tradtional_model.dl_project.src.c_model import ReviewAnalysisModel
from nlp_tutorial.ch03_tradtional_model.dl_project.src.d_config import *

# 训练一个epoch
def train_one_epoch(model, train_loader, loss, optimizer, device):
    """Run one training pass over train_loader; return the mean batch loss.

    :param model: the network to train (switched to train mode here).
    :param train_loader: DataLoader yielding (inputs, targets) batches.
    :param loss: loss module (BCEWithLogitsLoss in this project).
    :param optimizer: optimizer over model.parameters().
    :param device: 'cuda' or 'cpu' target for each batch.
    """
    # NOTE: the original listing lost all indentation when published; the
    # structure is reconstructed here. Loop locals renamed so the builtin
    # `input` is not shadowed.
    model.train()
    total_loss = 0.0
    for inputs, targets in tqdm(train_loader, desc='训练: '):
        inputs, targets = inputs.to(device), targets.to(device)
        output = model(inputs)
        loss_value = loss(output, targets)
        loss_value.backward()
        optimizer.step()
        # Clear gradients after stepping so the next batch starts fresh.
        optimizer.zero_grad()
        total_loss += loss_value.item()
    # Average over the number of batches.
    return total_loss / len(train_loader)

# 训练的基础配置
def train():
    """Train the sentiment model; checkpoint whenever the epoch loss improves."""
    # NOTE: the original listing lost all indentation when published; the
    # structure is reconstructed here.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Training data
    train_loader = get_dataloader()
    # Move the model to the selected compute device.
    model = ReviewAnalysisModel().to(device)
    # BCEWithLogitsLoss fuses sigmoid + binary cross-entropy (numerically stable).
    loss = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(lr=LEARN_RATE, params=model.parameters())
    min_loss = float('inf')
    # One TensorBoard run directory per training session, named by start time.
    with SummaryWriter(log_dir=LOG_DIR / time.strftime('%Y-%m-%d_%H-%M-%S')) as writer:
        for epoch in range(EPOCHS):
            print("=" * 10, f"EPOCH:{epoch + 1}", "=" * 10)
            # Brief pause so the banner doesn't interleave with tqdm output.
            time.sleep(0.1)
            this_loss = train_one_epoch(model=model, train_loader=train_loader, loss=loss, optimizer=optimizer,
                                        device=device)
            print("this loss: ", this_loss)
            writer.add_scalar('loss', this_loss, epoch + 1)

            # Keep only the checkpoint with the lowest training loss so far.
            if this_loss < min_loss:
                min_loss = this_loss
                torch.save(model.state_dict(), MODEL_DIR / BEST_MODEL)
                print("模型保存成功!")


if __name__ == '__main__':
    train()

3.6 模型评估

import torch
from tqdm import tqdm

from nlp_tutorial.ch03_tradtional_model.dl_project.src.b_dataset import get_dataloader
from nlp_tutorial.ch03_tradtional_model.dl_project.src.c_model import ReviewAnalysisModel
from nlp_tutorial.ch03_tradtional_model.dl_project.src.d_config import *

def predict_batch(model, input):
    """Return sigmoid probabilities for one batch as a plain Python list.

    Puts the model in eval mode and disables autograd — inference only.
    """
    model.eval()
    with torch.no_grad():
        # The model emits logits; sigmoid maps them to [0, 1] probabilities.
        probabilities = torch.sigmoid(model(input))
    return probabilities.tolist()

def evaluate(model, dataloader, device):
    """Compute accuracy of `model` over `dataloader` using a 0.5 threshold.

    :return: fraction of samples whose thresholded prediction equals the label.
    """
    correct_count = 0.0
    total_count = 0.0
    for inputs, targets in tqdm(dataloader, "评估模型中: "):
        inputs, targets = inputs.to(device), targets.to(device)
        probabilities = predict_batch(model, inputs)
        for target, probability in zip(targets, probabilities):
            total_count += 1
            # Threshold the probability into a hard 0/1 prediction.
            prediction = 1 if probability > 0.5 else 0
            if prediction == target:
                correct_count += 1.0
    return correct_count / total_count


def run_evaluate():
    """Load the best checkpoint and report accuracy on the test split."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Rebuild the architecture on the target device, then restore weights.
    model = ReviewAnalysisModel().to(device)
    model.load_state_dict(torch.load(MODEL_DIR / BEST_MODEL))
    print("模型加载成功!")
    # Evaluate on the held-out test split.
    test_dataloader = get_dataloader(train=False)
    acc = evaluate(model, test_dataloader, device)
    print("评估结果:")
    print("Accuracy: ", acc)


if __name__ == '__main__':
    run_evaluate()

4. 运行数据预处理代码

5. 运行训练代码

6. 运行评估代码











【版权声明】本文为华为云社区用户原创内容,未经允许不得转载,如需转载请自行联系原作者进行授权。如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱: cloudbbs@huaweicloud.com
  • 点赞
  • 收藏
  • 关注作者

评论(0)

0/1000
抱歉,系统识别当前为高风险访问,暂不支持该操作

全部回复

上滑加载中

设置昵称

在此一键设置昵称,即可参与社区互动!

*长度不超过10个汉字或20个英文字符,设置后3个月内不可修改。

*长度不超过10个汉字或20个英文字符,设置后3个月内不可修改。