
Week N5: Getting Started with PyTorch Text Classification

  •         🍨 This post is a learning-record entry from the 🔗365天深度学习训练营 (365-Day Deep Learning Training Camp)
  •         🍖 Original author: K同学啊

I. Preparation

1. Load the data
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib, warnings

warnings.filterwarnings("ignore")  # suppress warning messages

# On a Windows 10 machine the GPU could be used instead:
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
device

device(type='cpu')
from torchtext.datasets import AG_NEWS

train_iter = list(AG_NEWS(split='train'))  # load the AG_NEWS dataset
num_class = len(set([label for (label, text) in train_iter]))
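AG_NEWS has four classes, with raw labels 1-4 (World, Sports, Business, Sci/Tech). A quick sanity-check sketch, assuming train_iter was materialized as the list above; the sample shown in the comment is only indicative:

print(num_class)      # 4
print(train_iter[0])  # a (label, text) pair, e.g. (3, "Wall St. Bears Claw Back Into the Black ...")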
2. Build the vocabulary
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iteratortokenizer = get_tokenizer('basic_english') # 返回分词器函数,训练营内“get tokenizer函数详解def yield_tokens(data_iter):for _, text in data_iter:yield tokenizer(text)vocab = build_vocab_from_iterator(yield_tokens(train_iter),specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"]) #设置默认索引,如果找不到单词,则会选择默认索引
vocab(['here','is','an','example'])

 [475, 21, 30, 5297]
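Since "<unk>" is the first (and only) entry in specials and is also set as the default index, any token that never appeared in the training corpus maps to index 0. A small check, assuming the made-up token below does not occur in AG_NEWS:

print(vocab['<unk>'])     # 0 — specials are placed at the front of the vocab by default
print(vocab(['zzqqxx']))  # [0] — an unseen token falls back to the default index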

text_pipeline  = lambda x: vocab(tokenizer(x))
label_pipeline = lambda x: int(x) - 1

text_pipeline('here is the an example')

 [475, 21, 2, 30, 5297]

label_pipeline('10')

 9

3. Generate data batches and an iterator
from torch.utils.data import DataLoader

def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for (_label, _text) in batch:
        # label list
        label_list.append(label_pipeline(_label))
        # text list
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        # offset, i.e. the token count of each sentence
        offsets.append(processed_text.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list  = torch.cat(text_list)
    offsets    = torch.tensor(offsets[:-1]).cumsum(dim=0)  # cumulative sum of the elements along dim
    return label_list.to(device), text_list.to(device), offsets.to(device)

# data loader
dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
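Pulling one batch shows the format that nn.EmbeddingBag expects: text is a single flat 1-D tensor containing the token ids of all 8 sentences concatenated, and offsets records where each sentence starts. A quick inspection sketch:

label, text, offsets = next(iter(dataloader))
print(label)        # 8 class indices in 0-3
print(text.shape)   # 1-D: total token count of the batch
print(offsets)      # 8 start positions, beginning at 0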

II. Prepare the Model

1. Define the model
from torch import nn

class TextclassificationModel(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextclassificationModel, self).__init__()
        self.embedding = nn.EmbeddingBag(vocab_size,   # vocabulary size
                                         embed_dim,    # embedding dimension
                                         sparse=False)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, text, offsets):
        embedded = self.embedding(text, offsets)
        return self.fc(embedded)
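nn.EmbeddingBag uses mode='mean' by default, so each sentence is represented by the average of its token embeddings — the "mean aggregation" mentioned in the takeaways (Part IV). A minimal sketch verifying that equivalence on toy data (all names here are illustrative):

import torch
import torch.nn as nn

bag = nn.EmbeddingBag(10, 4)             # default mode='mean'
tokens  = torch.tensor([1, 2, 3, 4, 5])  # two "sentences": [1,2,3] and [4,5]
offsets = torch.tensor([0, 3])
pooled  = bag(tokens, offsets)           # shape (2, 4)

emb = nn.Embedding(10, 4)
emb.weight = bag.weight                  # share weights for the comparison
manual = torch.stack([emb(tokens[:3]).mean(0), emb(tokens[3:]).mean(0)])
print(torch.allclose(pooled, manual))    # True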
2. Instantiate the model
num_class  = len(set([label for (label, text) in train_iter]))
vocab_size = len(vocab)
em_size    = 64
model      = TextclassificationModel(vocab_size, em_size, num_class).to(device)
3. Define the training and evaluation functions
import time

def train(dataloader, model, optimizer, criterion, epoch):
    model.train()  # switch to training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 500
    start_time = time.time()

    for idx, (label, text, offsets) in enumerate(dataloader):
        predicted_label = model(text, offsets)

        optimizer.zero_grad()
        loss = criterion(predicted_label, label)
        loss.backward()
        optimizer.step()

        # record training metrics
        total_acc   += (predicted_label.argmax(1) == label).sum().item()
        train_loss  += loss.item()
        total_count += label.size(0)

        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:1d} | {:4d}/{:4d} batches '
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(
                      epoch, idx, len(dataloader),
                      total_acc / total_count, train_loss / total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()

def evaluate(dataloader, model, criterion):
    model.eval()  # switch to evaluation mode
    total_acc, train_loss, total_count = 0, 0, 0

    with torch.no_grad():
        for idx, (label, text, offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            loss = criterion(predicted_label, label)  # compute the loss

            # record evaluation metrics
            total_acc   += (predicted_label.argmax(1) == label).sum().item()
            train_loss  += loss.item()
            total_count += label.size(0)

    return total_acc / total_count, train_loss / total_count

III. Train the Model

1. Split the dataset and run the model
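The driver code that produced the logs below is not shown above, so here is a hedged reconstruction in the style of the standard torchtext tutorial: EPOCHS=10 is implied by the logs, BATCH_SIZE=64 and the 0.95/0.05 train/valid split are inferred from the 1782 batches per epoch (114000 / 64 ≈ 1782), while LR=5 with SGD and a StepLR scheduler are assumptions carried over from the tutorial:

from torch.utils.data import random_split
from torchtext.data.functional import to_map_style_dataset

# Hyperparameters: EPOCHS matches the logs; the rest are assumptions
# taken from the standard torchtext tutorial.
EPOCHS     = 10
LR         = 5
BATCH_SIZE = 64

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)

train_dataset = to_map_style_dataset(train_iter)
test_dataset  = to_map_style_dataset(AG_NEWS(split='test'))
num_train     = int(len(train_dataset) * 0.95)
split_train_, split_valid_ = random_split(
    train_dataset, [num_train, len(train_dataset) - num_train])

train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
test_dataloader  = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)

for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader, model, optimizer, criterion, epoch)
    val_acc, val_loss = evaluate(valid_dataloader, model, criterion)
    scheduler.step()
    print('-' * 69)
    print('| epoch {:1d} | time:{:4.2f}s | '
          'valid_acc {:4.3f} valid_loss {:4.3f}'.format(
              epoch, time.time() - epoch_start_time, val_acc, val_loss))
    print('-' * 69)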
| epoch 1 |  500/1782 batches | train_acc 0.904 train_loss 0.00450
| epoch 1 | 1000/1782 batches | train_acc 0.903 train_loss 0.00455
| epoch 1 | 1500/1782 batches | train_acc 0.904 train_loss 0.00443
---------------------------------------------------------------------
| epoch 1 | time:11.72s | valid_acc 0.901 valid_loss 0.005
---------------------------------------------------------------------
| epoch 2 |  500/1782 batches | train_acc 0.918 train_loss 0.00379
| epoch 2 | 1000/1782 batches | train_acc 0.920 train_loss 0.00377
| epoch 2 | 1500/1782 batches | train_acc 0.913 train_loss 0.00399
---------------------------------------------------------------------
| epoch 2 | time:11.52s | valid_acc 0.907 valid_loss 0.005
---------------------------------------------------------------------
| epoch 3 |  500/1782 batches | train_acc 0.930 train_loss 0.00323
| epoch 3 | 1000/1782 batches | train_acc 0.925 train_loss 0.00345
| epoch 3 | 1500/1782 batches | train_acc 0.925 train_loss 0.00350
---------------------------------------------------------------------
| epoch 3 | time:11.77s | valid_acc 0.915 valid_loss 0.004
---------------------------------------------------------------------
| epoch 4 |  500/1782 batches | train_acc 0.937 train_loss 0.00294
| epoch 4 | 1000/1782 batches | train_acc 0.931 train_loss 0.00317
| epoch 4 | 1500/1782 batches | train_acc 0.927 train_loss 0.00332
---------------------------------------------------------------------
| epoch 4 | time:11.81s | valid_acc 0.914 valid_loss 0.004
---------------------------------------------------------------------
| epoch 5 |  500/1782 batches | train_acc 0.951 train_loss 0.00243
| epoch 5 | 1000/1782 batches | train_acc 0.950 train_loss 0.00243
| epoch 5 | 1500/1782 batches | train_acc 0.949 train_loss 0.00245
---------------------------------------------------------------------
| epoch 5 | time:11.94s | valid_acc 0.917 valid_loss 0.004
---------------------------------------------------------------------
| epoch 6 |  500/1782 batches | train_acc 0.951 train_loss 0.00236
| epoch 6 | 1000/1782 batches | train_acc 0.951 train_loss 0.00241
| epoch 6 | 1500/1782 batches | train_acc 0.951 train_loss 0.00241
---------------------------------------------------------------------
| epoch 6 | time:11.69s | valid_acc 0.918 valid_loss 0.004
---------------------------------------------------------------------
| epoch 7 |  500/1782 batches | train_acc 0.952 train_loss 0.00233
| epoch 7 | 1000/1782 batches | train_acc 0.952 train_loss 0.00236
| epoch 7 | 1500/1782 batches | train_acc 0.952 train_loss 0.00235
---------------------------------------------------------------------
| epoch 7 | time:11.88s | valid_acc 0.920 valid_loss 0.004
---------------------------------------------------------------------
| epoch 8 |  500/1782 batches | train_acc 0.953 train_loss 0.00233
| epoch 8 | 1000/1782 batches | train_acc 0.954 train_loss 0.00226
| epoch 8 | 1500/1782 batches | train_acc 0.953 train_loss 0.00229
---------------------------------------------------------------------
| epoch 8 | time:11.92s | valid_acc 0.917 valid_loss 0.004
---------------------------------------------------------------------
| epoch 9 |  500/1782 batches | train_acc 0.956 train_loss 0.00223
| epoch 9 | 1000/1782 batches | train_acc 0.955 train_loss 0.00219
| epoch 9 | 1500/1782 batches | train_acc 0.955 train_loss 0.00223
---------------------------------------------------------------------
| epoch 9 | time:11.78s | valid_acc 0.919 valid_loss 0.004
---------------------------------------------------------------------
| epoch 10 |  500/1782 batches | train_acc 0.955 train_loss 0.00226
| epoch 10 | 1000/1782 batches | train_acc 0.954 train_loss 0.00223
| epoch 10 | 1500/1782 batches | train_acc 0.955 train_loss 0.00221
---------------------------------------------------------------------
| epoch 10 | time:11.82s | valid_acc 0.919 valid_loss 0.004
---------------------------------------------------------------------
2. Evaluate the model with the test dataset
print('Checking the results of the test dataset.')
test_acc, test_loss = evaluate(test_dataloader, model, criterion)
print('test accuracy {:8.3f}'.format(test_acc))
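As a final check, a hedged sketch of single-headline inference; the class-name mapping follows the AG_NEWS convention, and the sample headline is made up:

ag_news_label = {1: "World", 2: "Sports", 3: "Business", 4: "Sci/Tech"}

def predict(text, pipeline):
    with torch.no_grad():
        text = torch.tensor(pipeline(text), dtype=torch.int64).to(device)
        offsets = torch.tensor([0]).to(device)
        output = model(text, offsets)
        return output.argmax(1).item() + 1  # undo the shift applied by label_pipeline

headline = "NVIDIA unveils a new GPU architecture for AI workloads"
print("This is %s news" % ag_news_label[predict(headline, text_pipeline)])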

IV. Takeaways

       This week I additionally installed the portalocker library, downloaded the AG_NEWS dataset, and built the TextclassificationModel: the text is first embedded, the per-sentence token embeddings are then mean-aggregated, and the pooled vector is passed through a linear layer, which completes the text-classification task. A few issues that came up during training were resolved effectively.
