
Seq2seq + Attention Machine Translation

This article is an internal post from the 🔗365天深度学习训练营 (365-Day Deep Learning Training Camp).

Original author: K同学啊

from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

1. Building the Language Class

SOS_token = 0
EOS_token = 1

# Language class for convenient operations on the corpus
class Lang:
    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1
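As a quick sanity check, here is a minimal usage sketch (the demo name and sample sentence are mine, not part of the tutorial) showing how Lang assigns indices, with 0 and 1 reserved for SOS/EOS:

# Illustrative only: build a tiny vocabulary from one made-up sentence
demo = Lang("demo")
demo.addSentence("i am cold")
print(demo.word2index)  # {'i': 2, 'am': 3, 'cold': 4} — indices 0/1 are SOS/EOS
print(demo.n_words)     # 5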

2. Text Processing Functions

def unicodeToAscii(s):
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
    )

# Lowercase, strip accents, and separate out punctuation and non-letter symbols
def normalizeString(s):
    s = unicodeToAscii(s.lower().strip())
    s = re.sub(r"([.!?])", r" \1", s)
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
    return s
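A small illustration of the combined effect (the sample string is my own): accents are stripped, sentence-final punctuation is split off, and everything else collapses to single spaces.

# Illustrative only: accents removed, punctuation separated, spaces normalized
print(normalizeString("Elle est très fière!"))  # -> "elle est tres fiere !"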

3. Corpus Reading Functions

def readLangs(lang1, lang2, reverse=False):
    print("Reading lines...")

    # Read the file line by line
    lines = open('%s-%s.txt' % (lang1, lang2), encoding='utf-8'). \
        read().strip().split('\n')

    # Split each line into a pair: [language-A text, language-B text]
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]

    # Create the Lang instances, reversing the language order if requested
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)

    return input_lang, output_lang, pairs

MAX_LENGTH = 10  # Maximum sentence length

eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s ",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)

def filterPair(p):
    return len(p[0].split(' ')) < MAX_LENGTH and \
        len(p[1].split(' ')) < MAX_LENGTH and \
        p[1].startswith(eng_prefixes)

def filterPairs(pairs):
    # Keep only the pairs whose English side starts with one of eng_prefixes
    return [pair for pair in pairs if filterPair(pair)]

def prepareData(lang1, lang2, reverse=False):
    # Read the data from the file
    input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
    print("Read %s sentence pairs" % len(pairs))

    # Filter the corpus by the conditions above
    pairs = filterPairs(pairs[:])
    print("Trimmed to %s sentence pairs" % len(pairs))

    print("Counting words...")
    # Register each sentence with the corresponding language class
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])

    # Print vocabulary statistics
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs

input_lang, output_lang, pairs = prepareData('eng', 'fra', True)
print(random.choice(pairs))
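To see what the filter keeps, a hand-made example (both pairs are illustrative; after reverse=True, p[0] is French and p[1] English):

# Illustrative pairs: kept only if both sides are short and the English
# side starts with one of eng_prefixes
print(filterPair(["je suis froid .", "i am cold ."]))         # True
print(filterPair(["il fait froid .", "it is cold today ."]))  # False: no matching prefix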

4. Encoder

class EncoderRNN(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        # Embed one token and run a single GRU step
        embedded = self.embedding(input).view(1, 1, -1)
        output = embedded
        output, hidden = self.gru(output, hidden)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
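A minimal shape check, as a sketch (the variable names are illustrative, and it assumes input_lang has already been built): each call processes one token and returns a (1, 1, hidden_size) output and hidden state.

# Sketch: one encoder step on a single token
enc = EncoderRNN(input_lang.n_words, 256).to(device)
hidden = enc.initHidden()
token = torch.tensor([[SOS_token]], device=device)  # one token index, shape (1, 1)
out, hidden = enc(token, hidden)
print(out.shape, hidden.shape)  # torch.Size([1, 1, 256]) torch.Size([1, 1, 256])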

5. Decoder with an Attention Mechanism

class AttnDecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        # Attention weights over the (zero-padded) encoder outputs
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        # Combine the embedding with the attention context, then run the GRU
        output = torch.cat((embedded[0], attn_applied[0]), -1)
        output = self.attn_combine(output).unsqueeze(0)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
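Since self.attn maps the concatenated embedding and hidden state (2 * hidden_size) to max_length scores, the softmaxed weights always have one entry per padded encoder position. A single-step shape check, as a sketch with illustrative names (assumes output_lang has been built):

# Sketch: one decoder step against zero-padded encoder outputs
dec = AttnDecoderRNN(256, output_lang.n_words).to(device)
dec_hidden = dec.initHidden()
enc_outputs = torch.zeros(MAX_LENGTH, 256, device=device)
dec_input = torch.tensor([[SOS_token]], device=device)
out, dec_hidden, attn = dec(dec_input, dec_hidden, enc_outputs)
print(out.shape, attn.shape)  # torch.Size([1, output_size]) torch.Size([1, 10])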

6. Training

# Data preprocessing
# Digitize the text: look up the vocabulary index of each word
def indexesFromSentence(lang, sentence):
    return [lang.word2index[word] for word in sentence.split(' ')]

# Convert the indexed text into a tensor
def tensorFromSentence(lang, sentence):
    indexes = indexesFromSentence(lang, sentence)
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)

# Take a text pair and return the preprocessed tensors
def tensorsFromPair(pair):
    input_tensor = tensorFromSentence(input_lang, pair[0])
    target_tensor = tensorFromSentence(output_lang, pair[1])
    return (input_tensor, target_tensor)

# Training function
teacher_forcing_ratio = 0.5

def train(input_tensor, target_tensor, encoder, decoder,
          encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    encoder_hidden = encoder.initHidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
    loss = 0

    # Encoding phase
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]

    # Decoding phase
    decoder_input = torch.tensor([[SOS_token]], device=device)
    decoder_hidden = encoder_hidden

    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    if use_teacher_forcing:
        for di in range(target_length):
            decoder_output, decoder_hidden, _ = decoder(
                decoder_input, decoder_hidden, encoder_outputs)  # pass encoder_outputs
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing
    else:
        for di in range(target_length):
            decoder_output, decoder_hidden, _ = decoder(
                decoder_input, decoder_hidden, encoder_outputs)  # pass encoder_outputs
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()
            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length

import time
import math

def asMinutes(s):
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def timeSince(since, percent):
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))

def trainIters(encoder, decoder, n_iters, print_every=1000,
               plot_every=100, learning_rate=0.01):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0   # Reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)

    # Randomly sample n_iters pairs from `pairs` as the training set
    training_pairs = [tensorsFromPair(random.choice(pairs)) for i in range(n_iters)]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train(input_tensor, target_tensor, encoder,
                     decoder, encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    return plot_losses

def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    with torch.no_grad():
        input_tensor = tensorFromSentence(input_lang, sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS
        decoder_hidden = encoder_hidden

        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(output_lang.index2word[topi.item()])
            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]

def evaluateRandomly(encoder, decoder, n=5):
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')
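Before launching a long run, it can help to sanity-check the preprocessing on one pair; a small sketch (the exact shapes depend on the sampled sentences):

# Sketch: the tensors are (seq_len + 1, 1) column vectors, the +1 being EOS
pair = random.choice(pairs)
inp, tgt = tensorsFromPair(pair)
print(pair)
print(inp.shape, tgt.shape)  # e.g. torch.Size([5, 1]) torch.Size([6, 1])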

7. Training and Evaluation

hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)
plot_losses = trainIters(encoder1, attn_decoder1, 10000, print_every=5000)

 

evaluateRandomly(encoder1, attn_decoder1)
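Each sampled pair prints the source sentence after ">", the reference translation after "=", and the model's prediction after "<".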

 

8. Loss Curve

 

# 1. Loss curve
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")  # suppress warning messages
# plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly
plt.rcParams['figure.dpi'] = 100  # resolution

epochs_range = range(len(plot_losses))

plt.figure(figsize=(8, 3))
plt.subplot(1, 1, 1)
plt.plot(epochs_range, plot_losses, label='Training Loss')
plt.legend(loc='upper right')
plt.title('Training Loss')
plt.show()

9. Visualizing Attention

# 2. Visualizing attention
output_words, attentions = evaluate(encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())

import matplotlib.ticker as ticker

def showAttention(input_sentence, output_words, attentions):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)

    # Label the axes with the input tokens (x) and output tokens (y)
    ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90)
    ax.set_yticklabels([''] + output_words)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()

def evaluateAndShowAttention(input_sentence):
    output_words, attentions = evaluate(encoder1, attn_decoder1, input_sentence)
    print('input =', input_sentence)
    print('output =', ' '.join(output_words))
    showAttention(input_sentence, output_words, attentions)

evaluateAndShowAttention('elle a cinq ans de moins que moi .')
evaluateAndShowAttention('elle est trop petit .')
evaluateAndShowAttention('je ne crains pas de mourir .')
evaluateAndShowAttention('c est un jeune directeur plein de talent .')

 

 
