《动手学深度学习》8.2文本预处理—代码分析
这部分代码也让我头晕,想必大家也有很多问题,边写边查,边注释了~
文本预处理的思路如下:
1、读入文本
2、数据清洗(去掉不要的符号、空格等)
3、token化
4、vocab词表化
1、读入文本 + 数据清洗
import collections
import re
from d2l import torch as d2l#d2l's built-in dataset registry: register the URL and checksum first
d2l.DATA_HUB['time machine'] = (d2l.DATA_URL + 'timemachine.txt','090b5e7e70c295757f55df93cb0a180b9691891a')#maps dataset id -> (download url, sha-1 checksum); the file is later read line by line, with non-letters removed, whitespace stripped, and text lowercased
def read_time_machine():
    """Load the time machine dataset as a list of cleaned text lines.

    Each raw line has every run of non-alphabetic characters replaced by a
    single space, is stripped of leading/trailing whitespace, and lowercased.
    Returns a list of strings, one per line of the source file.
    """
    with open(d2l.download('time machine'), 'r') as f:
        raw_lines = f.readlines()
    cleaned = []
    for raw in raw_lines:
        cleaned.append(re.sub('[^A-Za-z]+', ' ', raw).strip().lower())
    return cleaned


lines = read_time_machine()
# Peek at the cleaned corpus: the first line and the total number of lines.
print(lines[0])
print(len(lines))
2、token化
#这里处理为了list of lists
#分别是按单词和按字符处理
def tokenize(lines, token='word'):
    """Split text lines into a list of token lists.

    token='word' splits each line on whitespace; token='char' splits each
    line into single characters. For an unknown token type an error message
    is printed and None is returned.
    """
    if token == 'char':
        return [list(text) for text in lines]
    if token == 'word':
        return [text.split() for text in lines]
    print(f'错误,未知token类型:{token}')
# Tokenize by word, then inspect the line count and the first line's tokens.
tokens = tokenize(lines, 'word')
print(len(tokens))
print(tokens[0])
3、vocab化
#首先实现利用Counter(内置词典),来对单词频率进行统计
def count_corpus(tokens):
    """Count token frequencies with collections.Counter.

    Accepts either a flat list of tokens or a list of token lists (one list
    per text line); a 2-D input (or an empty one) is flattened first.
    """
    needs_flatten = len(tokens) == 0 or isinstance(tokens[0], list)
    if needs_flatten:
        flat = []
        for line in tokens:
            flat.extend(line)
        tokens = flat
    return collections.Counter(tokens)
class Vocab:
    """Vocabulary mapping between tokens and integer indices.

    Tokens are assigned indices in descending frequency order, after the
    unknown token 'unk' (always index 0) and any reserved tokens.

    NOTE(review): the unknown token here is the plain string 'unk', not the
    '<unk>' used by the d2l reference implementation — confirm which one
    downstream code expects before changing it.
    """

    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        """Build the vocabulary.

        tokens          -- flat list or list of lists of tokens (may be None)
        min_freq        -- drop tokens occurring fewer than this many times
        reserved_tokens -- special tokens registered up front; they need not
                           appear in the corpus (e.g. padding markers)
        """
        tokens = [] if tokens is None else tokens
        reserved_tokens = [] if reserved_tokens is None else reserved_tokens
        # Frequency table sorted by count, most frequent first.
        counter = count_corpus(tokens)
        self._tokens_freqs = sorted(
            counter.items(), key=lambda item: item[1], reverse=True)
        # Two mutually inverse lookup tables; 'unk' always gets index 0,
        # followed by the reserved tokens.
        self.idx_to_token = ['unk'] + reserved_tokens
        self.token_to_idx = {
            tok: pos for pos, tok in enumerate(self.idx_to_token)}
        # Append corpus tokens; the list is frequency-sorted, so the first
        # token below min_freq means every remaining one is too.
        for tok, freq in self._tokens_freqs:
            if freq < min_freq:
                break
            if tok in self.token_to_idx:
                continue
            self.token_to_idx[tok] = len(self.idx_to_token)
            self.idx_to_token.append(tok)

    def __len__(self):
        """Vocabulary size, so plain len(vocab) works."""
        return len(self.idx_to_token)

    @property
    def unk(self):
        """Index of the unknown token (always 0)."""
        return 0

    @property
    def token_freqs(self):
        """(token, count) pairs sorted by descending frequency."""
        return self._tokens_freqs

    def __getitem__(self, tokens):
        """Map a token — or a (nested) list/tuple of tokens — to indices.

        Unknown tokens map to index 0. Enables the vocab[tokens] syntax.
        """
        if isinstance(tokens, (list, tuple)):
            # Recurse so arbitrarily nested token lists are handled.
            return [self.__getitem__(tok) for tok in tokens]
        return self.token_to_idx.get(tokens, self.unk)

    def to_tokens(self, indices):
        """Map an index — or a list/tuple of indices — back to token strings."""
        if isinstance(indices, (list, tuple)):
            return [self.idx_to_token[i] for i in indices]
        return self.idx_to_token[indices]
# Build the vocabulary from the word tokens and spot-check the mapping:
# print the first ten (token, index) pairs, then show two lines of tokens
# next to their index sequences.
vocab = Vocab(tokens)
print(list(vocab.token_to_idx.items())[:10])
for line_no in [0, 10]:
    print(tokens[line_no])
    print(vocab[tokens[line_no]])
#整合所有功能
#corpus 是所有token的映射idx,是为了输入进模型
#vocab 是一个转换查询表,相当于建立了映射关系
#所以corpus利用vocab进行转换
#vocab是翻译官
def load_corpus_time_machine(max_tokens=-1):
    """Return (corpus, vocab) for the time machine dataset.

    corpus is the flat, character-level token-id sequence fed to a model;
    vocab is the lookup table translating between characters and ids.
    If max_tokens > 0 the corpus is truncated to that many ids.
    """
    text_lines = read_time_machine()
    char_tokens = tokenize(text_lines, 'char')
    vocab = Vocab(char_tokens)
    # Flatten to one id stream: the model consumes a 1-D index sequence.
    corpus = []
    for line in char_tokens:
        corpus.extend(vocab[ch] for ch in line)
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab


corpus, vocab = load_corpus_time_machine()
# Sanity check: total number of character ids and the vocabulary size.
print(len(corpus))
print(len(vocab))