当前位置: 首页 > news >正文

无网络环境下配置并运行 word2vec复现.py

需运行文件

# -*- coding: utf-8 -*-
import torch
import pandas as pd
import jieba
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader,Dataset
from transformers import AutoTokenizer, AutoModel


def get_stop_word():
    """Load the Baidu stop-word list, one word per line."""
    with open("../data/baidu_stopwords.txt", encoding="utf-8") as f:
        return f.read().split("\n")


def read_data(n=3):
    """Read the raw corpus, POS-tag + segment it with jieba, and drop noise.

    Filtering applied per word:
      * POS tags in ``no_t`` (numerals, pronouns, particles, ...) are skipped;
      * words in the module-level ``stop_words`` list are skipped;
      * words occurring fewer than ``n`` times in the whole corpus are
        dropped in a second pass.

    Returns a list of sentences, each a list of kept words.
    Relies on the global ``stop_words`` set up in ``__main__``.
    """
    import jieba.posseg as psg

    all_data = pd.read_csv("../data/数学原始数据.csv", names=["data"], encoding="gbk")
    # FIX: iterating the DataFrame itself yields column names, not rows.
    # The .tolist() line was commented out in the original; it must run.
    all_data = all_data["data"].tolist()

    no_t = ["x", "c", "m", "d", "uj", "r", ""]  # POS tags to discard
    result = []
    word_fre = {}
    for data in all_data:
        words = psg.lcut(data)
        new_word = []
        for word, t in words:
            if t in no_t:
                continue
            if word not in stop_words:
                word_fre[word] = word_fre.get(word, 0) + 1
                new_word.append(word)
        result.append(new_word)

    # Second pass: drop low-frequency words (< n occurrences corpus-wide).
    new_result = []
    for words in result:
        new_word = [word for word in words if word_fre[word] >= n]
        new_result.append(new_word)
    return new_result


def build_data(all_data):
    """Build skip-gram (center, context) pairs.

    For every word, pair it with the words inside a window of the global
    ``n_gram`` positions on each side.  Returns a list of
    ``(center_word, context_word)`` tuples.
    """
    result = []
    for data in all_data:
        for ni, now_word in enumerate(data):
            other_word = data[max(ni - n_gram, 0):ni] + data[ni + 1:ni + 1 + n_gram]
            for o in other_word:
                result.append((now_word, o))
    return result


class MyDataset(Dataset):
    """Dataset of (center, context) word pairs, converted to token ids.

    Uses the module-level ``tokenizer``.  After ``add_word`` has run, every
    corpus word maps to exactly one token id, so ``input_ids[0]`` is the
    word's id.
    """

    def __init__(self, all_data):
        self.all_data = all_data

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, index):
        data = self.all_data[index]
        word1_idx = tokenizer(data[0])["input_ids"][0]
        word2_idx = tokenizer(data[1])["input_ids"][0]
        return word1_idx, word2_idx


class Model(nn.Module):
    """Word2vec skip-gram model whose input embedding is warm-started from a
    pretrained Qwen2.5 embedding table.

    linear1: one-hot -> embedding (the word vectors being trained);
    linear2: embedding -> vocab logits, frozen so only linear1 learns.
    Uses the module-level ``corpus_len`` and ``emb_dim``.
    """

    def __init__(self):
        super().__init__()
        self.base_model = AutoModel.from_pretrained("../model/Qwen2.5-0.5B-Instruct")
        self.linear1 = nn.Linear(corpus_len, emb_dim)
        # Warm-start: copy the pretrained embedding matrix into the columns
        # covering the base vocabulary.  FIX: derive the base vocab size from
        # the embedding table instead of the hard-coded constant 151936.
        base_emb = self.base_model.embed_tokens.weight.data  # (base_vocab, emb_dim)
        self.linear1.weight.data[:, :base_emb.shape[0]] = base_emb.T
        self.linear2 = nn.Linear(emb_dim, corpus_len)
        # Freeze the output projection: only the input embedding is trained.
        self.linear2.weight.requires_grad = False
        self.loss_fun = nn.CrossEntropyLoss()

    def forward(self, batch_w1_index, batch_w2_index):
        """Return the CE loss of predicting context ids from center ids."""
        word1_onehot = torch.zeros(size=[len(batch_w1_index), corpus_len])
        # FIX: the original set word1_onehot[i][batch_w1_index] = 1.0, which
        # marks EVERY id of the batch in EVERY row.  Each row i must mark
        # only its own center-word id batch_w1_index[i].
        word1_onehot[torch.arange(len(batch_w1_index)), batch_w1_index] = 1.0
        h = self.linear1(word1_onehot)
        predict = self.linear2(h)
        # CrossEntropyLoss takes raw logits and integer class targets.
        loss = self.loss_fun(predict, batch_w2_index)
        return loss


def add_word(all_data):
    """Ensure every corpus word is a single token in the global tokenizer.

    Words that the pretrained tokenizer splits into several tokens are added
    as new whole tokens so MyDataset can use input_ids[0] as the word id.
    """
    global tokenizer
    new_data = []
    for i in all_data:
        new_data.extend(i)
    new_data = list(set(new_data))
    for word in new_data:
        t = tokenizer(word)["input_ids"]
        if len(t) != 1:
            tokenizer.add_tokens(word)


if __name__ == "__main__":
    # Hyper-parameters.
    n_gram = 1          # context window half-width
    batch_size = 100
    epoch = 10
    emb_dim = 896       # must match the Qwen2.5-0.5B hidden size
    lr = 0.01
    grad_acc = 1        # gradient-accumulation steps

    stop_words = get_stop_word()
    stop_words = stop_words + ["。", ",", "(", ")"]
    all_data = read_data()
    rel_words = build_data(all_data)

    tokenizer = AutoTokenizer.from_pretrained("../model/Qwen2.5-0.5B-Instruct")
    add_word(all_data)
    corpus_len = len(tokenizer.get_vocab())

    train_dataset = MyDataset(rel_words)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)

    model = Model()
    opt = torch.optim.Adam(model.parameters(), lr=lr)

    for e in range(epoch):
        for batch_idx, (batch_w1_index, batch_w2_index) in tqdm(
            enumerate(train_dataloader, start=1)
        ):
            loss = model(batch_w1_index, batch_w2_index)
            loss.backward()
            # Step the optimizer every grad_acc batches (gradient accumulation).
            if batch_idx % grad_acc == 0:
                opt.step()
                opt.zero_grad()
        # NOTE(review): the original's print placement is ambiguous in the
        # scraped source; printing the last batch loss once per epoch.
        print(loss)

创建和激活虚拟环境(可选)

python3 -m venv word2vec_offline
source word2vec_offline/bin/activate

安装依赖

pip install torch pandas jieba tqdm transformers

1. 下载依赖的离线安装包

在有网络的机器上,执行:

mkdir offline_pkgs
pip download torch pandas jieba tqdm transformers -d offline_pkgs

这样会把所有依赖包(包括依赖的依赖)下载到 offline_pkgs 文件夹。

2. 拷贝依赖和项目文件到无网络环境

  • 拷贝 offline_pkgs 文件夹到无网络环境
  • 拷贝你的 word2vec复现.py 以及所需的 ../data/、../model/ 文件夹

3. 在无网络环境下新建虚拟环境

python3 -m venv venv
source venv/bin/activate

4. 离线安装依赖

进入 offline_pkgs 文件夹,执行:

pip install --no-index --find-links=offline_pkgs torch pandas jieba tqdm transformers

如果有依赖报错,先安装报错的依赖,再装主包。

5. 检查依赖安装

pip list

确认 torch、pandas、jieba、tqdm、transformers 都已安装。

6. 运行你的代码

确保你在虚拟环境中,且数据和模型路径正确:

python word2vec复现.py

相关文章:

  • 大模型系列(四)--- GPT2: Language Models are Unsupervised Multitask Learners​
  • 南京市出台工作方案深化“智改数转网联”,物联网集成商从“困局”到“蓝海”!
  • Vue 项目中长按保存图片功能实现指南
  • Unity_JK框架【4】MonoSystem 和 协程工具类 的剖析与实践
  • Czkawka:跨平台重复文件清理
  • 滑动窗口——无重复字符最长的字串
  • 蓝桥杯国赛备赛——字符串
  • Redis持久化存储介质评估:NFS与Ceph的适用性分析
  • 数据中心 第十五次CCF-CSP计算机软件能力认证
  • 护照阅读器简介
  • Spring MVC Controller 方法的返回类型有哪些?
  • Android Car Input HAL
  • MCP学习
  • C++初阶 —— 类和对象
  • 如何使用UGUI的EventTrigger
  • 南京大学OpenHarmony技术俱乐部正式揭牌 仓颉编程语言引领生态创新
  • 汽车免拆诊断案例|车辆行驶中急加速车身抖动故障排除 2 例
  • 台州智惠自动化签约智橙PLM,让创新持续发生
  • 挑战用豆包教我学Java01天
  • 软件测试需求之测试类型分析
  • 衡水注册公司流程和费用/独立站seo
  • 做电商设计在哪个网站接单/百度seo优化收费标准
  • 怎样对一个网站做seo/怎么做网站优化排名
  • 比较好的做外贸网站/做推广哪个平台好
  • 嘉兴专业定制网站制作企业/新闻稿发布
  • 宁波建设网站哪家好/亿驱动力竞价托管