Deploying Multiple LoRA Adapters with vLLM
1. Introduction
I have been fine-tuning large models recently and ended up with many fine-tuned variants that I wanted to deploy. Merging each adapter into the base model and loading a full copy of the model for every variant turned out to be too expensive. I wondered whether I could load the base model just once and attach each set of fine-tuned weights on top of it, which would be cheaper and more convenient. Following that idea, I found that vLLM supports exactly this, so I am recording the approach here.
2. Code
This experiment uses LoRA adapters fine-tuned from Qwen2.5-72B-Instruct.
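One assumption worth making explicit, since the original post does not state it: vLLM loads LoRA adapters saved in the Hugging Face PEFT format, so each directory passed to LoRARequest later in the script should contain the adapter config and weights. A quick sanity check:

import os

# Expected layout of each adapter directory (standard PEFT output):
#   output/lora/qwen2.5-72B-v1/
#     adapter_config.json        # LoRA config: rank, alpha, target modules, ...
#     adapter_model.safetensors  # adapter weights (adapter_model.bin in older peft)
adapter_dir = "output/lora/qwen2.5-72B-v1/"
assert os.path.isfile(os.path.join(adapter_dir, "adapter_config.json")), \
    "not a PEFT adapter directory"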
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest
from transformers import AutoTokenizer


def read_prompt(prompt_path):
    # Read the system prompt from a file
    with open(prompt_path, 'r') as f:
        prompt = f.read()
    return prompt


def create_single_chat_conver(prompt, user_input):
    # Build a single-turn conversation: system prompt plus one user message
    conver = []
    conver.append({"role": "system", "content": prompt})
    conver.append({"role": "user", "content": user_input})
    return conver


def create_multi_chat_conver(conver, assi, user_input):
    # Extend a conversation with the last assistant reply and a new user turn
    conver.append({"role": "assistant", "content": assi})
    conver.append({"role": "user", "content": user_input})
    return conver


def create_model(base_model_path="./model/Qwen2.5-72B-Instruct"):
    # Load the base model once, with LoRA support enabled
    llm = LLM(model=base_model_path, enable_lora=True, max_model_len=2048, dtype="float16")
    tokenizer = AutoTokenizer.from_pretrained(base_model_path)
    return llm, tokenizer


def generate_text_lora(messages, tokenizer, llm, lora_request):
    # Generate with a specific LoRA adapter applied on top of the base model
    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = [input_text]
    # Sampling parameters for generation
    sampling_params = SamplingParams(temperature=0.7, top_p=0.8, top_k=50, max_tokens=2048)
    outputs = llm.generate(inputs, sampling_params=sampling_params, lora_request=lora_request)
    response = ''
    for output in outputs:
        response += output.outputs[0].text
    return response


def generate_text(messages, tokenizer, llm):
    # Generate with the base model only (no LoRA adapter)
    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = [input_text]
    sampling_params = SamplingParams(temperature=0.7, top_p=0.8, top_k=50, max_tokens=2048)
    outputs = llm.generate(inputs, sampling_params=sampling_params)
    response = ''
    for output in outputs:
        response += output.outputs[0].text
    return response


if __name__ == "__main__":
    # Path to the system prompt
    prompt_path = './prompt.pbt'
    # Path to the input text
    data_path = './input_txt.txt'
    # First fine-tuned adapter: (name, unique integer id, local adapter path)
    lora_request1 = LoRARequest("my_adapter_v1", 1, "output/lora/qwen2.5-72B-v1/")
    # Second fine-tuned adapter
    lora_request2 = LoRARequest("my_adapter_v2", 2, "output/lora/qwen2.5-72B-v2/")
    prompt = read_prompt(prompt_path)
    llm, tokenizer = create_model()
    with open(data_path, 'r') as f:
        all_data = f.readlines()
    for data in all_data:
        conver = create_single_chat_conver(prompt, data)
        # Run the same input through both adapters and the plain base model
        response1 = generate_text_lora(conver, tokenizer, llm, lora_request1)
        response2 = generate_text_lora(conver, tokenizer, llm, lora_request2)
        response = generate_text(conver, tokenizer, llm)
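Two practical notes on the engine setup, drawn from vLLM's documented multi-LoRA engine arguments rather than from the original post: max_loras bounds how many different adapters can be active in a single batch, and max_lora_rank must be at least the rank the adapters were trained with (vLLM's default is small, so higher-rank adapters will be rejected at load time). A minimal sketch, assuming the adapters here were trained with rank 64 or less:

from vllm import LLM

# Hedged sketch: engine arguments for multi-LoRA serving; verify the exact
# names and defaults against your installed vLLM version's documentation.
llm = LLM(
    model="./model/Qwen2.5-72B-Instruct",
    enable_lora=True,
    max_loras=2,        # allow both adapters to be active in the same batch
    max_lora_rank=64,   # assumption: adapter rank <= 64
    max_model_len=2048,
    dtype="float16",
)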
3. Summary
The code above is pseudocode and is provided for reference and learning only.
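For completeness, the same multi-adapter setup can also be served online through vLLM's OpenAI-compatible server, where each adapter is registered by name at startup and selected per request via the model field. A minimal sketch, reusing the adapter names and paths assumed above; check the flags against your vLLM version:

# Start the server first (shell command, shown as a comment):
#   vllm serve ./model/Qwen2.5-72B-Instruct --enable-lora \
#       --lora-modules my_adapter_v1=output/lora/qwen2.5-72B-v1/ \
#                      my_adapter_v2=output/lora/qwen2.5-72B-v2/
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
resp = client.chat.completions.create(
    model="my_adapter_v1",  # use "my_adapter_v2" to route to the other adapter
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello"},
    ],
)
print(resp.choices[0].message.content)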