Getting Started with LLM Fine-Tuning (Transformers + PyTorch)
Goal

Input: 你是谁? ("Who are you?")

Output: the custom name we trained into the model (here, 黄登峰).

Training

For reasonable performance, download a small-parameter model; an ordinary machine can run it.
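If you want to check your hardware before starting, here is a quick optional sketch (not part of the original walkthrough) that prints the available GPU; the 1.5B model also runs on CPU, just slowly:

# check_device.py — optional sanity check
import torch

if torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)
    print(f"GPU: {props.name}, {props.total_memory / 1024**3:.1f} GiB VRAM")
else:
    print("No CUDA GPU detected; training will run on CPU and be slow.")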

Download the model

# Option 1: download with the ModelScope (魔搭) SDK
# down_deepseek.py
from modelscope import snapshot_download

model_dir = snapshot_download('deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B')

# Option 2: git lfs
# Requires git-lfs (Git Large File Storage) to be installed first
# Browse online: https://www.modelscope.cn/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
git lfs install
git clone https://www.modelscope.cn/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B.git

Train the model

# finetune_deepseek.py
from datasets import Dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)

# Load the model and tokenizer
model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

# Defensive check (not in the original script): some tokenizers ship without
# a pad token, which padding="max_length" below requires
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Prepare the training data
train_data = [
    {"question": "你是谁?", "answer": "我是黄登峰。"},
    {"question": "你的名字是什么?", "answer": "黄登峰"},
    {"question": "你是做什么的?", "answer": "我是深圳一家公司打工的牛马程序员。"},
    # Add more question-answer pairs here
]

test_data = [
    {"question": "你的名字是什么?", "answer": "我的名字是黄登峰。"}
]

def format_instruction(example):
    """Format a question-answer pair as a single prompt string."""
    return f"Human: {example['question']}\n\nAssistant: {example['answer']}"

# Convert the data format
train_formatted_data = [{"text": format_instruction(item)} for item in train_data]
test_formatted_data = [{"text": format_instruction(item)} for item in test_data]

train_dataset = Dataset.from_list(train_formatted_data)
test_dataset = Dataset.from_list(test_formatted_data)

# Data preprocessing function
def preprocess_function(examples):
    return tokenizer(examples["text"], truncation=True, padding="max_length", max_length=512)

# Preprocess the datasets
train_tokenized_dataset = train_dataset.map(
    preprocess_function,
    batched=True,
    remove_columns=train_dataset.column_names
)
test_tokenized_dataset = test_dataset.map(
    preprocess_function,
    batched=True,
    remove_columns=test_dataset.column_names
)

output_dir = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B_CUSTOM"

# Training arguments
training_args = TrainingArguments(
    output_dir=output_dir,
    num_train_epochs=3,
    per_device_train_batch_size=4,
    save_steps=100,
    save_total_limit=2,
    learning_rate=2e-5,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=10,
)

# Create the trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_tokenized_dataset,
    eval_dataset=test_tokenized_dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False),
)

# Start training
trainer.train()

# Save the model
trainer.save_model()
# Save the tokenizer
tokenizer.save_pretrained(output_dir)
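A note on the collator used above: DataCollatorForLanguageModeling with mlm=False builds causal-language-modeling batches, copying input_ids into labels (padding positions become -100 and are ignored by the loss). A minimal sketch to inspect one collated batch, illustrative only, reusing the same tokenizer:

# collator_demo.py — illustrative sketch, not part of the training script
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained(
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", trust_remote_code=True
)
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

batch = collator([tokenizer("Human: 你是谁?\n\nAssistant: 我是黄登峰。")])

# labels mirror input_ids, so the model is trained on plain next-token prediction
print(batch["input_ids"][0][:8])
print(batch["labels"][0][:8])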

Model format

The fine-tuned model is saved in the Hugging Face format, which vLLM can load directly; Ollama and llama.cpp expect the GGUF format by default, so those need a conversion step.
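Since the checkpoint is already in Hugging Face format, vLLM can serve it without conversion. A minimal sketch, assuming vllm is installed (pip install vllm) and the output_dir from the training script:

# vllm_infer.py — sketch; path and sampling parameters are assumptions
from vllm import LLM, SamplingParams

llm = LLM(model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B_CUSTOM")
sampling = SamplingParams(temperature=0.7, max_tokens=64)

outputs = llm.generate(["Human: 你是谁?\n\nAssistant:"], sampling)
print(outputs[0].outputs[0].text)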

# Conversion uses the convert_hf_to_gguf.py script from the llama.cpp repository
git clone https://github.com/ggerganov/llama.cpp.git
pip install -r llama.cpp/requirements.txt

# Without quantization: preserves the model's full quality
python llama.cpp/convert_hf_to_gguf.py ./DeepSeek-R1-Distill-Qwen-1.5B --outtype f16 --verbose --outfile DeepSeek-R1-Distill-Qwen-1.5B.gguf

# With quantization (faster, but lossy): run this command instead
python llama.cpp/convert_hf_to_gguf.py ./DeepSeek-R1-Distill-Qwen-1.5B --outtype q8_0 --verbose --outfile DeepSeek-R1-Distill-Qwen-1.5B.gguf

Validation

# test_model.py
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

def generate_response(prompt, model, tokenizer, max_length=512):
    # Format the input the same way as during training
    formatted_prompt = f"Human: {prompt}\n\nAssistant:"

    # Encode the input
    inputs = tokenizer(formatted_prompt, return_tensors="pt", padding=True, truncation=True)

    # Generate an answer
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # added: pass the mask explicitly to avoid padding ambiguity
            max_length=max_length,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode the output
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the Assistant part of the reply
    response = response.split("Assistant:")[-1].strip()
    return response

def main():
    # Load the fine-tuned model and tokenizer
    model_path = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B_CUSTOM"
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)

    # Prepare test questions
    test_questions = [
        "你是谁?",
        "你的名字是什么?",
        "你是做什么的?",
    ]

    # Test the model's answers
    print("Testing model responses:")
    print("-" * 50)
    for question in test_questions:
        print(f"Question: {question}")
        response = generate_response(question, model, tokenizer)
        print(f"Answer: {response}")
        print("-" * 50)

if __name__ == "__main__":
    main()
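The script above exercises the Hugging Face checkpoint; the GGUF file produced by the conversion step can be smoke-tested from Python as well, via llama-cpp-python (a sketch, assuming pip install llama-cpp-python; Ollama and the llama.cpp CLI consume the same file):

# gguf_infer.py — sketch; the file name matches the conversion step above
from llama_cpp import Llama

llm = Llama(model_path="DeepSeek-R1-Distill-Qwen-1.5B.gguf", n_ctx=512)

out = llm("Human: 你是谁?\n\nAssistant:", max_tokens=64, temperature=0.7)
print(out["choices"][0]["text"])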
