mirror of
https://github.com/LC044/WeChatMsg
synced 2024-11-09 09:31:18 +08:00
Added model: qwen2-0.5b
This commit is contained in:
parent
82f40a0dd5
commit
5147a198b6
BIN	MemoAI/img/img3.png	Normal file
Binary file not shown. (Size: 359 KiB)
BIN	MemoAI/img/img4.png	Normal file
Binary file not shown. (Size: 58 KiB)
186	MemoAI/qwen2-0.5b/app.py	Normal file
@@ -0,0 +1,186 @@
import os
import copy
import random
import threading
import subprocess
import gradio as gr
from typing import List, Optional, Tuple, Dict


# remove preinstalled TensorFlow packages, which are not needed here
os.system("pip uninstall -y tensorflow tensorflow-estimator tensorflow-io-gcs-filesystem")
os.environ["LANG"] = "C"
os.environ["LC_ALL"] = "C"

default_system = '你是一个微信聊天机器人'  # system prompt: "You are a WeChat chatbot"

from dashinfer.helper import EngineHelper, ConfigManager

log_lock = threading.Lock()

config_file = "di_config.json"
config = ConfigManager.get_config_from_json(config_file)


def download_model(model_id, revision, source="modelscope"):
    print(f"Downloading model {model_id} (revision: {revision}) from {source}")
    if source == "modelscope":
        from modelscope import snapshot_download
        model_dir = snapshot_download(model_id, revision=revision)
    elif source == "huggingface":
        from huggingface_hub import snapshot_download
        model_dir = snapshot_download(repo_id=model_id)
    else:
        raise ValueError("Unknown source")

    print(f"Save model to path {model_dir}")

    return model_dir


# locate the dashinfer package so its daemon binary can be found
cmd = "pip show dashinfer | grep 'Location' | cut -d ' ' -f 2"
package_location = subprocess.run(cmd,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  shell=True,
                                  text=True)
package_location = package_location.stdout.strip()
os.environ["AS_DAEMON_PATH"] = package_location + "/dashinfer/allspark/bin"
os.environ["AS_NUMA_NUM"] = str(len(config["device_ids"]))
os.environ["AS_NUMA_OFFSET"] = str(config["device_ids"][0])

## download the original model from modelscope
original_model = {
    "source": "modelscope",
    "model_id": config["model_space"] + config["model_name"],
    "revision": "master",
    "model_path": ""
}
original_model["model_path"] = download_model(original_model["model_id"],
                                              original_model["revision"],
                                              original_model["source"])

engine_helper = EngineHelper(config)
engine_helper.verbose = True
engine_helper.init_tokenizer(original_model["model_path"])

## convert huggingface model to dashinfer model
## only one conversion is required
engine_helper.convert_model(original_model["model_path"])

engine_helper.init_engine()
engine_max_batch = engine_helper.engine_config["engine_max_batch"]

###################################################

History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]


class Role:
    USER = 'user'
    SYSTEM = 'system'
    BOT = 'bot'
    ASSISTANT = 'assistant'
    ATTACHMENT = 'attachment'


def clear_session() -> Tuple[str, History]:
    return '', []


def modify_system_session(system: str) -> Tuple[str, str, History]:
    if system is None or len(system) == 0:
        system = default_system
    return system, system, []


def history_to_messages(history: History, system: str) -> Messages:
    messages = [{'role': Role.SYSTEM, 'content': system}]
    for h in history:
        messages.append({'role': Role.USER, 'content': h[0]})
        messages.append({'role': Role.ASSISTANT, 'content': h[1]})
    return messages


def messages_to_history(messages: Messages) -> Tuple[str, History]:
    assert messages[0]['role'] == Role.SYSTEM
    system = messages[0]['content']
    history = []
    for q, r in zip(messages[1::2], messages[2::2]):
        history.append([q['content'], r['content']])
    return system, history


def message_to_prompt(messages: Messages) -> str:
    prompt = ""
    for item in messages:
        im_start, im_end = "<|im_start|>", "<|im_end|>"
        prompt += f"\n{im_start}{item['role']}\n{item['content']}{im_end}"
    prompt += f"\n{im_start}assistant\n"
    return prompt


def model_chat(query: Optional[str], history: Optional[History],
               system: str) -> Tuple[str, History, str]:
    if query is None:
        query = ''
    if history is None:
        history = []

    messages = history_to_messages(history, system)
    messages.append({'role': Role.USER, 'content': query})
    prompt = message_to_prompt(messages)

    gen_cfg = copy.deepcopy(engine_helper.default_gen_cfg)
    gen_cfg["max_length"] = 1024
    gen_cfg["seed"] = random.randint(0, 10000)

    request_list = engine_helper.create_request([prompt], [gen_cfg])

    request = request_list[0]
    gen = engine_helper.process_one_request_stream(request)
    for response in gen:
        role = Role.ASSISTANT
        system, history = messages_to_history(messages + [{'role': role, 'content': response}])
        yield '', history, system

    json_str = engine_helper.convert_request_to_jsonstr(request)
    log_lock.acquire()
    try:
        print(f"{json_str}\n")
    finally:
        log_lock.release()

###################################################

with gr.Blocks() as demo:
    demo_title = "<center>微信的你</center>"  # "The WeChat you"
    gr.Markdown(demo_title)
    with gr.Row():
        with gr.Column(scale=3):
            system_input = gr.Textbox(value=default_system,
                                      lines=1,
                                      label='System')
        with gr.Column(scale=1):
            modify_system = gr.Button("🛠️ Set system prompt and clear history.", scale=2)
        system_state = gr.Textbox(value=default_system, visible=False)
    chatbot = gr.Chatbot(label=config["model_name"])
    textbox = gr.Textbox(lines=2, label='Input')

    with gr.Row():
        clear_history = gr.Button("🧹清除历史记录")  # "Clear history"
        submit = gr.Button("🚀和我聊天!")  # "Chat with me!"

    submit.click(model_chat,
                 inputs=[textbox, chatbot, system_state],
                 outputs=[textbox, chatbot, system_input],
                 concurrency_limit=engine_max_batch)
    clear_history.click(fn=clear_session,
                        inputs=[],
                        outputs=[textbox, chatbot],
                        concurrency_limit=engine_max_batch)
    modify_system.click(fn=modify_system_session,
                        inputs=[system_input],
                        outputs=[system_state, system_input, chatbot],
                        concurrency_limit=engine_max_batch)

demo.queue(api_open=False).launch(height=800, share=False, server_name="127.0.0.1", server_port=7860)
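
For reference, message_to_prompt above renders the ChatML template that Qwen2 models expect; a quick trace with hypothetical inputs (not part of app.py) shows the shape of the final prompt:

# Hypothetical trace of message_to_prompt:
messages = [
    {'role': 'system', 'content': '你是一个微信聊天机器人'},
    {'role': 'user', 'content': '在干啥呢?'},
]
# message_to_prompt(messages) returns (leading newline included):
#
# <|im_start|>system
# 你是一个微信聊天机器人<|im_end|>
# <|im_start|>user
# 在干啥呢?<|im_end|>
# <|im_start|>assistant
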
52	MemoAI/qwen2-0.5b/di_config.json	Normal file
@@ -0,0 +1,52 @@
{
    "model_space": "YOUR-NAME-SPACE",
    "model_name": "YOUR-MODEL-NAME",
    "model_type": "Qwen_v20",
    "model_path": "./dashinfer_models/",
    "data_type": "float32",
    "device_type": "CPU",
    "device_ids": [
        0
    ],
    "multinode_mode": false,
    "engine_config": {
        "engine_max_length": 1024,
        "engine_max_batch": 2,
        "do_profiling": false,
        "num_threads": 0,
        "matmul_precision": "medium"
    },
    "generation_config": {
        "temperature": 0.7,
        "early_stopping": true,
        "top_k": 20,
        "top_p": 0.8,
        "repetition_penalty": 1.05,
        "presence_penalty": 0.0,
        "min_length": 0,
        "max_length": 8192,
        "no_repeat_ngram_size": 0,
        "eos_token_id": 151643,
        "seed": 1234,
        "stop_words_ids": [
            [151643],
            [151644],
            [151645]
        ]
    },
    "convert_config": {
        "do_dynamic_quantize_convert": false
    },
    "quantization_config": {
        "activation_type": "bfloat16",
        "weight_type": "uint8",
        "SubChannel": true,
        "GroupSize": 512
    }
}
1	MemoAI/qwen2-0.5b/requirements.txt	Normal file
@@ -0,0 +1 @@
dashinfer
419	MemoAI/qwen2-0.5b/train.ipynb	Normal file
@@ -0,0 +1,419 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "de53995b-32ed-4722-8cac-ba104c8efacb",
   "metadata": {},
   "source": [
    "# Import dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "52fac949-4150-4091-b0c3-2968ab5e385c",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "from datasets import Dataset\n",
    "import pandas as pd\n",
    "from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer, GenerationConfig"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e098d9eb",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "df = pd.read_json('train.json')\n",
    "ds = Dataset.from_pandas(df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ac92d42-efae-49b1-a00e-ccaa75b98938",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "ds[:3]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "380d9f69-9e98-4d2d-b044-1d608a057b0b",
   "metadata": {},
   "source": [
    "# Download the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "312d6439-1932-44a3-b592-9adbdb7ab702",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "from modelscope import snapshot_download\n",
    "model_dir = snapshot_download('qwen/Qwen2-0.5B-Instruct', cache_dir='qwen2-0.5b/')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "51d05e5d-d14e-4f03-92be-9a9677d41918",
   "metadata": {},
   "source": [
    "# Process the dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "74ee5a67-2e55-4974-b90e-cbf492de500a",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained('./qwen2-0.5b/qwen/Qwen2-0___5B-Instruct/', use_fast=False, trust_remote_code=True)\n",
    "tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2503a5fa-9621-4495-9035-8e7ef6525691",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "def process_func(example):\n",
    "    MAX_LENGTH = 384  # the tokenizer may split one Chinese character into several tokens, so allow a generous max length to keep samples intact\n",
    "    input_ids, attention_mask, labels = [], [], []\n",
    "    instruction = tokenizer(f\"<|im_start|>system\\n现在你需要扮演我,和我的微信好友快乐聊天!<|im_end|>\\n<|im_start|>user\\n{example['instruction'] + example['input']}<|im_end|>\\n<|im_start|>assistant\\n\", add_special_tokens=False)\n",
    "    response = tokenizer(f\"{example['output']}\", add_special_tokens=False)\n",
    "    input_ids = instruction[\"input_ids\"] + response[\"input_ids\"] + [tokenizer.pad_token_id]\n",
    "    attention_mask = instruction[\"attention_mask\"] + response[\"attention_mask\"] + [1]  # the eos token must be attended to as well, so append 1\n",
    "    labels = [-100] * len(instruction[\"input_ids\"]) + response[\"input_ids\"] + [tokenizer.pad_token_id]\n",
    "    if len(input_ids) > MAX_LENGTH:  # truncate\n",
    "        input_ids = input_ids[:MAX_LENGTH]\n",
    "        attention_mask = attention_mask[:MAX_LENGTH]\n",
    "        labels = labels[:MAX_LENGTH]\n",
    "    return {\n",
    "        \"input_ids\": input_ids,\n",
    "        \"attention_mask\": attention_mask,\n",
    "        \"labels\": labels\n",
    "    }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "84f870d6-73a9-4b0f-8abf-687b32224ad8",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "tokenized_id = ds.map(process_func, remove_columns=ds.column_names)\n",
    "tokenized_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1f7e15a0-4d9a-4935-9861-00cc472654b1",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "tokenizer.decode(tokenized_id[0]['input_ids'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "97f16f66-324a-454f-8cc3-ef23b100ecff",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "tokenizer.decode(list(filter(lambda x: x != -100, tokenized_id[1][\"labels\"])))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "424823a8-ed0d-4309-83c8-3f6b1cdf274c",
   "metadata": {},
   "source": [
    "# Create the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "170764e5-d899-4ef4-8c53-36f6dec0d198",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "\n",
    "model = AutoModelForCausalLM.from_pretrained('./qwen2-0.5b/qwen/Qwen2-0___5B-Instruct', device_map=\"auto\", torch_dtype=torch.bfloat16)\n",
    "model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2323eac7-37d5-4288-8bc5-79fac7113402",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "model.enable_input_require_grads()  # required when gradient checkpointing is enabled"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f808b05c-f2cb-48cf-a80d-0c42be6051c7",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "model.dtype"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "13d71257-3c1c-4303-8ff8-af161ebc2cf1",
   "metadata": {},
   "source": [
    "# LoRA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2d304ae2-ab60-4080-a80d-19cac2e3ade3",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "from peft import LoraConfig, TaskType, get_peft_model\n",
    "\n",
    "config = LoraConfig(\n",
    "    task_type=TaskType.CAUSAL_LM,\n",
    "    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n",
    "    inference_mode=False,  # training mode\n",
    "    r=8,  # LoRA rank\n",
    "    lora_alpha=32,  # LoRA alpha; see the LoRA paper for its exact role\n",
    "    lora_dropout=0.1  # dropout ratio\n",
    ")\n",
    "config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2c2489c5-eaab-4e1f-b06a-c3f914b4bf8e",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "model = get_peft_model(model, config)\n",
    "config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ebf5482b-fab9-4eb3-ad88-c116def4be12",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "model.print_trainable_parameters()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ca055683-837f-4865-9c57-9164ba60c00f",
   "metadata": {},
   "source": [
    "# Configure training arguments"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7e76bbff-15fd-4995-a61d-8364dc5e9ea0",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "args = TrainingArguments(\n",
    "    output_dir=\"./output/\",\n",
    "    per_device_train_batch_size=4,\n",
    "    gradient_accumulation_steps=4,\n",
    "    logging_steps=10,\n",
    "    num_train_epochs=3,\n",
    "    learning_rate=1e-4,\n",
    "    gradient_checkpointing=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f142cb9c-ad99-48e6-ba86-6df198f9ed96",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=args,\n",
    "    train_dataset=tokenized_id,\n",
    "    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aec9bc36-b297-45af-99e1-d4c4d82be081",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8abb2327-458e-4e96-ac98-2141b5b97c8e",
   "metadata": {},
   "source": [
    "# Merge and load the model. Your paths may differ; set lora_path to the last checkpoint under output/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd2a415a-a9ad-49ea-877f-243558a83bfc",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "import torch\n",
    "from peft import PeftModel\n",
    "\n",
    "mode_path = './qwen2-0.5b/qwen/Qwen2-0___5B-Instruct'\n",
    "lora_path = './output/checkpoint-10'  # change this\n",
    "# load the tokenizer\n",
    "tokenizer = AutoTokenizer.from_pretrained(mode_path, trust_remote_code=True)\n",
    "\n",
    "# load the base model\n",
    "model = AutoModelForCausalLM.from_pretrained(mode_path, device_map=\"auto\", torch_dtype=torch.bfloat16, trust_remote_code=True).eval()\n",
    "\n",
    "# load the LoRA weights and fold them into the base model, so the saved copy is self-contained\n",
    "model = PeftModel.from_pretrained(model, model_id=lora_path)\n",
    "model = model.merge_and_unload()\n",
    "\n",
    "prompt = \"在干啥呢?\"\n",
    "# use the system role here to match the template used during training\n",
    "inputs = tokenizer.apply_chat_template([{\"role\": \"system\", \"content\": \"现在你需要扮演我,和我的微信好友快乐聊天!\"}, {\"role\": \"user\", \"content\": prompt}],\n",
    "                                       add_generation_prompt=True,\n",
    "                                       tokenize=True,\n",
    "                                       return_tensors=\"pt\",\n",
    "                                       return_dict=True\n",
    "                                       ).to('cuda')\n",
    "\n",
    "gen_kwargs = {\"max_length\": 2500, \"do_sample\": True, \"top_k\": 1}\n",
    "with torch.no_grad():\n",
    "    outputs = model.generate(**inputs, **gen_kwargs)\n",
    "    outputs = outputs[:, inputs['input_ids'].shape[1]:]\n",
    "    print(tokenizer.decode(outputs[0], skip_special_tokens=True))\n",
    "\n",
    "# save the merged model and tokenizer\n",
    "save_directory = './model_merge'\n",
    "\n",
    "model.save_pretrained(save_directory)\n",
    "tokenizer.save_pretrained(save_directory)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b67e5e0a-2566-4483-9bce-92b5be8b4b34",
   "metadata": {},
   "source": [
    "# Then upload the model to ModelScope and move on to the next step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dafe4f24-af5c-407e-abbc-eefd9d44cb15",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
22	doc/ai-qwen/readme.md	Normal file
@@ -0,0 +1,22 @@
**Given that** this repository's original training model, ChatGLM3-6B, is hard to deploy on low-spec machines, I built on the original work using the tiny Qwen2-0.5B-Instruct model (which still meets the needs of a WeChat chat AI), taking it from training all the way to deployment in a free ModelScope Studio. It is fairly simple, **and the whole pipeline is free.** The workflow:

***
# Step 1: [create a free ModelScope GPU instance](https://www.modelscope.cn/my/mynotebook/preset)

![GPU](/MemoAI/img/img3.png)

# Start training

**You can follow my training [template](/MemoAI/qwen2-0.5b/train.ipynb)**

<br>
Upload train.json and click through the notebook step by step; the expected shape of that file is sketched below.<br>
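
Judging from `process_func` in train.ipynb, each record in train.json carries `instruction`, `input`, and `output` fields, roughly like this (the contents here are invented for illustration):

```json
[
  {
    "instruction": "在干啥呢?",
    "input": "",
    "output": "在写代码,你呢?"
  }
]
```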
Finally, upload the model to ModelScope; a minimal upload sketch follows the screenshot.<br />

![Training](/MemoAI/img/img4.png)
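
A minimal upload sketch using the ModelScope SDK (the access token and model id below are placeholders; double-check `HubApi`'s current signature against the SDK docs):

```python
from modelscope.hub.api import HubApi

api = HubApi()
api.login("YOUR-MODELSCOPE-ACCESS-TOKEN")  # SDK token from your ModelScope account settings
api.push_model(
    model_id="YOUR-NAME-SPACE/YOUR-MODEL-NAME",  # must match the two fields in di_config.json
    model_dir="./model_merge"  # the merged model saved at the end of train.ipynb
)
```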
# Deploy to a ModelScope Studio

Edit the following two fields in di_config.json (a filled-in example follows the quotes below):
> "model_space": "YOUR-NAME-SPACE"
|
||||||
|
|
||||||
|
> "model_name": "YOUR-MODEL-NAME"
|
||||||
|
|
||||||
|
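Since app.py builds the download id as `config["model_space"] + config["model_name"]` by plain string concatenation, the namespace value should include the trailing `/`. A hypothetical filled-in example (names invented):

```json
"model_space": "sanbei101/",
"model_name": "qwen2-0.5b-wechat"
```
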
**Then upload the three files under MemoAI/qwen2-0.5b to the Studio and click Deploy!**

**Finally, check out the result:** [live demo](https://www.modelscope.cn/studios/sanbei101/qwen-haoran/summary)