diff --git a/MemoAI/img/img3.png b/MemoAI/img/img3.png
new file mode 100644
index 0000000..21fb117
Binary files /dev/null and b/MemoAI/img/img3.png differ
diff --git a/MemoAI/img/img4.png b/MemoAI/img/img4.png
new file mode 100644
index 0000000..2c217ce
Binary files /dev/null and b/MemoAI/img/img4.png differ
diff --git a/MemoAI/qwen2-0.5b/app.py b/MemoAI/qwen2-0.5b/app.py
new file mode 100644
index 0000000..e3727db
--- /dev/null
+++ b/MemoAI/qwen2-0.5b/app.py
@@ -0,0 +1,186 @@
+import os
+import copy
+import random
+import threading
+import subprocess
+import gradio as gr
+from typing import List, Optional, Tuple, Dict
+
+
+os.system("pip uninstall -y tensorflow tensorflow-estimator tensorflow-io-gcs-filesystem")
+os.environ["LANG"] = "C"
+os.environ["LC_ALL"] = "C"
+
+default_system = '你是一个微信聊天机器人'
+
+from dashinfer.helper import EngineHelper, ConfigManager
+
+log_lock = threading.Lock()
+
+config_file = "di_config.json"
+config = ConfigManager.get_config_from_json(config_file)
+
+def download_model(model_id, revision, source="modelscope"):
+ print(f"Downloading model {model_id} (revision: {revision}) from {source}")
+ if source == "modelscope":
+ from modelscope import snapshot_download
+ model_dir = snapshot_download(model_id, revision=revision)
+ elif source == "huggingface":
+ from huggingface_hub import snapshot_download
+ model_dir = snapshot_download(repo_id=model_id)
+ else:
+ raise ValueError("Unknown source")
+
+ print(f"Save model to path {model_dir}")
+
+ return model_dir
+
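+# Locate the installed dashinfer package so the AllSpark daemon path and NUMA settings can point at it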
+cmd = f"pip show dashinfer | grep 'Location' | cut -d ' ' -f 2"
+package_location = subprocess.run(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ text=True)
+package_location = package_location.stdout.strip()
+os.environ["AS_DAEMON_PATH"] = package_location + "/dashinfer/allspark/bin"
+os.environ["AS_NUMA_NUM"] = str(len(config["device_ids"]))
+os.environ["AS_NUMA_OFFSET"] = str(config["device_ids"][0])
+
+## download original model
+## download model from modelscope
+original_model = {
+ "source": "modelscope",
+ "model_id": config["model_space"] + config["model_name"],
+ "revision": "master",
+ "model_path": ""
+}
+original_model["model_path"] = download_model(original_model["model_id"],
+ original_model["revision"],
+ original_model["source"])
+
+engine_helper = EngineHelper(config)
+engine_helper.verbose = True
+engine_helper.init_tokenizer(original_model["model_path"])
+
+## convert huggingface model to dashinfer model
+## only one conversion is required
+engine_helper.convert_model(original_model["model_path"])
+
+engine_helper.init_engine()
+engine_max_batch = engine_helper.engine_config["engine_max_batch"]
+
+###################################################
+
+History = List[Tuple[str, str]]
+Messages = List[Dict[str, str]]
+
+
+class Role:
+ USER = 'user'
+ SYSTEM = 'system'
+ BOT = 'bot'
+ ASSISTANT = 'assistant'
+ ATTACHMENT = 'attachment'
+
+
+def clear_session() -> Tuple[str, History]:
+    return '', []
+
+
+def modify_system_session(system: str) -> Tuple[str, str, History]:
+    if system is None or len(system) == 0:
+        system = default_system
+    return system, system, []
+
+
+def history_to_messages(history: History, system: str) -> Messages:
+ messages = [{'role': Role.SYSTEM, 'content': system}]
+ for h in history:
+ messages.append({'role': Role.USER, 'content': h[0]})
+ messages.append({'role': Role.ASSISTANT, 'content': h[1]})
+ return messages
+
+
+def messages_to_history(messages: Messages) -> Tuple[str, History]:
+ assert messages[0]['role'] == Role.SYSTEM
+ system = messages[0]['content']
+ history = []
+ for q, r in zip(messages[1::2], messages[2::2]):
+ history.append([q['content'], r['content']])
+ return system, history
+
+
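+# Assemble a ChatML-style prompt (<|im_start|>role ... <|im_end|>) that ends with an open assistant turn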
+def message_to_prompt(messages: Messages) -> str:
+ prompt = ""
+ for item in messages:
+ im_start, im_end = "<|im_start|>", "<|im_end|>"
+ prompt += f"\n{im_start}{item['role']}\n{item['content']}{im_end}"
+ prompt += f"\n{im_start}assistant\n"
+ return prompt
+
+
+def model_chat(query: Optional[str], history: Optional[History],
+ system: str) -> Tuple[str, str, History]:
+ if query is None:
+ query = ''
+ if history is None:
+ history = []
+
+ messages = history_to_messages(history, system)
+ messages.append({'role': Role.USER, 'content': query})
+ prompt = message_to_prompt(messages)
+
+ gen_cfg = copy.deepcopy(engine_helper.default_gen_cfg)
+ gen_cfg["max_length"] = 1024
+ gen_cfg["seed"] = random.randint(0, 10000)
+
+ request_list = engine_helper.create_request([prompt], [gen_cfg])
+
+ request = request_list[0]
+ gen = engine_helper.process_one_request_stream(request)
+ for response in gen:
+ role = Role.ASSISTANT
+ system, history = messages_to_history(messages + [{'role': role, 'content': response}])
+ yield '', history, system
+
+ json_str = engine_helper.convert_request_to_jsonstr(request)
+ log_lock.acquire()
+ try:
+ print(f"{json_str}\n")
+ finally:
+ log_lock.release()
+
+###################################################
+
+with gr.Blocks() as demo:
+    demo_title = "微信的你"
+ gr.Markdown(demo_title)
+ with gr.Row():
+ with gr.Column(scale=3):
+ system_input = gr.Textbox(value=default_system,
+ lines=1,
+ label='System')
+ with gr.Column(scale=1):
+ modify_system = gr.Button("🛠️ Set system prompt and clear history.", scale=2)
+ system_state = gr.Textbox(value=default_system, visible=False)
+ chatbot = gr.Chatbot(label=config["model_name"])
+ textbox = gr.Textbox(lines=2, label='Input')
+
+ with gr.Row():
+ clear_history = gr.Button("🧹清除历史记录")
+        submit = gr.Button("🚀和我聊天!")
+
+    submit.click(model_chat,
+ inputs=[textbox, chatbot, system_state],
+ outputs=[textbox, chatbot, system_input],
+ concurrency_limit=engine_max_batch)
+ clear_history.click(fn=clear_session,
+ inputs=[],
+ outputs=[textbox, chatbot],
+ concurrency_limit=engine_max_batch)
+ modify_system.click(fn=modify_system_session,
+ inputs=[system_input],
+ outputs=[system_state, system_input, chatbot],
+ concurrency_limit=engine_max_batch)
+
+demo.queue(api_open=False).launch(height=800, share=False, server_name="127.0.0.1", server_port=7860)
diff --git a/MemoAI/qwen2-0.5b/di_config.json b/MemoAI/qwen2-0.5b/di_config.json
new file mode 100644
index 0000000..b6d5b37
--- /dev/null
+++ b/MemoAI/qwen2-0.5b/di_config.json
@@ -0,0 +1,52 @@
+{
+ "model_space": "YOUR-NAME-SPACE",
+ "model_name": "YOUR-MODEL-NAME",
+ "model_type": "Qwen_v20",
+ "model_path": "./dashinfer_models/",
+ "data_type": "float32",
+ "device_type": "CPU",
+ "device_ids": [
+ 0
+ ],
+ "multinode_mode": false,
+ "engine_config": {
+ "engine_max_length": 1024,
+ "engine_max_batch": 2,
+ "do_profiling": false,
+ "num_threads": 0,
+ "matmul_precision": "medium"
+ },
+ "generation_config": {
+ "temperature": 0.7,
+ "early_stopping": true,
+ "top_k": 20,
+ "top_p": 0.8,
+ "repetition_penalty": 1.05,
+ "presence_penalty": 0.0,
+ "min_length": 0,
+ "max_length": 8192,
+ "no_repeat_ngram_size": 0,
+ "eos_token_id": 151643,
+ "seed": 1234,
+ "stop_words_ids": [
+ [
+ 151643
+ ],
+ [
+ 151644
+ ],
+ [
+ 151645
+ ]
+ ]
+ },
+ "convert_config": {
+ "do_dynamic_quantize_convert": false
+ },
+ "quantization_config": {
+ "activation_type": "bfloat16",
+ "weight_type": "uint8",
+ "SubChannel": true,
+ "GroupSize": 512
+ }
+}
\ No newline at end of file
diff --git a/MemoAI/qwen2-0.5b/requirements.txt b/MemoAI/qwen2-0.5b/requirements.txt
new file mode 100644
index 0000000..37d1d6e
--- /dev/null
+++ b/MemoAI/qwen2-0.5b/requirements.txt
@@ -0,0 +1 @@
+dashinfer
diff --git a/MemoAI/qwen2-0.5b/train.ipynb b/MemoAI/qwen2-0.5b/train.ipynb
new file mode 100644
index 0000000..bc832d1
--- /dev/null
+++ b/MemoAI/qwen2-0.5b/train.ipynb
@@ -0,0 +1,419 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "de53995b-32ed-4722-8cac-ba104c8efacb",
+ "metadata": {},
+ "source": [
+ "# 导入环境"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "52fac949-4150-4091-b0c3-2968ab5e385c",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "from datasets import Dataset\n",
+ "import pandas as pd\n",
+ "from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer, GenerationConfig"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e098d9eb",
+ "metadata": {
+ "ExecutionIndicator": {
+ "show": true
+ },
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "df = pd.read_json('train.json')\n",
+ "ds = Dataset.from_pandas(df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8ac92d42-efae-49b1-a00e-ccaa75b98938",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "ds[:3]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "380d9f69-9e98-4d2d-b044-1d608a057b0b",
+ "metadata": {},
+ "source": [
+ "# 下载模型"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "312d6439-1932-44a3-b592-9adbdb7ab702",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "from modelscope import snapshot_download\n",
+ "model_dir = snapshot_download('qwen/Qwen2-0.5B-Instruct', cache_dir='qwen2-0.5b/')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "51d05e5d-d14e-4f03-92be-9a9677d41918",
+ "metadata": {},
+ "source": [
+ "# 处理数据集"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "74ee5a67-2e55-4974-b90e-cbf492de500a",
+ "metadata": {
+ "ExecutionIndicator": {
+ "show": true
+ },
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "tokenizer = AutoTokenizer.from_pretrained('./qwen2-0.5b/qwen/Qwen2-0___5B-Instruct/', use_fast=False, trust_remote_code=True)\n",
+ "tokenizer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2503a5fa-9621-4495-9035-8e7ef6525691",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "def process_func(example):\n",
+ " MAX_LENGTH = 384 # Llama分词器会将一个中文字切分为多个token,因此需要放开一些最大长度,保证数据的完整性\n",
+ " input_ids, attention_mask, labels = [], [], []\n",
+ " instruction = tokenizer(f\"<|im_start|>system\\n现在你需要扮演我,和我的微信好友快乐聊天!<|im_end|>\\n<|im_start|>user\\n{example['instruction'] + example['input']}<|im_end|>\\n<|im_start|>assistant\\n\", add_special_tokens=False)\n",
+ " response = tokenizer(f\"{example['output']}\", add_special_tokens=False)\n",
+ " input_ids = instruction[\"input_ids\"] + response[\"input_ids\"] + [tokenizer.pad_token_id]\n",
+ " attention_mask = instruction[\"attention_mask\"] + response[\"attention_mask\"] + [1] # 因为eos token咱们也是要关注的所以 补充为1\n",
+ " labels = [-100] * len(instruction[\"input_ids\"]) + response[\"input_ids\"] + [tokenizer.pad_token_id] \n",
+ " if len(input_ids) > MAX_LENGTH: # 做一个截断\n",
+ " input_ids = input_ids[:MAX_LENGTH]\n",
+ " attention_mask = attention_mask[:MAX_LENGTH]\n",
+ " labels = labels[:MAX_LENGTH]\n",
+ " return {\n",
+ " \"input_ids\": input_ids,\n",
+ " \"attention_mask\": attention_mask,\n",
+ " \"labels\": labels\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "84f870d6-73a9-4b0f-8abf-687b32224ad8",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "tokenized_id = ds.map(process_func, remove_columns=ds.column_names)\n",
+ "tokenized_id"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1f7e15a0-4d9a-4935-9861-00cc472654b1",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "tokenizer.decode(tokenized_id[0]['input_ids'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "97f16f66-324a-454f-8cc3-ef23b100ecff",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "tokenizer.decode(list(filter(lambda x: x != -100, tokenized_id[1][\"labels\"])))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "424823a8-ed0d-4309-83c8-3f6b1cdf274c",
+ "metadata": {},
+ "source": [
+ "# 创建模型"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "170764e5-d899-4ef4-8c53-36f6dec0d198",
+ "metadata": {
+ "ExecutionIndicator": {
+ "show": true
+ },
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "\n",
+ "model = AutoModelForCausalLM.from_pretrained('./qwen2-0.5b/qwen/Qwen2-0___5B-Instruct', device_map=\"auto\",torch_dtype=torch.bfloat16)\n",
+ "model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2323eac7-37d5-4288-8bc5-79fac7113402",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "model.enable_input_require_grads()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f808b05c-f2cb-48cf-a80d-0c42be6051c7",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "model.dtype"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "13d71257-3c1c-4303-8ff8-af161ebc2cf1",
+ "metadata": {},
+ "source": [
+ "# lora "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2d304ae2-ab60-4080-a80d-19cac2e3ade3",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "from peft import LoraConfig, TaskType, get_peft_model\n",
+ "\n",
+ "config = LoraConfig(\n",
+ " task_type=TaskType.CAUSAL_LM, \n",
+ " target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n",
+ " inference_mode=False, # 训练模式\n",
+ " r=8, # Lora 秩\n",
+ " lora_alpha=32, # Lora alaph,具体作用参见 Lora 原理\n",
+ " lora_dropout=0.1# Dropout 比例\n",
+ ")\n",
+ "config"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2c2489c5-eaab-4e1f-b06a-c3f914b4bf8e",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "model = get_peft_model(model, config)\n",
+ "config"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ebf5482b-fab9-4eb3-ad88-c116def4be12",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "model.print_trainable_parameters()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ca055683-837f-4865-9c57-9164ba60c00f",
+ "metadata": {},
+ "source": [
+ "# 配置训练参数"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7e76bbff-15fd-4995-a61d-8364dc5e9ea0",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "args = TrainingArguments(\n",
+ " output_dir=\"./output/\",\n",
+ " per_device_train_batch_size=4,\n",
+ " gradient_accumulation_steps=4,\n",
+ " logging_steps=10,\n",
+ " num_train_epochs=3,\n",
+ " learning_rate=1e-4,\n",
+ " gradient_checkpointing=True\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f142cb9c-ad99-48e6-ba86-6df198f9ed96",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "trainer = Trainer(\n",
+ " model=model,\n",
+ " args=args,\n",
+ " train_dataset=tokenized_id,\n",
+ " data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "aec9bc36-b297-45af-99e1-d4c4d82be081",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "trainer.train()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8abb2327-458e-4e96-ac98-2141b5b97c8e",
+ "metadata": {},
+ "source": [
+ "# 合并加载模型,这里的路径可能有点不太一样,lora_path填写为Output的最后的checkpoint"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bd2a415a-a9ad-49ea-877f-243558a83bfc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+ "import torch\n",
+ "from peft import PeftModel\n",
+ "\n",
+ "mode_path = './qwen2-0.5b/qwen/Qwen2-0___5B-Instruct'\n",
+ "lora_path = './output/checkpoint-10' #修改这里\n",
+ "# 加载tokenizer\n",
+ "tokenizer = AutoTokenizer.from_pretrained(mode_path, trust_remote_code=True)\n",
+ "\n",
+ "# 加载模型\n",
+ "model = AutoModelForCausalLM.from_pretrained(mode_path, device_map=\"auto\",torch_dtype=torch.bfloat16, trust_remote_code=True).eval()\n",
+ "\n",
+ "# 加载lora权重\n",
+ "model = PeftModel.from_pretrained(model, model_id=lora_path)\n",
+ "\n",
+ "prompt = \"在干啥呢?\"\n",
+ "inputs = tokenizer.apply_chat_template([{\"role\": \"user\", \"content\": \"现在你需要扮演我,和我的微信好友快乐聊天!\"},{\"role\": \"user\", \"content\": prompt}],\n",
+ " add_generation_prompt=True,\n",
+ " tokenize=True,\n",
+ " return_tensors=\"pt\",\n",
+ " return_dict=True\n",
+ " ).to('cuda')\n",
+ "\n",
+ "\n",
+ "gen_kwargs = {\"max_length\": 2500, \"do_sample\": True, \"top_k\": 1}\n",
+ "with torch.no_grad():\n",
+ " outputs = model.generate(**inputs, **gen_kwargs)\n",
+ " outputs = outputs[:, inputs['input_ids'].shape[1]:]\n",
+ " print(tokenizer.decode(outputs[0], skip_special_tokens=True))\n",
+ "\n",
+ "# 保存合并后的模型和tokenizer\n",
+ "save_directory = './model_merge'\n",
+ "\n",
+ "# 保存模型\n",
+ "\n",
+ "model.save_pretrained(save_directory)\n",
+ "\n",
+ "# 保存tokenizer\n",
+ "tokenizer.save_pretrained(save_directory)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b67e5e0a-2566-4483-9bce-92b5be8b4b34",
+ "metadata": {},
+ "source": [
+ "# 然后把模型上传到modelscope开始下一步"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dafe4f24-af5c-407e-abbc-eefd9d44cb15",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.14"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/MemoAI/qwen2-0.5b/train.md b/MemoAI/qwen2-0.5b/train.md
new file mode 100644
index 0000000..e891f8e
--- /dev/null
+++ b/MemoAI/qwen2-0.5b/train.md
@@ -0,0 +1,199 @@
+# Qwen2-0.5B-Instruct WeChat AI Fine-tuning
+
+This tutorial also ships a [notebook](./train.ipynb) file so you can follow along more easily.
+
+## Downloading the model
+
+Use the `snapshot_download` function from modelscope to download the model; the first argument is the model name, and `cache_dir` is the download path.
+
+
+```python
+import torch
+from modelscope import snapshot_download, AutoModel, AutoTokenizer
+import os
+model_dir = snapshot_download('qwen/Qwen2-0.5B-Instruct', cache_dir='qwen2-0.5b/', revision='master')
+```
+
+## Environment setup
+
+With the basic environment configured and the model available locally, you still need to install a few third-party libraries, which you can do with the following commands:
+
+```bash
+python -m pip install --upgrade pip
+# switch to a PyPI mirror to speed up package installation
+pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
+
+pip install modelscope==1.9.5
+pip install "transformers>=4.39.0"
+pip install streamlit==1.24.0
+pip install sentencepiece==0.1.99
+pip install accelerate==0.27
+pip install transformers_stream_generator==0.0.4
+pip install datasets==2.18.0
+pip install peft==0.10.0
+
+```
+
+Fine-tuning an LLM usually means instruction fine-tuning. Instruction fine-tuning means the training data looks like this:
+
+```json
+{
+ "instruction":"以下是你的好友在和你聊天,你需要和他聊天",
+ "input":"吃了吗?",
+ "output":"还在食堂"
+}
+```
+
+Here, `instruction` is the user instruction that tells the model what task to perform; `input` is the user input required to complete that task; and `output` is the response the model is expected to produce.
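+
+As a quick sanity check, these records can be loaded into a `datasets.Dataset` the same way the notebook does (a minimal sketch, assuming a `train.json` file in this format sits in the working directory):
+
+```python
+from datasets import Dataset
+import pandas as pd
+
+# read the list of {"instruction", "input", "output"} records
+df = pd.read_json('train.json')
+ds = Dataset.from_pandas(df)
+
+print(ds[0])  # inspect one sample before tokenization
+```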
+
+
+
+
+## Formatting the data
+
+The data used for `Lora` training has to be formatted and tokenized before it is fed to the model. If you are familiar with the usual `Pytorch` training loop, you will know that the input text is encoded into input_ids and the target text into `labels`, both of which end up as token-id vectors. We first define a preprocessing function that, for each sample, encodes its input and output text and returns the encoded dictionary:
+
+```python
+def process_func(example):
+    MAX_LENGTH = 384  # the tokenizer may split one Chinese character into several tokens, so allow some extra max length to keep samples intact
+    input_ids, attention_mask, labels = [], [], []
+    instruction = tokenizer(f"<|im_start|>system\n现在你需要扮演我,和我的微信好友快乐聊天!<|im_end|>\n<|im_start|>user\n{example['instruction'] + example['input']}<|im_end|>\n<|im_start|>assistant\n", add_special_tokens=False)  # add_special_tokens=False: do not prepend special tokens
+    response = tokenizer(f"{example['output']}", add_special_tokens=False)
+    input_ids = instruction["input_ids"] + response["input_ids"] + [tokenizer.pad_token_id]
+    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]  # the eos token also needs attention, so append 1
+    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"] + [tokenizer.pad_token_id]
+    if len(input_ids) > MAX_LENGTH:  # truncate overly long samples
+ input_ids = input_ids[:MAX_LENGTH]
+ attention_mask = attention_mask[:MAX_LENGTH]
+ labels = labels[:MAX_LENGTH]
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "labels": labels
+ }
+```
+
+`Qwen2` uses the following `Prompt Template` (ChatML) format; a short sketch of reproducing it with the tokenizer follows the example:
+
+```text
+<|im_start|>system
+You are a helpful assistant.<|im_end|>
+<|im_start|>user
+你是谁?<|im_end|>
+<|im_start|>assistant
+我是一个有用的助手。<|im_end|>
+```
+
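+The same string can be produced by the tokenizer itself via `apply_chat_template`; a minimal sketch (assuming the local tokenizer directory used elsewhere in this tutorial):
+
+```python
+from transformers import AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained('./qwen2-0.5b/qwen/Qwen2-0___5B-Instruct/', trust_remote_code=True)
+
+messages = [
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "你是谁?"},
+    {"role": "assistant", "content": "我是一个有用的助手。"},
+]
+
+# tokenize=False returns the raw ChatML string shown above;
+# pass add_generation_prompt=True at inference time to end with an open assistant turn
+print(tokenizer.apply_chat_template(messages, tokenize=False))
+```
+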
+## Loading the tokenizer and the half-precision model
+
+The model is loaded in half precision; if your GPU is reasonably recent, you can load it as `torch.bfloat16`. For custom models you must set `trust_remote_code=True`.
+
+```python
+tokenizer = AutoTokenizer.from_pretrained('./qwen2-0.5b/qwen/Qwen2-0___5B-Instruct/', use_fast=False, trust_remote_code=True)
+
+model = AutoModelForCausalLM.from_pretrained('./qwen2-0.5b/qwen/Qwen2-0___5B-Instruct/', device_map="auto",torch_dtype=torch.bfloat16)
+```
+
+## Defining the LoraConfig
+
+The `LoraConfig` class exposes many parameters, but only a handful matter here. We go through them briefly; if you are curious, read the source directly.
+
+- `task_type`: the task type
+- `target_modules`: names of the layers to train, mainly the `attention` projection layers; the names differ from model to model, and you can pass a list, a single string, or a regular expression.
+- `r`: the `lora` rank; see the LoRA paper for details
+- `lora_alpha`: the `Lora alpha` scaling parameter; see the LoRA paper for what it does
+
+So what is the `Lora` scaling factor? It is not `r` (the rank): the scaling is `lora_alpha/r`, which with this `LoraConfig` gives 32 / 8 = 4.
+
+```python
+from peft import LoraConfig, TaskType, get_peft_model
+
+config = LoraConfig(
+    task_type=TaskType.CAUSAL_LM,
+    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
+    inference_mode=False,  # training mode
+    r=8,  # LoRA rank
+    lora_alpha=32,  # LoRA alpha (scaling = lora_alpha / r)
+    lora_dropout=0.1  # dropout ratio
+)
+
+model = get_peft_model(model, config)  # wrap the base model with the LoRA adapters
+```
+
+## Customizing the TrainingArguments
+
+The source of `TrainingArguments` documents every parameter in detail, so feel free to explore it yourself. Here are the ones we commonly set:
+
+- `output_dir`: where the model checkpoints are written
+- `per_device_train_batch_size`: the per-device `batch_size`, as the name suggests
+- `gradient_accumulation_steps`: gradient accumulation; if GPU memory is tight, use a smaller `batch_size` and increase this instead.
+- `logging_steps`: how many steps between `log` outputs
+- `num_train_epochs`: the number of training `epoch`s, as the name suggests
+- `gradient_checkpointing`: gradient checkpointing; once enabled, the model must call `model.enable_input_require_grads()`. You can look into how this works on your own; we won't go into it here.
+
+```python
+args = TrainingArguments(
+ output_dir="./output",
+ per_device_train_batch_size=4,
+ gradient_accumulation_steps=4,
+ logging_steps=10,
+ num_train_epochs=3,
+ save_steps=100,
+ learning_rate=1e-4,
+ save_on_each_node=True,
+ gradient_checkpointing=True
+)
+```
+
+## Training with the Trainer
+
+```python
+trainer = Trainer(
+ model=model,
+ args=args,
+ train_dataset=tokenized_id,
+ data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
+)
+trainer.train()
+```
+
+## Loading the LoRA weights for inference
+
+After training, you can load the `lora` weights and run inference like this:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+from peft import PeftModel
+
+mode_path = './qwen2-0.5b/qwen/Qwen2-0___5B-Instruct/'
+lora_path = 'lora_path'
+
+# load the tokenizer
+tokenizer = AutoTokenizer.from_pretrained(mode_path)
+
+# load the base model
+model = AutoModelForCausalLM.from_pretrained(mode_path, device_map="auto", torch_dtype=torch.bfloat16)
+
+# load the LoRA weights
+model = PeftModel.from_pretrained(model, model_id=lora_path)
+
+prompt = "你是谁?"
+messages = [
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": prompt}
+]
+
+text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+model_inputs = tokenizer([text], return_tensors="pt").to('cuda')
+
+generated_ids = model.generate(
+ model_inputs.input_ids,
+ max_new_tokens=512
+)
+generated_ids = [
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+]
+
+response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+print(response)
+```
+
diff --git a/doc/ai-qwen/readme.md b/doc/ai-qwen/readme.md
new file mode 100644
index 0000000..ae990a1
--- /dev/null
+++ b/doc/ai-qwen/readme.md
@@ -0,0 +1,21 @@
+**Given that** the training model this repository originally used, `Chatllm3-6b`, is hard to deploy on low-end machines, I switched to the tiny `Qwen2-0.5b-Instruct` model and took it from training all the way to deployment in a free
+ModelScope Studio (创空间). It is fairly simple, `and the whole process is free`. Here is the workflow:
+***
+# Step 1: [Create a free ModelScope GPU instance](https://www.modelscope.cn/my/mynotebook/preset)
+
+
+# Start training
+**You can follow the training [template](/MemoAI/qwen2-0.5b/train.md)**
+
+Upload `train.json` and click through the notebook step by step.
+Finally, upload the trained model to `ModelScope`, as sketched below.
+
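+One way to upload the merged model from the notebook environment is the ModelScope hub API (a minimal sketch; the token, namespace, and model name are placeholders you must replace, and the call is worth double-checking against the current modelscope SDK docs):
+
+```python
+from modelscope.hub.api import HubApi
+
+api = HubApi()
+api.login('YOUR_MODELSCOPE_ACCESS_TOKEN')  # SDK token from your ModelScope account settings
+api.push_model(
+    model_id='YOUR-NAME-SPACE/YOUR-MODEL-NAME',  # namespace/model-name on ModelScope
+    model_dir='./model_merge'                    # the merged model directory saved by the notebook
+)
+```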
+
+# Deploy to the ModelScope Studio (创空间)
+Edit the following two fields in `di_config.json`, for example as shown in the sketch below:
+`model_space: YOUR-NAME-SPACE`
+`model_name: YOUR-MODEL-NAME`
+
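+A minimal sketch of patching those two fields in place (assuming `di_config.json` is in the current directory; note that `app.py` builds the model id as `model_space + model_name`, so include the `/` separator in one of them):
+
+```python
+import json
+
+with open("di_config.json", "r", encoding="utf-8") as f:
+    cfg = json.load(f)
+
+# placeholders: substitute your own ModelScope namespace and model name
+cfg["model_space"] = "your-namespace/"
+cfg["model_name"] = "your-model-name"
+
+with open("di_config.json", "w", encoding="utf-8") as f:
+    json.dump(cfg, f, ensure_ascii=False, indent=4)
+```
+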
+**Then upload the three files from MemoAI/qwen2-0.5b (`di_config.json`, `app.py`, `requirements.txt`) to the Studio and click Deploy!**
+
+**Finally, check out the finished demo:** [demo](https://www.modelscope.cn/studios/sanbei101/qwen-haoran/summary)
\ No newline at end of file