---
license: mit
language:
- zho
- eng
- fra
- spa
- por
- deu
- ita
- rus
- jpn
- kor
- vie
- tha
- ara
base_model:
- Qwen/Qwen2.5-7B
---

# Classify data by task type, e.g. "sentiment analysis", "text classification", "translation", "summarization", "math QA", ...
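
This model tags each record of a JSON dataset with a task-type label. Judging from `prepare_text` and `process_json` below, the expected input is a JSON array of records whose `conversations` lists use ShareGPT-style `from`/`value` keys; note that the `from` values are mapped directly to chat roles, so they must already be `user`/`assistant`. A minimal illustrative input (the values here are made up):

```json
[
  {
    "conversations": [
      {"from": "user", "value": "请把这句话翻译成英文:今天天气很好。"},
      {"from": "assistant", "value": "The weather is nice today."}
    ]
  }
]
```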

```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import json
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from tqdm import tqdm
from loguru import logger

model_name = "Laurie/Qwen2.5-7b-data-classification"

# Load the model and tokenizer; padding_side is set to "left" because
# decoder-only models must be left-padded for batched inference.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
    # attn_implementation="flash_attention_2"
)
tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")
```
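
Each conversation is wrapped between a fixed system prompt and a closing user turn that asks the model for its verdict, then rendered with the tokenizer's chat template: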

```python
# Conversation template: fixed system prompt plus a trailing user query.
# System: "You are a data-classification expert; judge the category of the conversation."
system_message = [{"role": "system", "content": "你是一个数据分类专家,请根据对话内容判断其所属的类别。"}]
# User: "Now output your verdict:"
last_query = [{"role": "user", "content": "现在请输出你的判断结果:"}]

def prepare_text(messages: list[dict]) -> str:
    """
    Convert the "from"/"value" keys in `messages` to "role"/"content"
    and build the full conversation text.
    """
    messages = [{"role": item["from"], "content": item["value"]} for item in messages]
    messages = system_message + messages + last_query
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    return text
```
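
For reference, Qwen2.5 uses a ChatML-style chat template, so the rendered prompt looks roughly like this (schematic; the exact layout comes from the model's own template):

```
<|im_start|>system
你是一个数据分类专家,请根据对话内容判断其所属的类别。<|im_end|>
<|im_start|>user
...first user turn...<|im_end|>
<|im_start|>assistant
...assistant turn...<|im_end|>
<|im_start|>user
现在请输出你的判断结果:<|im_end|>
<|im_start|>assistant
```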

```python
def generate_task_types_batch(messages_batch: list[list[dict]]) -> list[str]:
    """
    Run batched generation over a list of conversations and return
    the assistant's answer for each one.
    """
    # Render every conversation into a full prompt string.
    texts = [prepare_text(messages) for messages in messages_batch]

    # Batch-encode with padding so the prompts can be stacked into one tensor.
    model_inputs = tokenizer(
        texts,
        return_tensors="pt",
        padding=True,
        truncation=True
    ).to(model.device)

    with torch.no_grad():
        generated_ids = model.generate(
            **model_inputs,
            max_new_tokens=32,
            eos_token_id=[151643, 151645],  # Qwen2.5 <|endoftext|> and <|im_end|>
            pad_token_id=151643,
            do_sample=True,
            repetition_penalty=1.05,
            temperature=0.7,
            top_p=0.8,
            top_k=20
        )

    # Drop the prompt tokens so only the newly generated answer is decoded.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    task_types = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return task_types
```
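
The sampling settings (temperature 0.7, top_p 0.8, top_k 20, repetition_penalty 1.05) match Qwen2.5-Instruct's recommended generation config; set `do_sample=False` if you want deterministic labels. The driver below reads the input JSON, labels it in batches, and writes the result back: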

```python
def process_json(json_path: str, save_path: str, batch_size: int = 8):
    """
    Read a JSON file, run batched classification over its records,
    and write the labeled data back to disk.
    """
    with open(json_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    # Process in batches; tune batch_size to whatever fits in GPU memory.
    for i in tqdm(range(0, len(data), batch_size)):
        batch = data[i : i + batch_size]
        conversations_batch = [item["conversations"] for item in batch]
        task_types = generate_task_types_batch(conversations_batch)
        for item, answer in zip(batch, task_types):
            item["task_type"] = answer

    with open(save_path, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
    logger.info(f"Processed {len(data)} records, saved to {save_path}")


if __name__ == "__main__":
    json_path = "./qwen_bench_300k.json"
    save_path = "./qwen_bench_300k_cls.json"
    process_json(json_path, save_path, batch_size=16)
```
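
Each record in the output file keeps its original fields and gains a `task_type` string. Continuing the hypothetical input above, the output would look like:

```json
[
  {
    "conversations": [
      {"from": "user", "value": "请把这句话翻译成英文:今天天气很好。"},
      {"from": "assistant", "value": "The weather is nice today."}
    ],
    "task_type": "翻译"
  }
]
```

The label "翻译" ("translation") is illustrative; the actual labels are whatever taxonomy the model was fine-tuned on.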