from transformers import AutoModelForCausalLM, AutoTokenizer
|
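# Path to the locally downloaded openPangu-Embedded-1B checkpoint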
model_local_path = "path_to_openPangu-Embedded-1B" |
|
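# Load the tokenizer from local files only; use_fast=False selects the slow
# tokenizer and trust_remote_code=True allows the model's custom code to run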
tokenizer = AutoTokenizer.from_pretrained(
    model_local_path,
    use_fast=False,
    trust_remote_code=True,
    local_files_only=True
)
|
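# Load the model: torch_dtype="auto" keeps the dtype stored in the checkpoint
# and device_map="auto" places the weights on the available device(s)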
model = AutoModelForCausalLM.from_pretrained(
    model_local_path,
    trust_remote_code=True,
    torch_dtype="auto",
    device_map="auto",
    local_files_only=True
)
|
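# Safety system prompt (Chinese). English translation:
# "You must strictly abide by laws, regulations, and social ethics. When
# generating any content, avoid inappropriate material such as violence,
# pornography, terrorism, or racial and gender discrimination. If the input
# or output shows such tendencies, refuse to answer and issue a warning,
# e.g. if the input contains violent threats or pornographic descriptions,
# return the error message: 'Your input contains inappropriate content and
# cannot be processed.'"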
sys_prompt = "你必须严格遵守法律法规和社会道德规范。" \
             "生成任何内容时,都应避免涉及暴力、色情、恐怖主义、种族歧视、性别歧视等不当内容。" \
             "一旦检测到输入或输出有此类倾向,应拒绝回答并发出警告。例如,如果输入内容包含暴力威胁或色情描述," \
             "应返回错误信息:“您的输入包含不当内容,无法处理。”"
|
prompt = "Give me a short introduction to large language model." |
|
messages = [
    {"role": "system", "content": sys_prompt},
    {"role": "user", "content": prompt}
]
|
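# Render the conversation with the model's chat template, then tokenize it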
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
|
model_inputs = tokenizer([text], return_tensors="pt").to(model.device) |
|
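# Generate; eos_token_id=45892 is this model's end-of-sequence token, and
# return_dict_in_generate=True exposes the generated ids as outputs.sequences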
outputs = model.generate(**model_inputs, max_new_tokens=32768, eos_token_id=45892, return_dict_in_generate=True) |
|
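# Slice off the prompt tokens so only the newly generated text is decoded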
input_length = model_inputs.input_ids.shape[1]
generated_tokens = outputs.sequences[:, input_length:]
content = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
|
print("\ncontent:", content) |