## Fine-tuning with Prompt Tuning

**Usage**

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, PeftConfig

peft_model_id = "Laurie/bloomz-560m_PROMPT_TUNING_CAUSAL_LM"
config = PeftConfig.from_pretrained(peft_model_id)

# Load the frozen base model, then attach the trained prompt-tuning adapter
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Grab a tweet and tokenize it:
text_column = "Tweet text"
inputs = tokenizer(
    f'{text_column} : {"@nationalgridus I have no water and the bill is current and paid. Can you do something about this?"} Label : ',
    return_tensors="pt",
)

# Put the model on a GPU (if available) and generate the predicted label:
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

with torch.no_grad():
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=10,
        eos_token_id=3,
    )
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
```

Output:

```
["Tweet text : @nationalgridus I have no water and the bill is current and paid. Can you do something about this? Label : complaint"]
```
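If you only want the predicted label rather than the echoed prompt, you can decode just the newly generated tokens. Continuing from the snippet above, a small convenience sketch (not part of the original example):

```python
# Causal-LM generate() returns prompt + continuation; slice off the prompt
# tokens so that only the newly generated label text is decoded.
prompt_len = inputs["input_ids"].shape[1]
label = tokenizer.batch_decode(
    outputs[:, prompt_len:].detach().cpu().numpy(),
    skip_special_tokens=True,
)[0].strip()
print(label)  # e.g. "complaint"
```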
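For context, an adapter like the one loaded above is produced by wrapping the base model with a `PromptTuningConfig` and training as usual. The sketch below shows the general shape; the virtual-token count and initialization text are assumptions for illustration, not the exact recipe behind the `Laurie/bloomz-560m_PROMPT_TUNING_CAUSAL_LM` checkpoint:

```python
# Minimal prompt-tuning setup sketch. num_virtual_tokens and the init text
# are assumed values, not the settings used for this particular checkpoint.
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")

peft_config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    prompt_tuning_init=PromptTuningInit.TEXT,
    num_virtual_tokens=8,  # assumed length of the learned soft prompt
    prompt_tuning_init_text="Classify if the tweet is a complaint or not:",  # assumed
    tokenizer_name_or_path="bigscience/bloomz-560m",
)

model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()  # only the soft-prompt embeddings train

# After training, save_pretrained() writes just the small adapter, which is
# what PeftModel.from_pretrained() layers back onto the frozen base model.
```

Because only the soft-prompt embeddings are updated, the saved adapter is tiny (on the order of kilobytes for 8 virtual tokens), which is why the usage example loads the full bloomz-560m base separately and attaches the adapter on top.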