Laurie committed on
Commit
42bea49
·
1 Parent(s): 8c54b62

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +29 -0
README.md ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 使用 Prompt Tuning 方法微调 (Fine-tuned using the Prompt Tuning method)
2
+
3
+ **Usage**
4
+
5
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel, PeftConfig
6
+
7
+ peft_model_id = "Laurie/bloomz-560m_PROMPT_TUNING_CAUSAL_LM"
8
+
9
+ config = PeftConfig.from_pretrained(peft_model_id)
+ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
10
+ model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
11
+ model = PeftModel.from_pretrained(model, peft_model_id)
12
+
13
+ # Grab a tweet and tokenize it:
14
+ text_column = "Tweet text"
+ inputs = tokenizer(
15
+ f'{text_column} : {"@nationalgridus I have no water and the bill is current and paid. Can you do something about this?"} Label : ',
16
+ return_tensors="pt")
17
+
18
+ # Put the model on a GPU and generate the predicted label:
19
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model.to(device)
20
+
21
+ with torch.no_grad():
22
+ inputs = {k: v.to(device) for k, v in inputs.items()}
23
+ outputs = model.generate(
24
+ input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3
25
+ )
26
+ print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
27
+ [
28
+ "Tweet text : @nationalgridus I have no water and the bill is current and paid. Can you do something about this? Label : complaint"
29
+ ]