GRMenon committed on
Commit
21a6a8c
·
1 Parent(s): 748b9d1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +13 -3
README.md CHANGED
@@ -3,6 +3,9 @@ tags:
3
  - autotrain
4
  - text-generation
5
  - pytorch
 
 
 
6
  widget:
7
  - text: 'I love AutoTrain because '
8
  license: apache-2.0
@@ -55,9 +58,16 @@ messages = [
55
  {"role": "user", "content": "Hey Connor! I have been feeling a bit down lately. I could really use some advice on how to feel better?"}
56
  ]
57
 
58
- input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt').to(device)
59
- output_ids = model.generate(input_ids=input_ids, max_new_tokens=512, do_sample=True, pad_token_id=2)
60
- response = tokenizer.batch_decode(output_ids.detach().cpu().numpy(), skip_special_tokens = True)
 
 
 
 
 
 
 
61
 
62
  # Model response:
63
  print(response[0])
 
3
  - autotrain
4
  - text-generation
5
  - pytorch
6
+ - text-generation-inference
7
+ - endpoints
8
+ - transformers
9
  widget:
10
  - text: 'I love AutoTrain because '
11
  license: apache-2.0
 
58
  {"role": "user", "content": "Hey Connor! I have been feeling a bit down lately. I could really use some advice on how to feel better?"}
59
  ]
60
 
61
+ input_ids = tokenizer.apply_chat_template(conversation=messages,
62
+ tokenize=True,
63
+ add_generation_prompt=True,
64
+ return_tensors='pt').to(device)
65
+ output_ids = model.generate(input_ids=input_ids,
66
+ max_new_tokens=512,
67
+ do_sample=True,
68
+ pad_token_id=2)
69
+ response = tokenizer.batch_decode(output_ids.detach().cpu().numpy(),
70
+ skip_special_tokens = True)
71
 
72
  # Model response:
73
  print(response[0])