Update README.md
README.md CHANGED
@@ -17,7 +17,7 @@ The following `bitsandbytes` quantization config was used during training:
 - PEFT 0.4.0
 
 ## Inference Code
-```
+```shell
 from peft import PeftModel, PeftConfig
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import torch
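
The hunk shows only the opening import lines of the README's inference snippet; the rest of the code block is not visible in this diff. As a non-authoritative sketch of how these imports are typically combined for PEFT adapter inference, the example below loads the base model recorded in the adapter config and generates from a prompt. The adapter repo id, prompt, and generation settings are placeholders, not values taken from the README, and it calls `generate` directly rather than the imported `pipeline` helper to stay minimal.

```python
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Placeholder adapter repo id -- the real id is not visible in this diff.
peft_model_id = "your-username/your-peft-adapter"

# The adapter config records which base model the adapter was trained on.
config = PeftConfig.from_pretrained(peft_model_id)

# Load the base model, then attach the trained adapter weights on top of it.
base_model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, peft_model_id)
model.eval()

tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Run a single generation pass with a placeholder prompt.
inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```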