Update README.md

README.md (changed):

@@ -65,7 +65,7 @@ from sentence_transformers import SentenceTransformer

 sentences = ["This is an example sentence", "Each sentence is converted"]

-model = SentenceTransformer("{MODEL_NAME_OR_PATH}", trust_remote_code=True, model_kwargs={"torch_dtype": torch.bfloat16, "attn_implementation": "flash_attention_2"})
+model = SentenceTransformer("{MODEL_NAME_OR_PATH}", trust_remote_code=True, truncate_dim=None, model_kwargs={"torch_dtype": torch.bfloat16, "attn_implementation": "flash_attention_2"})
 model.max_seq_length = 512

 embeddings = model.encode(

@@ -87,7 +87,7 @@ from sentence_transformers import SentenceTransformer

 sentences = ["This is an example sentence", "Each sentence is converted"]

-model = SentenceTransformer("{MODEL_NAME_OR_PATH}", trust_remote_code=True, model_kwargs={"torch_dtype": torch.bfloat16, "attn_implementation": "flash_attention_2"})
+model = SentenceTransformer("{MODEL_NAME_OR_PATH}", trust_remote_code=True, truncate_dim=None, model_kwargs={"torch_dtype": torch.bfloat16, "attn_implementation": "flash_attention_2"})
 model.max_seq_length = 512

 prompt = "Instruct: Classifying the category of french news. \n Query: "