Update tokenizer_config.json
tokenizer_config.json CHANGED (+2 -2)
@@ -9009,13 +9009,13 @@
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
-  "chat_template": "{
+  "chat_template": "{% for message in messages %}{{ '<|im_start|>' + message['role'] | trim + '\n' + message['content'] | trim + '<|im_end|>' }}{% if not loop.last %}{{ '\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n' + '<|im_start|>assistant\n' }}{% endif %}",
   "extra_special_tokens": {},
   "legacy": true,
   "model_max_length": 32768,
   "pad_token": "<pad>",
   "padding_side": "left",
-  "tokenizer_class": "
+  "tokenizer_class": "LlamaTokenizerFast",
   "unk_token": "<unk>",
   "use_default_system_prompt": false
 }
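In practice the new template is consumed through tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True). The snippet below is a minimal sketch, not part of the commit, that renders the template stand-alone with jinja2 just to preview the prompt format; the example messages are made up, and the JSON \n escapes from the + line above are written here as real newlines, as json decoding would produce.

# Minimal sketch: preview the output of the updated chat_template.
# Assumptions: jinja2 is installed; messages below are illustrative only.
from jinja2 import Template

chat_template = (
    "{% for message in messages %}"
    "{{ '<|im_start|>' + message['role'] | trim + '\n' + message['content'] | trim + '<|im_end|>' }}"
    "{% if not loop.last %}{{ '\n' }}{% endif %}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '\n' + '<|im_start|>assistant\n' }}{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# add_generation_prompt=True appends the assistant header so the model
# continues the conversation as the assistant turn.
print(Template(chat_template).render(messages=messages, add_generation_prompt=True))

# Expected output:
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant

Each turn is wrapped as <|im_start|>{role}\n{content}<|im_end|>, which matches the eos_token "<|im_end|>" set earlier in this file.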