### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
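### (merging folds the LoRA deltas into the base weights, W' = W + BA, in full
### precision; starting from a quantized base model would make the merge lossy)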
### model
model_name_or_path: EleutherAI/llemma_7b
adapter_name_or_path: /u/jhu11/projects/saves/llemma-7b-3epoch-r32-e5-RQ1/lora/sft
template: llama3  # must match the template used during SFT
finetuning_type: lora  # included in the upstream merge_lora example (lora is also the default)
trust_remote_code: true
resize_vocab: true  # keep consistent with the SFT config if the vocabulary was resized
### export
export_dir: /u/jhu11/hdd/output/llemma_7b_3epoch_r32_e5_RQ1
export_size: 5  # maximum shard size in GB
export_device: auto  # choices: [cpu, auto]; `gpu` is not a valid value, `auto` uses the GPU when available
export_legacy_format: false  # false saves .safetensors instead of legacy .bin files
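### Usage (a sketch, assuming this file is saved as merge_lora.yaml):
###   llamafactory-cli export merge_lora.yaml
### The merged full-weight model is written to export_dir and can then be loaded
### standalone (without the adapter), e.g. via transformers.AutoModelForCausalLM.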