| Column | Type | Range |
|---|---|---|
| id | stringlengths | 6 to 113 |
| author | stringlengths | 2 to 36 |
| task_category | stringclasses | 42 values |
| tags | listlengths | 1 to 4.05k |
| created_time | timestamp[ns, tz=UTC]date | 2022-03-02 23:29:04 to 2025-04-10 08:38:38 |
| last_modified | stringdate | 2020-05-14 13:13:12 to 2025-04-19 04:15:39 |
| downloads | int64 | 0 to 118M |
| likes | int64 | 0 to 4.86k |
| README | stringlengths | 30 to 1.01M |
| matched_bigbio_names | listlengths | 1 to 8 |
| is_bionlp | stringclasses | 3 values |
| model_cards | stringlengths | 0 to 1M |
| metadata | stringlengths | 2 to 698k |
| source | stringclasses | 2 values |
| matched_task | listlengths | 1 to 10 |
| __index_level_0__ | int64 | 0 to 46.9k |
yhwng/finetuning-sentiment-model-3000-samples
yhwng
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:imdb", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-11T02:43:24Z
2023-11-11T02:48:28+00:00
6
0
--- base_model: distilbert-base-uncased datasets: - imdb license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: finetuning-sentiment-model-3000-samples results: - task: type: text-classification name: Text Classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - type: accuracy value: 0.87 name: Accuracy - type: f1 value: 0.8721311475409836 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.3272 - Accuracy: 0.87 - F1: 0.8721 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.3272 - Accuracy: 0.87 - F1: 0.8721 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
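The card above lists the training hyperparameters but not the training script. As a rough sketch (not the author's actual code), the recipe could be reproduced with the 🤗 Trainer as follows; the 3,000-example subset size is inferred from the model name and is an assumption, not something the card states.

```python
# Illustrative sketch only: uses the hyperparameters listed in the card
# (lr 2e-5, batch size 16, seed 42, 2 epochs). The 3000-sample subset is a
# guess based on the model name; it is not stated in the card.
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

raw = load_dataset("imdb")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True)

train = raw["train"].shuffle(seed=42).select(range(3000)).map(tokenize, batched=True)
test = raw["test"].shuffle(seed=42).select(range(300)).map(tokenize, batched=True)

model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2)

args = TrainingArguments(
    output_dir="finetuning-sentiment-model-3000-samples",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=2,
    seed=42,
)

trainer = Trainer(model=model, args=args, train_dataset=train,
                  eval_dataset=test, tokenizer=tokenizer)
trainer.train()
trainer.evaluate()
```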
{"base_model": "distilbert-base-uncased", "datasets": ["imdb"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "finetuning-sentiment-model-3000-samples", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "imdb", "type": "imdb", "config": "plain_text", "split": "test", "args": "plain_text"}, "metrics": [{"type": "accuracy", "value": 0.87, "name": "Accuracy"}, {"type": "f1", "value": 0.8721311475409836, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,237
cstr/Spaetzle-v69-7b
cstr
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "conversational", "de", "en", "base_model:DRXD1000/Phoenix-7B", "base_model:merge:DRXD1000/Phoenix-7B", "base_model:DiscoResearch/DiscoLM_German_7b_v1", "base_model:merge:DiscoResearch/DiscoLM_German_7b_v1", "base_model:LeoLM/leo-mistral-hessianai-7b", "base_model:merge:LeoLM/leo-mistral-hessianai-7b", "base_model:OpenPipe/mistral-ft-optimized-1227", "base_model:merge:OpenPipe/mistral-ft-optimized-1227", "base_model:PetroGPT/WestSeverus-7B-DPO-v2", "base_model:merge:PetroGPT/WestSeverus-7B-DPO-v2", "base_model:ResplendentAI/Flora_DPO_7B", "base_model:merge:ResplendentAI/Flora_DPO_7B", "base_model:VAGOsolutions/SauerkrautLM-7b-v1-mistral", "base_model:merge:VAGOsolutions/SauerkrautLM-7b-v1-mistral", "base_model:abideen/AlphaMonarch-dora", "base_model:merge:abideen/AlphaMonarch-dora", "base_model:cognitivecomputations/openchat-3.5-0106-laser", "base_model:merge:cognitivecomputations/openchat-3.5-0106-laser", "base_model:flemmingmiguel/NeuDist-Ro-7B", "base_model:merge:flemmingmiguel/NeuDist-Ro-7B", "base_model:malteos/hermeo-7b", "base_model:merge:malteos/hermeo-7b", "base_model:mayflowergmbh/Wiedervereinigung-7b-dpo", "base_model:merge:mayflowergmbh/Wiedervereinigung-7b-dpo", "base_model:occiglot/occiglot-7b-de-en-instruct", "base_model:merge:occiglot/occiglot-7b-de-en-instruct", "base_model:yleo/EmertonMonarch-7B", "base_model:merge:yleo/EmertonMonarch-7B", "license:cc-by-nc-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-04-17T03:40:34Z
2024-04-19T14:51:43+00:00
22
1
--- base_model: - abideen/AlphaMonarch-dora - mayflowergmbh/Wiedervereinigung-7b-dpo - flemmingmiguel/NeuDist-Ro-7B - ResplendentAI/Flora_DPO_7B - yleo/EmertonMonarch-7B - occiglot/occiglot-7b-de-en-instruct - OpenPipe/mistral-ft-optimized-1227 - DiscoResearch/DiscoLM_German_7b_v1 - LeoLM/leo-mistral-hessianai-7b - DRXD1000/Phoenix - VAGOsolutions/SauerkrautLM-7b-v1-mistral - malteos/hermeo-7b - FelixChao/WestSeverus-7B-DPO-v2 - cognitivecomputations/openchat-3.5-0106-laser language: - de - en license: cc-by-nc-4.0 tags: - merge - mergekit - lazymergekit --- # Spaetzle-v69-7b This is a progressive (mostly dare-ties, but also slerp) merge with the intention of a suitable compromise for English and German local tasks. There is also a 4q_k_m quantized [GGUF](https://huggingface.co/cstr/Spaetzle-v69-7b-GGUF). It should work sufficiently well with ChatML prompt template (for all merged models should have seen ChatML prompts at least in DPO stage). ## Evaluation Benchmark scores are not the possible optimum, as the model attempts a compromise with a number of parameters, like German language performance, instruction following, reasoning capabilities, robustness (so far, i did not encounter inserted tokens, e.g.), model licensing, and other criteria. Nevertheless, they are not too bad: It achieves (running quantized) in - German EQ Bench: Score (v2_de): 62.59 (Parseable: 171.0). - English EQ Bench: Score (v2): 76.43 (Parseable: 171.0). [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard): Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_cstr__Spaetzle-v69-7b) | Metric |Value| |---------------------------------|----:| |Avg. |72.87| |AI2 Reasoning Challenge (25-Shot)|69.54| |HellaSwag (10-Shot) |86.77| |MMLU (5-Shot) |64.63| |TruthfulQA (0-shot) |65.61| |Winogrande (5-shot) |81.93| |GSM8k (5-shot) |68.76| Nous benchmark results: | Model |AGIEval|GPT4All|TruthfulQA|Bigbench|Average| |--------------------------------------------------------------|------:|------:|---------:|-------:|------:| |[Spaetzle-v69-7b](https://huggingface.co/cstr/Spaetzle-v69-7b)| 44.48| 75.84| 66.15| 46.59| 58.27| ### AGIEval | Task |Version| Metric |Value| |Stderr| |------------------------------|------:|--------|----:|---|-----:| |agieval_aqua_rat | 0|acc |25.98|± | 2.76| | | |acc_norm|23.62|± | 2.67| |agieval_logiqa_en | 0|acc |39.78|± | 1.92| | | |acc_norm|39.48|± | 1.92| |agieval_lsat_ar | 0|acc |23.48|± | 2.80| | | |acc_norm|23.91|± | 2.82| |agieval_lsat_lr | 0|acc |50.00|± | 2.22| | | |acc_norm|51.76|± | 2.21| |agieval_lsat_rc | 0|acc |63.94|± | 2.93| | | |acc_norm|64.31|± | 2.93| |agieval_sat_en | 0|acc |76.70|± | 2.95| | | |acc_norm|77.67|± | 2.91| |agieval_sat_en_without_passage| 0|acc |46.12|± | 3.48| | | |acc_norm|44.17|± | 3.47| |agieval_sat_math | 0|acc |34.09|± | 3.20| | | |acc_norm|30.91|± | 3.12| Average: 44.48% ### GPT4All | Task |Version| Metric |Value| |Stderr| |-------------|------:|--------|----:|---|-----:| |arc_challenge| 0|acc |63.23|± | 1.41| | | |acc_norm|64.16|± | 1.40| |arc_easy | 0|acc |85.90|± | 0.71| | | |acc_norm|82.49|± | 0.78| |boolq | 1|acc |87.80|± | 0.57| |hellaswag | 0|acc |67.05|± | 0.47| | | |acc_norm|85.19|± | 0.35| |openbookqa | 0|acc |38.40|± | 2.18| | | |acc_norm|48.40|± | 2.24| |piqa | 0|acc |82.75|± | 0.88| | | |acc_norm|84.28|± | 0.85| |winogrande | 0|acc |78.53|± | 1.15| Average: 75.84% ### TruthfulQA | Task |Version|Metric|Value| |Stderr| 
|-------------|------:|------|----:|---|-----:| |truthfulqa_mc| 1|mc1 |50.67|± | 1.75| | | |mc2 |66.15|± | 1.48| Average: 66.15% ### Bigbench | Task |Version| Metric |Value| |Stderr| |------------------------------------------------|------:|---------------------|----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|56.84|± | 3.60| |bigbench_date_understanding | 0|multiple_choice_grade|66.67|± | 2.46| |bigbench_disambiguation_qa | 0|multiple_choice_grade|40.70|± | 3.06| |bigbench_geometric_shapes | 0|multiple_choice_grade|24.79|± | 2.28| | | |exact_str_match |10.58|± | 1.63| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|31.00|± | 2.07| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|23.00|± | 1.59| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|58.00|± | 2.85| |bigbench_movie_recommendation | 0|multiple_choice_grade|45.80|± | 2.23| |bigbench_navigate | 0|multiple_choice_grade|52.10|± | 1.58| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|69.55|± | 1.03| |bigbench_ruin_names | 0|multiple_choice_grade|48.88|± | 2.36| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|30.96|± | 1.46| |bigbench_snarks | 0|multiple_choice_grade|73.48|± | 3.29| |bigbench_sports_understanding | 0|multiple_choice_grade|74.14|± | 1.40| |bigbench_temporal_sequences | 0|multiple_choice_grade|42.70|± | 1.56| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|23.60|± | 1.20| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|18.40|± | 0.93| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|58.00|± | 2.85| Average: 46.59% Average score: 58.27% ## 🧩 Merge Configuration Spaetzle-v69-7b is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [abideen/AlphaMonarch-dora](https://huggingface.co/abideen/AlphaMonarch-dora) * [cstr/Spaetzle-v68-7b](https://huggingface.co/cstr/Spaetzle-v68-7b) The merge tree in total involves the following original models: - [abideen/AlphaMonarch-dora](https://huggingface.co/abideen/AlphaMonarch-dora) - [mayflowergmbh/Wiedervereinigung-7b-dpo](https://huggingface.co/mayflowergmbh/Wiedervereinigung-7b-dpo) - [flemmingmiguel/NeuDist-Ro-7B](https://huggingface.co/flemmingmiguel/NeuDist-Ro-7B) - [ResplendentAI/Flora_DPO_7B](https://huggingface.co/ResplendentAI/Flora_DPO_7B) - [yleo/EmertonMonarch-7B](https://huggingface.co/yleo/EmertonMonarch-7B) - [occiglot/occiglot-7b-de-en-instruct](https://huggingface.co/occiglot/occiglot-7b-de-en-instruct) - [OpenPipe/mistral-ft-optimized-1227](https://huggingface.co/OpenPipe/mistral-ft-optimized-1227) - [DiscoResearch/DiscoLM_German_7b_v1](https://huggingface.co/DiscoResearch/DiscoLM_German_7b_v1) - [LeoLM/leo-mistral-hessianai-7b](https://huggingface.co/LeoLM/leo-mistral-hessianai-7b) - [DRXD1000/Phoenix](https://huggingface.co/DRXD1000/Phoenix) - [VAGOsolutions/SauerkrautLM-7b-v1-mistral](https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-v1-mistral) - [malteos/hermeo-7b](https://huggingface.co/malteos/hermeo-7b) - [FelixChao/WestSeverus-7B-DPO-v2](https://huggingface.co/FelixChao/WestSeverus-7B-DPO-v2) - [cognitivecomputations/openchat-3.5-0106-laser](https://huggingface.co/cognitivecomputations/openchat-3.5-0106-laser) For this last merge: ```yaml models: - model: cstr/Spaetzle-v68-7b # no parameters necessary for base model - model: abideen/AlphaMonarch-dora parameters: 
density: 0.60 weight: 0.30 merge_method: dare_ties base_model: cstr/Spaetzle-v68-7b parameters: int8_mask: true dtype: bfloat16 random_seed: 0 tokenizer_source: base ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "cstr/Spaetzle-v69-7b" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
null
Non_BioNLP
# Spaetzle-v69-7b This is a progressive (mostly dare-ties, but also slerp) merge with the intention of a suitable compromise for English and German local tasks. There is also a 4q_k_m quantized [GGUF](https://huggingface.co/cstr/Spaetzle-v69-7b-GGUF). It should work sufficiently well with ChatML prompt template (for all merged models should have seen ChatML prompts at least in DPO stage). ## Evaluation Benchmark scores are not the possible optimum, as the model attempts a compromise with a number of parameters, like German language performance, instruction following, reasoning capabilities, robustness (so far, i did not encounter inserted tokens, e.g.), model licensing, and other criteria. Nevertheless, they are not too bad: It achieves (running quantized) in - German EQ Bench: Score (v2_de): 62.59 (Parseable: 171.0). - English EQ Bench: Score (v2): 76.43 (Parseable: 171.0). [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard): Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_cstr__Spaetzle-v69-7b) | Metric |Value| |---------------------------------|----:| |Avg. |72.87| |AI2 Reasoning Challenge (25-Shot)|69.54| |HellaSwag (10-Shot) |86.77| |MMLU (5-Shot) |64.63| |TruthfulQA (0-shot) |65.61| |Winogrande (5-shot) |81.93| |GSM8k (5-shot) |68.76| Nous benchmark results: | Model |AGIEval|GPT4All|TruthfulQA|Bigbench|Average| |--------------------------------------------------------------|------:|------:|---------:|-------:|------:| |[Spaetzle-v69-7b](https://huggingface.co/cstr/Spaetzle-v69-7b)| 44.48| 75.84| 66.15| 46.59| 58.27| ### AGIEval | Task |Version| Metric |Value| |Stderr| |------------------------------|------:|--------|----:|---|-----:| |agieval_aqua_rat | 0|acc |25.98|± | 2.76| | | |acc_norm|23.62|± | 2.67| |agieval_logiqa_en | 0|acc |39.78|± | 1.92| | | |acc_norm|39.48|± | 1.92| |agieval_lsat_ar | 0|acc |23.48|± | 2.80| | | |acc_norm|23.91|± | 2.82| |agieval_lsat_lr | 0|acc |50.00|± | 2.22| | | |acc_norm|51.76|± | 2.21| |agieval_lsat_rc | 0|acc |63.94|± | 2.93| | | |acc_norm|64.31|± | 2.93| |agieval_sat_en | 0|acc |76.70|± | 2.95| | | |acc_norm|77.67|± | 2.91| |agieval_sat_en_without_passage| 0|acc |46.12|± | 3.48| | | |acc_norm|44.17|± | 3.47| |agieval_sat_math | 0|acc |34.09|± | 3.20| | | |acc_norm|30.91|± | 3.12| Average: 44.48% ### GPT4All | Task |Version| Metric |Value| |Stderr| |-------------|------:|--------|----:|---|-----:| |arc_challenge| 0|acc |63.23|± | 1.41| | | |acc_norm|64.16|± | 1.40| |arc_easy | 0|acc |85.90|± | 0.71| | | |acc_norm|82.49|± | 0.78| |boolq | 1|acc |87.80|± | 0.57| |hellaswag | 0|acc |67.05|± | 0.47| | | |acc_norm|85.19|± | 0.35| |openbookqa | 0|acc |38.40|± | 2.18| | | |acc_norm|48.40|± | 2.24| |piqa | 0|acc |82.75|± | 0.88| | | |acc_norm|84.28|± | 0.85| |winogrande | 0|acc |78.53|± | 1.15| Average: 75.84% ### TruthfulQA | Task |Version|Metric|Value| |Stderr| |-------------|------:|------|----:|---|-----:| |truthfulqa_mc| 1|mc1 |50.67|± | 1.75| | | |mc2 |66.15|± | 1.48| Average: 66.15% ### Bigbench | Task |Version| Metric |Value| |Stderr| |------------------------------------------------|------:|---------------------|----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|56.84|± | 3.60| |bigbench_date_understanding | 0|multiple_choice_grade|66.67|± | 2.46| |bigbench_disambiguation_qa | 0|multiple_choice_grade|40.70|± | 3.06| |bigbench_geometric_shapes | 0|multiple_choice_grade|24.79|± | 2.28| | | |exact_str_match |10.58|± | 1.63| 
|bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|31.00|± | 2.07| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|23.00|± | 1.59| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|58.00|± | 2.85| |bigbench_movie_recommendation | 0|multiple_choice_grade|45.80|± | 2.23| |bigbench_navigate | 0|multiple_choice_grade|52.10|± | 1.58| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|69.55|± | 1.03| |bigbench_ruin_names | 0|multiple_choice_grade|48.88|± | 2.36| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|30.96|± | 1.46| |bigbench_snarks | 0|multiple_choice_grade|73.48|± | 3.29| |bigbench_sports_understanding | 0|multiple_choice_grade|74.14|± | 1.40| |bigbench_temporal_sequences | 0|multiple_choice_grade|42.70|± | 1.56| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|23.60|± | 1.20| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|18.40|± | 0.93| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|58.00|± | 2.85| Average: 46.59% Average score: 58.27% ## 🧩 Merge Configuration Spaetzle-v69-7b is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [abideen/AlphaMonarch-dora](https://huggingface.co/abideen/AlphaMonarch-dora) * [cstr/Spaetzle-v68-7b](https://huggingface.co/cstr/Spaetzle-v68-7b) The merge tree in total involves the following original models: - [abideen/AlphaMonarch-dora](https://huggingface.co/abideen/AlphaMonarch-dora) - [mayflowergmbh/Wiedervereinigung-7b-dpo](https://huggingface.co/mayflowergmbh/Wiedervereinigung-7b-dpo) - [flemmingmiguel/NeuDist-Ro-7B](https://huggingface.co/flemmingmiguel/NeuDist-Ro-7B) - [ResplendentAI/Flora_DPO_7B](https://huggingface.co/ResplendentAI/Flora_DPO_7B) - [yleo/EmertonMonarch-7B](https://huggingface.co/yleo/EmertonMonarch-7B) - [occiglot/occiglot-7b-de-en-instruct](https://huggingface.co/occiglot/occiglot-7b-de-en-instruct) - [OpenPipe/mistral-ft-optimized-1227](https://huggingface.co/OpenPipe/mistral-ft-optimized-1227) - [DiscoResearch/DiscoLM_German_7b_v1](https://huggingface.co/DiscoResearch/DiscoLM_German_7b_v1) - [LeoLM/leo-mistral-hessianai-7b](https://huggingface.co/LeoLM/leo-mistral-hessianai-7b) - [DRXD1000/Phoenix](https://huggingface.co/DRXD1000/Phoenix) - [VAGOsolutions/SauerkrautLM-7b-v1-mistral](https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-v1-mistral) - [malteos/hermeo-7b](https://huggingface.co/malteos/hermeo-7b) - [FelixChao/WestSeverus-7B-DPO-v2](https://huggingface.co/FelixChao/WestSeverus-7B-DPO-v2) - [cognitivecomputations/openchat-3.5-0106-laser](https://huggingface.co/cognitivecomputations/openchat-3.5-0106-laser) For this last merge: ```yaml models: - model: cstr/Spaetzle-v68-7b # no parameters necessary for base model - model: abideen/AlphaMonarch-dora parameters: density: 0.60 weight: 0.30 merge_method: dare_ties base_model: cstr/Spaetzle-v68-7b parameters: int8_mask: true dtype: bfloat16 random_seed: 0 tokenizer_source: base ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "cstr/Spaetzle-v69-7b" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( 
"text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
{"base_model": ["abideen/AlphaMonarch-dora", "mayflowergmbh/Wiedervereinigung-7b-dpo", "flemmingmiguel/NeuDist-Ro-7B", "ResplendentAI/Flora_DPO_7B", "yleo/EmertonMonarch-7B", "occiglot/occiglot-7b-de-en-instruct", "OpenPipe/mistral-ft-optimized-1227", "DiscoResearch/DiscoLM_German_7b_v1", "LeoLM/leo-mistral-hessianai-7b", "DRXD1000/Phoenix", "VAGOsolutions/SauerkrautLM-7b-v1-mistral", "malteos/hermeo-7b", "FelixChao/WestSeverus-7B-DPO-v2", "cognitivecomputations/openchat-3.5-0106-laser"], "language": ["de", "en"], "license": "cc-by-nc-4.0", "tags": ["merge", "mergekit", "lazymergekit"]}
task
[ "TRANSLATION" ]
40,238
avneet/distilbert-base-uncased-finetuned-cola
avneet
text-classification
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:glue", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2021-07-30T00:15:09+00:00
15
0
--- datasets: - glue license: apache-2.0 metrics: - matthews_correlation tags: - generated_from_trainer model_index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: cola metric: name: Matthews Correlation type: matthews_correlation value: 0.42176824452830747 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.4981 - Matthews Correlation: 0.4218 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5248 | 1.0 | 535 | 0.4981 | 0.4218 | ### Framework versions - Transformers 4.9.1 - Pytorch 1.9.0+cu102 - Datasets 1.10.2 - Tokenizers 0.10.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.4981 - Matthews Correlation: 0.4218 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5248 | 1.0 | 535 | 0.4981 | 0.4218 | ### Framework versions - Transformers 4.9.1 - Pytorch 1.9.0+cu102 - Datasets 1.10.2 - Tokenizers 0.10.3
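The card reports Matthews correlation on the GLUE CoLA validation split. A hedged sketch of how that number could be re-checked with the 🤗 `evaluate` library follows; this is not the author's original evaluation script, and it assumes the default LABEL_0 / LABEL_1 names produced by the Trainer.

```python
# Illustrative re-evaluation sketch; label-name handling is an assumption,
# as the card does not publish an id2label mapping.
import evaluate
from datasets import load_dataset
from transformers import pipeline

clf = pipeline("text-classification",
               model="avneet/distilbert-base-uncased-finetuned-cola")
cola = load_dataset("glue", "cola", split="validation")

preds = [int(out["label"].split("_")[-1])
         for out in clf(cola["sentence"], truncation=True)]

matthews = evaluate.load("matthews_correlation")
print(matthews.compute(predictions=preds, references=cola["label"]))
```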
{"datasets": ["glue"], "license": "apache-2.0", "metrics": ["matthews_correlation"], "tags": ["generated_from_trainer"], "model_index": [{"name": "distilbert-base-uncased-finetuned-cola", "results": [{"task": {"name": "Text Classification", "type": "text-classification"}, "dataset": {"name": "glue", "type": "glue", "args": "cola"}, "metric": {"name": "Matthews Correlation", "type": "matthews_correlation", "value": 0.42176824452830747}}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,239
metoledo/autotrain-x03ts-qcf7e
metoledo
text-classification
[ "tensorboard", "safetensors", "deberta-v2", "autotrain", "text-classification", "base_model:MoritzLaurer/mDeBERTa-v3-base-mnli-xnli", "base_model:finetune:MoritzLaurer/mDeBERTa-v3-base-mnli-xnli", "region:us" ]
2025-01-02T20:12:21Z
2025-01-02T21:36:37+00:00
7
0
--- base_model: MoritzLaurer/mDeBERTa-v3-base-mnli-xnli tags: - autotrain - text-classification widget: - text: I love AutoTrain --- # Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 2.303269863128662 f1_macro: 0.040508170738823165 f1_micro: 0.3367965367965368 f1_weighted: 0.2413478397672539 precision_macro: 0.0434917012251169 precision_micro: 0.3367965367965368 precision_weighted: 0.21644678791353103 recall_macro: 0.05803911531326899 recall_micro: 0.3367965367965368 recall_weighted: 0.3367965367965368 accuracy: 0.3367965367965368
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 2.303269863128662 f1_macro: 0.040508170738823165 f1_micro: 0.3367965367965368 f1_weighted: 0.2413478397672539 precision_macro: 0.0434917012251169 precision_micro: 0.3367965367965368 precision_weighted: 0.21644678791353103 recall_macro: 0.05803911531326899 recall_micro: 0.3367965367965368 recall_weighted: 0.3367965367965368 accuracy: 0.3367965367965368
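Since the card only lists validation metrics, a brief, hedged inference sketch may help; the label names depend on the unpublished training data, so treat them as opaque identifiers.

```python
# Sketch: loading the AutoTrain classifier for inference. What the labels mean
# cannot be determined from the card alone.
from transformers import pipeline

clf = pipeline("text-classification", model="metoledo/autotrain-x03ts-qcf7e")
print(clf("I love AutoTrain"))
```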
{"base_model": "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli", "tags": ["autotrain", "text-classification"], "widget": [{"text": "I love AutoTrain"}]}
task
[ "TEXT_CLASSIFICATION" ]
40,240
sujeethreddy567/sathwik_reddy_t5_summary
sujeethreddy567
summarization
[ "transformers", "tensorboard", "safetensors", "mt5", "text2text-generation", "summarization", "generated_from_trainer", "base_model:google/mt5-small", "base_model:finetune:google/mt5-small", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-12-15T05:30:52Z
2024-12-15T06:50:32+00:00
16
0
--- base_model: google/mt5-small library_name: transformers license: apache-2.0 metrics: - rouge tags: - summarization - generated_from_trainer model-index: - name: sathwik_reddy_t5_summary results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sathwik_reddy_t5_summary This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.4258 - Rouge1: 0.3185 - Rouge2: 0.1246 - Rougel: 0.2850 - Rougelsum: 0.2855 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | 3.1533 | 1.0 | 440 | 2.5586 | 0.2832 | 0.1122 | 0.2578 | 0.2589 | | 3.0151 | 2.0 | 880 | 2.5020 | 0.2943 | 0.1113 | 0.2626 | 0.2635 | | 2.9157 | 3.0 | 1320 | 2.4919 | 0.3029 | 0.1158 | 0.2710 | 0.2716 | | 2.8355 | 4.0 | 1760 | 2.4670 | 0.3043 | 0.1163 | 0.2749 | 0.2753 | | 2.7702 | 5.0 | 2200 | 2.4386 | 0.3143 | 0.1220 | 0.2826 | 0.2834 | | 2.7169 | 6.0 | 2640 | 2.4322 | 0.3120 | 0.1202 | 0.2805 | 0.2807 | | 2.6884 | 7.0 | 3080 | 2.4255 | 0.3154 | 0.1222 | 0.2834 | 0.2839 | | 2.6671 | 8.0 | 3520 | 2.4258 | 0.3185 | 0.1246 | 0.2850 | 0.2855 | ### Framework versions - Transformers 4.47.0 - Pytorch 2.5.1+cu124 - Datasets 3.2.0 - Tokenizers 0.21.0
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sathwik_reddy_t5_summary This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.4258 - Rouge1: 0.3185 - Rouge2: 0.1246 - Rougel: 0.2850 - Rougelsum: 0.2855 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | 3.1533 | 1.0 | 440 | 2.5586 | 0.2832 | 0.1122 | 0.2578 | 0.2589 | | 3.0151 | 2.0 | 880 | 2.5020 | 0.2943 | 0.1113 | 0.2626 | 0.2635 | | 2.9157 | 3.0 | 1320 | 2.4919 | 0.3029 | 0.1158 | 0.2710 | 0.2716 | | 2.8355 | 4.0 | 1760 | 2.4670 | 0.3043 | 0.1163 | 0.2749 | 0.2753 | | 2.7702 | 5.0 | 2200 | 2.4386 | 0.3143 | 0.1220 | 0.2826 | 0.2834 | | 2.7169 | 6.0 | 2640 | 2.4322 | 0.3120 | 0.1202 | 0.2805 | 0.2807 | | 2.6884 | 7.0 | 3080 | 2.4255 | 0.3154 | 0.1222 | 0.2834 | 0.2839 | | 2.6671 | 8.0 | 3520 | 2.4258 | 0.3185 | 0.1246 | 0.2850 | 0.2855 | ### Framework versions - Transformers 4.47.0 - Pytorch 2.5.1+cu124 - Datasets 3.2.0 - Tokenizers 0.21.0
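The card documents the ROUGE scores but not how to run the summarizer. A minimal, hedged usage sketch (the generation settings and the example text are illustrative, not taken from the card):

```python
# Sketch: summarization inference with the fine-tuned mT5 model; max/min
# lengths are illustrative defaults, not values from the card.
from transformers import pipeline

summarizer = pipeline("summarization",
                      model="sujeethreddy567/sathwik_reddy_t5_summary")
article = ("Hugging Face Transformers provides thousands of pretrained models "
           "for text, vision and audio tasks, with a unified API for training "
           "and inference.")
print(summarizer(article, max_length=48, min_length=8)[0]["summary_text"])
```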
{"base_model": "google/mt5-small", "library_name": "transformers", "license": "apache-2.0", "metrics": ["rouge"], "tags": ["summarization", "generated_from_trainer"], "model-index": [{"name": "sathwik_reddy_t5_summary", "results": []}]}
task
[ "SUMMARIZATION" ]
40,241
dp011/cluster-ntrials-400
dp011
text-classification
[ "setfit", "safetensors", "bert", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "base_model:aspire/acge_text_embedding", "base_model:finetune:aspire/acge_text_embedding", "model-index", "region:us" ]
2024-08-16T22:23:01Z
2024-08-16T22:23:56+00:00
4
0
--- base_model: aspire/acge_text_embedding library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: [] inference: true model-index: - name: SetFit with aspire/acge_text_embedding results: - task: type: text-classification name: Text Classification dataset: name: Unknown type: unknown split: test metrics: - type: accuracy value: 0.7089986849520947 name: Accuracy --- # SetFit with aspire/acge_text_embedding This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [aspire/acge_text_embedding](https://huggingface.co/aspire/acge_text_embedding) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [aspire/acge_text_embedding](https://huggingface.co/aspire/acge_text_embedding) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 1024 tokens <!-- - **Number of Classes:** Unknown --> <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.7090 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("dp011/cluster-ntrials-400") # Run inference preds = model("I loved the spiderman movie!") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Framework Versions - Python: 3.10.13 - SetFit: 1.0.3 - Sentence Transformers: 3.0.1 - Transformers: 4.39.0 - PyTorch: 2.1.2 - Datasets: 2.20.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit with aspire/acge_text_embedding This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [aspire/acge_text_embedding](https://huggingface.co/aspire/acge_text_embedding) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [aspire/acge_text_embedding](https://huggingface.co/aspire/acge_text_embedding) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 1024 tokens <!-- - **Number of Classes:** Unknown --> <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.7090 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("dp011/cluster-ntrials-400") # Run inference preds = model("I loved the spiderman movie!") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Framework Versions - Python: 3.10.13 - SetFit: 1.0.3 - Sentence Transformers: 3.0.1 - Transformers: 4.39.0 - PyTorch: 2.1.2 - Datasets: 2.20.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
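The card describes the two-stage SetFit recipe (contrastive fine-tuning of the embedding body, then a LogisticRegression head) but not the training call itself. A minimal sketch with SetFit 1.0 is shown below, using a made-up toy dataset since the real training data is not published:

```python
# Sketch of the few-shot SetFit training loop the card describes. The inline
# dataset below is invented for illustration; the card's training data is unknown.
from datasets import Dataset
from setfit import SetFitModel, Trainer, TrainingArguments

train_ds = Dataset.from_dict({
    "text": ["great movie", "terrible plot", "loved every minute", "waste of time"],
    "label": [1, 0, 1, 0],
})

# Loading a plain Sentence Transformer creates a SetFit model with a fresh
# LogisticRegression head, matching the architecture in the card.
model = SetFitModel.from_pretrained("aspire/acge_text_embedding")

args = TrainingArguments(batch_size=16, num_epochs=1)
trainer = Trainer(model=model, args=args, train_dataset=train_ds)
trainer.train()

print(model.predict(["what a fantastic film"]))
```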
{"base_model": "aspire/acge_text_embedding", "library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [], "inference": true, "model-index": [{"name": "SetFit with aspire/acge_text_embedding", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "Unknown", "type": "unknown", "split": "test"}, "metrics": [{"type": "accuracy", "value": 0.7089986849520947, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,242
KnutJaegersberg/Walter-BTLM-3B
KnutJaegersberg
text-generation
[ "transformers", "safetensors", "btlm", "text-generation", "custom_code", "dataset:KnutJaegersberg/Auton", "license:apache-2.0", "autotrain_compatible", "region:us" ]
2023-12-12T19:21:14Z
2023-12-12T23:12:42+00:00
6
0
--- datasets: - KnutJaegersberg/Auton license: apache-2.0 --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/63732ebbbd81fae2b3aaf3fb/b3PQZKV_xZCGL8W5pjCpu.png) Walter is an unaligned, free thinking AI assistant that has been given time to think about things. It's trained on instruction datasets with open source licenses. It covers a lot of tasks, 2/3 of the samples are from large datasets like flan, but also other datasets. It knows a few tricks, shown by the prompting examples below: General Prompt Examples: ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: Please answer the following question: Process: - The bat sends out sound waves from their mouth or nose - The sound waves hit an object - Echoes are produced - The echo returns to the bat&#x27;s ears - The bat hears the echo - The bat can figure out where the object is located. suppose No bats live here happens, how will it affect STRONGER echolocation. Which of the following is the supposed perturbation? - directly impacting a step of the process - indirectly impacting a step of the process - not impacting any step of the process Answer: ### Response: indirectly impacting a step of the process ``` ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: You will be given a text in Russian language which contain different emotion labels from the list - ['joy', ' sadness', 'surprise', 'fear', 'anger']. You need to output the incorrect emotion label, which is irrelevant to the input text. Your answer (i) should contain only one emotion label (ii) should be unambiguous. ### Input: То чувство, когда идешь домой после 3-х часовой тренировки и в подъезде пахнет запеченной курицей - зачем вы меня дразните :( ### Response: surprise ``` CoT Prompt Example: ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: In this task, you are given a sentence with a missing word that can be an object, a person, and/or an action. Fill in the blank with a plausible word. Although each sentence has many correct answers, you only have to write one answer. PersonX puts ___ on PersonY's car ### Reasoning: Since PersonX is putting water on PersonY's car, it could be assumed that the car is on fire. ### Response: water ``` Summarization Example: ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: Below is a text. Write a comprehensive, concise and coherent summary. ### Input: Embodied bilingual language, also known as L2 embodiment, is the idea that people mentally simulate their actions, perceptions, and emotions when speaking and understanding a second language (L2) as with their first language (L1). 
It is closely related to embodied cognition and embodied language processing, both of which only refer to native language thinking and speaking. An example of embodied bilingual language would be situation in which a L1 English speaker learning Spanish as a second language hears the word rápido ("fast") in Spanish while taking notes and then proceeds to take notes more quickly. ==Overview== Embodied bilingual language refers to the role ...... ### Response: Embodied bilingual language, also known as L2 embodiment, is the idea that people mentally simulate their actions, perceptions, and emotions when speaking and understanding a second language (L2) as with their first language (L1). It is closely related to embodied cognition and embodied language processing, both of which only refer to native language thinking and speaking. An example of embodied bilingual language would be situation in which a L1 English speaker learning Spanish as a second language hears the word rápido ("fast") in Spanish while taking notes and then proceeds to take notes more quickly. ``` Essay Example: ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: Write the original essay for the following summary: A sleeveless dress with a round neck is a great day dress and can be worn for both formal and casual occasions. It has a sweetheart neckline and is made from 100% silk. The dress has a chiffon overlay that covers up the miniskirt and still adds to the class of the dress. ### Response: Sleeveless Dress with a Round Neck Essay Casual wear, day dress, more formal dress The garment is a sleeveless dress with a round neck and has a sweetheart neckline. It has sheer detail on the neckline and on the back. These aspects of design make it suitable to be worn for day and evening occasions. Its great detail and chicness make it suitable for more formal events, .... ```
null
Non_BioNLP
![image/png](https://cdn-uploads.huggingface.co/production/uploads/63732ebbbd81fae2b3aaf3fb/b3PQZKV_xZCGL8W5pjCpu.png) Walter is an unaligned, free thinking AI assistant that has been given time to think about things. It's trained on instruction datasets with open source licenses. It covers a lot of tasks, 2/3 of the samples are from large datasets like flan, but also other datasets. It knows a few tricks, shown by the prompting examples below: General Prompt Examples: ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: Please answer the following question: Process: - The bat sends out sound waves from their mouth or nose - The sound waves hit an object - Echoes are produced - The echo returns to the bat&#x27;s ears - The bat hears the echo - The bat can figure out where the object is located. suppose No bats live here happens, how will it affect STRONGER echolocation. Which of the following is the supposed perturbation? - directly impacting a step of the process - indirectly impacting a step of the process - not impacting any step of the process Answer: ### Response: indirectly impacting a step of the process ``` ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: You will be given a text in Russian language which contain different emotion labels from the list - ['joy', ' sadness', 'surprise', 'fear', 'anger']. You need to output the incorrect emotion label, which is irrelevant to the input text. Your answer (i) should contain only one emotion label (ii) should be unambiguous. ### Input: То чувство, когда идешь домой после 3-х часовой тренировки и в подъезде пахнет запеченной курицей - зачем вы меня дразните :( ### Response: surprise ``` CoT Prompt Example: ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: In this task, you are given a sentence with a missing word that can be an object, a person, and/or an action. Fill in the blank with a plausible word. Although each sentence has many correct answers, you only have to write one answer. PersonX puts ___ on PersonY's car ### Reasoning: Since PersonX is putting water on PersonY's car, it could be assumed that the car is on fire. ### Response: water ``` Summarization Example: ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: Below is a text. Write a comprehensive, concise and coherent summary. ### Input: Embodied bilingual language, also known as L2 embodiment, is the idea that people mentally simulate their actions, perceptions, and emotions when speaking and understanding a second language (L2) as with their first language (L1). It is closely related to embodied cognition and embodied language processing, both of which only refer to native language thinking and speaking. 
An example of embodied bilingual language would be situation in which a L1 English speaker learning Spanish as a second language hears the word rápido ("fast") in Spanish while taking notes and then proceeds to take notes more quickly. ==Overview== Embodied bilingual language refers to the role ...... ### Response: Embodied bilingual language, also known as L2 embodiment, is the idea that people mentally simulate their actions, perceptions, and emotions when speaking and understanding a second language (L2) as with their first language (L1). It is closely related to embodied cognition and embodied language processing, both of which only refer to native language thinking and speaking. An example of embodied bilingual language would be situation in which a L1 English speaker learning Spanish as a second language hears the word rápido ("fast") in Spanish while taking notes and then proceeds to take notes more quickly. ``` Essay Example: ``` ### System: You are an AI assistant. You will be given a task. Your goal is to complete the task as faithfully as you can. Let’s work this out in a step by step way to be sure we have the right answer. Always respond without hesitation. ### Instruction: Write the original essay for the following summary: A sleeveless dress with a round neck is a great day dress and can be worn for both formal and casual occasions. It has a sweetheart neckline and is made from 100% silk. The dress has a chiffon overlay that covers up the miniskirt and still adds to the class of the dress. ### Response: Sleeveless Dress with a Round Neck Essay Casual wear, day dress, more formal dress The garment is a sleeveless dress with a round neck and has a sweetheart neckline. It has sheer detail on the neckline and on the back. These aspects of design make it suitable to be worn for day and evening occasions. Its great detail and chicness make it suitable for more formal events, .... ```
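The card shows the `### System / ### Instruction / ### Response` prompt layout but no loading code. A hedged generation sketch follows; BTLM is a custom architecture, hence `trust_remote_code=True`, and the instruction text and sampling settings are illustrative rather than taken from the card.

```python
# Sketch: generating with the prompt format demonstrated in the card.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "KnutJaegersberg/Walter-BTLM-3B"
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

prompt = (
    "### System:\nYou are an AI assistant. You will be given a task. "
    "Your goal is to complete the task as faithfully as you can.\n\n"
    "### Instruction:\nName one way bats use echolocation.\n\n"
    "### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=128,
                        do_sample=True, temperature=0.7)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```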
{"datasets": ["KnutJaegersberg/Auton"], "license": "apache-2.0"}
task
[ "SUMMARIZATION" ]
40,243
kr-manish/fine-tune-embedding-bge-base-HrPolicy
kr-manish
sentence-similarity
[ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:160", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:2205.13147", "arxiv:1705.00652", "base_model:BAAI/bge-base-en-v1.5", "base_model:finetune:BAAI/bge-base-en-v1.5", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-07-03T15:18:24Z
2024-07-03T15:18:56+00:00
8
1
--- base_model: BAAI/bge-base-en-v1.5 datasets: [] language: [] library_name: sentence-transformers metrics: - cosine_accuracy@1 - cosine_accuracy@3 - cosine_accuracy@5 - cosine_accuracy@10 - cosine_precision@1 - cosine_precision@3 - cosine_precision@5 - cosine_precision@10 - cosine_recall@1 - cosine_recall@3 - cosine_recall@5 - cosine_recall@10 - cosine_ndcg@10 - cosine_mrr@10 - cosine_map@100 pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:160 - loss:MatryoshkaLoss - loss:MultipleNegativesRankingLoss widget: - source_sentence: Priya Softweb emphasizes the importance of maintaining a clean and organized workspace. The company's HR policies clearly state that employees are responsible for keeping their assigned workspaces clean, orderly, and free from unnecessary items. Spitting tobacco, gum, or other substances in the washrooms is strictly prohibited. The company believes that a clean and organized work environment contributes to a more efficient and professional work experience for everyone. This emphasis on cleanliness reflects the company's commitment to creating a pleasant and hygienic workspace for its employees. sentences: - What is Priya Softweb's policy on the use of mobile phones during work hours? - What steps does Priya Softweb take to ensure that the workspace is clean and organized? - What are the repercussions for employees who violate the Non-Disclosure Agreement at Priya Softweb? - source_sentence: Priya Softweb provides allocated basement parking facilities for employees to park their two-wheelers and four-wheelers. However, parking on the ground floor, around the lawn or main premises, is strictly prohibited as this space is reserved for Directors. Employees should use the parking under wings 5 and 6, while other parking spaces are allocated to different wings. Parking two-wheelers in the car parking zone is not permitted, even if space is available. Two-wheelers should be parked in the designated basement space on the main stand, not on the side stand. Employees are encouraged to park in common spaces on a first-come, first-served basis. The company clarifies that it is not responsible for providing parking and that employees park their vehicles at their own risk. This comprehensive parking policy ensures organized parking arrangements and clarifies the company's liability regarding vehicle safety. sentences: - What is the application process for planned leaves at Priya Softweb? - What are the parking arrangements at Priya Softweb? - What is the process for reporting a security breach at Priya Softweb? - source_sentence: The Diwali bonus at Priya Softweb is a discretionary benefit linked to the company's business performance. Distributed during the festive season of Diwali, it serves as a gesture of appreciation for employees' contributions throughout the year. However, it's important to note that employees currently under the notice period are not eligible for this bonus. This distinction highlights that the bonus is intended to reward ongoing commitment and contribution to the company's success. sentences: - What steps does Priya Softweb take to promote responsible use of company resources? - How does Priya Softweb demonstrate its commitment to Diversity, Equity, and Inclusion (DEI)? - What is the significance of the company's Diwali bonus at Priya Softweb? 
- source_sentence: Priya Softweb's HR Manual paints a picture of a company that values its employees while upholding a strong sense of professionalism and ethical conduct. The company emphasizes a structured and transparent approach to its HR processes, ensuring clarity and fairness in areas like recruitment, performance appraisals, compensation, leave management, work-from-home arrangements, and incident reporting. The manual highlights the importance of compliance with company policies, promotes diversity and inclusion, and encourages a culture of continuous learning and development. Overall, the message conveyed is one of creating a supportive, respectful, and growth-oriented work environment for all employees. sentences: - What is the overall message conveyed by Priya Softweb's HR Manual? - What is the process for reporting employee misconduct at Priya Softweb? - What is Priya Softweb's policy on salary disbursement and payslips? - source_sentence: No, work-from-home arrangements do not affect an employee's employment terms, compensation, and benefits at Priya Softweb. This clarifies that work-from-home is a flexible work arrangement and does not impact the employee's overall employment status or benefits. sentences: - Do work-from-home arrangements affect compensation and benefits at Priya Softweb? - What is the objective of the Work From Home Policy at Priya Softweb? - What is the procedure for a new employee joining Priya Softweb? model-index: - name: SentenceTransformer based on BAAI/bge-base-en-v1.5 results: - task: type: information-retrieval name: Information Retrieval dataset: name: dim 768 type: dim_768 metrics: - type: cosine_accuracy@1 value: 0.8333333333333334 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 1.0 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 1.0 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 1.0 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.8333333333333334 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.33333333333333326 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.20000000000000004 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.10000000000000002 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.8333333333333334 name: Cosine Recall@1 - type: cosine_recall@3 value: 1.0 name: Cosine Recall@3 - type: cosine_recall@5 value: 1.0 name: Cosine Recall@5 - type: cosine_recall@10 value: 1.0 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.923940541865081 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.898148148148148 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.898148148148148 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 512 type: dim_512 metrics: - type: cosine_accuracy@1 value: 0.8333333333333334 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 1.0 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 1.0 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 1.0 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.8333333333333334 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.33333333333333326 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.20000000000000004 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.10000000000000002 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.8333333333333334 name: Cosine Recall@1 - type: cosine_recall@3 value: 1.0 name: Cosine Recall@3 - type: cosine_recall@5 value: 1.0 name: Cosine Recall@5 
- type: cosine_recall@10 value: 1.0 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.923940541865081 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.898148148148148 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.898148148148148 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 256 type: dim_256 metrics: - type: cosine_accuracy@1 value: 0.8333333333333334 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 1.0 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 1.0 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 1.0 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.8333333333333334 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.33333333333333326 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.20000000000000004 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.10000000000000002 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.8333333333333334 name: Cosine Recall@1 - type: cosine_recall@3 value: 1.0 name: Cosine Recall@3 - type: cosine_recall@5 value: 1.0 name: Cosine Recall@5 - type: cosine_recall@10 value: 1.0 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.9312144170634953 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.9074074074074076 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.9074074074074073 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 128 type: dim_128 metrics: - type: cosine_accuracy@1 value: 0.7777777777777778 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 1.0 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 1.0 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 1.0 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.7777777777777778 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.33333333333333326 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.20000000000000004 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.10000000000000002 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.7777777777777778 name: Cosine Recall@1 - type: cosine_recall@3 value: 1.0 name: Cosine Recall@3 - type: cosine_recall@5 value: 1.0 name: Cosine Recall@5 - type: cosine_recall@10 value: 1.0 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.9107105144841319 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.8796296296296297 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.8796296296296295 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 64 type: dim_64 metrics: - type: cosine_accuracy@1 value: 0.6111111111111112 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.9444444444444444 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.9444444444444444 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 1.0 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.6111111111111112 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.31481481481481477 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.1888888888888889 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.10000000000000002 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.6111111111111112 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.9444444444444444 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.9444444444444444 name: Cosine Recall@5 - type: cosine_recall@10 value: 1.0 name: Cosine Recall@10 - type: 
cosine_ndcg@10 value: 0.826662566744103 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.7685185185185186 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7685185185185185 name: Cosine Map@100 --- # SentenceTransformer based on BAAI/bge-base-en-v1.5 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) <!-- at revision a5beb1e3e68b9ab74eb54cfd186867f64f240e1a --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("kr-manish/fine-tune-embedding-bge-base-HrPolicy") # Run inference sentences = [ "No, work-from-home arrangements do not affect an employee's employment terms, compensation, and benefits at Priya Softweb. This clarifies that work-from-home is a flexible work arrangement and does not impact the employee's overall employment status or benefits.", 'Do work-from-home arrangements affect compensation and benefits at Priya Softweb?', 'What is the objective of the Work From Home Policy at Priya Softweb?', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. 
<details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Dataset: `dim_768` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.8333 | | cosine_accuracy@3 | 1.0 | | cosine_accuracy@5 | 1.0 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.8333 | | cosine_precision@3 | 0.3333 | | cosine_precision@5 | 0.2 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.8333 | | cosine_recall@3 | 1.0 | | cosine_recall@5 | 1.0 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.9239 | | cosine_mrr@10 | 0.8981 | | **cosine_map@100** | **0.8981** | #### Information Retrieval * Dataset: `dim_512` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.8333 | | cosine_accuracy@3 | 1.0 | | cosine_accuracy@5 | 1.0 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.8333 | | cosine_precision@3 | 0.3333 | | cosine_precision@5 | 0.2 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.8333 | | cosine_recall@3 | 1.0 | | cosine_recall@5 | 1.0 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.9239 | | cosine_mrr@10 | 0.8981 | | **cosine_map@100** | **0.8981** | #### Information Retrieval * Dataset: `dim_256` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.8333 | | cosine_accuracy@3 | 1.0 | | cosine_accuracy@5 | 1.0 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.8333 | | cosine_precision@3 | 0.3333 | | cosine_precision@5 | 0.2 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.8333 | | cosine_recall@3 | 1.0 | | cosine_recall@5 | 1.0 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.9312 | | cosine_mrr@10 | 0.9074 | | **cosine_map@100** | **0.9074** | #### Information Retrieval * Dataset: `dim_128` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.7778 | | cosine_accuracy@3 | 1.0 | | cosine_accuracy@5 | 1.0 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.7778 | | cosine_precision@3 | 0.3333 | | cosine_precision@5 | 0.2 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.7778 | | cosine_recall@3 | 1.0 | | cosine_recall@5 | 1.0 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.9107 | | cosine_mrr@10 | 0.8796 | | **cosine_map@100** | **0.8796** | #### Information Retrieval * Dataset: `dim_64` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.6111 | | 
cosine_accuracy@3 | 0.9444 | | cosine_accuracy@5 | 0.9444 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.6111 | | cosine_precision@3 | 0.3148 | | cosine_precision@5 | 0.1889 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.6111 | | cosine_recall@3 | 0.9444 | | cosine_recall@5 | 0.9444 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.8267 | | cosine_mrr@10 | 0.7685 | | **cosine_map@100** | **0.7685** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 160 training samples * Columns: <code>positive</code> and <code>anchor</code> * Approximate statistics based on the first 1000 samples: | | positive | anchor | |:--------|:------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 18 tokens</li><li>mean: 93.95 tokens</li><li>max: 381 tokens</li></ul> | <ul><li>min: 13 tokens</li><li>mean: 20.32 tokens</li><li>max: 34 tokens</li></ul> | * Samples: | positive | anchor | |:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------| | <code>Priya Softweb's HR Manual provides 
valuable insights into the company's culture and values. Key takeaways include: * **Structure and Transparency:** The company emphasizes a structured and transparent approach to its HR processes. This is evident in its clear policies for recruitment, performance appraisals, compensation, leave management, work-from-home arrangements, and incident reporting. * **Professionalism and Ethics:** Priya Softweb places a high value on professionalism and ethical conduct. Its dress code, guidelines for mobile phone usage, and strict policies against tobacco use within the office all point toward a commitment to maintaining a professional and respectful work environment. * **Employee Well-being:** The company demonstrates a genuine concern for the well-being of its employees. This is reflected in its comprehensive leave policies, flexible work-from-home arrangements, and efforts to promote a healthy and clean workspace. * **Diversity and Inclusion:** Priya Softweb is committed to fostering a diverse and inclusive workplace, where employees from all backgrounds feel valued and respected. Its DEI policy outlines the company's commitment to equal opportunities, diverse hiring practices, and inclusive benefits and policies. * **Continuous Learning and Development:** The company encourages a culture of continuous learning and development, providing opportunities for employees to expand their skillsets and stay current with industry advancements. This is evident in its policies for Ethics & Compliance training and its encouragement of utilizing idle time for self-learning and exploring new technologies. Overall, Priya Softweb's HR Manual reveals a company culture that prioritizes structure, transparency, professionalism, employee well-being, diversity, and a commitment to continuous improvement. The company strives to create a supportive and growth-oriented work environment where employees feel valued and empowered to succeed.</code> | <code>What are the key takeaways from Priya Softweb's HR Manual regarding the company's culture and values?</code> | | <code>Priya Softweb provides allocated basement parking facilities for employees to park their two-wheelers and four-wheelers. However, parking on the ground floor, around the lawn or main premises, is strictly prohibited as this space is reserved for Directors. Employees should use the parking under wings 5 and 6, while other parking spaces are allocated to different wings. Parking two-wheelers in the car parking zone is not permitted, even if space is available. Two-wheelers should be parked in the designated basement space on the main stand, not on the side stand. Employees are encouraged to park in common spaces on a first-come, first-served basis. The company clarifies that it is not responsible for providing parking and that employees park their vehicles at their own risk. 
This comprehensive parking policy ensures organized parking arrangements and clarifies the company's liability regarding vehicle safety.</code> | <code>What are the parking arrangements at Priya Softweb?</code> | | <code>Investments and declarations must be submitted on or before the 25th of each month through OMS at Priya Softweb.</code> | <code>What is the deadline for submitting investments and declarations at Priya Softweb?</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `gradient_accumulation_steps`: 16 - `learning_rate`: 2e-05 - `num_train_epochs`: 10 - `lr_scheduler_type`: cosine - `warmup_ratio`: 0.1 - `load_best_model_at_end`: True - `optim`: adamw_torch_fused #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 16 - `eval_accumulation_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 10 - `max_steps`: -1 - `lr_scheduler_type`: cosine - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: True - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch_fused - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - 
`hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | dim_128_cosine_map@100 | dim_256_cosine_map@100 | dim_512_cosine_map@100 | dim_64_cosine_map@100 | dim_768_cosine_map@100 | |:-------:|:-----:|:-------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|:----------------------:| | 0 | 0 | - | 0.5729 | 0.5863 | 0.6595 | 0.5079 | 0.6896 | | 1.0 | 1 | - | 0.6636 | 0.6914 | 0.8213 | 0.6036 | 0.8472 | | 2.0 | 2 | - | 0.7833 | 0.8148 | 0.9352 | 0.7171 | 0.8796 | | 3.0 | 3 | - | 0.8213 | 0.8519 | 0.8981 | 0.7333 | 0.8981 | | 4.0 | 5 | - | 0.8426 | 0.9074 | 0.8981 | 0.75 | 0.8981 | | 5.0 | 6 | - | 0.8426 | 0.9074 | 0.8981 | 0.7685 | 0.8981 | | **6.0** | **7** | **-** | **0.8796** | **0.9074** | **0.8981** | **0.7685** | **0.8981** | | 7.0 | 9 | - | 0.8796 | 0.9074 | 0.8981 | 0.7685 | 0.8981 | | 8.0 | 10 | 0.5275 | 0.8796 | 0.9074 | 0.8981 | 0.7685 | 0.8981 | * The bold row denotes the saved checkpoint. 
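The MatryoshkaLoss configuration and the non-default hyperparameters listed above can be approximated with the `SentenceTransformerTrainer` API introduced in Sentence Transformers v3. The snippet below is an illustrative sketch rather than the exact training script used for this model: the two anchor/positive pairs and the `output_dir` value are placeholders standing in for the 160-pair dataset described above.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss

# Base checkpoint that this model was fine-tuned from
model = SentenceTransformer("BAAI/bge-base-en-v1.5")

# Placeholder anchor/positive pairs; the real dataset has 160 such pairs
train_dataset = Dataset.from_dict({
    "anchor": [
        "What are the parking arrangements at Priya Softweb?",
        "What is the deadline for submitting investments and declarations at Priya Softweb?",
    ],
    "positive": [
        "Priya Softweb provides allocated basement parking facilities for employees ...",
        "Investments and declarations must be submitted on or before the 25th of each month through OMS ...",
    ],
})

# In-batch-negatives loss, wrapped so that the first 768/512/256/128/64
# dimensions of each embedding are all trained to be useful on their own
inner_loss = MultipleNegativesRankingLoss(model)
loss = MatryoshkaLoss(model, inner_loss, matryoshka_dims=[768, 512, 256, 128, 64])

args = SentenceTransformerTrainingArguments(
    output_dir="bge-base-hr-policy",  # placeholder path
    num_train_epochs=10,
    per_device_train_batch_size=32,
    gradient_accumulation_steps=16,
    learning_rate=2e-5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    loss=loss,
)
trainer.train()
```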
### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.1.2+cu121 - Accelerate: 0.31.0 - Datasets: 2.19.1 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MatryoshkaLoss ```bibtex @misc{kusupati2024matryoshka, title={Matryoshka Representation Learning}, author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi}, year={2024}, eprint={2205.13147}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
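One additional usage note on the Matryoshka dimensions evaluated above: because the model was trained with MatryoshkaLoss over 768/512/256/128/64 dimensions, embeddings can be truncated at load time to trade a little accuracy for smaller vectors (the 256-dimensional setting reports the highest cosine_map@100 in the tables above). A minimal sketch, assuming a Sentence Transformers version that supports the `truncate_dim` argument (v2.7 or later):

```python
from sentence_transformers import SentenceTransformer

# Keep only the first 256 dimensions of every embedding
model = SentenceTransformer(
    "kr-manish/fine-tune-embedding-bge-base-HrPolicy",
    truncate_dim=256,
)

queries = ["What are the parking arrangements at Priya Softweb?"]
embeddings = model.encode(queries)
print(embeddings.shape)  # (1, 256)
```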
null
Non_BioNLP
# SentenceTransformer based on BAAI/bge-base-en-v1.5 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) <!-- at revision a5beb1e3e68b9ab74eb54cfd186867f64f240e1a --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("kr-manish/fine-tune-embedding-bge-base-HrPolicy") # Run inference sentences = [ "No, work-from-home arrangements do not affect an employee's employment terms, compensation, and benefits at Priya Softweb. This clarifies that work-from-home is a flexible work arrangement and does not impact the employee's overall employment status or benefits.", 'Do work-from-home arrangements affect compensation and benefits at Priya Softweb?', 'What is the objective of the Work From Home Policy at Priya Softweb?', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. 
<details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Dataset: `dim_768` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.8333 | | cosine_accuracy@3 | 1.0 | | cosine_accuracy@5 | 1.0 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.8333 | | cosine_precision@3 | 0.3333 | | cosine_precision@5 | 0.2 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.8333 | | cosine_recall@3 | 1.0 | | cosine_recall@5 | 1.0 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.9239 | | cosine_mrr@10 | 0.8981 | | **cosine_map@100** | **0.8981** | #### Information Retrieval * Dataset: `dim_512` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.8333 | | cosine_accuracy@3 | 1.0 | | cosine_accuracy@5 | 1.0 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.8333 | | cosine_precision@3 | 0.3333 | | cosine_precision@5 | 0.2 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.8333 | | cosine_recall@3 | 1.0 | | cosine_recall@5 | 1.0 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.9239 | | cosine_mrr@10 | 0.8981 | | **cosine_map@100** | **0.8981** | #### Information Retrieval * Dataset: `dim_256` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.8333 | | cosine_accuracy@3 | 1.0 | | cosine_accuracy@5 | 1.0 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.8333 | | cosine_precision@3 | 0.3333 | | cosine_precision@5 | 0.2 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.8333 | | cosine_recall@3 | 1.0 | | cosine_recall@5 | 1.0 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.9312 | | cosine_mrr@10 | 0.9074 | | **cosine_map@100** | **0.9074** | #### Information Retrieval * Dataset: `dim_128` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.7778 | | cosine_accuracy@3 | 1.0 | | cosine_accuracy@5 | 1.0 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.7778 | | cosine_precision@3 | 0.3333 | | cosine_precision@5 | 0.2 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.7778 | | cosine_recall@3 | 1.0 | | cosine_recall@5 | 1.0 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.9107 | | cosine_mrr@10 | 0.8796 | | **cosine_map@100** | **0.8796** | #### Information Retrieval * Dataset: `dim_64` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.6111 | | 
cosine_accuracy@3 | 0.9444 | | cosine_accuracy@5 | 0.9444 | | cosine_accuracy@10 | 1.0 | | cosine_precision@1 | 0.6111 | | cosine_precision@3 | 0.3148 | | cosine_precision@5 | 0.1889 | | cosine_precision@10 | 0.1 | | cosine_recall@1 | 0.6111 | | cosine_recall@3 | 0.9444 | | cosine_recall@5 | 0.9444 | | cosine_recall@10 | 1.0 | | cosine_ndcg@10 | 0.8267 | | cosine_mrr@10 | 0.7685 | | **cosine_map@100** | **0.7685** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 160 training samples * Columns: <code>positive</code> and <code>anchor</code> * Approximate statistics based on the first 1000 samples: | | positive | anchor | |:--------|:------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 18 tokens</li><li>mean: 93.95 tokens</li><li>max: 381 tokens</li></ul> | <ul><li>min: 13 tokens</li><li>mean: 20.32 tokens</li><li>max: 34 tokens</li></ul> | * Samples: | positive | anchor | |:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------| | <code>Priya Softweb's HR Manual provides 
valuable insights into the company's culture and values. Key takeaways include: * **Structure and Transparency:** The company emphasizes a structured and transparent approach to its HR processes. This is evident in its clear policies for recruitment, performance appraisals, compensation, leave management, work-from-home arrangements, and incident reporting. * **Professionalism and Ethics:** Priya Softweb places a high value on professionalism and ethical conduct. Its dress code, guidelines for mobile phone usage, and strict policies against tobacco use within the office all point toward a commitment to maintaining a professional and respectful work environment. * **Employee Well-being:** The company demonstrates a genuine concern for the well-being of its employees. This is reflected in its comprehensive leave policies, flexible work-from-home arrangements, and efforts to promote a healthy and clean workspace. * **Diversity and Inclusion:** Priya Softweb is committed to fostering a diverse and inclusive workplace, where employees from all backgrounds feel valued and respected. Its DEI policy outlines the company's commitment to equal opportunities, diverse hiring practices, and inclusive benefits and policies. * **Continuous Learning and Development:** The company encourages a culture of continuous learning and development, providing opportunities for employees to expand their skillsets and stay current with industry advancements. This is evident in its policies for Ethics & Compliance training and its encouragement of utilizing idle time for self-learning and exploring new technologies. Overall, Priya Softweb's HR Manual reveals a company culture that prioritizes structure, transparency, professionalism, employee well-being, diversity, and a commitment to continuous improvement. The company strives to create a supportive and growth-oriented work environment where employees feel valued and empowered to succeed.</code> | <code>What are the key takeaways from Priya Softweb's HR Manual regarding the company's culture and values?</code> | | <code>Priya Softweb provides allocated basement parking facilities for employees to park their two-wheelers and four-wheelers. However, parking on the ground floor, around the lawn or main premises, is strictly prohibited as this space is reserved for Directors. Employees should use the parking under wings 5 and 6, while other parking spaces are allocated to different wings. Parking two-wheelers in the car parking zone is not permitted, even if space is available. Two-wheelers should be parked in the designated basement space on the main stand, not on the side stand. Employees are encouraged to park in common spaces on a first-come, first-served basis. The company clarifies that it is not responsible for providing parking and that employees park their vehicles at their own risk. 
This comprehensive parking policy ensures organized parking arrangements and clarifies the company's liability regarding vehicle safety.</code> | <code>What are the parking arrangements at Priya Softweb?</code> | | <code>Investments and declarations must be submitted on or before the 25th of each month through OMS at Priya Softweb.</code> | <code>What is the deadline for submitting investments and declarations at Priya Softweb?</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `gradient_accumulation_steps`: 16 - `learning_rate`: 2e-05 - `num_train_epochs`: 10 - `lr_scheduler_type`: cosine - `warmup_ratio`: 0.1 - `load_best_model_at_end`: True - `optim`: adamw_torch_fused #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 16 - `eval_accumulation_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 10 - `max_steps`: -1 - `lr_scheduler_type`: cosine - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: True - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch_fused - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - 
`hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | dim_128_cosine_map@100 | dim_256_cosine_map@100 | dim_512_cosine_map@100 | dim_64_cosine_map@100 | dim_768_cosine_map@100 | |:-------:|:-----:|:-------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|:----------------------:| | 0 | 0 | - | 0.5729 | 0.5863 | 0.6595 | 0.5079 | 0.6896 | | 1.0 | 1 | - | 0.6636 | 0.6914 | 0.8213 | 0.6036 | 0.8472 | | 2.0 | 2 | - | 0.7833 | 0.8148 | 0.9352 | 0.7171 | 0.8796 | | 3.0 | 3 | - | 0.8213 | 0.8519 | 0.8981 | 0.7333 | 0.8981 | | 4.0 | 5 | - | 0.8426 | 0.9074 | 0.8981 | 0.75 | 0.8981 | | 5.0 | 6 | - | 0.8426 | 0.9074 | 0.8981 | 0.7685 | 0.8981 | | **6.0** | **7** | **-** | **0.8796** | **0.9074** | **0.8981** | **0.7685** | **0.8981** | | 7.0 | 9 | - | 0.8796 | 0.9074 | 0.8981 | 0.7685 | 0.8981 | | 8.0 | 10 | 0.5275 | 0.8796 | 0.9074 | 0.8981 | 0.7685 | 0.8981 | * The bold row denotes the saved checkpoint. 
### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.1.2+cu121 - Accelerate: 0.31.0 - Datasets: 2.19.1 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MatryoshkaLoss ```bibtex @misc{kusupati2024matryoshka, title={Matryoshka Representation Learning}, author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi}, year={2024}, eprint={2205.13147}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "BAAI/bge-base-en-v1.5", "datasets": [], "language": [], "library_name": "sentence-transformers", "metrics": ["cosine_accuracy@1", "cosine_accuracy@3", "cosine_accuracy@5", "cosine_accuracy@10", "cosine_precision@1", "cosine_precision@3", "cosine_precision@5", "cosine_precision@10", "cosine_recall@1", "cosine_recall@3", "cosine_recall@5", "cosine_recall@10", "cosine_ndcg@10", "cosine_mrr@10", "cosine_map@100"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:160", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Priya Softweb emphasizes the importance of maintaining a clean and organized workspace. The company's HR policies clearly state that employees are responsible for keeping their assigned workspaces clean, orderly, and free from unnecessary items. Spitting tobacco, gum, or other substances in the washrooms is strictly prohibited. The company believes that a clean and organized work environment contributes to a more efficient and professional work experience for everyone. This emphasis on cleanliness reflects the company's commitment to creating a pleasant and hygienic workspace for its employees.", "sentences": ["What is Priya Softweb's policy on the use of mobile phones during work hours?", "What steps does Priya Softweb take to ensure that the workspace is clean and organized?", "What are the repercussions for employees who violate the Non-Disclosure Agreement at Priya Softweb?"]}, {"source_sentence": "Priya Softweb provides allocated basement parking facilities for employees to park their two-wheelers and four-wheelers. However, parking on the ground floor, around the lawn or main premises, is strictly prohibited as this space is reserved for Directors. Employees should use the parking under wings 5 and 6, while other parking spaces are allocated to different wings. Parking two-wheelers in the car parking zone is not permitted, even if space is available. Two-wheelers should be parked in the designated basement space on the main stand, not on the side stand. Employees are encouraged to park in common spaces on a first-come, first-served basis. The company clarifies that it is not responsible for providing parking and that employees park their vehicles at their own risk. This comprehensive parking policy ensures organized parking arrangements and clarifies the company's liability regarding vehicle safety.", "sentences": ["What is the application process for planned leaves at Priya Softweb?", "What are the parking arrangements at Priya Softweb?", "What is the process for reporting a security breach at Priya Softweb?"]}, {"source_sentence": "The Diwali bonus at Priya Softweb is a discretionary benefit linked to the company's business performance. Distributed during the festive season of Diwali, it serves as a gesture of appreciation for employees' contributions throughout the year. However, it's important to note that employees currently under the notice period are not eligible for this bonus. 
This distinction highlights that the bonus is intended to reward ongoing commitment and contribution to the company's success.", "sentences": ["What steps does Priya Softweb take to promote responsible use of company resources?", "How does Priya Softweb demonstrate its commitment to Diversity, Equity, and Inclusion (DEI)?", "What is the significance of the company's Diwali bonus at Priya Softweb?"]}, {"source_sentence": "Priya Softweb's HR Manual paints a picture of a company that values its employees while upholding a strong sense of professionalism and ethical conduct. The company emphasizes a structured and transparent approach to its HR processes, ensuring clarity and fairness in areas like recruitment, performance appraisals, compensation, leave management, work-from-home arrangements, and incident reporting. The manual highlights the importance of compliance with company policies, promotes diversity and inclusion, and encourages a culture of continuous learning and development. Overall, the message conveyed is one of creating a supportive, respectful, and growth-oriented work environment for all employees.", "sentences": ["What is the overall message conveyed by Priya Softweb's HR Manual?", "What is the process for reporting employee misconduct at Priya Softweb?", "What is Priya Softweb's policy on salary disbursement and payslips?"]}, {"source_sentence": "No, work-from-home arrangements do not affect an employee's employment terms, compensation, and benefits at Priya Softweb. This clarifies that work-from-home is a flexible work arrangement and does not impact the employee's overall employment status or benefits.", "sentences": ["Do work-from-home arrangements affect compensation and benefits at Priya Softweb?", "What is the objective of the Work From Home Policy at Priya Softweb?", "What is the procedure for a new employee joining Priya Softweb?"]}], "model-index": [{"name": "SentenceTransformer based on BAAI/bge-base-en-v1.5", "results": [{"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 768", "type": "dim_768"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.8333333333333334, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 1.0, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 1.0, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 1.0, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.8333333333333334, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.33333333333333326, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.20000000000000004, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.10000000000000002, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.8333333333333334, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 1.0, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 1.0, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 1.0, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.923940541865081, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.898148148148148, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.898148148148148, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 512", "type": "dim_512"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.8333333333333334, "name": 
"Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 1.0, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 1.0, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 1.0, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.8333333333333334, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.33333333333333326, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.20000000000000004, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.10000000000000002, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.8333333333333334, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 1.0, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 1.0, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 1.0, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.923940541865081, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.898148148148148, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.898148148148148, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 256", "type": "dim_256"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.8333333333333334, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 1.0, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 1.0, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 1.0, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.8333333333333334, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.33333333333333326, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.20000000000000004, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.10000000000000002, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.8333333333333334, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 1.0, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 1.0, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 1.0, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.9312144170634953, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.9074074074074076, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.9074074074074073, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 128", "type": "dim_128"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.7777777777777778, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 1.0, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 1.0, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 1.0, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.7777777777777778, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.33333333333333326, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.20000000000000004, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.10000000000000002, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.7777777777777778, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 1.0, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 1.0, "name": 
"Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 1.0, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.9107105144841319, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.8796296296296297, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.8796296296296295, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 64", "type": "dim_64"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.6111111111111112, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.9444444444444444, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.9444444444444444, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 1.0, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.6111111111111112, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.31481481481481477, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.1888888888888889, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.10000000000000002, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.6111111111111112, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.9444444444444444, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.9444444444444444, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 1.0, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.826662566744103, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7685185185185186, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7685185185185185, "name": "Cosine Map@100"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,244
SALT-NLP/FLANG-DistilBERT
SALT-NLP
fill-mask
[ "transformers", "pytorch", "distilbert", "fill-mask", "Financial Language Modelling", "en", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-06-24T05:43:00Z
2022-11-29T17:07:13+00:00
32
0
---
language: en
tags:
- Financial Language Modelling
widget:
- text: Stocks rallied and the British pound [MASK].
---

## Dataset Summary

- **Homepage:** https://salt-nlp.github.io/FLANG/
- **Models:** https://huggingface.co/SALT-NLP/FLANG-BERT
- **Repository:** https://github.com/SALT-NLP/FLANG

## FLANG

FLANG is a set of large language models for Financial LANGuage tasks. These models use domain specific pre-training with preferential masking to build more robust representations for the domain. The models in the set are:\
[FLANG-BERT](https://huggingface.co/SALT-NLP/FLANG-BERT)\
[FLANG-SpanBERT](https://huggingface.co/SALT-NLP/FLANG-SpanBERT)\
[FLANG-DistilBERT](https://huggingface.co/SALT-NLP/FLANG-DistilBERT)\
[FLANG-Roberta](https://huggingface.co/SALT-NLP/FLANG-Roberta)\
[FLANG-ELECTRA](https://huggingface.co/SALT-NLP/FLANG-ELECTRA)

## FLANG-DistilBERT

FLANG-DistilBERT is a pre-trained language model which uses financial keywords and phrases for preferential masking of domain specific terms. It is built by further training the DistilBERT language model in the finance domain with improved performance over previous models due to the use of domain knowledge and vocabulary.

## FLUE

FLUE (Financial Language Understanding Evaluation) is a comprehensive and heterogeneous benchmark that has been built from 5 diverse financial domain specific datasets.

Sentiment Classification: [Financial PhraseBank](https://huggingface.co/datasets/financial_phrasebank)\
Sentiment Analysis, Question Answering: [FiQA 2018](https://huggingface.co/datasets/SALT-NLP/FLUE-FiQA)\
News Headlines Classification: [Headlines](https://www.kaggle.com/datasets/daittan/gold-commodity-news-and-dimensions)\
Named Entity Recognition: [NER](https://paperswithcode.com/dataset/fin)\
Structure Boundary Detection: [FinSBD3](https://sites.google.com/nlg.csie.ntu.edu.tw/finweb2021/shared-task-finsbd-3)

## Citation

Please cite the work with the following citation:

```bibtex
@INPROCEEDINGS{shah-etal-2022-flang,
    author = {Shah, Raj Sanjay and Chawla, Kunal and Eidnani, Dheeraj and Shah, Agam and Du, Wendi and Chava, Sudheer and Raman, Natraj and Smiley, Charese and Chen, Jiaao and Yang, Diyi },
    title = {When FLUE Meets FLANG: Benchmarks and Large Pretrained Language Model for Financial Domain},
    booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
    year = {2022},
    publisher = {Association for Computational Linguistics}
}
```

## Contact information

Please contact Raj Sanjay Shah (rajsanjayshah[at]gatech[dot]edu) or Sudheer Chava (schava6[at]gatech[dot]edu) or Diyi Yang (diyiy[at]stanford[dot]edu) about any FLANG-DistilBERT related issues and questions.

---
license: afl-3.0
---
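Since the card itself only shows a widget sentence, here is a minimal, illustrative sketch of querying the checkpoint with the Hugging Face `fill-mask` pipeline; it assumes only that the model loads as a standard DistilBERT masked language model, as the repository tags indicate, and the output formatting below is an illustration rather than part of the original card.

```python
from transformers import pipeline

# Masked-token prediction with the financial-domain DistilBERT checkpoint
fill_mask = pipeline("fill-mask", model="SALT-NLP/FLANG-DistilBERT")

# Same sentence as the widget example above
for prediction in fill_mask("Stocks rallied and the British pound [MASK]."):
    print(f"{prediction['token_str']!r}  score={prediction['score']:.3f}")
```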
null
Non_BioNLP
## Dataset Summary - **Homepage:** https://salt-nlp.github.io/FLANG/ - **Models:** https://huggingface.co/SALT-NLP/FLANG-BERT - **Repository:** https://github.com/SALT-NLP/FLANG ## FLANG FLANG is a set of large language models for Financial LANGuage tasks. These models use domain specific pre-training with preferential masking to build more robust representations for the domain. The models in the set are:\ [FLANG-BERT](https://huggingface.co/SALT-NLP/FLANG-BERT)\ [FLANG-SpanBERT](https://huggingface.co/SALT-NLP/FLANG-SpanBERT)\ [FLANG-DistilBERT](https://huggingface.co/SALT-NLP/FLANG-DistilBERT)\ [FLANG-Roberta](https://huggingface.co/SALT-NLP/FLANG-Roberta)\ [FLANG-ELECTRA](https://huggingface.co/SALT-NLP/FLANG-ELECTRA) ## FLANG-DistilBERT FLANG-DistilBERT is a pre-trained language model which uses financial keywords and phrases for preferential masking of domain specific terms. It is built by further training the DistilBERT language model in the finance domain with improved performance over previous models due to the use of domain knowledge and vocabulary. ## FLUE FLUE (Financial Language Understanding Evaluation) is a comprehensive and heterogeneous benchmark that has been built from 5 diverse financial domain specific datasets. Sentiment Classification: [Financial PhraseBank](https://huggingface.co/datasets/financial_phrasebank)\ Sentiment Analysis, Question Answering: [FiQA 2018](https://huggingface.co/datasets/SALT-NLP/FLUE-FiQA)\ New Headlines Classification: [Headlines](https://www.kaggle.com/datasets/daittan/gold-commodity-news-and-dimensions)\ Named Entity Recognition: [NER](https://paperswithcode.com/dataset/fin)\ Structure Boundary Detection: [FinSBD3](https://sites.google.com/nlg.csie.ntu.edu.tw/finweb2021/shared-task-finsbd-3) ## Citation Please cite the work with the following citation: ```bibtex @INPROCEEDINGS{shah-etal-2022-flang, author = {Shah, Raj Sanjay and Chawla, Kunal and Eidnani, Dheeraj and Shah, Agam and Du, Wendi and Chava, Sudheer and Raman, Natraj and Smiley, Charese and Chen, Jiaao and Yang, Diyi }, title = {When FLUE Meets FLANG: Benchmarks and Large Pretrained Language Model for Financial Domain}, booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP)}, year = {2022}, publisher = {Association for Computational Linguistics} } ``` ## Contact information Please contact Raj Sanjay Shah (rajsanjayshah[at]gatech[dot]edu) or Sudheer Chava (schava6[at]gatech[dot]edu) or Diyi Yang (diyiy[at]stanford[dot]edu) about any FLANG-DistilBERT related issues and questions. --- license: afl-3.0 ---
{"language": "en", "tags": ["Financial Language Modelling"], "widget": [{"text": "Stocks rallied and the British pound [MASK]."}]}
task
[ "NAMED_ENTITY_RECOGNITION", "QUESTION_ANSWERING" ]
40,245
vasista22/whisper-tamil-small
vasista22
automatic-speech-recognition
[ "transformers", "pytorch", "jax", "whisper", "automatic-speech-recognition", "whisper-event", "ta", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
2023-01-01T06:00:21Z
2023-04-24T21:03:31+00:00
905
0
--- language: - ta license: apache-2.0 metrics: - wer tags: - whisper-event model-index: - name: Whisper Tamil Small - Vasista Sai Lodagala results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: google/fleurs type: google/fleurs config: ta_in split: test metrics: - type: wer value: 9.11 name: WER - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: mozilla-foundation/common_voice_11_0 type: mozilla-foundation/common_voice_11_0 config: ta split: test metrics: - type: wer value: 7.95 name: WER --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Tamil Small This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Tamil data available from multiple publicly available ASR corpuses. It has been fine-tuned as a part of the Whisper fine-tuning sprint. **NOTE:** The code used to train this model is available for re-use in the [whisper-finetune](https://github.com/vasistalodagala/whisper-finetune) repository. ## Usage In order to evaluate this model on an entire dataset, the evaluation codes available in the [whisper-finetune](https://github.com/vasistalodagala/whisper-finetune) repository can be used. The same repository also provides the scripts for faster inference using whisper-jax. In order to infer a single audio file using this model, the following code snippet can be used: ```python >>> import torch >>> from transformers import pipeline >>> # path to the audio file to be transcribed >>> audio = "/path/to/audio.format" >>> device = "cuda:0" if torch.cuda.is_available() else "cpu" >>> transcribe = pipeline(task="automatic-speech-recognition", model="vasista22/whisper-tamil-small", chunk_length_s=30, device=device) >>> transcribe.model.config.forced_decoder_ids = transcribe.tokenizer.get_decoder_prompt_ids(language="ta", task="transcribe") >>> print('Transcription: ', transcribe(audio)["text"]) ``` For faster inference of whisper models, the [whisper-jax](https://github.com/sanchit-gandhi/whisper-jax) library can be used. 
Please follow the necessary installation steps as mentioned [here](https://github.com/vasistalodagala/whisper-finetune#faster-evaluation-with-whisper-jax), before using the following code snippet: ```python >>> import jax.numpy as jnp >>> from whisper_jax import FlaxWhisperForConditionalGeneration, FlaxWhisperPipline >>> # path to the audio file to be transcribed >>> audio = "/path/to/audio.format" >>> transcribe = FlaxWhisperPipline("vasista22/whisper-tamil-small", batch_size=16) >>> transcribe.model.config.forced_decoder_ids = transcribe.tokenizer.get_decoder_prompt_ids(language="ta", task="transcribe") >>> print('Transcription: ', transcribe(audio)["text"]) ``` ## Training and evaluation data Training Data: - [IISc-MILE Tamil ASR Corpus](https://www.openslr.org/127/) - [ULCA ASR Corpus](https://github.com/Open-Speech-EkStep/ULCA-asr-dataset-corpus#tamil-labelled--total-duration-is-116024-hours) - [Shrutilipi ASR Corpus](https://ai4bharat.org/shrutilipi) - [Microsoft Speech Corpus (Indian Languages)](https://msropendata.com/datasets/7230b4b1-912d-400e-be58-f84e0512985e) - [Google/Fleurs Train+Dev set](https://huggingface.co/datasets/google/fleurs) - Babel ASR Corpus Evaluation Data: - [Microsoft Speech Corpus (Indian Languages) Test Set](https://msropendata.com/datasets/7230b4b1-912d-400e-be58-f84e0512985e) - [Google/Fleurs Test Set](https://huggingface.co/datasets/google/fleurs) - [IISc-MILE Test Set](https://www.openslr.org/127/) - Babel Test Set ## Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.7e-05 - train_batch_size: 48 - eval_batch_size: 32 - seed: 22 - optimizer: adamw_bnb_8bit - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 17500 - training_steps: 29659 (Initially set to 84740 steps) - mixed_precision_training: True ## Acknowledgement This work was done at [Speech Lab, IIT Madras](https://asr.iitm.ac.in/). The compute resources for this work were funded by "Bhashini: National Language translation Mission" project of the Ministry of Electronics and Information Technology (MeitY), Government of India.
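The card defers full-dataset evaluation to the whisper-finetune repository. As an illustration only, a small-scale WER check on a few Tamil FLEURS test utterances could be sketched as below; this uses the generic `datasets` and `evaluate` libraries rather than the repository's evaluation script, applies no text normalization, and so will not reproduce the WER figures reported above:

```python
import torch
import evaluate
from datasets import load_dataset
from transformers import pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"
transcribe = pipeline(
    task="automatic-speech-recognition",
    model="vasista22/whisper-tamil-small",
    chunk_length_s=30,
    device=device,
)
transcribe.model.config.forced_decoder_ids = transcribe.tokenizer.get_decoder_prompt_ids(
    language="ta", task="transcribe"
)

# Stream a handful of Tamil FLEURS test samples instead of downloading the full split.
fleurs = load_dataset("google/fleurs", "ta_in", split="test", streaming=True)
wer_metric = evaluate.load("wer")

predictions, references = [], []
for sample in fleurs.take(8):
    predictions.append(transcribe(sample["audio"])["text"])
    references.append(sample["transcription"])

print("WER on this small sample:", wer_metric.compute(predictions=predictions, references=references))
```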
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Tamil Small This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Tamil data available from multiple publicly available ASR corpuses. It has been fine-tuned as a part of the Whisper fine-tuning sprint. **NOTE:** The code used to train this model is available for re-use in the [whisper-finetune](https://github.com/vasistalodagala/whisper-finetune) repository. ## Usage In order to evaluate this model on an entire dataset, the evaluation codes available in the [whisper-finetune](https://github.com/vasistalodagala/whisper-finetune) repository can be used. The same repository also provides the scripts for faster inference using whisper-jax. In order to infer a single audio file using this model, the following code snippet can be used: ```python >>> import torch >>> from transformers import pipeline >>> # path to the audio file to be transcribed >>> audio = "/path/to/audio.format" >>> device = "cuda:0" if torch.cuda.is_available() else "cpu" >>> transcribe = pipeline(task="automatic-speech-recognition", model="vasista22/whisper-tamil-small", chunk_length_s=30, device=device) >>> transcribe.model.config.forced_decoder_ids = transcribe.tokenizer.get_decoder_prompt_ids(language="ta", task="transcribe") >>> print('Transcription: ', transcribe(audio)["text"]) ``` For faster inference of whisper models, the [whisper-jax](https://github.com/sanchit-gandhi/whisper-jax) library can be used. Please follow the necessary installation steps as mentioned [here](https://github.com/vasistalodagala/whisper-finetune#faster-evaluation-with-whisper-jax), before using the following code snippet: ```python >>> import jax.numpy as jnp >>> from whisper_jax import FlaxWhisperForConditionalGeneration, FlaxWhisperPipline >>> # path to the audio file to be transcribed >>> audio = "/path/to/audio.format" >>> transcribe = FlaxWhisperPipline("vasista22/whisper-tamil-small", batch_size=16) >>> transcribe.model.config.forced_decoder_ids = transcribe.tokenizer.get_decoder_prompt_ids(language="ta", task="transcribe") >>> print('Transcription: ', transcribe(audio)["text"]) ``` ## Training and evaluation data Training Data: - [IISc-MILE Tamil ASR Corpus](https://www.openslr.org/127/) - [ULCA ASR Corpus](https://github.com/Open-Speech-EkStep/ULCA-asr-dataset-corpus#tamil-labelled--total-duration-is-116024-hours) - [Shrutilipi ASR Corpus](https://ai4bharat.org/shrutilipi) - [Microsoft Speech Corpus (Indian Languages)](https://msropendata.com/datasets/7230b4b1-912d-400e-be58-f84e0512985e) - [Google/Fleurs Train+Dev set](https://huggingface.co/datasets/google/fleurs) - Babel ASR Corpus Evaluation Data: - [Microsoft Speech Corpus (Indian Languages) Test Set](https://msropendata.com/datasets/7230b4b1-912d-400e-be58-f84e0512985e) - [Google/Fleurs Test Set](https://huggingface.co/datasets/google/fleurs) - [IISc-MILE Test Set](https://www.openslr.org/127/) - Babel Test Set ## Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.7e-05 - train_batch_size: 48 - eval_batch_size: 32 - seed: 22 - optimizer: adamw_bnb_8bit - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 17500 - training_steps: 29659 (Initially set to 84740 steps) - mixed_precision_training: True ## Acknowledgement This work was done at [Speech Lab, IIT 
Madras](https://asr.iitm.ac.in/). The compute resources for this work were funded by the "Bhashini: National Language Translation Mission" project of the Ministry of Electronics and Information Technology (MeitY), Government of India.
{"language": ["ta"], "license": "apache-2.0", "metrics": ["wer"], "tags": ["whisper-event"], "model-index": [{"name": "Whisper Tamil Small - Vasista Sai Lodagala", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Automatic Speech Recognition"}, "dataset": {"name": "google/fleurs", "type": "google/fleurs", "config": "ta_in", "split": "test"}, "metrics": [{"type": "wer", "value": 9.11, "name": "WER"}]}, {"task": {"type": "automatic-speech-recognition", "name": "Automatic Speech Recognition"}, "dataset": {"name": "mozilla-foundation/common_voice_11_0", "type": "mozilla-foundation/common_voice_11_0", "config": "ta", "split": "test"}, "metrics": [{"type": "wer", "value": 7.95, "name": "WER"}]}]}]}
task
[ "TRANSLATION" ]
40,246
gokuls/hBERTv2_new_pretrain_w_init__stsb
gokuls
text-classification
[ "transformers", "pytorch", "tensorboard", "hybridbert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-06T14:15:59Z
2023-06-06T14:24:14+00:00
10
0
--- datasets: - glue language: - en metrics: - spearmanr tags: - generated_from_trainer model-index: - name: hBERTv2_new_pretrain_w_init__stsb results: - task: type: text-classification name: Text Classification dataset: name: GLUE STSB type: glue config: stsb split: validation args: stsb metrics: - type: spearmanr value: 0.3669953973916525 name: Spearmanr --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv2_new_pretrain_w_init__stsb This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2_complete_training_new_wt_init](https://huggingface.co/gokuls/bert_12_layer_model_v2_complete_training_new_wt_init) on the GLUE STSB dataset. It achieves the following results on the evaluation set: - Loss: 2.0270 - Pearson: 0.3743 - Spearmanr: 0.3670 - Combined Score: 0.3707 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Pearson | Spearmanr | Combined Score | |:-------------:|:-----:|:----:|:---------------:|:-------:|:---------:|:--------------:| | 2.2654 | 1.0 | 45 | 2.4836 | 0.2041 | 0.1912 | 0.1976 | | 1.9657 | 2.0 | 90 | 2.1138 | 0.2744 | 0.2547 | 0.2646 | | 1.6665 | 3.0 | 135 | 2.2375 | 0.3087 | 0.3002 | 0.3044 | | 1.3265 | 4.0 | 180 | 2.0270 | 0.3743 | 0.3670 | 0.3707 | | 1.0731 | 5.0 | 225 | 2.3748 | 0.3294 | 0.3212 | 0.3253 | | 0.7974 | 6.0 | 270 | 2.6753 | 0.3338 | 0.3353 | 0.3345 | | 0.6738 | 7.0 | 315 | 2.5125 | 0.3590 | 0.3464 | 0.3527 | | 0.5384 | 8.0 | 360 | 2.3740 | 0.3310 | 0.3211 | 0.3261 | | 0.4589 | 9.0 | 405 | 2.3911 | 0.3709 | 0.3690 | 0.3699 | ### Framework versions - Transformers 4.29.2 - Pytorch 1.14.0a0+410ce96 - Datasets 2.12.0 - Tokenizers 0.13.3
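Since the card reports STSB regression metrics but no inference snippet, a hedged usage sketch follows. The checkpoint uses the custom `hybridbert` architecture, so it may not load through the stock Auto classes without the authors' modeling code (the `trust_remote_code` flag below is an assumption, not something the card documents); note also that the Combined Score above is simply the mean of the Pearson and Spearman values.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Sketch only: "hybridbert" is a custom architecture, so from_pretrained may
# require the authors' modeling code to be importable for this to succeed.
name = "gokuls/hBERTv2_new_pretrain_w_init__stsb"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name, trust_remote_code=True)

# STSB is a similarity-regression task: the single logit is the predicted score.
inputs = tokenizer(
    "A man is playing a guitar.",
    "A person is playing an instrument.",
    return_tensors="pt",
)
with torch.no_grad():
    score = model(**inputs).logits.squeeze().item()
print(f"predicted STSB similarity: {score:.3f}")
```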
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv2_new_pretrain_w_init__stsb This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2_complete_training_new_wt_init](https://huggingface.co/gokuls/bert_12_layer_model_v2_complete_training_new_wt_init) on the GLUE STSB dataset. It achieves the following results on the evaluation set: - Loss: 2.0270 - Pearson: 0.3743 - Spearmanr: 0.3670 - Combined Score: 0.3707 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Pearson | Spearmanr | Combined Score | |:-------------:|:-----:|:----:|:---------------:|:-------:|:---------:|:--------------:| | 2.2654 | 1.0 | 45 | 2.4836 | 0.2041 | 0.1912 | 0.1976 | | 1.9657 | 2.0 | 90 | 2.1138 | 0.2744 | 0.2547 | 0.2646 | | 1.6665 | 3.0 | 135 | 2.2375 | 0.3087 | 0.3002 | 0.3044 | | 1.3265 | 4.0 | 180 | 2.0270 | 0.3743 | 0.3670 | 0.3707 | | 1.0731 | 5.0 | 225 | 2.3748 | 0.3294 | 0.3212 | 0.3253 | | 0.7974 | 6.0 | 270 | 2.6753 | 0.3338 | 0.3353 | 0.3345 | | 0.6738 | 7.0 | 315 | 2.5125 | 0.3590 | 0.3464 | 0.3527 | | 0.5384 | 8.0 | 360 | 2.3740 | 0.3310 | 0.3211 | 0.3261 | | 0.4589 | 9.0 | 405 | 2.3911 | 0.3709 | 0.3690 | 0.3699 | ### Framework versions - Transformers 4.29.2 - Pytorch 1.14.0a0+410ce96 - Datasets 2.12.0 - Tokenizers 0.13.3
{"datasets": ["glue"], "language": ["en"], "metrics": ["spearmanr"], "tags": ["generated_from_trainer"], "model-index": [{"name": "hBERTv2_new_pretrain_w_init__stsb", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE STSB", "type": "glue", "config": "stsb", "split": "validation", "args": "stsb"}, "metrics": [{"type": "spearmanr", "value": 0.3669953973916525, "name": "Spearmanr"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,247
DivyaMereddy007/RecipeBert_v5original_epoc50_Copy_of_TrainSetenceTransforme-Finetuning_v5_DistilledBert
DivyaMereddy007
sentence-similarity
[ "sentence-transformers", "safetensors", "distilbert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:1746", "loss:CosineSimilarityLoss", "arxiv:1908.10084", "base_model:sentence-transformers/distilbert-base-nli-mean-tokens", "base_model:finetune:sentence-transformers/distilbert-base-nli-mean-tokens", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-06-07T19:55:41Z
2024-06-07T19:55:52+00:00
7
0
--- base_model: sentence-transformers/distilbert-base-nli-mean-tokens datasets: [] language: [] library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:1746 - loss:CosineSimilarityLoss widget: - source_sentence: Scalloped Corn ["1 can cream-style corn", "1 can whole kernel corn", "1/2 pkg. (approximately 20) saltine crackers, crushed", "1 egg, beaten", "6 tsp. butter, divided", "pepper to taste"] ["Mix together both cans of corn, crackers, egg, 2 teaspoons of melted butter and pepper and place in a buttered baking dish.", "Dot with remaining 4 teaspoons of butter.", "Bake at 350\u00b0 for 1 hour."] sentences: - Artichoke Dip ["2 cans or jars artichoke hearts", "1 c. mayonnaise", "1 c. Parmesan cheese"] ["Drain artichokes and chop.", "Mix with mayonnaise and Parmesan cheese.", "After well mixed, bake, uncovered, for 20 to 30 minutes at 350\u00b0.", "Serve with crackers."] - Scalloped Corn ["1 can cream-style corn", "1 can whole kernel corn", "1/2 pkg. (approximately 20) saltine crackers, crushed", "1 egg, beaten", "6 tsp. butter, divided", "pepper to taste"] ["Mix together both cans of corn, crackers, egg, 2 teaspoons of melted butter and pepper and place in a buttered baking dish.", "Dot with remaining 4 teaspoons of butter.", "Bake at 350\u00b0 for 1 hour."] - Chicken Stew ["3 lb. chicken, boiled", "4 medium potatoes, diced", "2 medium onions, chopped", "1 (16 oz.) can creamed corn", "1 (16 oz.) can English peas", "1 (16 oz.) can field peas", "1 (16 oz.) can butter beans", "1 (16 oz.) can tomatoes", "1 (46 oz.) can tomato juice", "1 small box macaroni", "1 Tbsp. black pepper", "1 Tbsp. salt", "1 Tbsp. sugar"] ["Remove chicken from bone.", "Use the broth.", "Mix the vegetables and macaroni.", "Add sugar, salt and black pepper.", "Cook until all vegetables are tender over medium heat."] - source_sentence: Watermelon Rind Pickles ["7 lb. watermelon rind", "7 c. sugar", "2 c. apple vinegar", "1/2 tsp. oil of cloves", "1/2 tsp. oil of cinnamon"] ["Trim off green and pink parts of watermelon rind; cut to 1-inch cubes.", "Parboil until tender, but not soft.", "Drain. Combine sugar, vinegar, oil of cloves and oil of cinnamon; bring to boiling and pour over rind.", "Let stand overnight.", "In the morning, drain off syrup.", "Heat and put over rind.", "The third morning, heat rind and syrup; seal in hot, sterilized jars.", "Makes 8 pints.", "(Oil of cinnamon and clove keeps rind clear and transparent.)"] sentences: - Cheeseburger Potato Soup ["6 baking potatoes", "1 lb. of extra lean ground beef", "2/3 c. butter or margarine", "6 c. milk", "3/4 tsp. salt", "1/2 tsp. pepper", "1 1/2 c (6 oz.) shredded Cheddar cheese, divided", "12 sliced bacon, cooked, crumbled and divided", "4 green onion, chopped and divided", "1 (8 oz.) carton sour cream (optional)"] ["Wash potatoes; prick several times with a fork.", "Microwave them with a wet paper towel covering the potatoes on high for 6-8 minutes.", "The potatoes should be soft, ready to eat.", "Let them cool enough to handle.", "Cut in half lengthwise; scoop out pulp and reserve.", "Discard shells.", "Brown ground beef until done.", "Drain any grease from the meat.", "Set aside when done.", "Meat will be added later.", "Melt butter in a large kettle over low heat; add flour, stirring until smooth.", "Cook 1 minute, stirring constantly. 
Gradually add milk; cook over medium heat, stirring constantly, until thickened and bubbly.", "Stir in potato, ground beef, salt, pepper, 1 cup of cheese, 2 tablespoons of green onion and 1/2 cup of bacon.", "Cook until heated (do not boil).", "Stir in sour cream if desired; cook until heated (do not boil).", "Sprinkle with remaining cheese, bacon and green onions."] - Easy Fudge ["1 (14 oz.) can sweetened condensed milk", "1 (12 oz.) pkg. semi-sweet chocolate chips", "1 (1 oz.) sq. unsweetened chocolate (if desired)", "1 1/2 c. chopped nuts (if desired)", "1 tsp. vanilla"] ["Butter a square pan, 8 x 8 x 2-inches.", "Heat milk, chocolate chips and unsweetened chocolate over low heat, stirring constantly, until chocolate is melted and mixture is smooth. Remove from heat.", "Stir in nuts and vanilla.", "Spread in pan."] - Chicken Ole ["4 chicken breasts, cooked", "1 can cream of chicken soup", "1 can cream of mushroom soup", "1 can green chili salsa sauce", "1 can green chilies", "1 c. milk", "1 grated onion", "1 pkg. corn tortilla in pieces"] ["Dice chicken.", "Mix all ingredients together.", "Let sit overnight.", "Bake 1 1/2 hours at 375\u00b0."] - source_sentence: Quick Barbecue Wings ["chicken wings (as many as you need for dinner)", "flour", "barbecue sauce (your choice)"] ["Clean wings.", "Flour and fry until done.", "Place fried chicken wings in microwave bowl.", "Stir in barbecue sauce.", "Microwave on High (stir once) for 4 minutes."] sentences: - Creamy Corn ["2 (16 oz.) pkg. frozen corn", "1 (8 oz.) pkg. cream cheese, cubed", "1/3 c. butter, cubed", "1/2 tsp. garlic powder", "1/2 tsp. salt", "1/4 tsp. pepper"] ["In a slow cooker, combine all ingredients. Cover and cook on low for 4 hours or until heated through and cheese is melted. Stir well before serving. Yields 6 servings."] - Broccoli Salad ["1 large head broccoli (about 1 1/2 lb.)", "10 slices bacon, cooked and crumbled", "5 green onions, sliced or 1/4 c. chopped red onion", "1/2 c. raisins", "1 c. mayonnaise", "2 Tbsp. vinegar", "1/4 c. sugar"] ["Trim off large leaves of broccoli and remove the tough ends of lower stalks. Wash the broccoli thoroughly. Cut the florets and stems into bite-size pieces. Place in a large bowl. Add bacon, onions and raisins. Combine remaining ingredients, stirring well. Add dressing to broccoli mixture and toss gently. Cover and refrigerate 2 to 3 hours. Makes about 6 servings."] - Vegetable-Burger Soup ["1/2 lb. ground beef", "2 c. water", "1 tsp. sugar", "1 pkg. Cup-a-Soup onion soup mix (dry)", "1 lb. can stewed tomatoes", "1 (8 oz.) can tomato sauce", "1 (10 oz.) pkg. frozen mixed vegetables"] ["Lightly brown beef in soup pot.", "Drain off excess fat.", "Stir in tomatoes, tomato sauce, water, frozen vegetables, soup mix and sugar.", "Bring to a boil.", "Reduce heat and simmer for 20 minutes. Serve."] - source_sentence: 'Eggless Milkless Applesauce Cake ["3/4 c. sugar", "1/2 c. shortening", "1 1/2 c. applesauce", "3 level tsp. soda", "1 tsp. each: cinnamon, cloves and nutmeg", "2 c. sifted flour", "1 c. raisins", "1 c. nuts"] ["Mix Crisco with applesauce, nuts and raisins.", "Sift dry ingredients and add.", "Mix well.", "Put in a greased and floured loaf pan or tube pan.", "Bake in loaf pan at 350\u00b0 to 375\u00b0 for 45 to 60 minutes, layer pan at 375\u00b0 for 20 minutes or tube pan at 325\u00b0 for 1 hour."]' sentences: - Broccoli Dip For Crackers ["16 oz. sour cream", "1 pkg. dry vegetable soup mix", "10 oz. pkg. frozen chopped broccoli, thawed and drained", "4 to 6 oz. 
Cheddar cheese, grated"] ["Mix together sour cream, soup mix, broccoli and half of cheese.", "Sprinkle remaining cheese on top.", "Bake at 350\u00b0 for 30 minutes, uncovered.", "Serve hot with vegetable crackers."] - Potato And Cheese Pie ["3 eggs", "1 tsp. salt", "1/4 tsp. pepper", "2 c. half and half", "3 c. potatoes, shredded coarse", "1 c. Cheddar cheese, coarsely shredded", "1/3 c. green onions"] ["Beat eggs, salt and pepper until well blended.", "Stir in half and half, potatoes and onions.", "Pour into well-greased 8-inch baking dish.", "Bake in a 400\u00b0 oven for 35 to 40 minutes, or until knife inserted in center comes out clean and potatoes are tender. Cool on rack 5 minutes; cut into squares.", "Makes 4 large servings."] - Angel Biscuits ["5 c. flour", "3 Tbsp. sugar", "4 tsp. baking powder", "1 1/2 pkg. dry yeast", "2 c. buttermilk", "1 tsp. soda", "1 1/2 sticks margarine", "1/2 c. warm water"] ["Mix flour, sugar, baking powder, soda and salt together.", "Cut in margarine, dissolve yeast in warm water.", "Stir into buttermilk and add to dry mixture.", "Cover and chill."] - source_sentence: Rhubarb Coffee Cake ["1 1/2 c. sugar", "1/2 c. butter", "1 egg", "1 c. buttermilk", "2 c. flour", "1/2 tsp. salt", "1 tsp. soda", "1 c. buttermilk", "2 c. rhubarb, finely cut", "1 tsp. vanilla"] ["Cream sugar and butter.", "Add egg and beat well.", "To creamed butter, sugar and egg, add alternately buttermilk with mixture of flour, salt and soda.", "Mix well.", "Add rhubarb and vanilla.", "Pour into greased 9 x 13-inch pan and add Topping."] sentences: - Prize-Winning Meat Loaf ["1 1/2 lb. ground beef", "1 c. tomato juice", "3/4 c. oats (uncooked)", "1 egg, beaten", "1/4 c. chopped onion", "1/4 tsp. pepper", "1 1/2 tsp. salt"] ["Mix well.", "Press firmly into an 8 1/2 x 4 1/2 x 2 1/2-inch loaf pan.", "Bake in preheated moderate oven.", "Bake at 350\u00b0 for 1 hour.", "Let stand 5 minutes before slicing.", "Makes 8 servings."] - Angel Biscuits ["5 c. flour", "3 Tbsp. sugar", "4 tsp. baking powder", "1 1/2 pkg. dry yeast", "2 c. buttermilk", "1 tsp. soda", "1 1/2 sticks margarine", "1/2 c. warm water"] ["Mix flour, sugar, baking powder, soda and salt together.", "Cut in margarine, dissolve yeast in warm water.", "Stir into buttermilk and add to dry mixture.", "Cover and chill."] - 'Smothered Round Steak(Servings: 4) ["2 lb. round steak", "1/2 tsp. ground black pepper", "1 tsp. ground white pepper", "1/2 c. vegetable oil", "2 bell peppers, chopped", "1 c. beef stock or water", "2 tsp. salt", "1 tsp. ground red pepper", "all-purpose flour (dredging)", "3 medium onions, chopped", "1 celery rib, chopped"] ["Alex Patout says, \"Smothering is a multipurpose Cajun technique that works wonders with everything from game to snap beans.", "It''s similar to what the rest of the world knows as braising.", "The ingredients are briefly browned or sauteed, then cooked with a little liquid over a low heat for a long time.\""]' --- # SentenceTransformer based on sentence-transformers/distilbert-base-nli-mean-tokens This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/distilbert-base-nli-mean-tokens](https://huggingface.co/sentence-transformers/distilbert-base-nli-mean-tokens). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [sentence-transformers/distilbert-base-nli-mean-tokens](https://huggingface.co/sentence-transformers/distilbert-base-nli-mean-tokens) <!-- at revision 2781c006adbf3726b509caa8649fc8077ff0724d --> - **Maximum Sequence Length:** 128 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("DivyaMereddy007/RecipeBert_v5original_epoc50_Copy_of_TrainSetenceTransforme-Finetuning_v5_DistilledBert") # Run inference sentences = [ 'Rhubarb Coffee Cake ["1 1/2 c. sugar", "1/2 c. butter", "1 egg", "1 c. buttermilk", "2 c. flour", "1/2 tsp. salt", "1 tsp. soda", "1 c. buttermilk", "2 c. rhubarb, finely cut", "1 tsp. vanilla"] ["Cream sugar and butter.", "Add egg and beat well.", "To creamed butter, sugar and egg, add alternately buttermilk with mixture of flour, salt and soda.", "Mix well.", "Add rhubarb and vanilla.", "Pour into greased 9 x 13-inch pan and add Topping."]', 'Prize-Winning Meat Loaf ["1 1/2 lb. ground beef", "1 c. tomato juice", "3/4 c. oats (uncooked)", "1 egg, beaten", "1/4 c. chopped onion", "1/4 tsp. pepper", "1 1/2 tsp. salt"] ["Mix well.", "Press firmly into an 8 1/2 x 4 1/2 x 2 1/2-inch loaf pan.", "Bake in preheated moderate oven.", "Bake at 350\\u00b0 for 1 hour.", "Let stand 5 minutes before slicing.", "Makes 8 servings."]', 'Angel Biscuits ["5 c. flour", "3 Tbsp. sugar", "4 tsp. baking powder", "1 1/2 pkg. dry yeast", "2 c. buttermilk", "1 tsp. soda", "1 1/2 sticks margarine", "1/2 c. warm water"] ["Mix flour, sugar, baking powder, soda and salt together.", "Cut in margarine, dissolve yeast in warm water.", "Stir into buttermilk and add to dry mixture.", "Cover and chill."]', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. 
<details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 1,746 training samples * Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>label</code> * Approximate statistics based on the first 1000 samples: | | sentence_0 | sentence_1 | label | |:--------|:-------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:---------------------------------------------------------------| | type | string | string | float | | details | <ul><li>min: 63 tokens</li><li>mean: 119.05 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 63 tokens</li><li>mean: 118.49 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.19</li><li>max: 1.0</li></ul> | * Samples: | sentence_0 | sentence_1 | label | |:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------| | <code>Strawberry Whatever ["1 lb. frozen strawberries in juice", "1 small can crushed pineapple", "3 ripe bananas", "1 c. chopped pecans", "1 large pkg. strawberry Jell-O", "1 1/2 c. boiling water", "1 pt. sour cream"] ["Mix Jell-O in boiling water.", "Add strawberries, pineapple, crushed bananas and nuts.", "Spread 1/2 mixture in 13 x 6 1/2-inch pan.", "Allow to gel in freezer 30 minutes.", "Add layer of sour cream, then remaining mixture on top.", "Gel and serve."]</code> | <code>One Hour Rolls ["1 c. milk", "2 Tbsp. sugar", "1 pkg. dry yeast", "1 Tbsp. salt", "3 Tbsp. Crisco oil", "2 c. 
plain flour"] ["Put flour into a large mixing bowl.", "Combine sugar, milk, salt and oil in a saucepan and heat to boiling; remove from heat and let cool to lukewarm.", "Add yeast and mix well.", "Pour into flour and stir.", "Batter will be sticky.", "Roll out batter on a floured board and cut with biscuit cutter.", "Lightly brush tops with melted oleo and fold over.", "Place rolls on a cookie sheet, put in a warm place and let rise for 1 hour.", "Bake at 350\u00b0 for about 20 minutes. Yield: 2 1/2 dozen."]</code> | <code>0.1</code> | | <code>Broccoli Dip For Crackers ["16 oz. sour cream", "1 pkg. dry vegetable soup mix", "10 oz. pkg. frozen chopped broccoli, thawed and drained", "4 to 6 oz. Cheddar cheese, grated"] ["Mix together sour cream, soup mix, broccoli and half of cheese.", "Sprinkle remaining cheese on top.", "Bake at 350\u00b0 for 30 minutes, uncovered.", "Serve hot with vegetable crackers."]</code> | <code>Vegetable-Burger Soup ["1/2 lb. ground beef", "2 c. water", "1 tsp. sugar", "1 pkg. Cup-a-Soup onion soup mix (dry)", "1 lb. can stewed tomatoes", "1 (8 oz.) can tomato sauce", "1 (10 oz.) pkg. frozen mixed vegetables"] ["Lightly brown beef in soup pot.", "Drain off excess fat.", "Stir in tomatoes, tomato sauce, water, frozen vegetables, soup mix and sugar.", "Bring to a boil.", "Reduce heat and simmer for 20 minutes. Serve."]</code> | <code>0.4</code> | | <code>Summer Spaghetti ["1 lb. very thin spaghetti", "1/2 bottle McCormick Salad Supreme (seasoning)", "1 bottle Zesty Italian dressing"] ["Prepare spaghetti per package.", "Drain.", "Melt a little butter through it.", "Marinate overnight in Salad Supreme and Zesty Italian dressing.", "Just before serving, add cucumbers, tomatoes, green peppers, mushrooms, olives or whatever your taste may want."]</code> | <code>Chicken Funny ["1 large whole chicken", "2 (10 1/2 oz.) cans chicken gravy", "1 (10 1/2 oz.) can cream of mushroom soup", "1 (6 oz.) box Stove Top stuffing", "4 oz. 
shredded cheese"] ["Boil and debone chicken.", "Put bite size pieces in average size square casserole dish.", "Pour gravy and cream of mushroom soup over chicken; level.", "Make stuffing according to instructions on box (do not make too moist).", "Put stuffing on top of chicken and gravy; level.", "Sprinkle shredded cheese on top and bake at 350\u00b0 for approximately 20 minutes or until golden and bubbly."]</code> | <code>0.3</code> | * Loss: [<code>CosineSimilarityLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters: ```json { "loss_fct": "torch.nn.modules.loss.MSELoss" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `num_train_epochs`: 50 - `multi_dataset_batch_sampler`: round_robin #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: no - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1 - `num_train_epochs`: 50 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - 
`fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: round_robin </details> ### Training Logs | Epoch | Step | Training Loss | |:-------:|:----:|:-------------:| | 4.5455 | 500 | 0.0594 | | 9.0909 | 1000 | 0.0099 | | 13.6364 | 1500 | 0.0085 | | 18.1818 | 2000 | 0.0077 | | 22.7273 | 2500 | 0.0074 | | 27.2727 | 3000 | 0.0071 | | 31.8182 | 3500 | 0.0068 | | 36.3636 | 4000 | 0.0066 | | 40.9091 | 4500 | 0.0063 | | 45.4545 | 5000 | 0.006 | | 50.0 | 5500 | 0.0057 | ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.3.0+cu121 - Accelerate: 0.31.0 - Datasets: 2.19.2 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
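The downstream fine-tuning section of the card above is left as an empty stub. A minimal sketch of the training recipe it describes (CosineSimilarityLoss over sentence pairs with 0-1 labels, batch size 16, 50 epochs) is shown below; the two recipe pairs are illustrative placeholders, not the actual 1,746-pair training set:

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer("sentence-transformers/distilbert-base-nli-mean-tokens")

# Placeholder pairs: each InputExample holds two recipe texts and a 0-1 similarity label.
train_examples = [
    InputExample(texts=["Scalloped Corn ...", "Creamy Corn ..."], label=0.4),
    InputExample(texts=["Angel Biscuits ...", "Easy Fudge ..."], label=0.1),
]

train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model)  # MSE between cosine(u, v) and the label

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=50,
)
model.save("recipe-similarity-distilbert")  # hypothetical output path
```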
null
Non_BioNLP
# SentenceTransformer based on sentence-transformers/distilbert-base-nli-mean-tokens This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/distilbert-base-nli-mean-tokens](https://huggingface.co/sentence-transformers/distilbert-base-nli-mean-tokens). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [sentence-transformers/distilbert-base-nli-mean-tokens](https://huggingface.co/sentence-transformers/distilbert-base-nli-mean-tokens) <!-- at revision 2781c006adbf3726b509caa8649fc8077ff0724d --> - **Maximum Sequence Length:** 128 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("DivyaMereddy007/RecipeBert_v5original_epoc50_Copy_of_TrainSetenceTransforme-Finetuning_v5_DistilledBert") # Run inference sentences = [ 'Rhubarb Coffee Cake ["1 1/2 c. sugar", "1/2 c. butter", "1 egg", "1 c. buttermilk", "2 c. flour", "1/2 tsp. salt", "1 tsp. soda", "1 c. buttermilk", "2 c. rhubarb, finely cut", "1 tsp. vanilla"] ["Cream sugar and butter.", "Add egg and beat well.", "To creamed butter, sugar and egg, add alternately buttermilk with mixture of flour, salt and soda.", "Mix well.", "Add rhubarb and vanilla.", "Pour into greased 9 x 13-inch pan and add Topping."]', 'Prize-Winning Meat Loaf ["1 1/2 lb. ground beef", "1 c. tomato juice", "3/4 c. oats (uncooked)", "1 egg, beaten", "1/4 c. chopped onion", "1/4 tsp. pepper", "1 1/2 tsp. salt"] ["Mix well.", "Press firmly into an 8 1/2 x 4 1/2 x 2 1/2-inch loaf pan.", "Bake in preheated moderate oven.", "Bake at 350\\u00b0 for 1 hour.", "Let stand 5 minutes before slicing.", "Makes 8 servings."]', 'Angel Biscuits ["5 c. flour", "3 Tbsp. sugar", "4 tsp. baking powder", "1 1/2 pkg. dry yeast", "2 c. buttermilk", "1 tsp. soda", "1 1/2 sticks margarine", "1/2 c. 
warm water"] ["Mix flour, sugar, baking powder, soda and salt together.", "Cut in margarine, dissolve yeast in warm water.", "Stir into buttermilk and add to dry mixture.", "Cover and chill."]', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 1,746 training samples * Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>label</code> * Approximate statistics based on the first 1000 samples: | | sentence_0 | sentence_1 | label | |:--------|:-------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:---------------------------------------------------------------| | type | string | string | float | | details | <ul><li>min: 63 tokens</li><li>mean: 119.05 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 63 tokens</li><li>mean: 118.49 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.19</li><li>max: 1.0</li></ul> | * Samples: | sentence_0 | sentence_1 | label | |:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------| | <code>Strawberry Whatever ["1 lb. frozen strawberries in juice", "1 small can crushed pineapple", "3 ripe bananas", "1 c. chopped pecans", "1 large pkg. strawberry Jell-O", "1 1/2 c. boiling water", "1 pt. 
sour cream"] ["Mix Jell-O in boiling water.", "Add strawberries, pineapple, crushed bananas and nuts.", "Spread 1/2 mixture in 13 x 6 1/2-inch pan.", "Allow to gel in freezer 30 minutes.", "Add layer of sour cream, then remaining mixture on top.", "Gel and serve."]</code> | <code>One Hour Rolls ["1 c. milk", "2 Tbsp. sugar", "1 pkg. dry yeast", "1 Tbsp. salt", "3 Tbsp. Crisco oil", "2 c. plain flour"] ["Put flour into a large mixing bowl.", "Combine sugar, milk, salt and oil in a saucepan and heat to boiling; remove from heat and let cool to lukewarm.", "Add yeast and mix well.", "Pour into flour and stir.", "Batter will be sticky.", "Roll out batter on a floured board and cut with biscuit cutter.", "Lightly brush tops with melted oleo and fold over.", "Place rolls on a cookie sheet, put in a warm place and let rise for 1 hour.", "Bake at 350\u00b0 for about 20 minutes. Yield: 2 1/2 dozen."]</code> | <code>0.1</code> | | <code>Broccoli Dip For Crackers ["16 oz. sour cream", "1 pkg. dry vegetable soup mix", "10 oz. pkg. frozen chopped broccoli, thawed and drained", "4 to 6 oz. Cheddar cheese, grated"] ["Mix together sour cream, soup mix, broccoli and half of cheese.", "Sprinkle remaining cheese on top.", "Bake at 350\u00b0 for 30 minutes, uncovered.", "Serve hot with vegetable crackers."]</code> | <code>Vegetable-Burger Soup ["1/2 lb. ground beef", "2 c. water", "1 tsp. sugar", "1 pkg. Cup-a-Soup onion soup mix (dry)", "1 lb. can stewed tomatoes", "1 (8 oz.) can tomato sauce", "1 (10 oz.) pkg. frozen mixed vegetables"] ["Lightly brown beef in soup pot.", "Drain off excess fat.", "Stir in tomatoes, tomato sauce, water, frozen vegetables, soup mix and sugar.", "Bring to a boil.", "Reduce heat and simmer for 20 minutes. Serve."]</code> | <code>0.4</code> | | <code>Summer Spaghetti ["1 lb. very thin spaghetti", "1/2 bottle McCormick Salad Supreme (seasoning)", "1 bottle Zesty Italian dressing"] ["Prepare spaghetti per package.", "Drain.", "Melt a little butter through it.", "Marinate overnight in Salad Supreme and Zesty Italian dressing.", "Just before serving, add cucumbers, tomatoes, green peppers, mushrooms, olives or whatever your taste may want."]</code> | <code>Chicken Funny ["1 large whole chicken", "2 (10 1/2 oz.) cans chicken gravy", "1 (10 1/2 oz.) can cream of mushroom soup", "1 (6 oz.) box Stove Top stuffing", "4 oz. 
shredded cheese"] ["Boil and debone chicken.", "Put bite size pieces in average size square casserole dish.", "Pour gravy and cream of mushroom soup over chicken; level.", "Make stuffing according to instructions on box (do not make too moist).", "Put stuffing on top of chicken and gravy; level.", "Sprinkle shredded cheese on top and bake at 350\u00b0 for approximately 20 minutes or until golden and bubbly."]</code> | <code>0.3</code> | * Loss: [<code>CosineSimilarityLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters: ```json { "loss_fct": "torch.nn.modules.loss.MSELoss" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `num_train_epochs`: 50 - `multi_dataset_batch_sampler`: round_robin #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: no - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1 - `num_train_epochs`: 50 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - 
`fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: round_robin </details> ### Training Logs | Epoch | Step | Training Loss | |:-------:|:----:|:-------------:| | 4.5455 | 500 | 0.0594 | | 9.0909 | 1000 | 0.0099 | | 13.6364 | 1500 | 0.0085 | | 18.1818 | 2000 | 0.0077 | | 22.7273 | 2500 | 0.0074 | | 27.2727 | 3000 | 0.0071 | | 31.8182 | 3500 | 0.0068 | | 36.3636 | 4000 | 0.0066 | | 40.9091 | 4500 | 0.0063 | | 45.4545 | 5000 | 0.006 | | 50.0 | 5500 | 0.0057 | ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.3.0+cu121 - Accelerate: 0.31.0 - Datasets: 2.19.2 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "sentence-transformers/distilbert-base-nli-mean-tokens", "datasets": [], "language": [], "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:1746", "loss:CosineSimilarityLoss"], "widget": [{"source_sentence": "Scalloped Corn [\"1 can cream-style corn\", \"1 can whole kernel corn\", \"1/2 pkg. (approximately 20) saltine crackers, crushed\", \"1 egg, beaten\", \"6 tsp. butter, divided\", \"pepper to taste\"] [\"Mix together both cans of corn, crackers, egg, 2 teaspoons of melted butter and pepper and place in a buttered baking dish.\", \"Dot with remaining 4 teaspoons of butter.\", \"Bake at 350\\u00b0 for 1 hour.\"]", "sentences": ["Artichoke Dip [\"2 cans or jars artichoke hearts\", \"1 c. mayonnaise\", \"1 c. Parmesan cheese\"] [\"Drain artichokes and chop.\", \"Mix with mayonnaise and Parmesan cheese.\", \"After well mixed, bake, uncovered, for 20 to 30 minutes at 350\\u00b0.\", \"Serve with crackers.\"]", "Scalloped Corn [\"1 can cream-style corn\", \"1 can whole kernel corn\", \"1/2 pkg. (approximately 20) saltine crackers, crushed\", \"1 egg, beaten\", \"6 tsp. butter, divided\", \"pepper to taste\"] [\"Mix together both cans of corn, crackers, egg, 2 teaspoons of melted butter and pepper and place in a buttered baking dish.\", \"Dot with remaining 4 teaspoons of butter.\", \"Bake at 350\\u00b0 for 1 hour.\"]", "Chicken Stew [\"3 lb. chicken, boiled\", \"4 medium potatoes, diced\", \"2 medium onions, chopped\", \"1 (16 oz.) can creamed corn\", \"1 (16 oz.) can English peas\", \"1 (16 oz.) can field peas\", \"1 (16 oz.) can butter beans\", \"1 (16 oz.) can tomatoes\", \"1 (46 oz.) can tomato juice\", \"1 small box macaroni\", \"1 Tbsp. black pepper\", \"1 Tbsp. salt\", \"1 Tbsp. sugar\"] [\"Remove chicken from bone.\", \"Use the broth.\", \"Mix the vegetables and macaroni.\", \"Add sugar, salt and black pepper.\", \"Cook until all vegetables are tender over medium heat.\"]"]}, {"source_sentence": "Watermelon Rind Pickles [\"7 lb. watermelon rind\", \"7 c. sugar\", \"2 c. apple vinegar\", \"1/2 tsp. oil of cloves\", \"1/2 tsp. oil of cinnamon\"] [\"Trim off green and pink parts of watermelon rind; cut to 1-inch cubes.\", \"Parboil until tender, but not soft.\", \"Drain. Combine sugar, vinegar, oil of cloves and oil of cinnamon; bring to boiling and pour over rind.\", \"Let stand overnight.\", \"In the morning, drain off syrup.\", \"Heat and put over rind.\", \"The third morning, heat rind and syrup; seal in hot, sterilized jars.\", \"Makes 8 pints.\", \"(Oil of cinnamon and clove keeps rind clear and transparent.)\"]", "sentences": ["Cheeseburger Potato Soup [\"6 baking potatoes\", \"1 lb. of extra lean ground beef\", \"2/3 c. butter or margarine\", \"6 c. milk\", \"3/4 tsp. salt\", \"1/2 tsp. pepper\", \"1 1/2 c (6 oz.) shredded Cheddar cheese, divided\", \"12 sliced bacon, cooked, crumbled and divided\", \"4 green onion, chopped and divided\", \"1 (8 oz.) 
carton sour cream (optional)\"] [\"Wash potatoes; prick several times with a fork.\", \"Microwave them with a wet paper towel covering the potatoes on high for 6-8 minutes.\", \"The potatoes should be soft, ready to eat.\", \"Let them cool enough to handle.\", \"Cut in half lengthwise; scoop out pulp and reserve.\", \"Discard shells.\", \"Brown ground beef until done.\", \"Drain any grease from the meat.\", \"Set aside when done.\", \"Meat will be added later.\", \"Melt butter in a large kettle over low heat; add flour, stirring until smooth.\", \"Cook 1 minute, stirring constantly. Gradually add milk; cook over medium heat, stirring constantly, until thickened and bubbly.\", \"Stir in potato, ground beef, salt, pepper, 1 cup of cheese, 2 tablespoons of green onion and 1/2 cup of bacon.\", \"Cook until heated (do not boil).\", \"Stir in sour cream if desired; cook until heated (do not boil).\", \"Sprinkle with remaining cheese, bacon and green onions.\"]", "Easy Fudge [\"1 (14 oz.) can sweetened condensed milk\", \"1 (12 oz.) pkg. semi-sweet chocolate chips\", \"1 (1 oz.) sq. unsweetened chocolate (if desired)\", \"1 1/2 c. chopped nuts (if desired)\", \"1 tsp. vanilla\"] [\"Butter a square pan, 8 x 8 x 2-inches.\", \"Heat milk, chocolate chips and unsweetened chocolate over low heat, stirring constantly, until chocolate is melted and mixture is smooth. Remove from heat.\", \"Stir in nuts and vanilla.\", \"Spread in pan.\"]", "Chicken Ole [\"4 chicken breasts, cooked\", \"1 can cream of chicken soup\", \"1 can cream of mushroom soup\", \"1 can green chili salsa sauce\", \"1 can green chilies\", \"1 c. milk\", \"1 grated onion\", \"1 pkg. corn tortilla in pieces\"] [\"Dice chicken.\", \"Mix all ingredients together.\", \"Let sit overnight.\", \"Bake 1 1/2 hours at 375\\u00b0.\"]"]}, {"source_sentence": "Quick Barbecue Wings [\"chicken wings (as many as you need for dinner)\", \"flour\", \"barbecue sauce (your choice)\"] [\"Clean wings.\", \"Flour and fry until done.\", \"Place fried chicken wings in microwave bowl.\", \"Stir in barbecue sauce.\", \"Microwave on High (stir once) for 4 minutes.\"]", "sentences": ["Creamy Corn [\"2 (16 oz.) pkg. frozen corn\", \"1 (8 oz.) pkg. cream cheese, cubed\", \"1/3 c. butter, cubed\", \"1/2 tsp. garlic powder\", \"1/2 tsp. salt\", \"1/4 tsp. pepper\"] [\"In a slow cooker, combine all ingredients. Cover and cook on low for 4 hours or until heated through and cheese is melted. Stir well before serving. Yields 6 servings.\"]", "Broccoli Salad [\"1 large head broccoli (about 1 1/2 lb.)\", \"10 slices bacon, cooked and crumbled\", \"5 green onions, sliced or 1/4 c. chopped red onion\", \"1/2 c. raisins\", \"1 c. mayonnaise\", \"2 Tbsp. vinegar\", \"1/4 c. sugar\"] [\"Trim off large leaves of broccoli and remove the tough ends of lower stalks. Wash the broccoli thoroughly. Cut the florets and stems into bite-size pieces. Place in a large bowl. Add bacon, onions and raisins. Combine remaining ingredients, stirring well. Add dressing to broccoli mixture and toss gently. Cover and refrigerate 2 to 3 hours. Makes about 6 servings.\"]", "Vegetable-Burger Soup [\"1/2 lb. ground beef\", \"2 c. water\", \"1 tsp. sugar\", \"1 pkg. Cup-a-Soup onion soup mix (dry)\", \"1 lb. can stewed tomatoes\", \"1 (8 oz.) can tomato sauce\", \"1 (10 oz.) pkg. 
frozen mixed vegetables\"] [\"Lightly brown beef in soup pot.\", \"Drain off excess fat.\", \"Stir in tomatoes, tomato sauce, water, frozen vegetables, soup mix and sugar.\", \"Bring to a boil.\", \"Reduce heat and simmer for 20 minutes. Serve.\"]"]}, {"source_sentence": "Eggless Milkless Applesauce Cake [\"3/4 c. sugar\", \"1/2 c. shortening\", \"1 1/2 c. applesauce\", \"3 level tsp. soda\", \"1 tsp. each: cinnamon, cloves and nutmeg\", \"2 c. sifted flour\", \"1 c. raisins\", \"1 c. nuts\"] [\"Mix Crisco with applesauce, nuts and raisins.\", \"Sift dry ingredients and add.\", \"Mix well.\", \"Put in a greased and floured loaf pan or tube pan.\", \"Bake in loaf pan at 350\\u00b0 to 375\\u00b0 for 45 to 60 minutes, layer pan at 375\\u00b0 for 20 minutes or tube pan at 325\\u00b0 for 1 hour.\"]", "sentences": ["Broccoli Dip For Crackers [\"16 oz. sour cream\", \"1 pkg. dry vegetable soup mix\", \"10 oz. pkg. frozen chopped broccoli, thawed and drained\", \"4 to 6 oz. Cheddar cheese, grated\"] [\"Mix together sour cream, soup mix, broccoli and half of cheese.\", \"Sprinkle remaining cheese on top.\", \"Bake at 350\\u00b0 for 30 minutes, uncovered.\", \"Serve hot with vegetable crackers.\"]", "Potato And Cheese Pie [\"3 eggs\", \"1 tsp. salt\", \"1/4 tsp. pepper\", \"2 c. half and half\", \"3 c. potatoes, shredded coarse\", \"1 c. Cheddar cheese, coarsely shredded\", \"1/3 c. green onions\"] [\"Beat eggs, salt and pepper until well blended.\", \"Stir in half and half, potatoes and onions.\", \"Pour into well-greased 8-inch baking dish.\", \"Bake in a 400\\u00b0 oven for 35 to 40 minutes, or until knife inserted in center comes out clean and potatoes are tender. Cool on rack 5 minutes; cut into squares.\", \"Makes 4 large servings.\"]", "Angel Biscuits [\"5 c. flour\", \"3 Tbsp. sugar\", \"4 tsp. baking powder\", \"1 1/2 pkg. dry yeast\", \"2 c. buttermilk\", \"1 tsp. soda\", \"1 1/2 sticks margarine\", \"1/2 c. warm water\"] [\"Mix flour, sugar, baking powder, soda and salt together.\", \"Cut in margarine, dissolve yeast in warm water.\", \"Stir into buttermilk and add to dry mixture.\", \"Cover and chill.\"]"]}, {"source_sentence": "Rhubarb Coffee Cake [\"1 1/2 c. sugar\", \"1/2 c. butter\", \"1 egg\", \"1 c. buttermilk\", \"2 c. flour\", \"1/2 tsp. salt\", \"1 tsp. soda\", \"1 c. buttermilk\", \"2 c. rhubarb, finely cut\", \"1 tsp. vanilla\"] [\"Cream sugar and butter.\", \"Add egg and beat well.\", \"To creamed butter, sugar and egg, add alternately buttermilk with mixture of flour, salt and soda.\", \"Mix well.\", \"Add rhubarb and vanilla.\", \"Pour into greased 9 x 13-inch pan and add Topping.\"]", "sentences": ["Prize-Winning Meat Loaf [\"1 1/2 lb. ground beef\", \"1 c. tomato juice\", \"3/4 c. oats (uncooked)\", \"1 egg, beaten\", \"1/4 c. chopped onion\", \"1/4 tsp. pepper\", \"1 1/2 tsp. salt\"] [\"Mix well.\", \"Press firmly into an 8 1/2 x 4 1/2 x 2 1/2-inch loaf pan.\", \"Bake in preheated moderate oven.\", \"Bake at 350\\u00b0 for 1 hour.\", \"Let stand 5 minutes before slicing.\", \"Makes 8 servings.\"]", "Angel Biscuits [\"5 c. flour\", \"3 Tbsp. sugar\", \"4 tsp. baking powder\", \"1 1/2 pkg. dry yeast\", \"2 c. buttermilk\", \"1 tsp. soda\", \"1 1/2 sticks margarine\", \"1/2 c. warm water\"] [\"Mix flour, sugar, baking powder, soda and salt together.\", \"Cut in margarine, dissolve yeast in warm water.\", \"Stir into buttermilk and add to dry mixture.\", \"Cover and chill.\"]", "Smothered Round Steak(Servings: 4) [\"2 lb. round steak\", \"1/2 tsp. 
ground black pepper\", \"1 tsp. ground white pepper\", \"1/2 c. vegetable oil\", \"2 bell peppers, chopped\", \"1 c. beef stock or water\", \"2 tsp. salt\", \"1 tsp. ground red pepper\", \"all-purpose flour (dredging)\", \"3 medium onions, chopped\", \"1 celery rib, chopped\"] [\"Alex Patout says, \\\"Smothering is a multipurpose Cajun technique that works wonders with everything from game to snap beans.\", \"It's similar to what the rest of the world knows as braising.\", \"The ingredients are briefly browned or sauteed, then cooked with a little liquid over a low heat for a long time.\\\"\"]"]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,248
fine-tuned/NFCorpus-32000-384-gpt-4o-2024-05-13-94858978
fine-tuned
feature-extraction
[ "sentence-transformers", "safetensors", "bert", "feature-extraction", "sentence-similarity", "mteb", "en", "dataset:fine-tuned/NFCorpus-32000-384-gpt-4o-2024-05-13-94858978", "dataset:allenai/c4", "license:apache-2.0", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-05-29T12:27:01Z
2024-05-29T12:27:34+00:00
7
0
--- datasets: - fine-tuned/NFCorpus-32000-384-gpt-4o-2024-05-13-94858978 - allenai/c4 language: - en - en license: apache-2.0 pipeline_tag: feature-extraction tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb --- This model is a fine-tuned version of [**BAAI/bge-large-en-v1.5**](https://huggingface.co/BAAI/bge-large-en-v1.5) designed for the following use case: None ## How to Use This model can be easily integrated into your NLP pipeline for tasks such as text classification, sentiment analysis, entity recognition, and more. Here's a simple example to get you started: ```python from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim model = SentenceTransformer( 'fine-tuned/NFCorpus-32000-384-gpt-4o-2024-05-13-94858978', trust_remote_code=True ) embeddings = model.encode([ 'first text to embed', 'second text to embed' ]) print(cos_sim(embeddings[0], embeddings[1])) ```
null
Non_BioNLP
This model is a fine-tuned version of [**BAAI/bge-large-en-v1.5**](https://huggingface.co/BAAI/bge-large-en-v1.5) designed for the following use case: None ## How to Use This model can be easily integrated into your NLP pipeline for tasks such as text classification, sentiment analysis, entity recognition, and more. Here's a simple example to get you started: ```python from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim model = SentenceTransformer( 'fine-tuned/NFCorpus-32000-384-gpt-4o-2024-05-13-94858978', trust_remote_code=True ) embeddings = model.encode([ 'first text to embed', 'second text to embed' ]) print(cos_sim(embeddings[0], embeddings[1])) ```
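Since the card names NFCorpus (a medical information-retrieval benchmark) as the fine-tuning dataset, a natural follow-up to the embedding snippet above is semantic search. The sketch below is illustrative only: the corpus sentences and the query are made up, and it assumes the standard `sentence-transformers` utility API rather than anything specific to this checkpoint.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer(
    'fine-tuned/NFCorpus-32000-384-gpt-4o-2024-05-13-94858978',
    trust_remote_code=True,
)

# Invented corpus and query, just to illustrate the retrieval pattern
corpus = [
    "Dietary fiber intake and cardiovascular risk.",
    "Vitamin D supplementation in older adults.",
    "Effects of green tea on cholesterol levels.",
]
query = "Does fiber reduce heart disease risk?"

corpus_emb = model.encode(corpus, convert_to_tensor=True)
query_emb = model.encode(query, convert_to_tensor=True)

# Rank the corpus by cosine similarity to the query
hits = util.semantic_search(query_emb, corpus_emb, top_k=2)[0]
for hit in hits:
    print(round(hit["score"], 3), corpus[hit["corpus_id"]])
```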
{"datasets": ["fine-tuned/NFCorpus-32000-384-gpt-4o-2024-05-13-94858978", "allenai/c4"], "language": ["en", "en"], "license": "apache-2.0", "pipeline_tag": "feature-extraction", "tags": ["sentence-transformers", "feature-extraction", "sentence-similarity", "mteb"]}
task
[ "TEXT_CLASSIFICATION" ]
40,249
Thang203/open-mar11Top10
Thang203
text-classification
[ "bertopic", "text-classification", "region:us" ]
2024-03-26T18:14:44Z
2024-03-26T18:14:46+00:00
4
0
--- library_name: bertopic pipeline_tag: text-classification tags: - bertopic --- # open-mar11Top10 This is a [BERTopic](https://github.com/MaartenGr/BERTopic) model. BERTopic is a flexible and modular topic modeling framework that allows for the generation of easily interpretable topics from large datasets. ## Usage To use this model, please install BERTopic: ``` pip install -U bertopic ``` You can use the model as follows: ```python from bertopic import BERTopic topic_model = BERTopic.load("Thang203/open-mar11Top10") topic_model.get_topic_info() ``` ## Topic overview * Number of topics: 10 * Number of training documents: 2109 <details> <summary>Click here for an overview of all topics.</summary> | Topic ID | Topic Keywords | Topic Frequency | Label | |----------|----------------|-----------------|-------| | -1 | models - language - model - language models - llms | 11 | -1_models_language_model_language models | | 0 | models - language - model - llms - language models | 597 | 0_models_language_model_llms | | 1 | models - quantization - language - training - model | 1001 | 1_models_quantization_language_training | | 2 | models - attacks - llms - language - text | 222 | 2_models_attacks_llms_language | | 3 | bias - biases - gender - models - language | 164 | 3_bias_biases_gender_models | | 4 | music - poetry - generation - poems - model | 45 | 4_music_poetry_generation_poems | | 5 | hallucinations - hallucination - models - large - language models | 23 | 5_hallucinations_hallucination_models_large | | 6 | claim - legal - claims - generation - based | 20 | 6_claim_legal_claims_generation | | 7 | financial - analysis - chinese - financial domain - language | 14 | 7_financial_analysis_chinese_financial domain | | 8 | summarization - summaries - abstractive - abstractive summarization - summary | 12 | 8_summarization_summaries_abstractive_abstractive summarization | </details> ## Training hyperparameters * calculate_probabilities: False * language: english * low_memory: False * min_topic_size: 10 * n_gram_range: (1, 1) * nr_topics: 10 * seed_topic_list: None * top_n_words: 10 * verbose: True * zeroshot_min_similarity: 0.7 * zeroshot_topic_list: None ## Framework versions * Numpy: 1.25.2 * HDBSCAN: 0.8.33 * UMAP: 0.5.5 * Pandas: 1.5.3 * Scikit-Learn: 1.2.2 * Sentence-transformers: 2.6.1 * Transformers: 4.38.2 * Numba: 0.58.1 * Plotly: 5.15.0 * Python: 3.10.12
null
Non_BioNLP
# open-mar11Top10 This is a [BERTopic](https://github.com/MaartenGr/BERTopic) model. BERTopic is a flexible and modular topic modeling framework that allows for the generation of easily interpretable topics from large datasets. ## Usage To use this model, please install BERTopic: ``` pip install -U bertopic ``` You can use the model as follows: ```python from bertopic import BERTopic topic_model = BERTopic.load("Thang203/open-mar11Top10") topic_model.get_topic_info() ``` ## Topic overview * Number of topics: 10 * Number of training documents: 2109 <details> <summary>Click here for an overview of all topics.</summary> | Topic ID | Topic Keywords | Topic Frequency | Label | |----------|----------------|-----------------|-------| | -1 | models - language - model - language models - llms | 11 | -1_models_language_model_language models | | 0 | models - language - model - llms - language models | 597 | 0_models_language_model_llms | | 1 | models - quantization - language - training - model | 1001 | 1_models_quantization_language_training | | 2 | models - attacks - llms - language - text | 222 | 2_models_attacks_llms_language | | 3 | bias - biases - gender - models - language | 164 | 3_bias_biases_gender_models | | 4 | music - poetry - generation - poems - model | 45 | 4_music_poetry_generation_poems | | 5 | hallucinations - hallucination - models - large - language models | 23 | 5_hallucinations_hallucination_models_large | | 6 | claim - legal - claims - generation - based | 20 | 6_claim_legal_claims_generation | | 7 | financial - analysis - chinese - financial domain - language | 14 | 7_financial_analysis_chinese_financial domain | | 8 | summarization - summaries - abstractive - abstractive summarization - summary | 12 | 8_summarization_summaries_abstractive_abstractive summarization | </details> ## Training hyperparameters * calculate_probabilities: False * language: english * low_memory: False * min_topic_size: 10 * n_gram_range: (1, 1) * nr_topics: 10 * seed_topic_list: None * top_n_words: 10 * verbose: True * zeroshot_min_similarity: 0.7 * zeroshot_topic_list: None ## Framework versions * Numpy: 1.25.2 * HDBSCAN: 0.8.33 * UMAP: 0.5.5 * Pandas: 1.5.3 * Scikit-Learn: 1.2.2 * Sentence-transformers: 2.6.1 * Transformers: 4.38.2 * Numba: 0.58.1 * Plotly: 5.15.0 * Python: 3.10.12
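The usage snippet above only loads the model and lists its topics. Because the card documents ten pre-trained topics, it can also help to show how new documents are mapped onto them. The sketch below is a hedged example: the two abstracts are invented, and it assumes the saved model can resolve its underlying embedding model when loaded.

```python
from bertopic import BERTopic

topic_model = BERTopic.load("Thang203/open-mar11Top10")

# Two invented abstracts, roughly matching the quantization and hallucination topics
docs = [
    "We study post-training quantization methods for compressing large language models.",
    "A benchmark for detecting hallucinations produced by large language models.",
]

# transform() assigns new documents to the already-learned topics
topics, probs = topic_model.transform(docs)

for doc, topic_id in zip(docs, topics):
    # get_topic() returns (keyword, weight) pairs for the assigned topic
    top_words = [word for word, _ in topic_model.get_topic(topic_id)[:3]]
    print(topic_id, top_words)
```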
{"library_name": "bertopic", "pipeline_tag": "text-classification", "tags": ["bertopic"]}
task
[ "SUMMARIZATION" ]
40,250
farleyknight-org-username/arxiv-summarization-t5-small
farleyknight-org-username
text2text-generation
[ "transformers", "pytorch", "t5", "text2text-generation", "generated_from_trainer", "dataset:ccdv/arxiv-summarization", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-09-05T10:56:03Z
2022-09-06T09:00:07+00:00
26
1
--- datasets: - ccdv/arxiv-summarization license: apache-2.0 metrics: - rouge tags: - generated_from_trainer model-index: - name: arxiv-summarization-t5-small-2022-09-05 results: - task: type: summarization name: Summarization dataset: name: ccdv/arxiv-summarization type: ccdv/arxiv-summarization config: section split: train args: section metrics: - type: rouge value: 37.7982 name: Rouge1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # arxiv-summarization-t5-small-2022-09-05 This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the ccdv/arxiv-summarization dataset. It achieves the following results on the evaluation set: - Loss: 2.2994 - Rouge1: 37.7982 - Rouge2: 12.5571 - Rougel: 24.9325 - Rougelsum: 32.9189 - Gen Len: 122.0061 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 2.5925 | 0.39 | 10000 | 2.4566 | 17.8432 | 6.6779 | 14.2303 | 16.1952 | 19.0 | | 2.518 | 0.79 | 20000 | 2.3868 | 18.0354 | 6.8565 | 14.3552 | 16.3664 | 19.0 | | 2.4587 | 1.18 | 30000 | 2.3600 | 18.2076 | 6.9618 | 14.5349 | 16.5626 | 19.0 | | 2.4365 | 1.58 | 40000 | 2.3295 | 18.3579 | 7.0312 | 14.6145 | 16.6845 | 19.0 | | 2.4306 | 1.97 | 50000 | 2.3190 | 18.4551 | 7.0861 | 14.6879 | 16.7627 | 19.0 | | 2.4005 | 2.37 | 60000 | 2.3056 | 18.3521 | 7.0496 | 14.6413 | 16.6832 | 19.0 | | 2.396 | 2.76 | 70000 | 2.3012 | 18.348 | 7.0439 | 14.6509 | 16.6994 | 19.0 | ### Framework versions - Transformers 4.22.0.dev0 - Pytorch 1.12.1+cu102 - Datasets 2.4.0 - Tokenizers 0.12.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # arxiv-summarization-t5-small-2022-09-05 This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the ccdv/arxiv-summarization dataset. It achieves the following results on the evaluation set: - Loss: 2.2994 - Rouge1: 37.7982 - Rouge2: 12.5571 - Rougel: 24.9325 - Rougelsum: 32.9189 - Gen Len: 122.0061 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 2.5925 | 0.39 | 10000 | 2.4566 | 17.8432 | 6.6779 | 14.2303 | 16.1952 | 19.0 | | 2.518 | 0.79 | 20000 | 2.3868 | 18.0354 | 6.8565 | 14.3552 | 16.3664 | 19.0 | | 2.4587 | 1.18 | 30000 | 2.3600 | 18.2076 | 6.9618 | 14.5349 | 16.5626 | 19.0 | | 2.4365 | 1.58 | 40000 | 2.3295 | 18.3579 | 7.0312 | 14.6145 | 16.6845 | 19.0 | | 2.4306 | 1.97 | 50000 | 2.3190 | 18.4551 | 7.0861 | 14.6879 | 16.7627 | 19.0 | | 2.4005 | 2.37 | 60000 | 2.3056 | 18.3521 | 7.0496 | 14.6413 | 16.6832 | 19.0 | | 2.396 | 2.76 | 70000 | 2.3012 | 18.348 | 7.0439 | 14.6509 | 16.6994 | 19.0 | ### Framework versions - Transformers 4.22.0.dev0 - Pytorch 1.12.1+cu102 - Datasets 2.4.0 - Tokenizers 0.12.1
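The card stops at training details, so a minimal inference sketch may be useful. It assumes the checkpoint loads through the standard `transformers` summarization pipeline; the article text is a placeholder and the generation lengths are arbitrary choices, not values recommended by the card.

```python
from transformers import pipeline

# Model id taken from this repository; the remaining settings are assumptions.
summarizer = pipeline(
    "summarization",
    model="farleyknight-org-username/arxiv-summarization-t5-small",
)

article = "We propose a new method for ..."  # placeholder for an arXiv paper body or abstract

result = summarizer(
    article,
    max_length=128,   # arbitrary cap on summary length
    min_length=32,
    truncation=True,  # t5-small has a short input window, so long papers get truncated
)
print(result[0]["summary_text"])
```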
{"datasets": ["ccdv/arxiv-summarization"], "license": "apache-2.0", "metrics": ["rouge"], "tags": ["generated_from_trainer"], "model-index": [{"name": "arxiv-summarization-t5-small-2022-09-05", "results": [{"task": {"type": "summarization", "name": "Summarization"}, "dataset": {"name": "ccdv/arxiv-summarization", "type": "ccdv/arxiv-summarization", "config": "section", "split": "train", "args": "section"}, "metrics": [{"type": "rouge", "value": 37.7982, "name": "Rouge1"}]}]}]}
task
[ "SUMMARIZATION" ]
40,251
davidadamczyk/setfit-model-2
davidadamczyk
text-classification
[ "setfit", "safetensors", "mpnet", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "base_model:sentence-transformers/all-mpnet-base-v2", "base_model:finetune:sentence-transformers/all-mpnet-base-v2", "model-index", "region:us" ]
2024-10-13T13:38:32Z
2024-10-13T13:38:48+00:00
8
0
--- base_model: sentence-transformers/all-mpnet-base-v2 library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: 'There is, of course, much to digest. I hope that these rubes and those who incited them are locked up, along with the fake electors and their advisors, and those who conspired to convince elections officials to violate the law, and finally, those who have and continue to threaten true Americans just doing their constitution-based jobs. One thing jumps out. Judge McFadden, who seems willing to demand that the government prove its case beyond a reasonable doubt, also seems to be willing to sentence convicted lawbreakers to serious time. That he acquitted the guy who claimed the police let him gives me confidence that these are not sham trials.The thing that I haven’t heard much about are the firings, trials, convictions, and sentences of those LEOs who aided and abetted the traitors. That would include the cops who let Mr. Martin enter the Capitol, and those on Trump’s secret service detail who may have been aiding Trump’s efforts to foment a riot. ' - text: 'Both Vladimir Putin and Yevgeny Prigozhin are international war criminals.Both also undermined US elections in favor of Trump.<a href="https://www.reuters.com/world/us/russias-prigozhin-admits-interfering-us-elections-2022-11-07" target="_blank">https://www.reuters.com/world/us/russias-prigozhin-admits-interfering-us-elections-2022-11-07</a>/ ' - text: 'Aaron 100 percent. citizens united was a huge win for Russian citizen Vlad and Chinese citizen Xi. ' - text: 'George Corsetti “Russia did NOT interfere in the 2016 election.”Sorry George, this is not true. Read the Russia report, it details more than a dozen felonies committed by TFG and his family and Campaign personnel during the 2015/16 Campaign along with evidence of Russian hackers and agents directly interfering in the 2016 election. ' - text: 'Ms.Renkl does a nice job here, yet only hints at the decimation to public schools, libraries, governance, and healthcare by Bill Lee and the Red Legislators .Tennessee has a $50 B per year budget, $25B 0f this comes from federal government. It is a wealthy state ranking in the top 16 economically and 3rd in fiscal stability ( USNews).The stability comes from the egregious, wrongheaded use of federal monies earmarked for public schools and healthcare,Governor controls all Federal school and healthcare dollars rather than decimating to citizens. The US tax payer is subsidizing this state as the Governor and legislators deny ACA low cost insurance to WORKING poor and the Governor used for unrelated purposes. . Federal public school monies are used to subsidize private schools and Lee’s pet project:private DeVos/Hillsdale religious charter schools. US tax payers should be made aware of the mishandling of our tax dollars in support of the ultra conservative regime. ' inference: true model-index: - name: SetFit with sentence-transformers/all-mpnet-base-v2 results: - task: type: text-classification name: Text Classification dataset: name: Unknown type: unknown split: test metrics: - type: accuracy value: 0.8 name: Accuracy --- # SetFit with sentence-transformers/all-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. 
This SetFit model uses [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 384 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | yes | <ul><li>'Ken The FBI and DOJ should open an investigation into Russian interference in the 2022 election.\n'</li><li>"But you still haven't mentioned the crucial upcoming elections in Czechia, which cold alter the balance in Eastern/Central Europe.\n"</li><li>'factsonly She won the 2022 election. She beat at least one Dem primary opponent and beat her Republican opponent by a decent margin in the general election.\n'</li></ul> | | no | <ul><li>"Sean Who needs a source when you have Trump's well documented relationship with Putin?\n"</li><li>'After a years-long crime spree by Donald Trump, his children, and his accomplices, we\'re still waiting for indictments. Why? Why is this so hard? The man who said, "Russia, if you\'re listening..." has openly and loudly ignored the law, the constitution, precedent, tradition, common decency and common sense for years, and yet we\'re still waiting for some part of his manifold misdeeds to land him in the docket. Again, why? Why?! There is so much evidence against him, it is impossible to see why he hasn\'t been arrested and charged for sedition, insurrection, money laundering, violating the Espionage Act, the Presidential Records Act, payoffs to hide his adulterous affairs, and other crimes up to and including attempting to mastermind a coup. There is no Witch Hunt. There\'s a just an inexplicably as-yet unindicted multiple felon who continues to grift dollars out of his hoodwinked followers.I am beginning to wonder if the DOJ has forgotten what upholding the law means, or if it is just the person who runs the DOJ.Donald Trump is not the only person to have questions that need to be answered: so does Merrick Garland -- and foremost amongst them is, \'What\'s the hold up?\'\n'</li><li>"Most writers just imitate what they've read. They repeat formulas and replicate familiar sentence structures. Most TV could be written by ChatGPT. So it seems like ChatGPT writes pretty much like 90 percent of writers in a creative writing class. And 90 percent of readers don't want writing that pushes creative limits—look at the success of Colleen Hoover. I'd don't see why something like ChatGPT couldn't write her books. I don't mean that to be insulting—I do doubt an AI book would touch hearts as hers apparently do because it would lack her ineffable humanity. But even if an AI novel became a popular success, it wouldn't mean that AI had bested Nabokov or Woolf or DFW or … well, it's a very large list, and I'm not even claiming these as anything more than the first three whose names came to mind.(And in answer to Elon, sure, if I had to choose, I guess I'd rather live under the rule of Marcus Aurelius than Caligula's. But in fact I wouldn't get a vote on that, and I'd rather not live under an emperor at all.)\n"</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.8 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. 
```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("davidadamczyk/setfit-model-2") # Run inference preds = model("Aaron 100 percent. citizens united was a huge win for Russian citizen Vlad and Chinese citizen Xi. ") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:-------|:----| | Word count | 6 | 80.325 | 276 | | Label | Training Sample Count | |:------|:----------------------| | no | 18 | | yes | 22 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 120 - body_learning_rate: (2e-05, 2e-05) - head_learning_rate: 2e-05 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0017 | 1 | 0.4496 | - | | 0.0833 | 50 | 0.1797 | - | | 0.1667 | 100 | 0.0034 | - | | 0.25 | 150 | 0.0003 | - | | 0.3333 | 200 | 0.0002 | - | | 0.4167 | 250 | 0.0002 | - | | 0.5 | 300 | 0.0001 | - | | 0.5833 | 350 | 0.0001 | - | | 0.6667 | 400 | 0.0001 | - | | 0.75 | 450 | 0.0001 | - | | 0.8333 | 500 | 0.0001 | - | | 0.9167 | 550 | 0.0001 | - | | 1.0 | 600 | 0.0001 | - | ### Framework Versions - Python: 3.10.13 - SetFit: 1.1.0 - Sentence Transformers: 3.0.1 - Transformers: 4.45.2 - PyTorch: 2.4.0+cu124 - Datasets: 2.21.0 - Tokenizers: 0.20.0 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit with sentence-transformers/all-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 384 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | yes | <ul><li>'Ken The FBI and DOJ should open an investigation into Russian interference in the 2022 election.\n'</li><li>"But you still haven't mentioned the crucial upcoming elections in Czechia, which cold alter the balance in Eastern/Central Europe.\n"</li><li>'factsonly She won the 2022 election. She beat at least one Dem primary opponent and beat her Republican opponent by a decent margin in the general election.\n'</li></ul> | | no | <ul><li>"Sean Who needs a source when you have Trump's well documented relationship with Putin?\n"</li><li>'After a years-long crime spree by Donald Trump, his children, and his accomplices, we\'re still waiting for indictments. Why? Why is this so hard? The man who said, "Russia, if you\'re listening..." has openly and loudly ignored the law, the constitution, precedent, tradition, common decency and common sense for years, and yet we\'re still waiting for some part of his manifold misdeeds to land him in the docket. Again, why? Why?! There is so much evidence against him, it is impossible to see why he hasn\'t been arrested and charged for sedition, insurrection, money laundering, violating the Espionage Act, the Presidential Records Act, payoffs to hide his adulterous affairs, and other crimes up to and including attempting to mastermind a coup. There is no Witch Hunt. There\'s a just an inexplicably as-yet unindicted multiple felon who continues to grift dollars out of his hoodwinked followers.I am beginning to wonder if the DOJ has forgotten what upholding the law means, or if it is just the person who runs the DOJ.Donald Trump is not the only person to have questions that need to be answered: so does Merrick Garland -- and foremost amongst them is, \'What\'s the hold up?\'\n'</li><li>"Most writers just imitate what they've read. They repeat formulas and replicate familiar sentence structures. Most TV could be written by ChatGPT. So it seems like ChatGPT writes pretty much like 90 percent of writers in a creative writing class. And 90 percent of readers don't want writing that pushes creative limits—look at the success of Colleen Hoover. I'd don't see why something like ChatGPT couldn't write her books. I don't mean that to be insulting—I do doubt an AI book would touch hearts as hers apparently do because it would lack her ineffable humanity. But even if an AI novel became a popular success, it wouldn't mean that AI had bested Nabokov or Woolf or DFW or … well, it's a very large list, and I'm not even claiming these as anything more than the first three whose names came to mind.(And in answer to Elon, sure, if I had to choose, I guess I'd rather live under the rule of Marcus Aurelius than Caligula's. But in fact I wouldn't get a vote on that, and I'd rather not live under an emperor at all.)\n"</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.8 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. 
```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("davidadamczyk/setfit-model-2") # Run inference preds = model("Aaron 100 percent. citizens united was a huge win for Russian citizen Vlad and Chinese citizen Xi. ") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:-------|:----| | Word count | 6 | 80.325 | 276 | | Label | Training Sample Count | |:------|:----------------------| | no | 18 | | yes | 22 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 120 - body_learning_rate: (2e-05, 2e-05) - head_learning_rate: 2e-05 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0017 | 1 | 0.4496 | - | | 0.0833 | 50 | 0.1797 | - | | 0.1667 | 100 | 0.0034 | - | | 0.25 | 150 | 0.0003 | - | | 0.3333 | 200 | 0.0002 | - | | 0.4167 | 250 | 0.0002 | - | | 0.5 | 300 | 0.0001 | - | | 0.5833 | 350 | 0.0001 | - | | 0.6667 | 400 | 0.0001 | - | | 0.75 | 450 | 0.0001 | - | | 0.8333 | 500 | 0.0001 | - | | 0.9167 | 550 | 0.0001 | - | | 1.0 | 600 | 0.0001 | - | ### Framework Versions - Python: 3.10.13 - SetFit: 1.1.0 - Sentence Transformers: 3.0.1 - Transformers: 4.45.2 - PyTorch: 2.4.0+cu124 - Datasets: 2.21.0 - Tokenizers: 0.20.0 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
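As a small follow-up to the inference example above, the sketch below shows how the same checkpoint can return class probabilities instead of hard labels, which is useful for thresholding. The input texts are invented, and the `predict_proba` call assumes the standard SetFit API for classifier heads such as the logistic-regression head used here.

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("davidadamczyk/setfit-model-2")

texts = [
    "The agency should investigate foreign interference in the election.",  # invented
    "Most television scripts could be written by a chatbot.",               # invented
]

labels = model.predict(texts)        # hard labels, e.g. "yes" / "no"
probs = model.predict_proba(texts)   # per-class probabilities from the LogisticRegression head
print(labels)
print(probs)
```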
{"base_model": "sentence-transformers/all-mpnet-base-v2", "library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "There is, of course, much to digest. I hope that these rubes and those who incited them are locked up, along with the fake electors and their advisors, and those who conspired to convince elections officials to violate the law, and finally, those who have and continue to threaten true Americans just doing their constitution-based jobs. One thing jumps out. Judge McFadden, who seems willing to demand that the government prove its case beyond a reasonable doubt, also seems to be willing to sentence convicted lawbreakers to serious time. That he acquitted the guy who claimed the police let him gives me confidence that these are not sham trials.The thing that I haven’t heard much about are the firings, trials, convictions, and sentences of those LEOs who aided and abetted the traitors. That would include the cops who let Mr. Martin enter the Capitol, and those on Trump’s secret service detail who may have been aiding Trump’s efforts to foment a riot.\n"}, {"text": "Both Vladimir Putin and Yevgeny Prigozhin are international war criminals.Both also undermined US elections in favor of Trump.<a href=\"https://www.reuters.com/world/us/russias-prigozhin-admits-interfering-us-elections-2022-11-07\" target=\"_blank\">https://www.reuters.com/world/us/russias-prigozhin-admits-interfering-us-elections-2022-11-07</a>/\n"}, {"text": "Aaron 100 percent. citizens united was a huge win for Russian citizen Vlad and Chinese citizen Xi.\n"}, {"text": "George Corsetti “Russia did NOT interfere in the 2016 election.”Sorry George, this is not true. Read the Russia report, it details more than a dozen felonies committed by TFG and his family and Campaign personnel during the 2015/16 Campaign along with evidence of Russian hackers and agents directly interfering in the 2016 election.\n"}, {"text": "Ms.Renkl does a nice job here, yet only hints at the decimation to public schools, libraries, governance, and healthcare by Bill Lee and the Red Legislators .Tennessee has a $50 B per year budget, $25B 0f this comes from federal government. It is a wealthy state ranking in the top 16 economically and 3rd in fiscal stability ( USNews).The stability comes from the egregious, wrongheaded use of federal monies earmarked for public schools and healthcare,Governor controls all Federal school and healthcare dollars rather than decimating to citizens. The US tax payer is subsidizing this state as the Governor and legislators deny ACA low cost insurance to WORKING poor and the Governor used for unrelated purposes. . Federal public school monies are used to subsidize private schools and Lee’s pet project:private DeVos/Hillsdale religious charter schools. US tax payers should be made aware of the mishandling of our tax dollars in support of the ultra conservative regime.\n"}], "inference": true, "model-index": [{"name": "SetFit with sentence-transformers/all-mpnet-base-v2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "Unknown", "type": "unknown", "split": "test"}, "metrics": [{"type": "accuracy", "value": 0.8, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,252
hopkins/mbart-finetuned-eng-ind-31
hopkins
translation
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-07-03T00:17:51Z
2023-07-03T00:31:21+00:00
11
0
--- metrics: - bleu tags: - translation - generated_from_trainer model-index: - name: mbart-finetuned-eng-ind-31 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-finetuned-eng-ind-31 This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.7681 - Bleu: 21.8896 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-finetuned-eng-ind-31 This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.7681 - Bleu: 21.8896 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
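The card gives training details but no inference snippet. A hedged sketch follows: it assumes the checkpoint keeps the tokenizer and language codes of its base model, facebook/mbart-large-50-many-to-many-mmt, and that "eng-ind" in the repository name means English to Indonesian (`en_XX` to `id_ID`).

```python
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

model_id = "hopkins/mbart-finetuned-eng-ind-31"
tokenizer = MBart50TokenizerFast.from_pretrained(model_id)
model = MBartForConditionalGeneration.from_pretrained(model_id)

tokenizer.src_lang = "en_XX"  # assumed source language: English
inputs = tokenizer("The weather is nice today.", return_tensors="pt")

generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.lang_code_to_id["id_ID"],  # assumed target: Indonesian
    max_length=64,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```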
{"metrics": ["bleu"], "tags": ["translation", "generated_from_trainer"], "model-index": [{"name": "mbart-finetuned-eng-ind-31", "results": []}]}
task
[ "TRANSLATION" ]
40,253
gokuls/hBERTv1_new_pretrain_w_init_48_mnli
gokuls
text-classification
[ "transformers", "pytorch", "tensorboard", "hybridbert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-06T13:43:23Z
2023-06-07T00:03:36+00:00
8
0
--- datasets: - glue language: - en metrics: - accuracy tags: - generated_from_trainer model-index: - name: hBERTv1_new_pretrain_w_init_48_mnli results: - task: type: text-classification name: Text Classification dataset: name: GLUE MNLI type: glue config: mnli split: validation_matched args: mnli metrics: - type: accuracy value: 0.5911310008136696 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv1_new_pretrain_w_init_48_mnli This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48](https://huggingface.co/gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48) on the GLUE MNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.8954 - Accuracy: 0.5911 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 1.031 | 1.0 | 3068 | 0.9883 | 0.4718 | | 0.9713 | 2.0 | 6136 | 0.9752 | 0.5146 | | 0.9588 | 3.0 | 9204 | 0.9702 | 0.5167 | | 0.953 | 4.0 | 12272 | 0.9590 | 0.5355 | | 0.9323 | 5.0 | 15340 | 0.9371 | 0.5416 | | 0.9068 | 6.0 | 18408 | 0.9213 | 0.5590 | | 0.8891 | 7.0 | 21476 | 0.9168 | 0.5608 | | 0.8741 | 8.0 | 24544 | 0.9472 | 0.5453 | | 0.8589 | 9.0 | 27612 | 0.9185 | 0.5793 | | 0.8432 | 10.0 | 30680 | 0.9134 | 0.5747 | | 0.8289 | 11.0 | 33748 | 0.9139 | 0.5753 | | 0.8137 | 12.0 | 36816 | 0.9113 | 0.5767 | | 0.7988 | 13.0 | 39884 | 0.8925 | 0.5917 | | 0.7828 | 14.0 | 42952 | 0.9037 | 0.5859 | | 0.7705 | 15.0 | 46020 | 0.9129 | 0.5866 | | 0.7576 | 16.0 | 49088 | 0.9237 | 0.5879 | | 0.7463 | 17.0 | 52156 | 0.9212 | 0.5897 | | 0.7341 | 18.0 | 55224 | 0.9226 | 0.5918 | ### Framework versions - Transformers 4.29.2 - Pytorch 1.14.0a0+410ce96 - Datasets 2.12.0 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv1_new_pretrain_w_init_48_mnli This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48](https://huggingface.co/gokuls/bert_12_layer_model_v1_complete_training_new_wt_init_48) on the GLUE MNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.8954 - Accuracy: 0.5911 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 1.031 | 1.0 | 3068 | 0.9883 | 0.4718 | | 0.9713 | 2.0 | 6136 | 0.9752 | 0.5146 | | 0.9588 | 3.0 | 9204 | 0.9702 | 0.5167 | | 0.953 | 4.0 | 12272 | 0.9590 | 0.5355 | | 0.9323 | 5.0 | 15340 | 0.9371 | 0.5416 | | 0.9068 | 6.0 | 18408 | 0.9213 | 0.5590 | | 0.8891 | 7.0 | 21476 | 0.9168 | 0.5608 | | 0.8741 | 8.0 | 24544 | 0.9472 | 0.5453 | | 0.8589 | 9.0 | 27612 | 0.9185 | 0.5793 | | 0.8432 | 10.0 | 30680 | 0.9134 | 0.5747 | | 0.8289 | 11.0 | 33748 | 0.9139 | 0.5753 | | 0.8137 | 12.0 | 36816 | 0.9113 | 0.5767 | | 0.7988 | 13.0 | 39884 | 0.8925 | 0.5917 | | 0.7828 | 14.0 | 42952 | 0.9037 | 0.5859 | | 0.7705 | 15.0 | 46020 | 0.9129 | 0.5866 | | 0.7576 | 16.0 | 49088 | 0.9237 | 0.5879 | | 0.7463 | 17.0 | 52156 | 0.9212 | 0.5897 | | 0.7341 | 18.0 | 55224 | 0.9226 | 0.5918 | ### Framework versions - Transformers 4.29.2 - Pytorch 1.14.0a0+410ce96 - Datasets 2.12.0 - Tokenizers 0.13.3
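The card reports MNLI accuracy but no usage snippet. The sketch below is assumption-heavy: the `hybridbert` architecture is not a stock `transformers` class, so loading may require `trust_remote_code=True`, and the entailment/neutral/contradiction label order should be read from `model.config.id2label` rather than taken from this example.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "gokuls/hBERTv1_new_pretrain_w_init_48_mnli"

# trust_remote_code=True is an assumption: the custom hybridbert architecture may
# need code shipped with the repository rather than a built-in transformers class.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id, trust_remote_code=True)

premise = "A soccer game with multiple males playing."
hypothesis = "Some men are playing a sport."

inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits

pred = int(logits.argmax(dim=-1))
# MNLI checkpoints differ in label order; check the checkpoint's own mapping.
print(pred, model.config.id2label.get(pred, "see config.id2label"))
```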
{"datasets": ["glue"], "language": ["en"], "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "hBERTv1_new_pretrain_w_init_48_mnli", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE MNLI", "type": "glue", "config": "mnli", "split": "validation_matched", "args": "mnli"}, "metrics": [{"type": "accuracy", "value": 0.5911310008136696, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,254
gaudi/opus-mt-ca-fr-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-17T15:20:48Z
2024-10-18T23:25:50+00:00
6
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-ca-fr) - This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Currently supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against the `newstest2014` (En -> De) dataset, the benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs; see the benchmark scripts for more details and to reproduce these numbers.
## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-ca-fr).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-ca-fr --output_dir ./ctranslate2/opus-mt-ca-fr-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-ca-fr-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-ca-fr-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda.
) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-ca-fr-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be identical to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-ca-fr) by Helsinki-NLP.
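The checkpoint information above lists `compute_type=int8` for `device="cpu"`, but both samples are written for CUDA. A CPU-only variant of the same ctranslate2 snippet differs only in those two arguments; this is a sketch based directly on the card's own example code (the Catalan input sentence is just an illustration):

```python
from ctranslate2 import Translator
import transformers

model_dir = "./opus-mt-ca-fr-ctranslate2"  # Path to the cloned model directory.

# Same Translator as above, switched to the CPU settings the card recommends.
translator = Translator(
    model_path=model_dir,
    device="cpu",          # CPU-only host.
    inter_threads=1,       # Maximum number of parallel translations.
    intra_threads=4,       # Number of OpenMP threads per translator.
    compute_type="int8",   # int8 quantization for CPU inference.
)

tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)
source = tokenizer.convert_ids_to_tokens(tokenizer.encode("Bon dia, com estàs?"))
results = translator.translate_batch([source])
target = results[0].hypotheses[0]
print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))
```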
null
Non_BioNLP
# Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-ca-fr) - This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-ca-fr).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library.
A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-ca-fr --output_dir ./ctranslate2/opus-mt-ca-fr-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-ca-fr-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-ca-fr-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-ca-fr-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be identical to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-ca-fr) by Helsinki-NLP.
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
40,255
google/madlad400-3b-mt
google
translation
[ "transformers", "safetensors", "gguf", "t5", "text2text-generation", "text-generation-inference", "translation", "multilingual", "en", "ru", "es", "fr", "de", "it", "pt", "pl", "nl", "vi", "tr", "sv", "id", "ro", "cs", "zh", "hu", "ja", "th", "fi", "fa", "uk", "da", "el", "no", "bg", "sk", "ko", "ar", "lt", "ca", "sl", "he", "et", "lv", "hi", "sq", "ms", "az", "sr", "ta", "hr", "kk", "is", "ml", "mr", "te", "af", "gl", "fil", "be", "mk", "eu", "bn", "ka", "mn", "bs", "uz", "ur", "sw", "yue", "ne", "kn", "kaa", "gu", "si", "cy", "eo", "la", "hy", "ky", "tg", "ga", "mt", "my", "km", "tt", "so", "ku", "ps", "pa", "rw", "lo", "ha", "dv", "fy", "lb", "ckb", "mg", "gd", "am", "ug", "ht", "grc", "hmn", "sd", "jv", "mi", "tk", "ceb", "yi", "ba", "fo", "or", "xh", "su", "kl", "ny", "sm", "sn", "co", "zu", "ig", "yo", "pap", "st", "haw", "as", "oc", "cv", "lus", "tet", "gsw", "sah", "br", "rm", "sa", "bo", "om", "se", "ce", "cnh", "ilo", "hil", "udm", "os", "lg", "ti", "vec", "ts", "tyv", "kbd", "ee", "iba", "av", "kha", "to", "tn", "nso", "fj", "zza", "ak", "ada", "otq", "dz", "bua", "cfm", "ln", "chm", "gn", "krc", "wa", "hif", "yua", "srn", "war", "rom", "bik", "pam", "sg", "lu", "ady", "kbp", "syr", "ltg", "myv", "iso", "kac", "bho", "ay", "kum", "qu", "za", "pag", "ngu", "ve", "pck", "zap", "tyz", "hui", "bbc", "tzo", "tiv", "ksd", "gom", "min", "ang", "nhe", "bgp", "nzi", "nnb", "nv", "zxx", "bci", "kv", "new", "mps", "alt", "meu", "bew", "fon", "iu", "abt", "mgh", "mnw", "tvl", "dov", "tlh", "ho", "kw", "mrj", "meo", "crh", "mbt", "emp", "ace", "ium", "mam", "gym", "mai", "crs", "pon", "ubu", "fip", "quc", "gv", "kj", "btx", "ape", "chk", "rcf", "shn", "tzh", "mdf", "ppk", "ss", "gag", "cab", "kri", "seh", "ibb", "tbz", "bru", "enq", "ach", "cuk", "kmb", "wo", "kek", "qub", "tab", "bts", "kos", "rwo", "cak", "tuc", "bum", "cjk", "gil", "stq", "tsg", "quh", "mak", "arn", "ban", "jiv", "sja", "yap", "tcy", "toj", "twu", "xal", "amu", "rmc", "hus", "nia", "kjh", "bm", "guh", "mas", "acf", "dtp", "ksw", "bzj", "din", "zne", "mad", "msi", "mag", "mkn", "kg", "lhu", "ch", "qvi", "mh", "djk", "sus", "mfe", "srm", "dyu", "ctu", "gui", "pau", "inb", "bi", "mni", "guc", "jam", "wal", "jac", "bas", "gor", "skr", "nyu", "noa", "sda", "gub", "nog", "cni", "teo", "tdx", "sxn", "rki", "nr", "frp", "alz", "taj", "lrc", "cce", "rn", "jvn", "hvn", "nij", "dwr", "izz", "msm", "bus", "ktu", "chr", "maz", "tzj", "suz", "knj", "bim", "gvl", "bqc", "tca", "pis", "prk", "laj", "mel", "qxr", "niq", "ahk", "shp", "hne", "spp", "koi", "krj", "quf", "luz", "agr", "tsc", "mqy", "gof", "gbm", "miq", "dje", "awa", "bjj", "qvz", "sjp", "tll", "raj", "kjg", "bgz", "quy", "cbk", "akb", "oj", "ify", "mey", "ks", "cac", "brx", "qup", "syl", "jax", "ff", "ber", "tks", "trp", "mrw", "adh", "smt", "srr", "ffm", "qvc", "mtr", "ann", "aa", "noe", "nut", "gyn", "kwi", "xmm", "msb", "dataset:allenai/MADLAD-400", "arxiv:2309.04662", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-27T15:58:33Z
2023-11-27T15:58:35+00:00
13,810
116
--- datasets: - allenai/MADLAD-400 language: - multilingual - en - ru - es - fr - de - it - pt - pl - nl - vi - tr - sv - id - ro - cs - zh - hu - ja - th - fi - fa - uk - da - el - 'no' - bg - sk - ko - ar - lt - ca - sl - he - et - lv - hi - sq - ms - az - sr - ta - hr - kk - is - ml - mr - te - af - gl - fil - be - mk - eu - bn - ka - mn - bs - uz - ur - sw - yue - ne - kn - kaa - gu - si - cy - eo - la - hy - ky - tg - ga - mt - my - km - tt - so - ku - ps - pa - rw - lo - ha - dv - fy - lb - ckb - mg - gd - am - ug - ht - grc - hmn - sd - jv - mi - tk - ceb - yi - ba - fo - or - xh - su - kl - ny - sm - sn - co - zu - ig - yo - pap - st - haw - as - oc - cv - lus - tet - gsw - sah - br - rm - sa - bo - om - se - ce - cnh - ilo - hil - udm - os - lg - ti - vec - ts - tyv - kbd - ee - iba - av - kha - to - tn - nso - fj - zza - ak - ada - otq - dz - bua - cfm - ln - chm - gn - krc - wa - hif - yua - srn - war - rom - bik - pam - sg - lu - ady - kbp - syr - ltg - myv - iso - kac - bho - ay - kum - qu - za - pag - ngu - ve - pck - zap - tyz - hui - bbc - tzo - tiv - ksd - gom - min - ang - nhe - bgp - nzi - nnb - nv - zxx - bci - kv - new - mps - alt - meu - bew - fon - iu - abt - mgh - mnw - tvl - dov - tlh - ho - kw - mrj - meo - crh - mbt - emp - ace - ium - mam - gym - mai - crs - pon - ubu - fip - quc - gv - kj - btx - ape - chk - rcf - shn - tzh - mdf - ppk - ss - gag - cab - kri - seh - ibb - tbz - bru - enq - ach - cuk - kmb - wo - kek - qub - tab - bts - kos - rwo - cak - tuc - bum - cjk - gil - stq - tsg - quh - mak - arn - ban - jiv - sja - yap - tcy - toj - twu - xal - amu - rmc - hus - nia - kjh - bm - guh - mas - acf - dtp - ksw - bzj - din - zne - mad - msi - mag - mkn - kg - lhu - ch - qvi - mh - djk - sus - mfe - srm - dyu - ctu - gui - pau - inb - bi - mni - guc - jam - wal - jac - bas - gor - skr - nyu - noa - sda - gub - nog - cni - teo - tdx - sxn - rki - nr - frp - alz - taj - lrc - cce - rn - jvn - hvn - nij - dwr - izz - msm - bus - ktu - chr - maz - tzj - suz - knj - bim - gvl - bqc - tca - pis - prk - laj - mel - qxr - niq - ahk - shp - hne - spp - koi - krj - quf - luz - agr - tsc - mqy - gof - gbm - miq - dje - awa - bjj - qvz - sjp - tll - raj - kjg - bgz - quy - cbk - akb - oj - ify - mey - ks - cac - brx - qup - syl - jax - ff - ber - tks - trp - mrw - adh - smt - srr - ffm - qvc - mtr - ann - kaa - aa - noe - nut - gyn - kwi - xmm - msb library_name: transformers license: apache-2.0 pipeline_tag: translation tags: - text2text-generation - text-generation-inference widget: - text: <2en> Como vai, amigo? example_title: Translation to English - text: <2de> Do you speak German? example_title: Translation to German --- # Model Card for MADLAD-400-3B-MT # Table of Contents 0. [TL;DR](#TL;DR) 1. [Model Details](#model-details) 2. [Usage](#usage) 3. [Uses](#uses) 4. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 5. [Training Details](#training-details) 6. [Evaluation](#evaluation) 7. [Environmental Impact](#environmental-impact) 8. [Citation](#citation) # TL;DR MADLAD-400-3B-MT is a multilingual machine translation model based on the T5 architecture that was trained on 1 trillion tokens covering over 450 languages using publicly available data. It is competitive with models that are significantly larger. **Disclaimer**: [Juarez Bochi](https://huggingface.co/jbochi), who was not involved in this research, converted the original weights and wrote the contents of this model card based on the original paper and Flan-T5. 
# Model Details ## Model Description - **Model type:** Language model - **Language(s) (NLP):** Multilingual (400+ languages) - **License:** Apache 2.0 - **Related Models:** [All MADLAD-400 Checkpoints](https://huggingface.co/models?search=madlad) - **Original Checkpoints:** [All Original MADLAD-400 Checkpoints](https://github.com/google-research/google-research/tree/master/madlad_400) - **Resources for more information:** - [Research paper](https://arxiv.org/abs/2309.04662) - [GitHub Repo](https://github.com/google-research/t5x) - [Hugging Face MADLAD-400 Docs (Similar to T5) ](https://huggingface.co/docs/transformers/model_doc/MADLAD-400) - [Pending PR](https://github.com/huggingface/transformers/pull/27471) # Usage Find below some example scripts on how to use the model: ## Using the Pytorch model with `transformers` ### Running the model on a CPU or GPU <details> <summary> Click to expand </summary> First, install the Python packages that are required: `pip install transformers accelerate sentencepiece` ```python from transformers import T5ForConditionalGeneration, T5Tokenizer model_name = 'jbochi/madlad400-3b-mt' model = T5ForConditionalGeneration.from_pretrained(model_name, device_map="auto") tokenizer = T5Tokenizer.from_pretrained(model_name) text = "<2pt> I love pizza!" input_ids = tokenizer(text, return_tensors="pt").input_ids.to(model.device) outputs = model.generate(input_ids=input_ids) tokenizer.decode(outputs[0], skip_special_tokens=True) # Eu adoro pizza! ``` </details> ## Running the model with Candle <details> <summary> Click to expand </summary> Usage with [candle](https://github.com/huggingface/candle): ```bash $ cargo run --example t5 --release -- \ --model-id "jbochi/madlad400-3b-mt" \ --prompt "<2de> How are you, my friend?" \ --decode --temperature 0 ``` We also provide a quantized model (1.65 GB vs the original 11.8 GB file): ``` cargo run --example quantized-t5 --release -- \ --model-id "jbochi/madlad400-3b-mt" --weight-file "model-q4k.gguf" \ --prompt "<2de> How are you, my friend?" \ --temperature 0 ... Wie geht es dir, mein Freund? ``` </details> # Uses ## Direct Use and Downstream Use > Primary intended uses: Machine Translation and multilingual NLP tasks on over 400 languages. > Primary intended users: Research community. ## Out-of-Scope Use > These models are trained on general domain data and are therefore not meant to > work on domain-specific models out-of-the box. Moreover, these research models have not been assessed > for production usecases. # Bias, Risks, and Limitations > We note that we evaluate on only 204 of the languages supported by these models and on machine translation > and few-shot machine translation tasks. Users must consider use of this model carefully for their own > usecase. ## Ethical considerations and risks > We trained these models with MADLAD-400 and publicly available data to create baseline models that > support NLP for over 400 languages, with a focus on languages underrepresented in large-scale corpora. > Given that these models were trained with web-crawled datasets that may contain sensitive, offensive or > otherwise low-quality content despite extensive preprocessing, it is still possible that these issues to the > underlying training data may cause differences in model performance and toxic (or otherwise problematic) > output for certain domains. Moreover, large models are dual use technologies that have specific risks > associated with their use and development. 
We point the reader to surveys such as those written by > Weidinger et al. or Bommasani et al. for a more detailed discussion of these risks, and to Liebling > et al. for a thorough discussion of the risks of machine translation systems. ## Known Limitations More information needed ## Sensitive Use: More information needed # Training Details > We train models of various sizes: a 3B, 32-layer parameter model, > a 7.2B 48-layer parameter model and a 10.7B 32-layer parameter model. > We share all parameters of the model across language pairs, > and use a Sentence Piece Model with 256k tokens shared on both the encoder and decoder > side. Each input sentence has a <2xx> token prepended to the source sentence to indicate the target > language. See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details. ## Training Data > For both the machine translation and language model, MADLAD-400 is used. For the machine translation > model, a combination of parallel datasources covering 157 languages is also used. Further details are > described in the [paper](https://arxiv.org/pdf/2309.04662.pdf). ## Training Procedure See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details. # Evaluation ## Testing Data, Factors & Metrics > For evaluation, we used WMT, NTREX, Flores-200 and Gatones datasets as described in Section 4.3 in the [paper](https://arxiv.org/pdf/2309.04662.pdf). > The translation quality of this model varies based on language, as seen in the paper, and likely varies on > domain, though we have not assessed this. ## Results ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/EzsMD1AwCuFH0S0DeD-n8.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/CJ5zCUVy7vTU76Lc8NZcK.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/NK0S-yVeWuhKoidpLYh3m.png) See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details. # Environmental Impact More information needed # Citation **BibTeX:** ```bibtex @misc{kudugunta2023madlad400, title={MADLAD-400: A Multilingual And Document-Level Large Audited Dataset}, author={Sneha Kudugunta and Isaac Caswell and Biao Zhang and Xavier Garcia and Christopher A. Choquette-Choo and Katherine Lee and Derrick Xin and Aditya Kusupati and Romi Stella and Ankur Bapna and Orhan Firat}, year={2023}, eprint={2309.04662}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
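The usage examples above translate one sentence at a time. Since the only task-specific plumbing is the `<2xx>` target-language token described in the training details, a small batched helper is straightforward. This is an illustrative sketch, not part of the original card: it reuses the `jbochi/madlad400-3b-mt` mirror from the card's own example, and whether a particular language code is supported should be checked against the tokenizer's vocabulary.

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

model_name = "jbochi/madlad400-3b-mt"  # mirror used in the card's example above
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name, device_map="auto")

def translate(texts, target_lang):
    """Prepend the <2xx> target-language token and translate a batch of sentences."""
    batch = [f"<2{target_lang}> {text}" for text in texts]
    inputs = tokenizer(batch, return_tensors="pt", padding=True).to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)

print(translate(["I love pizza!", "How are you today?"], "de"))
print(translate(["I love pizza!"], "ja"))
```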
null
TBD
# Model Card for MADLAD-400-3B-MT # Table of Contents 0. [TL;DR](#TL;DR) 1. [Model Details](#model-details) 2. [Usage](#usage) 3. [Uses](#uses) 4. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 5. [Training Details](#training-details) 6. [Evaluation](#evaluation) 7. [Environmental Impact](#environmental-impact) 8. [Citation](#citation) # TL;DR MADLAD-400-3B-MT is a multilingual machine translation model based on the T5 architecture that was trained on 1 trillion tokens covering over 450 languages using publicly available data. It is competitive with models that are significantly larger. **Disclaimer**: [Juarez Bochi](https://huggingface.co/jbochi), who was not involved in this research, converted the original weights and wrote the contents of this model card based on the original paper and Flan-T5. # Model Details ## Model Description - **Model type:** Language model - **Language(s) (NLP):** Multilingual (400+ languages) - **License:** Apache 2.0 - **Related Models:** [All MADLAD-400 Checkpoints](https://huggingface.co/models?search=madlad) - **Original Checkpoints:** [All Original MADLAD-400 Checkpoints](https://github.com/google-research/google-research/tree/master/madlad_400) - **Resources for more information:** - [Research paper](https://arxiv.org/abs/2309.04662) - [GitHub Repo](https://github.com/google-research/t5x) - [Hugging Face MADLAD-400 Docs (Similar to T5) ](https://huggingface.co/docs/transformers/model_doc/MADLAD-400) - [Pending PR](https://github.com/huggingface/transformers/pull/27471) # Usage Find below some example scripts on how to use the model: ## Using the Pytorch model with `transformers` ### Running the model on a CPU or GPU <details> <summary> Click to expand </summary> First, install the Python packages that are required: `pip install transformers accelerate sentencepiece` ```python from transformers import T5ForConditionalGeneration, T5Tokenizer model_name = 'jbochi/madlad400-3b-mt' model = T5ForConditionalGeneration.from_pretrained(model_name, device_map="auto") tokenizer = T5Tokenizer.from_pretrained(model_name) text = "<2pt> I love pizza!" input_ids = tokenizer(text, return_tensors="pt").input_ids.to(model.device) outputs = model.generate(input_ids=input_ids) tokenizer.decode(outputs[0], skip_special_tokens=True) # Eu adoro pizza! ``` </details> ## Running the model with Candle <details> <summary> Click to expand </summary> Usage with [candle](https://github.com/huggingface/candle): ```bash $ cargo run --example t5 --release -- \ --model-id "jbochi/madlad400-3b-mt" \ --prompt "<2de> How are you, my friend?" \ --decode --temperature 0 ``` We also provide a quantized model (1.65 GB vs the original 11.8 GB file): ``` cargo run --example quantized-t5 --release -- \ --model-id "jbochi/madlad400-3b-mt" --weight-file "model-q4k.gguf" \ --prompt "<2de> How are you, my friend?" \ --temperature 0 ... Wie geht es dir, mein Freund? ``` </details> # Uses ## Direct Use and Downstream Use > Primary intended uses: Machine Translation and multilingual NLP tasks on over 400 languages. > Primary intended users: Research community. ## Out-of-Scope Use > These models are trained on general domain data and are therefore not meant to > work on domain-specific models out-of-the box. Moreover, these research models have not been assessed > for production usecases. # Bias, Risks, and Limitations > We note that we evaluate on only 204 of the languages supported by these models and on machine translation > and few-shot machine translation tasks. 
Users must consider use of this model carefully for their own > usecase. ## Ethical considerations and risks > We trained these models with MADLAD-400 and publicly available data to create baseline models that > support NLP for over 400 languages, with a focus on languages underrepresented in large-scale corpora. > Given that these models were trained with web-crawled datasets that may contain sensitive, offensive or > otherwise low-quality content despite extensive preprocessing, it is still possible that these issues to the > underlying training data may cause differences in model performance and toxic (or otherwise problematic) > output for certain domains. Moreover, large models are dual use technologies that have specific risks > associated with their use and development. We point the reader to surveys such as those written by > Weidinger et al. or Bommasani et al. for a more detailed discussion of these risks, and to Liebling > et al. for a thorough discussion of the risks of machine translation systems. ## Known Limitations More information needed ## Sensitive Use: More information needed # Training Details > We train models of various sizes: a 3B, 32-layer parameter model, > a 7.2B 48-layer parameter model and a 10.7B 32-layer parameter model. > We share all parameters of the model across language pairs, > and use a Sentence Piece Model with 256k tokens shared on both the encoder and decoder > side. Each input sentence has a <2xx> token prepended to the source sentence to indicate the target > language. See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details. ## Training Data > For both the machine translation and language model, MADLAD-400 is used. For the machine translation > model, a combination of parallel datasources covering 157 languages is also used. Further details are > described in the [paper](https://arxiv.org/pdf/2309.04662.pdf). ## Training Procedure See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details. # Evaluation ## Testing Data, Factors & Metrics > For evaluation, we used WMT, NTREX, Flores-200 and Gatones datasets as described in Section 4.3 in the [paper](https://arxiv.org/pdf/2309.04662.pdf). > The translation quality of this model varies based on language, as seen in the paper, and likely varies on > domain, though we have not assessed this. ## Results ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/EzsMD1AwCuFH0S0DeD-n8.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/CJ5zCUVy7vTU76Lc8NZcK.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b7f632037d6452a321fa15/NK0S-yVeWuhKoidpLYh3m.png) See the [research paper](https://arxiv.org/pdf/2309.04662.pdf) for further details. # Environmental Impact More information needed # Citation **BibTeX:** ```bibtex @misc{kudugunta2023madlad400, title={MADLAD-400: A Multilingual And Document-Level Large Audited Dataset}, author={Sneha Kudugunta and Isaac Caswell and Biao Zhang and Xavier Garcia and Christopher A. Choquette-Choo and Katherine Lee and Derrick Xin and Aditya Kusupati and Romi Stella and Ankur Bapna and Orhan Firat}, year={2023}, eprint={2309.04662}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
{"datasets": ["allenai/MADLAD-400"], "language": ["multilingual", "en", "ru", "es", "fr", "de", "it", "pt", "pl", "nl", "vi", "tr", "sv", "id", "ro", "cs", "zh", "hu", "ja", "th", "fi", "fa", "uk", "da", "el", "no", "bg", "sk", "ko", "ar", "lt", "ca", "sl", "he", "et", "lv", "hi", "sq", "ms", "az", "sr", "ta", "hr", "kk", "is", "ml", "mr", "te", "af", "gl", "fil", "be", "mk", "eu", "bn", "ka", "mn", "bs", "uz", "ur", "sw", "yue", "ne", "kn", "kaa", "gu", "si", "cy", "eo", "la", "hy", "ky", "tg", "ga", "mt", "my", "km", "tt", "so", "ku", "ps", "pa", "rw", "lo", "ha", "dv", "fy", "lb", "ckb", "mg", "gd", "am", "ug", "ht", "grc", "hmn", "sd", "jv", "mi", "tk", "ceb", "yi", "ba", "fo", "or", "xh", "su", "kl", "ny", "sm", "sn", "co", "zu", "ig", "yo", "pap", "st", "haw", "as", "oc", "cv", "lus", "tet", "gsw", "sah", "br", "rm", "sa", "bo", "om", "se", "ce", "cnh", "ilo", "hil", "udm", "os", "lg", "ti", "vec", "ts", "tyv", "kbd", "ee", "iba", "av", "kha", "to", "tn", "nso", "fj", "zza", "ak", "ada", "otq", "dz", "bua", "cfm", "ln", "chm", "gn", "krc", "wa", "hif", "yua", "srn", "war", "rom", "bik", "pam", "sg", "lu", "ady", "kbp", "syr", "ltg", "myv", "iso", "kac", "bho", "ay", "kum", "qu", "za", "pag", "ngu", "ve", "pck", "zap", "tyz", "hui", "bbc", "tzo", "tiv", "ksd", "gom", "min", "ang", "nhe", "bgp", "nzi", "nnb", "nv", "zxx", "bci", "kv", "new", "mps", "alt", "meu", "bew", "fon", "iu", "abt", "mgh", "mnw", "tvl", "dov", "tlh", "ho", "kw", "mrj", "meo", "crh", "mbt", "emp", "ace", "ium", "mam", "gym", "mai", "crs", "pon", "ubu", "fip", "quc", "gv", "kj", "btx", "ape", "chk", "rcf", "shn", "tzh", "mdf", "ppk", "ss", "gag", "cab", "kri", "seh", "ibb", "tbz", "bru", "enq", "ach", "cuk", "kmb", "wo", "kek", "qub", "tab", "bts", "kos", "rwo", "cak", "tuc", "bum", "cjk", "gil", "stq", "tsg", "quh", "mak", "arn", "ban", "jiv", "sja", "yap", "tcy", "toj", "twu", "xal", "amu", "rmc", "hus", "nia", "kjh", "bm", "guh", "mas", "acf", "dtp", "ksw", "bzj", "din", "zne", "mad", "msi", "mag", "mkn", "kg", "lhu", "ch", "qvi", "mh", "djk", "sus", "mfe", "srm", "dyu", "ctu", "gui", "pau", "inb", "bi", "mni", "guc", "jam", "wal", "jac", "bas", "gor", "skr", "nyu", "noa", "sda", "gub", "nog", "cni", "teo", "tdx", "sxn", "rki", "nr", "frp", "alz", "taj", "lrc", "cce", "rn", "jvn", "hvn", "nij", "dwr", "izz", "msm", "bus", "ktu", "chr", "maz", "tzj", "suz", "knj", "bim", "gvl", "bqc", "tca", "pis", "prk", "laj", "mel", "qxr", "niq", "ahk", "shp", "hne", "spp", "koi", "krj", "quf", "luz", "agr", "tsc", "mqy", "gof", "gbm", "miq", "dje", "awa", "bjj", "qvz", "sjp", "tll", "raj", "kjg", "bgz", "quy", "cbk", "akb", "oj", "ify", "mey", "ks", "cac", "brx", "qup", "syl", "jax", "ff", "ber", "tks", "trp", "mrw", "adh", "smt", "srr", "ffm", "qvc", "mtr", "ann", "kaa", "aa", "noe", "nut", "gyn", "kwi", "xmm", "msb"], "library_name": "transformers", "license": "apache-2.0", "pipeline_tag": "translation", "tags": ["text2text-generation", "text-generation-inference"], "widget": [{"text": "<2en> Como vai, amigo?", "example_title": "Translation to English"}, {"text": "<2de> Do you speak German?", "example_title": "Translation to German"}]}
task
[ "TRANSLATION" ]
40,256
LoneStriker/shisa-7b-v1-5.0bpw-h6-exl2
LoneStriker
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "ja", "en", "dataset:augmxnt/ultra-orca-boros-en-ja-v1", "dataset:Open-Orca/SlimOrca", "dataset:augmxnt/shisa-en-ja-dpo-v1", "arxiv:2310.05914", "arxiv:2305.18290", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-12-07T18:07:21Z
2023-12-07T18:54:27+00:00
6
0
--- datasets: - augmxnt/ultra-orca-boros-en-ja-v1 - Open-Orca/SlimOrca - augmxnt/shisa-en-ja-dpo-v1 language: - ja - en license: apache-2.0 --- # Shisa 7B ![Shi-chan and Sa-chan/シーちゃんとサーちゃん](https://huggingface.co/augmxnt/shisa-7b-v1/resolve/main/shisa.webp) **Shisa 7B** (`shisa-7b-v1`) is a bilingual Japanese and English (JA/EN) general-purpose chat model that aims to achieve strong Japanese language performance while retaining robust English capabilities, using a synthetic-data driven approach. This model is based on [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) with a custom JA-optimized extended tokenizer that is >2X more efficient in Japanese than Mistral's original tokenizer. The base model was pre-trained for an additional 8B primarily Japanese tokens. It was then subsequently fine-tuned with an expanded, machine-translated version of [airoboros-3.1](https://huggingface.co/datasets/jondurbin/airoboros-3.1), a set of the highest-scoring items from [ultrafeedback_binarized](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized), and additional freshly generated [airoboros](https://github.com/jondurbin/airoboros) data directly to the target languages. We also release our base model, datasets, and pipeline code under a permissive Apache 2.0 license which can be used for any purpose, commercial or otherwise: * [shisa-base-7b-v1](https://huggingface.co/augmxnt/shisa-base-7b-v1) - our base model w/ an extended tokenizer and additional JA pre-training * [shisa-pretrain-en-ja-v1](https://huggingface.co/datasets/augmxnt/shisa-pretrain-en-ja-v1) - our pre-training data set * [ultra-orca-boros-en-ja](https://huggingface.co/datasets/augmxnt/ultra-orca-boros-en-ja-v1) - a synthetically generated, machine-translated, programmatically validated JA/EN fine-tuning dataset * [shisa-en-ja-dpo-v1](https://huggingface.co/datasets/augmxnt/shisa-en-ja-dpo-v1) - Small subset of DPO pairs from ultrafeedback, along with JA DPO pairs using GPT-4 generated items as the chosen value, and outputs from our preliminary 7b model as the rejected values * [Shisa repository](https://github.com/AUGMXNT/shisa) - this includes our translation, dataset generation, training, and evaluation code Moreover, we are in the process of publishing extended writeups and more details of our process, including ablation results, testing methodology, and key findings [on our project wiki](https://github.com/AUGMXNT/shisa/wiki) that may be of interest to fellow researchers. ## Fine-Tuning Our original intuition was to see if we could create a stronger Japanese model using the best [existing public JA training sets](https://github.com/AUGMXNT/shisa/wiki/A-Review-of-Public-Japanese-Training-Sets) and incorporating them. After initial review and testing, however, we decided that focusing solely on translation/generation of our own synthetic datasets could yield superior results with less training. We compared multiple translation tools and, via manual review, judged that while `gpt-4` almost always delivered the highest quality translations, Google's `text-bison-32k` was a good balance of quality, cost and throughput. Over various iterations, we refined our translation approach to include some additional algorithms for flagging and filtering invalid translations, re-translating and backfilling as necessary. We also took this project as an opportunity to apply some newer techniques such as incorporating [NEFTune](https://arxiv.org/abs/2310.05914) and [DPO](https://arxiv.org/abs/2305.18290) training. 
For our v1 release, we picked from our release candidates based on a significant amount of human preference testing (thousands of generations and multiple rounds of pairwise comparisons). We analyzed our results with both win/loss/draw and [BTL modeling](https://datascience.oneoffcoder.com/btl-model.html) (iLSR) using [choix](https://github.com/lucasmaystre/choix). The best candidate model was fine-tuned in a 3-step process: 1. First, the model was fine-tuned on `ultra-orca-boros-en-ja` and SlimOrca ([WandB Log](https://wandb.ai/jondurbin/shisa-7b-v1/runs/k8pfog9d/overview)) 2. Next, we performed one additional epoch using only a subset of Japanese ultra-orca-boros-en-ja items to enhance JA performance (as SlimOrca from the first step is mostly EN) ([WandB Log](https://wandb.ai/jondurbin/shisa-mega-7b-v1.1/runs/dopsr0o7/overview)) 3. Finally, the model was tuned using a DPOTrainer on a small subset of ultrafeedback (EN) and our own JA DPO dataset which uses gpt-4 outputs as the chosen values and outputs from stage 1's prelim model as rejected values. ([WandB Log](https://wandb.ai/jondurbin/shisa-mega-dpo-7b-v1.1)) During our training process, we also gained some key insights on [why some existing Japanese models seem to underperform](https://github.com/AUGMXNT/shisa/wiki/A-Review-of-Public-Japanese-Training-Sets#analysis) even versus models that have no additional JA training, and we hope that sharing this analysis will be useful to other teams developing Japanese language models. While we need to explore this further, as an experimental validation, we applied a version of our fine-tuning set onto an existing base model ("Gamma 7B") and the initial JA MT-Bench results suggest that we can drastically increase functional performance with our tuning approach: | Model | Score | | ------------------------------ | ----- | | shisa-gamma-7b-allsources-v0.4 | 5.65 | | ja-stablelm-instruct-gamma-7b* | 4.01 | ## Performance Throughout our training, we did extensive human evaluation for each model to cross-validate our model performance, and we are currently conducting ongoing larger scale manual head-to-head testing between models. Our intention is to open up and scale this data collection as we further develop our tools. For more information and updates, please see our [project wiki](https://github.com/AUGMXNT/shisa/wiki). While we believe [llm-jp-eval](https://github.com/llm-jp/llm-jp-eval) is a useful metric for our [base model](https://huggingface.co/augmxnt/shisa-base-7b-v1), and it was extremely useful during our tuning process for initial validations, as our fine-tune training includes a percentage of the benchmark train splits, we provide these llm-jp-eval results primarily as a point of interest: | AVR | MC | NLI | QA | RC | |-------|-------|-------|-------|-------| | 0.7480| 0.8900| 0.8040| 0.4153| 0.8825| *(We run a [slightly modified llm-jp-eval](https://github.com/llm-jp/llm-jp-eval/compare/main...AUGMXNT:llm-jp-eval:main) to support testing of Qwen and to emit a `bos_token` if available)* For our final model, since it's customary to include benchmarks, we've used Stability AI Japan's [Japanese MT-Bench](https://github.com/Stability-AI/FastChat) as a more representative test of our model's capabilities.
For [our JA MT-Bench testing](https://github.com/Stability-AI/FastChat/compare/jp-stable...AUGMXNT:FastChat:jp-stable) we use a Japanese prompt ("あなたは役立つアシスタントです。") as well as `--num-choices 4` in an effort to reduce sampling variability, however we've still observed regular 0.5+ point (and sometimes even greater swings) between generations, as well as issues with default prompts and parameters when testing, so again, we'd urge caution in over-interpreting these scores and treating them as more of a probabilistic directional indicator, rather than a definitive score or ranking: | Benchmark | Score | | ----------- | ----- | | JA MT-Bench | 5.02 | | MT-Bench | 5.71 | There is an [MT-Bench Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard), but as JA MT-Bench is still under development, for convenience, here is a comparison of the JA MT-Bench scores of some other models (our scores were rated by `gpt-4-0613`): | Model | Score | | ------------------------------------------------- | ---- | | gpt-4-0613 | 9.40 | | gpt-4-1106-preview | 9.17 | | gpt-3.5-turbo* | 8.41 | | Qwen-14B-Chat | 7.47 | | **shisa-7b-v1** | **5.02** | | ELYZA-japanese-Llama-2-7b-fast-instruct* | 4.86 | | ja-stablelm-instruct-gamma-7b* | 4.01 | | japanese-stablelm-instruct-alpha-7b* | 2.74 | | Mistral-7B-OpenOrca-ja* | 2.23 | | youri-7b-chat* | 2.00 | | Mistral-7B-Instruct-v0.1* | 1.78 | | llm-jp-13b-instruct-full-jaster-dolly-oasst-v1.0* | 1.31 | *(Marked JA MT-Bench results in this section are [sourced from shi3z](https://note.com/shi3zblog/n/n6b2ac5874021))* ## Limitations Although our model demonstrates a reasonably high level of Japanese fluency, as a 7B parameter model, it is prone to higher hallucination rates and less effective instruction following and reasoning than larger-class models. Also, it still does not have complete mastery of the Japanese language and a native speaker will spot occasional mistakes like some non-idiomatic/awkward phrasing, improper tenses/speech levels, etc. We've also noticed a small amount of language leakage, likely largely attributable to our tokenizer expansion. These may be fixable with sampler settings like [Min P](https://www.reddit.com/r/LocalLLaMA/comments/17vonjo/your_settings_are_probably_hurting_your_model_why/)) or additional targeted training, and we plan on doing additional work on automated detection/sampler sweeps in the future. One interesting observation is, based on our data collection, we found that as we iterated, the DPO process significantly exacerbated this issue, but also that our DPO models still had significantly higher human preference rates, so there was a bit of a trade-off in our choice of final tune. While we believe that training larger models can improve performance using our existing approach and dataset, there are also many improvements we'd like to make for future models. We believe there is quite a bit of low hanging fruit for improving performance with even more training efficiency largely through improving the quality and construction of datasets. 
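The three-step fine-tuning recipe above ends with a DPOTrainer stage over prompt/chosen/rejected preference pairs; the actual training scripts live in the Shisa repository linked above. Purely as a hedged illustration of what such a stage can look like with the trl library of that period (the hyperparameters, batch sizes, and the assumption that the DPO dataset exposes `prompt`/`chosen`/`rejected` columns are placeholders, not the authors' settings):

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base = "augmxnt/shisa-7b-v1"  # in practice this would be the stage-1/2 SFT checkpoint

tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)
ref_model = AutoModelForCausalLM.from_pretrained(base)  # frozen reference policy

# Assumed to provide "prompt", "chosen" and "rejected" text columns.
train_dataset = load_dataset("augmxnt/shisa-en-ja-dpo-v1", split="train")

args = TrainingArguments(
    output_dir="shisa-dpo",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    learning_rate=5e-7,
    num_train_epochs=1,
    logging_steps=10,
    bf16=True,
)

trainer = DPOTrainer(
    model,
    ref_model,
    args=args,
    beta=0.1,                    # placeholder preference/KL trade-off
    train_dataset=train_dataset,
    tokenizer=tokenizer,
    max_length=1024,
    max_prompt_length=512,
)
trainer.train()
```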
## Usage Sample code: ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer model_name = "augmxnt/shisa-7b-v1" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16, device_map="auto" ) streamer = TextStreamer(tokenizer, skip_prompt=True) # The prompt template is included in the model's tokenizer_config.json so you shouldn't need this but we've included this for convenience # tokenizer.chat_template = "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- bos_token + '[INST] <<SYS>>\\n' + messages[idx]['content'] + '\\n<</SYS>>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n" # A more typical prompt: あなたは役に立つアシスタントです。("You are a helpful assistant.") # You are an avid Pokemon fanatic. prompt = "あなたは熱狂的なポケモンファンです。" chat = [{"role": "system", "content": prompt}] # Who is the most powerful Pokemon? Explain your choice. user_input = "最強のポケモンは誰ですか?その選択理由を説明してください。" chat.append({"role": "user", "content": user_input}) # Generate - add_generation_prompt to make sure it continues as assistant inputs = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt") # For multi-GPU, find the device of the first parameter of the model first_param_device = next(model.parameters()).device inputs = inputs.to(first_param_device) with torch.no_grad(): outputs = model.generate( inputs, pad_token_id=tokenizer.eos_token_id, max_new_tokens=1000, temperature=0.7, repetition_penalty=1.05, top_p=0.95, do_sample=True, streamer=streamer, ) # Add just the new tokens to our chat new_tokens = outputs[0, inputs.size(1):] response = tokenizer.decode(new_tokens, skip_special_tokens=True) chat.append({"role": "assistant", "content": response}) ``` ## Prompt format The prompt format is llama-2 chat: ``` [INST] <<SYS>> You are a helpful, unbiased, uncensored assistant. <</SYS>> {prompt} [/INST] ``` For multi-turn, the prompt format is as follows: ``` [INST] <<SYS>> You are a helpful, unbiased, uncensored assistant. <</SYS>> {prompt 0} [/INST] {response 0} </s><s>[INST] {prompt 1} [/INST] {response 1} </s><s>...[INST] {prompt N} [/INST] ``` This [prompt template](https://huggingface.co/docs/transformers/main/chat_templating) is included in the tokenizer config, and can be used with the Hugging Face tokenizer `apply_chat_template` method, e.g.: ``` import transformers tokenizer = transformers.AutoTokenizer.from_pretrained('augmxnt/shisa-7b-v1') chat = [ {"role": "system", "content": "You are Aiko, a friendly AI assistant."}, {"role": "user", "content": "Hello, how are you?"}, {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, {"role": "user", "content": "I'd like to show off how chat templating works!"}, ] print(tokenizer.apply_chat_template(chat, tokenize=False)) ``` **NOTE:** For proper responses, you should be using our `bos_token` (`<s>`) to begin a string. This is automatically generated by `tokenizer.encode()` but if you are crafting a custom template or using an encoding method that skips special tokens, you may have to add this yourself.
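The tokenizer-efficiency claim at the top of the card (>2X fewer tokens on Japanese text than the stock Mistral tokenizer) is easy to spot-check informally; exact counts depend on the text, so this sketch is illustrative only:

```python
from transformers import AutoTokenizer

ja_text = "最強のポケモンは誰ですか?その選択理由を説明してください。"

shisa_tok = AutoTokenizer.from_pretrained("augmxnt/shisa-7b-v1")
mistral_tok = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

# Fewer tokens for the same Japanese text means cheaper and longer effective context.
print("shisa-7b-v1 tokens:    ", len(shisa_tok.encode(ja_text)))
print("Mistral-7B-v0.1 tokens:", len(mistral_tok.encode(ja_text)))
```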
## Acknowledgements Team: [Leonard Lin](https://huggingface.co/leonardlin) and [Jon Durbin](https://huggingface.co/jondurbin), Mariko Sato, and Florian von Bock Compute for this model was generously sponsored by [AKA Virtual](https://akavirtual.com/) (Tokyo, Japan). Thanks to the [LLM-jp](https://llm-jp.nii.ac.jp/), [Stability AI Japan](https://ja.stability.ai/), and [LMSYS](https://lmsys.org/) teams for their work on llm-jp-eval, Japanese MT-Bench, MT-Bench. Also, thanks to all the volunteers that provided invaluable human preference testing! We are actively looking for additional compute as we train better and larger models for this project. Please drop us a line at: *compute at augmxnt dot com* --- *(GPT-4によって非常に軽微な編集を加えて翻訳されました)* # シーサー7B **シーサー7B**(`shisa-7b-v1`)は、合成データ駆動のアプローチを用いて、優れた日本語と英語能力を両立することを目指すバイリンガル(日本語/英語)汎用チャットモデルです。 このモデルは、[Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1)を基に、Mistralのオリジナルのトークナイザーよりも日本語において2倍以上効率的な、日本語最適化拡張トークナイザーをカスタムして作成されました。ベースモデルは、主に日本語のトークンを追加で80億ものトレーニングを行いました。そして、その後、[airoboros-3.1](https://huggingface.co/datasets/jondurbin/airoboros-3.1)の拡張された機械翻訳版、[ultrafeedback_binarized](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)からの最高得点項目のセット、そして新たに生成された[airoboros](https://github.com/jondurbin/airoboros)のデータを直接目標言語で微調整しています。 商用を含むあらゆる目的で使用可能な寛容なApache 2.0ライセンスの下で、ベースモデル、データセット、およびパイプラインコードも公開しています: * [shisa-base-7b-v1](https://huggingface.co/augmxnt/shisa-base-7b-v1) - 拡張トークナイザーと追加の日本語プレトレーニングを備えた当方のベースモデル * [shisa-pretrain-en-ja-v1](https://huggingface.co/datasets/augmxnt/shisa-pretrain-en-ja-v1) - 当方のプレトレーニングデータセット * [ultra-orca-boros-en-ja](https://huggingface.co/datasets/jondurbin/ultra-orca-boros-en-ja) - 合成生成、機械翻訳、プログラムによる検証によるJA/EN微調整データセット * [shisa-en-ja-dpo-v1](https://huggingface.co/datasets/augmxnt/shisa-en-ja-dpo-v1) - ultrafeedbackからのDPOペアの小さなサブセットと、選択された値としてGPT-4生成項目を使用した日本語のDPOペア、そして初期の7ビリオンモデルの出力を却下した値 * [シーサーリポジトリ](https://github.com/AUGMXNT/shisa) - 翻訳、データセットの生成、トレーニング、評価コードなどが含まれています さらに、アブレーション結果、テスト方法論、主要な調査結果など、プロセスの詳細や拡張ライトアップを公開する過程にあります。これは[当プロジェクトwiki](https://github.com/AUGMXNT/shisa/wiki)で研究者に興味深い情報として提供されています。 ## 微調整 最初の直感は、最良の[既存の公開日本語トレーニングセット](https://github.com/AUGMXNT/shisa/wiki/A-Review-of-Public-Japanese-Training-Sets)を使用して、それらを組み入れることでより強力な日本語モデルを作成できるかどうかを見ることでした。しかし、初期の検討とテストの後、自らの合成データセットの翻訳/生成にだけ焦点を当てることで、短期間のトレーニングで優れた結果を得ることができると結論付けました。 私たちは複数の翻訳ツールを比較し、手動でレビューを行った結果、`gpt-4`がほぼ常に最高品質の翻訳を提供しながら、Googleの `text-bison-32k`は品質、コスト、スループットのバランスが良いと判断しました。複数の繰り返しを経て、無効な翻訳のフラグ付けとフィルタリング、必要に応じた再翻訳とバックフィルのための追加のアルゴリズムを含むように、翻訳アプローチを洗練させました。 また、このプロジェクトを[NEFTune](https://arxiv.org/abs/2310.05914)と[DPO](https://arxiv.org/abs/2305.18290)トレーニングを取り入れるなど、新しい技術を適用する機会ともなりました。 v1リリースのために、私たちは大量の人間の嗜好テスト(数千の生成と複数ラウンドのペアワイズ比較)に基づいてリリース候補から選択しました。私たちは、勝ち/負け/引き分けと、[BTLモデル](https://datascience.oneoffcoder.com/btl-model.html)(iLSR)を使用して[choix](https://github.com/lucasmaystre/choix)で結果を分析しました。 最良の候補モデルは、3ステップのプロセスで微調整されました: 1. 最初に、モデルは`ultra-orca-boros-en-ja`とSlimOrca ([WandB Log](https://wandb.ai/jondurbin/shisa-7b-v1/runs/k8pfog9d/overview))で微調整されました。 2. 次に、日本語のパフォーマンスを向上させるためにultra-orca-boros-en-jaの一部を使用して1回追加のエポックを追加しました(最初の段階のSlimOrcaは主に英語)([WandB Log](https://wandb.ai/jondurbin/shisa-mega-7b-v1.1/runs/dopsr0o7/overview))。 3. 
最後に、モデルは小規模のultrafeedback(英語)と自身のJA DPOデータセットに対してDPOTrainerを使用して調整されました。ここで使用したJA DPOデータセットはgpt-4の出力を選出された値とし、ステージ1の予備モデルの出力を却下した値とします。([WandDB Log](https://wandb.ai/jondurbin/shisa-mega-dpo-7b-v1.1) ) 私たちのトレーニングプロセス中に、何故一部の既存の日本語モデルが、追加の日本語トレーニングがないモデルに対してもパフォーマンスが低いのか、といういくつかの重要な洞察を得ることができました。この分析結果を共有すれば、他のチームが日本語モデルを開発する際の参考になると思います。 さらに探求する必要はありますが、実験的な検証として、微調整セットのバージョンを既存のベースモデル("Gamma 7B")に適用し、初期のJA MT-Bench結果が示すように、私たちのチューニングアプローチで機能性のパフォーマンスを劇的に向上させることができました: | モデル | スコア | | ------------------------------ | ----- | | shisa-gamma-7b-allsources-v0.4 | 5.65 | | ja-stablelm-instruct-gamma-7b* | 4.01 | ## パフォーマンス トレーニング全体を通じて、各モデルについて人間による評価を行い、モデルのパフォーマンスを相互に検証しました。現在、モデル間の手動での比較テストを大規模に行っています。私たちの目指すところは、ツールをさらに発展させることでこのデータ収集を公開して拡張することです。詳細と更新情報については、[プロジェクトwiki](https://github.com/AUGMXNT/shisa/wiki) をご覧ください。 我々は、[llm-jp-eval](https://github.com/llm-jp/llm-jp-eval)は、私たちの[基本モデル](https://huggingface.co/augmxnt/shisa-base-7b-v1)の有用な指標であり、初期の検証のための微調整プロセス中に非常に役立つと考えていますが、微調整トレーニングにはベンチマークのトレイン分割の一部が含まれているため、私たちが提供するllm-jp-evalの結果は主に興味深いポイントとして提供しています: | AVR | MC | NLI | QA | RC | |-------|-------|-------|-------|-------| | 0.7480| 0.8900| 0.8040| 0.4153| 0.8825| *(Qwenのテストをサポートし、可能であれば`bos_token`を発行するために、[わずかに修正したllm-jp-eval](https://github.com/llm-jp/llm-jp-eval/compare/main...AUGMXNT:llm-jp-eval:main) を実行しています)* 最終モデルについては、ベンチマークを含めるのが一般的なため、私たちのモデルの能力をより代表的にテストするために、Stability AI Japanの[Japanese MT-Bench](https://github.com/Stability-AI/FastChat)を使用しました。[私たちのJA MT-Bench テスト](https://github.com/Stability-AI/FastChat/compare/jp-stable...AUGMXNT:FastChat:jp-stable)では、サンプリング変動を減らすために、日本語のプロンプト("あなたは役立つアシスタントです。")と `--num-choices 4`を使用していますが、生成間で0.5+点(時にはそれ以上の変動)を頻繁に観察し、テスト時のデフォルトのプロンプトとパラメータに問題があったという経験から、これらのスコアを過度に解釈することには注意が必要で、これらを確定的なスコアやランキングではなく、より確率的な方向指標として扱うことをお勧めします: | ベンチマーク | スコア | | ----------- | ----- | | JA MT-Bench | 5.02 | | MT-Bench | 5.71 | [MT-Bench Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard)がありますが、JA MT-Benchはまだ開発中であるため、便宜上、他のモデルのJA MT-Benchスコアとの比較を示します(私たちのスコアは`gpt-4-0613`によって評価されました): | モデル | スコア | | ------------------------------------------------- | ---- | | gpt-4-0613 | 9.40 | | gpt-4-1106-preview | 9.17 | | gpt-3.5-turbo* | 8.41 | | Qwen-14B-Chat | 7.47 | | **shisa-7b-v1** | **5.02** | | ELYZA-japanese-Llama-2-7b-fast-instruct* | 4.86 | | ja-stablelm-instruct-gamma-7b* | 4.01 | | japanese-stablelm-instruct-alpha-7b* | 2.74 | | Mistral-7B-OpenOrca-ja* | 2.23 | | youri-7b-chat* | 2.00 | | Mistral-7B-Instruct-v0.1* | 1.78 | | llm-jp-13b-instruct-full-jaster-dolly-oasst-v1.0* | 1.31 | *(このセクションでマークされたJA MT-Benchの結果は[shi3zから引用](https://note.com/shi3zblog/n/n6b2ac5874021)しました)* ## 制限事項 当モデルは十分な日本語の流暢さを示していますが、7Bパラメータのモデルとしては、より大きなクラスのモデルに比べて幻覚率が高く、指示の追跡や推論が効果的でない傾向があります。また、日本語の完全な習得はまだ達しておらず、ネイティブスピーカーはたまに非慣用的/違和感のある表現や不適切な時制/話し言葉のレベルなどの間違いを見つけることがあります。 また、私たちのトークナイザーの拡張に大いに起因する可能性が高いが、わずかな言語リークを確認しています。これらは[Min P](https://www.reddit.com/r/LocalLLaMA/comments/17vonjo/your_settings_are_probably_hurting_your_model_why/)などのサンプラー設定や追加のターゲット指向型トレーニングで修正可能な可能性があり、今後、自動検出/サンプラーのスウィープについて追加の作業を行う予定です。興味深い観察としては、私たちのデータ収集に基づいて、DPOプロセスがこの問題を大幅に悪化させることがわかりましたが、それでもDPOモデルは人間の好み率が大幅に高かったため、最終的な微調整の選択には一定のトレードオフがありました。 現存するアプローチとデータセットを使用して、大規模なモデルのトレーニングがパフォーマンスを向上させると信じていますが、今後のモデル向けに行いたい改良も多くあります。私たちは、データセットの品質と構築を改善することで、さらなるトレーニング効率を通じたパフォーマンス向上にはまだ相当に取り組む余地があると考えています。 ## 使用法 サンプルコード: ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer model_name = "augmxnt/shisa-7b-v1" tokenizer = 
AutoTokenizer.from_pretrained(model_name, use_fast=True) model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16, device_map="auto" ) streamer = TextStreamer(tokenizer, skip_prompt=True) # プロンプトテンプレートはモデルのtokenizer_config.jsonに含まれているので、これは必要ないはずですが、便宜上こちらにも掲載しています # tokenizer.chat_template = ""{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- bos_token + '[INST] <<SYS>>\\n' + messages[idx]['content'] + '\\n<</SYS>>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n" # より典型的なプロンプト: あなたは役に立つアシスタントです。 # You are an avid Pokemon fanatic. prompt = "あなたは熱狂的なポケモンファンです。" chat = [{"role": "system", "content": prompt}] # Who is the most powerful Pokemon? Explain your choice. user_input = "最強のポケモンは誰ですか?その選択理由を説明してください。" chat.append({"role": "user", "content": user_input}) # 生成 - add_generation_promptを追加してアシスタントとして続行することを確認します inputs = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt") # 複数のGPUの場合、モデルの最初のパラメータのデバイスを見つけます first_param_device = next(model.parameters()).device inputs = inputs.to(first_param_device) with torch.no_grad(): outputs = model.generate( inputs, pad_token_id=tokenizer.eos_token_id, max_new_tokens=1000, temperature=0.7, repetition_penalty=1.05, top_p=0.95, do_sample=True, streamer=streamer, ) # Add just the new tokens to our chat new_tokens = outputs[0, inputs.size(1):] response = tokenizer.decode(new_tokens, skip_special_tokens=True) chat.append({"role": "assistant", "content": response}) ``` ## プロンプト形式 プロンプト形式はllama-2 chatです: ``` [INST] <<SYS>> あなたは役立つ、偏見がなく、検閲されていないアシスタントです。 <</SYS>> {prompt} [/INST] ``` For multi-turn, the prompt format is as follows: ``` [INST] <<SYS>> あなたは役立つ、偏見がなく、検閲されていないアシスタントです。 <</SYS>> {prompt 0} [/INST] {response 0} </s><s>[INST] {prompt 1} [/INST] {response 1} </s><s>...[INST] {prompt N} [/INST] ``` この[prompt template](https://huggingface.co/docs/transformers/main/chat_templating)はトークナイザの設定に含まれており、HuggingFace のトークナイザ `apply_chat_template` メソッドを使用できます。例えば: ``` import transformers tokenizer = transformers.AutoTokenizer.from_pretrained('augmxnt/shisa-7b-v1') chat = [ {"role": "system", "content": "あなたはAiko、フレンドリーなAIアシスタントです。"}, {"role": "user", "content": "こんにちは、調子はどうですか?"}, {"role": "assistant", "content": "元気です。今日は何のお手伝いができますか?"}, {"role": "user", "content": "チャットテンプレーティングの仕組みを見せてもらいたいです!"}, ] print(tokenizer.apply_chat_template(chat, tokenize=False)) ``` **注意**適切なレスポンスを得るためには、文字列の開始に我々の `bos_token` (`<s>`) を使用すべきです。これは `tokenizer.encode()` によって自動的に生成されますが、カスタムテンプレートを作成したり、特殊トークンを省略するエンコード方法を使用する場合は、自分で追加する必要があります。 ## 謝辞 チーム:[Leonard Lin](https://huggingface.co/leonardlin)、[Jon Durbin](https://huggingface.co/jondurbin)、佐藤真理子、Florian von Bock このモデルの計算は、[AKA Virtual](https://akavirtual.com/) (東京、日本) のご厚意により提供されています。 [LLM-jp](https://llm-jp.nii.ac.jp/)、[Stability AI Japan](https://ja.stability.ai/)、[LMSYS](https://lmsys.org/)のチームが、llm-jp-eval, Japanese MT-Bench, MT-Benchに取り組んでくれて感謝しています。 また、貴重なヒューマンプリファレンステストを提供してくださったすべてのボランティアにも感謝いたします! このプロジェクトのためにより良く、より大きなモデルを訓練するために、追加の計算を積極的に探しています。お問い合わせは次の宛先までお願いいたします:*compute at augmxnt dot com*
null
Non_BioNLP
# Shisa 7B ![Shi-chan and Sa-chan/シーちゃんとサーちゃん](https://huggingface.co/augmxnt/shisa-7b-v1/resolve/main/shisa.webp) **Shisa 7B** (`shisa-7b-v1`) is a bilingual Japanese and English (JA/EN) general-purpose chat model that aims to achieve strong Japanese language performance while retaining robust English capabilities, using a synthetic-data driven approach. This model is based on [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) with a custom JA-optimized extended tokenizer that is >2X more efficient in Japanese than Mistral's original tokenizer. The base model was pre-trained for an additional 8B primarily Japanese tokens. It was then subsequently fine-tuned with an expanded, machine-translated version of [airoboros-3.1](https://huggingface.co/datasets/jondurbin/airoboros-3.1), a set of the highest-scoring items from [ultrafeedback_binarized](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized), and additional freshly generated [airoboros](https://github.com/jondurbin/airoboros) data directly to the target languages. We also release our base model, datasets, and pipeline code under a permissive Apache 2.0 license which can be used for any purpose, commercial or otherwise: * [shisa-base-7b-v1](https://huggingface.co/augmxnt/shisa-base-7b-v1) - our base model w/ an extended tokenizer and additional JA pre-training * [shisa-pretrain-en-ja-v1](https://huggingface.co/datasets/augmxnt/shisa-pretrain-en-ja-v1) - our pre-training data set * [ultra-orca-boros-en-ja](https://huggingface.co/datasets/augmxnt/ultra-orca-boros-en-ja-v1) - a synthetically generated, machine-translated, programmatically validated JA/EN fine-tuning dataset * [shisa-en-ja-dpo-v1](https://huggingface.co/datasets/augmxnt/shisa-en-ja-dpo-v1) - Small subset of DPO pairs from ultrafeedback, along with JA DPO pairs using GPT-4 generated items as the chosen value, and outputs from our preliminary 7b model as the rejected values * [Shisa repository](https://github.com/AUGMXNT/shisa) - this includes our translation, dataset generation, training, and evaluation code Moreover, we are in the process of publishing extended writeups and more details of our process, including ablation results, testing methodology, and key findings [on our project wiki](https://github.com/AUGMXNT/shisa/wiki) that may be of interest to fellow researchers. ## Fine-Tuning Our original intuition was to see if we could create a stronger Japanese model using the best [existing public JA training sets](https://github.com/AUGMXNT/shisa/wiki/A-Review-of-Public-Japanese-Training-Sets) and incorporating them. After initial review and testing, however, we decided that focusing solely on translation/generation of our own synthetic datasets could yield superior results with less training. We compared multiple translation tools and, via manual review, judged that while `gpt-4` almost always delivered the highest quality translations, Google's `text-bison-32k` was a good balance of quality, cost and throughput. Over various iterations, we refined our translation approach to include some additional algorithms for flagging and filtering invalid translations, re-translating and backfilling as necessary. We also took this project as an opportunity to apply some newer techniques such as incorporating [NEFTune](https://arxiv.org/abs/2310.05914) and [DPO](https://arxiv.org/abs/2305.18290) training. 
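The exact training scripts live in the [Shisa repository](https://github.com/AUGMXNT/shisa), and the concrete fine-tuning recipe is described below. Purely as an illustration of how these two techniques are commonly wired together with the TRL library (APIs differ between `trl` versions, and the dataset handling and hyperparameters here are placeholders rather than the settings used for `shisa-7b-v1`), a minimal sketch might look like:

```python
# Illustrative sketch only -- not the actual Shisa training code.
# Assumes trl/transformers versions where SFTTrainer and DPOTrainer accept these arguments.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import SFTTrainer, DPOTrainer

base = "augmxnt/shisa-base-7b-v1"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base, torch_dtype="auto")

# Supervised fine-tuning with NEFTune noise added to the embedding activations
sft_data = load_dataset("augmxnt/ultra-orca-boros-en-ja-v1", split="train")
sft_trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=sft_data,
    dataset_text_field="text",   # placeholder; the real dataset uses a conversation format
    neftune_noise_alpha=5,       # NEFTune: uniform noise injected into embeddings during training
    args=TrainingArguments(output_dir="sft-out", num_train_epochs=1, per_device_train_batch_size=4),
)
sft_trainer.train()

# Preference tuning with DPO on prompt/chosen/rejected pairs
dpo_data = load_dataset("augmxnt/shisa-en-ja-dpo-v1", split="train")
dpo_trainer = DPOTrainer(
    model=sft_trainer.model,
    ref_model=None,              # trl builds a frozen reference copy when None
    beta=0.1,                    # strength of the KL constraint toward the reference model
    train_dataset=dpo_data,
    tokenizer=tokenizer,
    args=TrainingArguments(
        output_dir="dpo-out",
        num_train_epochs=1,
        per_device_train_batch_size=2,
        remove_unused_columns=False,  # keep the chosen/rejected columns for DPO
    ),
)
dpo_trainer.train()
```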
For our v1 release, we picked from our release candidates based on a significant amount of human preference testing (thousands of generations and multiple rounds of pairwise comparisons). We analyzed our results with both win/loss/draw and [BTL modeling](https://datascience.oneoffcoder.com/btl-model.html) (iLSR) using [choix](https://github.com/lucasmaystre/choix). The best candidate model was fine-tuned in a 3-step process: 1. First, the model was fine-tuned on `ultra-orca-boros-en-ja` and SlimOrca ([WandB Log](https://wandb.ai/jondurbin/shisa-7b-v1/runs/k8pfog9d/overview)) 2. Next, we performed one additional epoch using only a subset of Japanese ultra-orca-boros-en-ja items to enhance JA performance (as SlimOrca from the first step is mostly EN) ([WandB Log](https://wandb.ai/jondurbin/shisa-mega-7b-v1.1/runs/dopsr0o7/overview)) 3. Finally, the model was tuned using a DPOTrainer on a small subset of ultrafeedback (EN) and our own JA DPO dataset which uses gpt-4 outputs as the chosen values and outputs from stage 1's prelim model as rejected values. ([WandB Log](https://wandb.ai/jondurbin/shisa-mega-dpo-7b-v1.1)) During our training process, we also gained some key insights on [why some existing Japanese models seem to underperform](https://github.com/AUGMXNT/shisa/wiki/A-Review-of-Public-Japanese-Training-Sets#analysis) even versus models that have no additional JA training, and we hope that sharing this analysis will be useful to other teams developing Japanese language models. While we need to explore this further, as an experimental validation, we applied a version of our fine-tuning set onto an existing base model ("Gamma 7B") and the initial JA MT-Bench results suggest that we can drastically increase functional performance with our tuning approach: | Model | Score | | ------------------------------ | ----- | | shisa-gamma-7b-allsources-v0.4 | 5.65 | | ja-stablelm-instruct-gamma-7b* | 4.01 | ## Performance Throughout our training, we did extensive human evaluation for each model to cross-validate our model performance, and we are currently conducting ongoing larger scale manual head-to-head testing between models. Our intention is to open up and scale this data collection as we further develop our tools. For more information and updates, please see our [project wiki](https://github.com/AUGMXNT/shisa/wiki). While we believe [llm-jp-eval](https://github.com/llm-jp/llm-jp-eval) is a useful metric for our [base model](https://huggingface.co/augmxnt/shisa-base-7b-v1), and it was extremely useful during our tuning process for initial validations, as our fine-tune training includes a percentage of the benchmark train splits, we provide these llm-jp-eval results primarily as a point of interest: | AVR | MC | NLI | QA | RC | |-------|-------|-------|-------|-------| | 0.7480| 0.8900| 0.8040| 0.4153| 0.8825| *(We run a [slightly modified llm-jp-eval](https://github.com/llm-jp/llm-jp-eval/compare/main...AUGMXNT:llm-jp-eval:main) to support testing of Qwen and to emit a `bos_token` if available)* For our final model, since it's customary to include benchmarks, we've used Stability AI Japan's [Japanese MT-Bench](https://github.com/Stability-AI/FastChat) as a more representative test of our model's capabilities.
For [our JA MT-Bench testing](https://github.com/Stability-AI/FastChat/compare/jp-stable...AUGMXNT:FastChat:jp-stable) we use a Japanese prompt ("あなたは役立つアシスタントです。") as well as `--num-choices 4` in an effort to reduce sampling variability, however we've still observed regular 0.5+ point (and sometimes even greater swings) between generations, as well as issues with default prompts and parameters when testing, so again, we'd urge caution in over-interpreting these scores and treating them as more of a probabilistic directional indicator, rather than a definitive score or ranking: | Benchmark | Score | | ----------- | ----- | | JA MT-Bench | 5.02 | | MT-Bench | 5.71 | There is an [MT-Bench Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard), but as JA MT-Bench is still under development, for convenience, here is a comparison of the JA MT-Bench scores of some other models (our scores were rated by `gpt-4-0613`): | Model | Score | | ------------------------------------------------- | ---- | | gpt-4-0613 | 9.40 | | gpt-4-1106-preview | 9.17 | | gpt-3.5-turbo* | 8.41 | | Qwen-14B-Chat | 7.47 | | **shisa-7b-v1** | **5.02** | | ELYZA-japanese-Llama-2-7b-fast-instruct* | 4.86 | | ja-stablelm-instruct-gamma-7b* | 4.01 | | japanese-stablelm-instruct-alpha-7b* | 2.74 | | Mistral-7B-OpenOrca-ja* | 2.23 | | youri-7b-chat* | 2.00 | | Mistral-7B-Instruct-v0.1* | 1.78 | | llm-jp-13b-instruct-full-jaster-dolly-oasst-v1.0* | 1.31 | *(Marked JA MT-Bench results in this section are [sourced from shi3z](https://note.com/shi3zblog/n/n6b2ac5874021))* ## Limitations Although our model demonstrates a reasonably high level of Japanese fluency, as a 7B parameter model, it is prone to higher hallucination rates and less effective instruction following and reasoning than larger-class models. Also, it still does not have complete mastery of the Japanese language and a native speaker will spot occasional mistakes like some non-idiomatic/awkward phrasing, improper tenses/speech levels, etc. We've also noticed a small amount of language leakage, likely largely attributable to our tokenizer expansion. These may be fixable with sampler settings like [Min P](https://www.reddit.com/r/LocalLLaMA/comments/17vonjo/your_settings_are_probably_hurting_your_model_why/)) or additional targeted training, and we plan on doing additional work on automated detection/sampler sweeps in the future. One interesting observation is, based on our data collection, we found that as we iterated, the DPO process significantly exacerbated this issue, but also that our DPO models still had significantly higher human preference rates, so there was a bit of a trade-off in our choice of final tune. While we believe that training larger models can improve performance using our existing approach and dataset, there are also many improvements we'd like to make for future models. We believe there is quite a bit of low hanging fruit for improving performance with even more training efficiency largely through improving the quality and construction of datasets. 
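Returning to the sampler point above: recent versions of transformers expose Min P directly as a `min_p` argument to `generate`, so this mitigation can be tried without custom sampling code. A minimal, self-contained sketch (assuming a transformers build with `min_p` support; the 0.05 threshold is an illustrative placeholder, not a tuned recommendation):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "augmxnt/shisa-7b-v1"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16,
    device_map="auto",
)

chat = [
    {"role": "system", "content": "あなたは役立つアシスタントです。"},
    {"role": "user", "content": "日本で一番高い山は何ですか?"},
]
inputs = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt")
inputs = inputs.to(next(model.parameters()).device)

with torch.no_grad():
    outputs = model.generate(
        inputs,
        pad_token_id=tokenizer.eos_token_id,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        min_p=0.05,  # prune tokens whose probability is below 5% of the most likely token's
    )
print(tokenizer.decode(outputs[0, inputs.size(1):], skip_special_tokens=True))
```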
## Usage Sample code: ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer model_name = "augmxnt/shisa-7b-v1" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16, device_map="auto" ) streamer = TextStreamer(tokenizer, skip_prompt=True) # The prompt template is included in the model's tokenizer_config.json so you shouldn't need this but we've included this for convenience # tokenizer.chat_template = ""{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- bos_token + '[INST] <<SYS>>\\n' + messages[idx]['content'] + '\\n<</SYS>>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n" # A more typical prompt: あなたは役に立つアシスタントです。("You are a helpful assistant.") # You are an avid Pokemon fanatic. prompt = "あなたは熱狂的なポケモンファンです。" chat = [{"role": "system", "content": prompt}] # Who is the most powerful Pokemon? Explain your choice. user_input = "最強のポケモンは誰ですか?その選択理由を説明してください。" chat.append({"role": "user", "content": user_input}) # Generate - add_generation_prompt to make sure it continues as assistant inputs = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt") # For multi-GPU, find the device of the first parameter of the model first_param_device = next(model.parameters()).device inputs = inputs.to(first_param_device) with torch.no_grad(): outputs = model.generate( inputs, pad_token_id=tokenizer.eos_token_id, max_new_tokens=1000, temperature=0.7, repetition_penalty=1.05, top_p=0.95, do_sample=True, streamer=streamer, ) # Add just the new tokens to our chat new_tokens = outputs[0, inputs.size(1):] response = tokenizer.decode(new_tokens, skip_special_tokens=True) chat.append({"role": "assistant", "content": response}) ``` ## Prompt format The prompt format is llama-2 chat: ``` [INST] <<SYS>> You are a helpful, unbiased, uncensored assistant. <</SYS>> {prompt} [/INST] ``` For multi-turn, the prompt format is as follows: ``` [INST] <<SYS>> You are a helful, unbiased, uncensored assistant. <</SYS>> {prompt 0} [/INST] {response 0} </s><s>[INST] {prompt 1} [/INST] {response 1} </s><s>...[INST] {prompt N} [/INST] ``` This [prompt template](https://huggingface.co/docs/transformers/main/chat_templating) is included in the tokenizer config, and can use the huggingface tokenizer `apply_chat_template` method, e.g.: ``` import transformers tokenizer = transformers.AutoTokenizer.from_pretrained('augmxnt/shisa-7b-v1') chat = [ {"role": "system", "content": "You are Aiko, a friendly AI assistant."}, {"role": "user", "content": "Hello, how are you?"}, {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, {"role": "user", "content": "I'd like to show off how chat templating works!"}, ] print(tokenizer.apply_chat_template(chat, tokenize=False)) ``` **NOTE:** For proper responses, you should be using our `bos_token` (`<s>`) to begin a string. This is automatically generated by `tokenizer.encode()` but if you are crafting a custom template or using an encoding method that skips special tokens, you may have to add this yourself. 
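A quick way to sanity-check the `bos_token` handling when building prompt strings by hand (illustrative; the printed values assume the tokenizer's default `add_special_tokens` behavior described above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("augmxnt/shisa-7b-v1")

prompt = "[INST] <<SYS>>\nあなたは役立つアシスタントです。\n<</SYS>>\n\nこんにちは [/INST]"

with_special = tokenizer(prompt).input_ids                               # special tokens added
without_special = tokenizer(prompt, add_special_tokens=False).input_ids  # raw encoding only

print(with_special[0] == tokenizer.bos_token_id)     # expected True: bos is prepended automatically
print(without_special[0] == tokenizer.bos_token_id)  # expected False: prepend tokenizer.bos_token yourself
```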
## Acknowledgements Team: [Leonard Lin](https://huggingface.co/leonardlin) and [Jon Durbin](https://huggingface.co/jondurbin), Mariko Sato, and Florian von Bock Compute for this model was generously sponsored by [AKA Virtual](https://akavirtual.com/) (Tokyo, Japan). Thanks to the [LLM-jp](https://llm-jp.nii.ac.jp/), [Stability AI Japan](https://ja.stability.ai/), and [LMSYS](https://lmsys.org/) teams for their work on llm-jp-eval, Japanese MT-Bench, MT-Bench. Also, thanks to all the volunteers that provided invaluable human preference testing! We are actively looking for additional compute as we train better and larger models for this project. Please drop us a line at: *compute at augmxnt dot com* --- *(GPT-4によって非常に軽微な編集を加えて翻訳されました)* # シーサー7B **シーサー7B**(`shisa-7b-v1`)は、合成データ駆動のアプローチを用いて、優れた日本語と英語能力を両立することを目指すバイリンガル(日本語/英語)汎用チャットモデルです。 このモデルは、[Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1)を基に、Mistralのオリジナルのトークナイザーよりも日本語において2倍以上効率的な、日本語最適化拡張トークナイザーをカスタムして作成されました。ベースモデルは、主に日本語のトークンを追加で80億ものトレーニングを行いました。そして、その後、[airoboros-3.1](https://huggingface.co/datasets/jondurbin/airoboros-3.1)の拡張された機械翻訳版、[ultrafeedback_binarized](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)からの最高得点項目のセット、そして新たに生成された[airoboros](https://github.com/jondurbin/airoboros)のデータを直接目標言語で微調整しています。 商用を含むあらゆる目的で使用可能な寛容なApache 2.0ライセンスの下で、ベースモデル、データセット、およびパイプラインコードも公開しています: * [shisa-base-7b-v1](https://huggingface.co/augmxnt/shisa-base-7b-v1) - 拡張トークナイザーと追加の日本語プレトレーニングを備えた当方のベースモデル * [shisa-pretrain-en-ja-v1](https://huggingface.co/datasets/augmxnt/shisa-pretrain-en-ja-v1) - 当方のプレトレーニングデータセット * [ultra-orca-boros-en-ja](https://huggingface.co/datasets/jondurbin/ultra-orca-boros-en-ja) - 合成生成、機械翻訳、プログラムによる検証によるJA/EN微調整データセット * [shisa-en-ja-dpo-v1](https://huggingface.co/datasets/augmxnt/shisa-en-ja-dpo-v1) - ultrafeedbackからのDPOペアの小さなサブセットと、選択された値としてGPT-4生成項目を使用した日本語のDPOペア、そして初期の7ビリオンモデルの出力を却下した値 * [シーサーリポジトリ](https://github.com/AUGMXNT/shisa) - 翻訳、データセットの生成、トレーニング、評価コードなどが含まれています さらに、アブレーション結果、テスト方法論、主要な調査結果など、プロセスの詳細や拡張ライトアップを公開する過程にあります。これは[当プロジェクトwiki](https://github.com/AUGMXNT/shisa/wiki)で研究者に興味深い情報として提供されています。 ## 微調整 最初の直感は、最良の[既存の公開日本語トレーニングセット](https://github.com/AUGMXNT/shisa/wiki/A-Review-of-Public-Japanese-Training-Sets)を使用して、それらを組み入れることでより強力な日本語モデルを作成できるかどうかを見ることでした。しかし、初期の検討とテストの後、自らの合成データセットの翻訳/生成にだけ焦点を当てることで、短期間のトレーニングで優れた結果を得ることができると結論付けました。 私たちは複数の翻訳ツールを比較し、手動でレビューを行った結果、`gpt-4`がほぼ常に最高品質の翻訳を提供しながら、Googleの `text-bison-32k`は品質、コスト、スループットのバランスが良いと判断しました。複数の繰り返しを経て、無効な翻訳のフラグ付けとフィルタリング、必要に応じた再翻訳とバックフィルのための追加のアルゴリズムを含むように、翻訳アプローチを洗練させました。 また、このプロジェクトを[NEFTune](https://arxiv.org/abs/2310.05914)と[DPO](https://arxiv.org/abs/2305.18290)トレーニングを取り入れるなど、新しい技術を適用する機会ともなりました。 v1リリースのために、私たちは大量の人間の嗜好テスト(数千の生成と複数ラウンドのペアワイズ比較)に基づいてリリース候補から選択しました。私たちは、勝ち/負け/引き分けと、[BTLモデル](https://datascience.oneoffcoder.com/btl-model.html)(iLSR)を使用して[choix](https://github.com/lucasmaystre/choix)で結果を分析しました。 最良の候補モデルは、3ステップのプロセスで微調整されました: 1. 最初に、モデルは`ultra-orca-boros-en-ja`とSlimOrca ([WandB Log](https://wandb.ai/jondurbin/shisa-7b-v1/runs/k8pfog9d/overview))で微調整されました。 2. 次に、日本語のパフォーマンスを向上させるためにultra-orca-boros-en-jaの一部を使用して1回追加のエポックを追加しました(最初の段階のSlimOrcaは主に英語)([WandB Log](https://wandb.ai/jondurbin/shisa-mega-7b-v1.1/runs/dopsr0o7/overview))。 3. 
最後に、モデルは小規模のultrafeedback(英語)と自身のJA DPOデータセットに対してDPOTrainerを使用して調整されました。ここで使用したJA DPOデータセットはgpt-4の出力を選出された値とし、ステージ1の予備モデルの出力を却下した値とします。([WandDB Log](https://wandb.ai/jondurbin/shisa-mega-dpo-7b-v1.1) ) 私たちのトレーニングプロセス中に、何故一部の既存の日本語モデルが、追加の日本語トレーニングがないモデルに対してもパフォーマンスが低いのか、といういくつかの重要な洞察を得ることができました。この分析結果を共有すれば、他のチームが日本語モデルを開発する際の参考になると思います。 さらに探求する必要はありますが、実験的な検証として、微調整セットのバージョンを既存のベースモデル("Gamma 7B")に適用し、初期のJA MT-Bench結果が示すように、私たちのチューニングアプローチで機能性のパフォーマンスを劇的に向上させることができました: | モデル | スコア | | ------------------------------ | ----- | | shisa-gamma-7b-allsources-v0.4 | 5.65 | | ja-stablelm-instruct-gamma-7b* | 4.01 | ## パフォーマンス トレーニング全体を通じて、各モデルについて人間による評価を行い、モデルのパフォーマンスを相互に検証しました。現在、モデル間の手動での比較テストを大規模に行っています。私たちの目指すところは、ツールをさらに発展させることでこのデータ収集を公開して拡張することです。詳細と更新情報については、[プロジェクトwiki](https://github.com/AUGMXNT/shisa/wiki) をご覧ください。 我々は、[llm-jp-eval](https://github.com/llm-jp/llm-jp-eval)は、私たちの[基本モデル](https://huggingface.co/augmxnt/shisa-base-7b-v1)の有用な指標であり、初期の検証のための微調整プロセス中に非常に役立つと考えていますが、微調整トレーニングにはベンチマークのトレイン分割の一部が含まれているため、私たちが提供するllm-jp-evalの結果は主に興味深いポイントとして提供しています: | AVR | MC | NLI | QA | RC | |-------|-------|-------|-------|-------| | 0.7480| 0.8900| 0.8040| 0.4153| 0.8825| *(Qwenのテストをサポートし、可能であれば`bos_token`を発行するために、[わずかに修正したllm-jp-eval](https://github.com/llm-jp/llm-jp-eval/compare/main...AUGMXNT:llm-jp-eval:main) を実行しています)* 最終モデルについては、ベンチマークを含めるのが一般的なため、私たちのモデルの能力をより代表的にテストするために、Stability AI Japanの[Japanese MT-Bench](https://github.com/Stability-AI/FastChat)を使用しました。[私たちのJA MT-Bench テスト](https://github.com/Stability-AI/FastChat/compare/jp-stable...AUGMXNT:FastChat:jp-stable)では、サンプリング変動を減らすために、日本語のプロンプト("あなたは役立つアシスタントです。")と `--num-choices 4`を使用していますが、生成間で0.5+点(時にはそれ以上の変動)を頻繁に観察し、テスト時のデフォルトのプロンプトとパラメータに問題があったという経験から、これらのスコアを過度に解釈することには注意が必要で、これらを確定的なスコアやランキングではなく、より確率的な方向指標として扱うことをお勧めします: | ベンチマーク | スコア | | ----------- | ----- | | JA MT-Bench | 5.02 | | MT-Bench | 5.71 | [MT-Bench Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard)がありますが、JA MT-Benchはまだ開発中であるため、便宜上、他のモデルのJA MT-Benchスコアとの比較を示します(私たちのスコアは`gpt-4-0613`によって評価されました): | モデル | スコア | | ------------------------------------------------- | ---- | | gpt-4-0613 | 9.40 | | gpt-4-1106-preview | 9.17 | | gpt-3.5-turbo* | 8.41 | | Qwen-14B-Chat | 7.47 | | **shisa-7b-v1** | **5.02** | | ELYZA-japanese-Llama-2-7b-fast-instruct* | 4.86 | | ja-stablelm-instruct-gamma-7b* | 4.01 | | japanese-stablelm-instruct-alpha-7b* | 2.74 | | Mistral-7B-OpenOrca-ja* | 2.23 | | youri-7b-chat* | 2.00 | | Mistral-7B-Instruct-v0.1* | 1.78 | | llm-jp-13b-instruct-full-jaster-dolly-oasst-v1.0* | 1.31 | *(このセクションでマークされたJA MT-Benchの結果は[shi3zから引用](https://note.com/shi3zblog/n/n6b2ac5874021)しました)* ## 制限事項 当モデルは十分な日本語の流暢さを示していますが、7Bパラメータのモデルとしては、より大きなクラスのモデルに比べて幻覚率が高く、指示の追跡や推論が効果的でない傾向があります。また、日本語の完全な習得はまだ達しておらず、ネイティブスピーカーはたまに非慣用的/違和感のある表現や不適切な時制/話し言葉のレベルなどの間違いを見つけることがあります。 また、私たちのトークナイザーの拡張に大いに起因する可能性が高いが、わずかな言語リークを確認しています。これらは[Min P](https://www.reddit.com/r/LocalLLaMA/comments/17vonjo/your_settings_are_probably_hurting_your_model_why/)などのサンプラー設定や追加のターゲット指向型トレーニングで修正可能な可能性があり、今後、自動検出/サンプラーのスウィープについて追加の作業を行う予定です。興味深い観察としては、私たちのデータ収集に基づいて、DPOプロセスがこの問題を大幅に悪化させることがわかりましたが、それでもDPOモデルは人間の好み率が大幅に高かったため、最終的な微調整の選択には一定のトレードオフがありました。 現存するアプローチとデータセットを使用して、大規模なモデルのトレーニングがパフォーマンスを向上させると信じていますが、今後のモデル向けに行いたい改良も多くあります。私たちは、データセットの品質と構築を改善することで、さらなるトレーニング効率を通じたパフォーマンス向上にはまだ相当に取り組む余地があると考えています。 ## 使用法 サンプルコード: ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer model_name = "augmxnt/shisa-7b-v1" tokenizer = 
AutoTokenizer.from_pretrained(model_name, use_fast=True) model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16, device_map="auto" ) streamer = TextStreamer(tokenizer, skip_prompt=True) # プロンプトテンプレートはモデルのtokenizer_config.jsonに含まれているので、これは必要ないはずですが、便宜上こちらにも掲載しています # tokenizer.chat_template = ""{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- bos_token + '[INST] <<SYS>>\\n' + messages[idx]['content'] + '\\n<</SYS>>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n" # より典型的なプロンプト: あなたは役に立つアシスタントです。 # You are an avid Pokemon fanatic. prompt = "あなたは熱狂的なポケモンファンです。" chat = [{"role": "system", "content": prompt}] # Who is the most powerful Pokemon? Explain your choice. user_input = "最強のポケモンは誰ですか?その選択理由を説明してください。" chat.append({"role": "user", "content": user_input}) # 生成 - add_generation_promptを追加してアシスタントとして続行することを確認します inputs = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt") # 複数のGPUの場合、モデルの最初のパラメータのデバイスを見つけます first_param_device = next(model.parameters()).device inputs = inputs.to(first_param_device) with torch.no_grad(): outputs = model.generate( inputs, pad_token_id=tokenizer.eos_token_id, max_new_tokens=1000, temperature=0.7, repetition_penalty=1.05, top_p=0.95, do_sample=True, streamer=streamer, ) # Add just the new tokens to our chat new_tokens = outputs[0, inputs.size(1):] response = tokenizer.decode(new_tokens, skip_special_tokens=True) chat.append({"role": "assistant", "content": response}) ``` ## プロンプト形式 プロンプト形式はllama-2 chatです: ``` [INST] <<SYS>> あなたは役立つ、偏見がなく、検閲されていないアシスタントです。 <</SYS>> {prompt} [/INST] ``` For multi-turn, the prompt format is as follows: ``` [INST] <<SYS>> あなたは役立つ、偏見がなく、検閲されていないアシスタントです。 <</SYS>> {prompt 0} [/INST] {response 0} </s><s>[INST] {prompt 1} [/INST] {response 1} </s><s>...[INST] {prompt N} [/INST] ``` この[prompt template](https://huggingface.co/docs/transformers/main/chat_templating)はトークナイザの設定に含まれており、HuggingFace のトークナイザ `apply_chat_template` メソッドを使用できます。例えば: ``` import transformers tokenizer = transformers.AutoTokenizer.from_pretrained('augmxnt/shisa-7b-v1') chat = [ {"role": "system", "content": "あなたはAiko、フレンドリーなAIアシスタントです。"}, {"role": "user", "content": "こんにちは、調子はどうですか?"}, {"role": "assistant", "content": "元気です。今日は何のお手伝いができますか?"}, {"role": "user", "content": "チャットテンプレーティングの仕組みを見せてもらいたいです!"}, ] print(tokenizer.apply_chat_template(chat, tokenize=False)) ``` **注意**適切なレスポンスを得るためには、文字列の開始に我々の `bos_token` (`<s>`) を使用すべきです。これは `tokenizer.encode()` によって自動的に生成されますが、カスタムテンプレートを作成したり、特殊トークンを省略するエンコード方法を使用する場合は、自分で追加する必要があります。 ## 謝辞 チーム:[Leonard Lin](https://huggingface.co/leonardlin)、[Jon Durbin](https://huggingface.co/jondurbin)、佐藤真理子、Florian von Bock このモデルの計算は、[AKA Virtual](https://akavirtual.com/) (東京、日本) のご厚意により提供されています。 [LLM-jp](https://llm-jp.nii.ac.jp/)、[Stability AI Japan](https://ja.stability.ai/)、[LMSYS](https://lmsys.org/)のチームが、llm-jp-eval, Japanese MT-Bench, MT-Benchに取り組んでくれて感謝しています。 また、貴重なヒューマンプリファレンステストを提供してくださったすべてのボランティアにも感謝いたします! このプロジェクトのためにより良く、より大きなモデルを訓練するために、追加の計算を積極的に探しています。お問い合わせは次の宛先までお願いいたします:*compute at augmxnt dot com*
{"datasets": ["augmxnt/ultra-orca-boros-en-ja-v1", "Open-Orca/SlimOrca", "augmxnt/shisa-en-ja-dpo-v1"], "language": ["ja", "en"], "license": "apache-2.0"}
task
[ "TRANSLATION" ]
40,257
florianhoenicke/pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564
florianhoenicke
feature-extraction
[ "transformers", "safetensors", "bert", "feature-extraction", "custom_code", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-04-10T22:56:09Z
2024-04-11T11:54:11+00:00
5
0
--- {} --- # pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564 ## Model Description pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564 is a fine-tuned version of jinaai/jina-embeddings-v2-small-en designed for a specific domain. ## Use Case This model is designed to support various applications in natural language processing and understanding. ## Associated Dataset The dataset for this model can be found [**here**](https://huggingface.co/datasets/florianhoenicke/pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564). ## How to Use This model can be easily integrated into your NLP pipeline for tasks such as text classification, sentiment analysis, entity recognition, and more. Here's a simple example to get you started: ```python from transformers import AutoModel, AutoTokenizer llm_name = "florianhoenicke/pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564" tokenizer = AutoTokenizer.from_pretrained(llm_name) model = AutoModel.from_pretrained(llm_name) tokens = tokenizer("Your text here", return_tensors="pt") embedding = model(**tokens) ```
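Note that `model(**tokens)` returns token-level hidden states rather than a single sentence vector. One common way to pool them into an embedding is attention-mask-aware mean pooling, sketched below; whether this matches the pooling used during fine-tuning is an assumption, and `trust_remote_code=True` may be required for the Jina BERT architecture:

```python
import torch
from transformers import AutoModel, AutoTokenizer

llm_name = "florianhoenicke/pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564"
tokenizer = AutoTokenizer.from_pretrained(llm_name)
model = AutoModel.from_pretrained(llm_name, trust_remote_code=True)  # custom Jina BERT code

sentences = ["Which food is best for a puppy with a sensitive stomach?"]
tokens = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    hidden = model(**tokens).last_hidden_state                 # (batch, seq_len, dim)

mask = tokens.attention_mask.unsqueeze(-1).float()             # ignore padding positions
embeddings = (hidden * mask).sum(dim=1) / mask.sum(dim=1)      # mean pooling -> (batch, dim)
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
```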
null
Non_BioNLP
# pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564 ## Model Description pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564 is a fine-tuned version of jinaai/jina-embeddings-v2-small-en designed for a specific domain. ## Use Case This model is designed to support various applications in natural language processing and understanding. ## Associated Dataset The dataset for this model can be found [**here**](https://huggingface.co/datasets/florianhoenicke/pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564). ## How to Use This model can be easily integrated into your NLP pipeline for tasks such as text classification, sentiment analysis, entity recognition, and more. Here's a simple example to get you started: ```python from transformers import AutoModel, AutoTokenizer llm_name = "florianhoenicke/pet-shop-100-64-10-jinaai_jina-embeddings-v2-small-en_9062874564" tokenizer = AutoTokenizer.from_pretrained(llm_name) model = AutoModel.from_pretrained(llm_name) tokens = tokenizer("Your text here", return_tensors="pt") embedding = model(**tokens) ```
{}
task
[ "TEXT_CLASSIFICATION" ]
40,258
unsloth/Qwen2-VL-7B-bnb-4bit
unsloth
image-text-to-text
[ "transformers", "safetensors", "qwen2_vl", "image-text-to-text", "multimodal", "unsloth", "qwen", "conversational", "en", "arxiv:2409.12191", "arxiv:2308.12966", "base_model:Qwen/Qwen2-VL-7B", "base_model:quantized:Qwen/Qwen2-VL-7B", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
2025-03-09T06:09:19Z
2025-03-09T06:29:33+00:00
102
0
--- base_model: Qwen/Qwen2-VL-7B language: - en library_name: transformers license: apache-2.0 pipeline_tag: image-text-to-text tags: - multimodal - unsloth - qwen --- # Qwen2-VL-7B-bnb-4bit <a href="https://chat.qwenlm.ai/" target="_blank" style="margin: 2px;"> <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/> </a> ## Introduction We're excited to unveil **Qwen2-VL**, the latest iteration of our Qwen-VL model, representing nearly a year of innovation. ### What’s New in Qwen2-VL? #### Key Enhancements: * **SoTA understanding of images of various resolution & ratio**: Qwen2-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc. * **Understanding videos of 20min+**: Qwen2-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc. * **Agent that can operate your mobiles, robots, etc.**: with the abilities of complex reasoning and decision making, Qwen2-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions. * **Multilingual Support**: to serve global users, besides English and Chinese, Qwen2-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc. #### Model Architecture Updates: * **Naive Dynamic Resolution**: Unlike before, Qwen2-VL can handle arbitrary image resolutions, mapping them into a dynamic number of visual tokens, offering a more human-like visual processing experience. <p align="center"> <img src="https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/qwen2_vl.jpg" width="80%"/> <p> * **Multimodal Rotary Position Embedding (M-ROPE)**: Decomposes positional embedding into parts to capture 1D textual, 2D visual, and 3D video positional information, enhancing its multimodal processing capabilities. <p align="center"> <img src="http://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/mrope.png" width="80%"/> <p> We have three models with 2, 7 and 72 billion parameters. This repo contains the instruction-tuned 7B Qwen2-VL model. For more information, visit our [Blog](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub](https://github.com/QwenLM/Qwen2-VL). 
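The Quickstart examples below follow the original `Qwen/Qwen2-VL-7B-Instruct` repository. To use the pre-quantized 4-bit weights hosted in this repository instead, only the model id changes; `bitsandbytes` must be installed, and if the processor files are not shipped here they can be loaded from the upstream Qwen repository. A minimal sketch:

```python
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor

model_id = "unsloth/Qwen2-VL-7B-bnb-4bit"

# The bitsandbytes quantization config stored in the repo is picked up automatically.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # compute dtype for the non-quantized layers
    device_map="auto",
)

# Fall back to "Qwen/Qwen2-VL-7B-Instruct" if this repo does not include processor files.
processor = AutoProcessor.from_pretrained(model_id)
```

From here on, the Quickstart snippets apply unchanged apart from the model id.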
## Evaluation ### Image Benchmarks | Benchmark | InternVL2-8B | MiniCPM-V 2.6 | GPT-4o-mini | **Qwen2-VL-7B** | | :--- | :---: | :---: | :---: | :---: | | MMMU<sub>val</sub> | 51.8 | 49.8 | **60**| 54.1 | | DocVQA<sub>test</sub> | 91.6 | 90.8 | - | **94.5** | | InfoVQA<sub>test</sub> | 74.8 | - | - |**76.5** | | ChartQA<sub>test</sub> | **83.3** | - |- | 83.0 | | TextVQA<sub>val</sub> | 77.4 | 80.1 | -| **84.3** | | OCRBench | 794 | **852** | 785 | 845 | | MTVQA | - | - | -| **26.3** | | VCR<sub>en easy</sub> | - | 73.88 | 83.60 | **89.70** | | VCR<sub>zh easy</sub> | - | 10.18| 1.10 | **59.94** | | RealWorldQA | 64.4 | - | - | **70.1** | | MME<sub>sum</sub> | 2210.3 | **2348.4** | 2003.4| 2326.8 | | MMBench-EN<sub>test</sub> | 81.7 | - | - | **83.0** | | MMBench-CN<sub>test</sub> | **81.2** | - | - | 80.5 | | MMBench-V1.1<sub>test</sub> | 79.4 | 78.0 | 76.0| **80.7** | | MMT-Bench<sub>test</sub> | - | - | - |**63.7** | | MMStar | **61.5** | 57.5 | 54.8 | 60.7 | | MMVet<sub>GPT-4-Turbo</sub> | 54.2 | 60.0 | **66.9** | 62.0 | | HallBench<sub>avg</sub> | 45.2 | 48.1 | 46.1| **50.6** | | MathVista<sub>testmini</sub> | 58.3 | **60.6** | 52.4 | 58.2 | | MathVision | - | - | - | **16.3** | ### Video Benchmarks | Benchmark | Internvl2-8B | LLaVA-OneVision-7B | MiniCPM-V 2.6 | **Qwen2-VL-7B** | | :--- | :---: | :---: | :---: | :---: | | MVBench | 66.4 | 56.7 | - | **67.0** | | PerceptionTest<sub>test</sub> | - | 57.1 | - | **62.3** | | EgoSchema<sub>test</sub> | - | 60.1 | - | **66.7** | | Video-MME<sub>wo/w subs</sub> | 54.0/56.9 | 58.2/- | 60.9/63.6 | **63.3**/**69.0** | ## Requirements The code of Qwen2-VL has been in the latest Hugging face transformers and we advise you to build from source with command `pip install git+https://github.com/huggingface/transformers`, or you might encounter the following error: ``` KeyError: 'qwen2_vl' ``` ## Quickstart We offer a toolkit to help you handle various types of visual input more conveniently. This includes base64, URLs, and interleaved images and videos. You can install it using the following command: ```bash pip install qwen-vl-utils ``` Here we show a code snippet to show you how to use the chat model with `transformers` and `qwen_vl_utils`: ```python from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor from qwen_vl_utils import process_vision_info # default: Load the model on the available device(s) model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios. # model = Qwen2VLForConditionalGeneration.from_pretrained( # "Qwen/Qwen2-VL-7B-Instruct", # torch_dtype=torch.bfloat16, # attn_implementation="flash_attention_2", # device_map="auto", # ) # default processer processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage. 
# min_pixels = 256*28*28 # max_pixels = 1280*28*28 # processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels) messages = [ { "role": "user", "content": [ { "type": "image", "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg", }, {"type": "text", "text": "Describe this image."}, ], } ] # Preparation for inference text = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) image_inputs, video_inputs = process_vision_info(messages) inputs = processor( text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt", ) inputs = inputs.to("cuda") # Inference: Generation of the output generated_ids = model.generate(**inputs, max_new_tokens=128) generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] output_text = processor.batch_decode( generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False ) print(output_text) ``` <details> <summary>Without qwen_vl_utils</summary> ```python from PIL import Image import requests import torch from torchvision import io from typing import Dict from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor # Load the model in half-precision on the available device(s) model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") # Image url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg" image = Image.open(requests.get(url, stream=True).raw) conversation = [ { "role": "user", "content": [ { "type": "image", }, {"type": "text", "text": "Describe this image."}, ], } ] # Preprocess the inputs text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) # Excepted output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>\n<|im_start|>assistant\n' inputs = processor( text=[text_prompt], images=[image], padding=True, return_tensors="pt" ) inputs = inputs.to("cuda") # Inference: Generation of the output output_ids = model.generate(**inputs, max_new_tokens=128) generated_ids = [ output_ids[len(input_ids) :] for input_ids, output_ids in zip(inputs.input_ids, output_ids) ] output_text = processor.batch_decode( generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) print(output_text) ``` </details> <details> <summary>Multi image inference</summary> ```python # Messages containing multiple images and a text query messages = [ { "role": "user", "content": [ {"type": "image", "image": "file:///path/to/image1.jpg"}, {"type": "image", "image": "file:///path/to/image2.jpg"}, {"type": "text", "text": "Identify the similarities between these images."}, ], } ] # Preparation for inference text = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) image_inputs, video_inputs = process_vision_info(messages) inputs = processor( text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt", ) inputs = inputs.to("cuda") # Inference generated_ids = model.generate(**inputs, max_new_tokens=128) generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] output_text = processor.batch_decode( generated_ids_trimmed, 
skip_special_tokens=True, clean_up_tokenization_spaces=False ) print(output_text) ``` </details> <details> <summary>Video inference</summary> ```python # Messages containing a list of images as a video and a text query messages = [ { "role": "user", "content": [ { "type": "video", "video": [ "file:///path/to/frame1.jpg", "file:///path/to/frame2.jpg", "file:///path/to/frame3.jpg", "file:///path/to/frame4.jpg", ], "fps": 1.0, }, {"type": "text", "text": "Describe this video."}, ], } ] # Messages containing a video and a text query messages = [ { "role": "user", "content": [ { "type": "video", "video": "file:///path/to/video1.mp4", "max_pixels": 360 * 420, "fps": 1.0, }, {"type": "text", "text": "Describe this video."}, ], } ] # Preparation for inference text = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) image_inputs, video_inputs = process_vision_info(messages) inputs = processor( text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt", ) inputs = inputs.to("cuda") # Inference generated_ids = model.generate(**inputs, max_new_tokens=128) generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] output_text = processor.batch_decode( generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False ) print(output_text) ``` </details> <details> <summary>Batch inference</summary> ```python # Sample messages for batch inference messages1 = [ { "role": "user", "content": [ {"type": "image", "image": "file:///path/to/image1.jpg"}, {"type": "image", "image": "file:///path/to/image2.jpg"}, {"type": "text", "text": "What are the common elements in these pictures?"}, ], } ] messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] # Combine messages for batch processing messages = [messages1, messages2] # Preparation for batch inference texts = [ processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True) for msg in messages ] image_inputs, video_inputs = process_vision_info(messages) inputs = processor( text=texts, images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt", ) inputs = inputs.to("cuda") # Batch Inference generated_ids = model.generate(**inputs, max_new_tokens=128) generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] output_texts = processor.batch_decode( generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False ) print(output_texts) ``` </details> ### More Usage Tips For input images, we support local files, base64, and URLs. For videos, we currently only support local files. ```python # You can directly insert a local file path, a URL, or a base64-encoded image into the position where you want in the text. ## Local file path messages = [ { "role": "user", "content": [ {"type": "image", "image": "file:///path/to/your/image.jpg"}, {"type": "text", "text": "Describe this image."}, ], } ] ## Image URL messages = [ { "role": "user", "content": [ {"type": "image", "image": "http://path/to/your/image.jpg"}, {"type": "text", "text": "Describe this image."}, ], } ] ## Base64 encoded image messages = [ { "role": "user", "content": [ {"type": "image", "image": "data:image;base64,/9j/..."}, {"type": "text", "text": "Describe this image."}, ], } ] ``` #### Image Resolution for performance boost The model supports a wide range of resolution inputs. 
By default, it uses the native resolution for input, but higher resolutions can enhance performance at the cost of more computation. Users can set the minimum and maximum number of pixels to achieve an optimal configuration for their needs, such as a token count range of 256-1280, to balance speed and memory usage. ```python min_pixels = 256 * 28 * 28 max_pixels = 1280 * 28 * 28 processor = AutoProcessor.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels ) ``` Besides, we provide two methods for fine-grained control over the image size input to the model: 1. Define min_pixels and max_pixels: Images will be resized to maintain their aspect ratio within the range of min_pixels and max_pixels. 2. Specify exact dimensions: Directly set `resized_height` and `resized_width`. These values will be rounded to the nearest multiple of 28. ```python # min_pixels and max_pixels messages = [ { "role": "user", "content": [ { "type": "image", "image": "file:///path/to/your/image.jpg", "min_pixels": 50176, "max_pixels": 50176, }, {"type": "text", "text": "Describe this image."}, ], } ] # resized_height and resized_width messages = [ { "role": "user", "content": [ { "type": "image", "image": "file:///path/to/your/image.jpg", "resized_height": 280, "resized_width": 420, }, {"type": "text", "text": "Describe this image."}, ], } ] ``` ## Limitations While Qwen2-VL is applicable to a wide range of visual tasks, it is equally important to understand its limitations. Here are some known restrictions: 1. Lack of Audio Support: The current model does **not comprehend audio information** within videos. 2. Data timeliness: Our image dataset is **updated until June 2023**, and information subsequent to this date may not be covered. 3. Constraints in Individuals and Intellectual Property (IP): The model's capacity to recognize specific individuals or IPs is limited, potentially failing to comprehensively cover all well-known personalities or brands. 4. Limited Capacity for Complex Instruction: When faced with intricate multi-step instructions, the model's understanding and execution capabilities require enhancement. 5. Insufficient Counting Accuracy: Particularly in complex scenes, the accuracy of object counting is not high, necessitating further improvements. 6. Weak Spatial Reasoning Skills: Especially in 3D spaces, the model's inference of object positional relationships is inadequate, making it difficult to precisely judge the relative positions of objects. These limitations serve as ongoing directions for model optimization and improvement, and we are committed to continually enhancing the model's performance and scope of application. ## Citation If you find our work helpful, feel free to give us a cite. 
``` @article{Qwen2VL, title={Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution}, author={Wang, Peng and Bai, Shuai and Tan, Sinan and Wang, Shijie and Fan, Zhihao and Bai, Jinze and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Fan, Yang and Dang, Kai and Du, Mengfei and Ren, Xuancheng and Men, Rui and Liu, Dayiheng and Zhou, Chang and Zhou, Jingren and Lin, Junyang}, journal={arXiv preprint arXiv:2409.12191}, year={2024} } @article{Qwen-VL, title={Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond}, author={Bai, Jinze and Bai, Shuai and Yang, Shusheng and Wang, Shijie and Tan, Sinan and Wang, Peng and Lin, Junyang and Zhou, Chang and Zhou, Jingren}, journal={arXiv preprint arXiv:2308.12966}, year={2023} } ```
null
Non_BioNLP
# Qwen2-VL-7B-bnb-4bit <a href="https://chat.qwenlm.ai/" target="_blank" style="margin: 2px;"> <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/> </a> ## Introduction We're excited to unveil **Qwen2-VL**, the latest iteration of our Qwen-VL model, representing nearly a year of innovation. ### What’s New in Qwen2-VL? #### Key Enhancements: * **SoTA understanding of images of various resolution & ratio**: Qwen2-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc. * **Understanding videos of 20min+**: Qwen2-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc. * **Agent that can operate your mobiles, robots, etc.**: with the abilities of complex reasoning and decision making, Qwen2-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions. * **Multilingual Support**: to serve global users, besides English and Chinese, Qwen2-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc. #### Model Architecture Updates: * **Naive Dynamic Resolution**: Unlike before, Qwen2-VL can handle arbitrary image resolutions, mapping them into a dynamic number of visual tokens, offering a more human-like visual processing experience. <p align="center"> <img src="https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/qwen2_vl.jpg" width="80%"/> <p> * **Multimodal Rotary Position Embedding (M-ROPE)**: Decomposes positional embedding into parts to capture 1D textual, 2D visual, and 3D video positional information, enhancing its multimodal processing capabilities. <p align="center"> <img src="http://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/mrope.png" width="80%"/> <p> We have three models with 2, 7 and 72 billion parameters. This repo contains the instruction-tuned 7B Qwen2-VL model. For more information, visit our [Blog](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub](https://github.com/QwenLM/Qwen2-VL). 
## Evaluation ### Image Benchmarks | Benchmark | InternVL2-8B | MiniCPM-V 2.6 | GPT-4o-mini | **Qwen2-VL-7B** | | :--- | :---: | :---: | :---: | :---: | | MMMU<sub>val</sub> | 51.8 | 49.8 | **60**| 54.1 | | DocVQA<sub>test</sub> | 91.6 | 90.8 | - | **94.5** | | InfoVQA<sub>test</sub> | 74.8 | - | - |**76.5** | | ChartQA<sub>test</sub> | **83.3** | - |- | 83.0 | | TextVQA<sub>val</sub> | 77.4 | 80.1 | -| **84.3** | | OCRBench | 794 | **852** | 785 | 845 | | MTVQA | - | - | -| **26.3** | | VCR<sub>en easy</sub> | - | 73.88 | 83.60 | **89.70** | | VCR<sub>zh easy</sub> | - | 10.18| 1.10 | **59.94** | | RealWorldQA | 64.4 | - | - | **70.1** | | MME<sub>sum</sub> | 2210.3 | **2348.4** | 2003.4| 2326.8 | | MMBench-EN<sub>test</sub> | 81.7 | - | - | **83.0** | | MMBench-CN<sub>test</sub> | **81.2** | - | - | 80.5 | | MMBench-V1.1<sub>test</sub> | 79.4 | 78.0 | 76.0| **80.7** | | MMT-Bench<sub>test</sub> | - | - | - |**63.7** | | MMStar | **61.5** | 57.5 | 54.8 | 60.7 | | MMVet<sub>GPT-4-Turbo</sub> | 54.2 | 60.0 | **66.9** | 62.0 | | HallBench<sub>avg</sub> | 45.2 | 48.1 | 46.1| **50.6** | | MathVista<sub>testmini</sub> | 58.3 | **60.6** | 52.4 | 58.2 | | MathVision | - | - | - | **16.3** | ### Video Benchmarks | Benchmark | Internvl2-8B | LLaVA-OneVision-7B | MiniCPM-V 2.6 | **Qwen2-VL-7B** | | :--- | :---: | :---: | :---: | :---: | | MVBench | 66.4 | 56.7 | - | **67.0** | | PerceptionTest<sub>test</sub> | - | 57.1 | - | **62.3** | | EgoSchema<sub>test</sub> | - | 60.1 | - | **66.7** | | Video-MME<sub>wo/w subs</sub> | 54.0/56.9 | 58.2/- | 60.9/63.6 | **63.3**/**69.0** | ## Requirements The code of Qwen2-VL has been in the latest Hugging face transformers and we advise you to build from source with command `pip install git+https://github.com/huggingface/transformers`, or you might encounter the following error: ``` KeyError: 'qwen2_vl' ``` ## Quickstart We offer a toolkit to help you handle various types of visual input more conveniently. This includes base64, URLs, and interleaved images and videos. You can install it using the following command: ```bash pip install qwen-vl-utils ``` Here we show a code snippet to show you how to use the chat model with `transformers` and `qwen_vl_utils`: ```python from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor from qwen_vl_utils import process_vision_info # default: Load the model on the available device(s) model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios. # model = Qwen2VLForConditionalGeneration.from_pretrained( # "Qwen/Qwen2-VL-7B-Instruct", # torch_dtype=torch.bfloat16, # attn_implementation="flash_attention_2", # device_map="auto", # ) # default processer processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage. 
# min_pixels = 256*28*28 # max_pixels = 1280*28*28 # processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels) messages = [ { "role": "user", "content": [ { "type": "image", "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg", }, {"type": "text", "text": "Describe this image."}, ], } ] # Preparation for inference text = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) image_inputs, video_inputs = process_vision_info(messages) inputs = processor( text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt", ) inputs = inputs.to("cuda") # Inference: Generation of the output generated_ids = model.generate(**inputs, max_new_tokens=128) generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] output_text = processor.batch_decode( generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False ) print(output_text) ``` <details> <summary>Without qwen_vl_utils</summary> ```python from PIL import Image import requests import torch from torchvision import io from typing import Dict from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor # Load the model in half-precision on the available device(s) model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") # Image url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg" image = Image.open(requests.get(url, stream=True).raw) conversation = [ { "role": "user", "content": [ { "type": "image", }, {"type": "text", "text": "Describe this image."}, ], } ] # Preprocess the inputs text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) # Excepted output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>\n<|im_start|>assistant\n' inputs = processor( text=[text_prompt], images=[image], padding=True, return_tensors="pt" ) inputs = inputs.to("cuda") # Inference: Generation of the output output_ids = model.generate(**inputs, max_new_tokens=128) generated_ids = [ output_ids[len(input_ids) :] for input_ids, output_ids in zip(inputs.input_ids, output_ids) ] output_text = processor.batch_decode( generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) print(output_text) ``` </details> <details> <summary>Multi image inference</summary> ```python # Messages containing multiple images and a text query messages = [ { "role": "user", "content": [ {"type": "image", "image": "file:///path/to/image1.jpg"}, {"type": "image", "image": "file:///path/to/image2.jpg"}, {"type": "text", "text": "Identify the similarities between these images."}, ], } ] # Preparation for inference text = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) image_inputs, video_inputs = process_vision_info(messages) inputs = processor( text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt", ) inputs = inputs.to("cuda") # Inference generated_ids = model.generate(**inputs, max_new_tokens=128) generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] output_text = processor.batch_decode( generated_ids_trimmed, 
skip_special_tokens=True, clean_up_tokenization_spaces=False ) print(output_text) ``` </details> <details> <summary>Video inference</summary> ```python # Messages containing a list of images as a video and a text query messages = [ { "role": "user", "content": [ { "type": "video", "video": [ "file:///path/to/frame1.jpg", "file:///path/to/frame2.jpg", "file:///path/to/frame3.jpg", "file:///path/to/frame4.jpg", ], "fps": 1.0, }, {"type": "text", "text": "Describe this video."}, ], } ] # Messages containing a video and a text query messages = [ { "role": "user", "content": [ { "type": "video", "video": "file:///path/to/video1.mp4", "max_pixels": 360 * 420, "fps": 1.0, }, {"type": "text", "text": "Describe this video."}, ], } ] # Preparation for inference text = processor.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) image_inputs, video_inputs = process_vision_info(messages) inputs = processor( text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt", ) inputs = inputs.to("cuda") # Inference generated_ids = model.generate(**inputs, max_new_tokens=128) generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] output_text = processor.batch_decode( generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False ) print(output_text) ``` </details> <details> <summary>Batch inference</summary> ```python # Sample messages for batch inference messages1 = [ { "role": "user", "content": [ {"type": "image", "image": "file:///path/to/image1.jpg"}, {"type": "image", "image": "file:///path/to/image2.jpg"}, {"type": "text", "text": "What are the common elements in these pictures?"}, ], } ] messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] # Combine messages for batch processing messages = [messages1, messages2] # Preparation for batch inference texts = [ processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True) for msg in messages ] image_inputs, video_inputs = process_vision_info(messages) inputs = processor( text=texts, images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt", ) inputs = inputs.to("cuda") # Batch Inference generated_ids = model.generate(**inputs, max_new_tokens=128) generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] output_texts = processor.batch_decode( generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False ) print(output_texts) ``` </details> ### More Usage Tips For input images, we support local files, base64, and URLs. For videos, we currently only support local files. ```python # You can directly insert a local file path, a URL, or a base64-encoded image into the position where you want in the text. ## Local file path messages = [ { "role": "user", "content": [ {"type": "image", "image": "file:///path/to/your/image.jpg"}, {"type": "text", "text": "Describe this image."}, ], } ] ## Image URL messages = [ { "role": "user", "content": [ {"type": "image", "image": "http://path/to/your/image.jpg"}, {"type": "text", "text": "Describe this image."}, ], } ] ## Base64 encoded image messages = [ { "role": "user", "content": [ {"type": "image", "image": "data:image;base64,/9j/..."}, {"type": "text", "text": "Describe this image."}, ], } ] ``` #### Image Resolution for performance boost The model supports a wide range of resolution inputs. 
By default, it uses the native resolution for input, but higher resolutions can enhance performance at the cost of more computation. Users can set the minimum and maximum number of pixels to achieve an optimal configuration for their needs, such as a token count range of 256-1280, to balance speed and memory usage. ```python min_pixels = 256 * 28 * 28 max_pixels = 1280 * 28 * 28 processor = AutoProcessor.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels ) ``` Besides, we provide two methods for fine-grained control over the image size input to the model: 1. Define min_pixels and max_pixels: Images will be resized to maintain their aspect ratio within the range of min_pixels and max_pixels. 2. Specify exact dimensions: Directly set `resized_height` and `resized_width`. These values will be rounded to the nearest multiple of 28. ```python # min_pixels and max_pixels messages = [ { "role": "user", "content": [ { "type": "image", "image": "file:///path/to/your/image.jpg", "min_pixels": 50176, "max_pixels": 50176, }, {"type": "text", "text": "Describe this image."}, ], } ] # resized_height and resized_width messages = [ { "role": "user", "content": [ { "type": "image", "image": "file:///path/to/your/image.jpg", "resized_height": 280, "resized_width": 420, }, {"type": "text", "text": "Describe this image."}, ], } ] ``` ## Limitations While Qwen2-VL is applicable to a wide range of visual tasks, it is equally important to understand its limitations. Here are some known restrictions: 1. Lack of Audio Support: The current model does **not comprehend audio information** within videos. 2. Data timeliness: Our image dataset is **updated until June 2023**, and information subsequent to this date may not be covered. 3. Constraints in Individuals and Intellectual Property (IP): The model's capacity to recognize specific individuals or IPs is limited, potentially failing to comprehensively cover all well-known personalities or brands. 4. Limited Capacity for Complex Instruction: When faced with intricate multi-step instructions, the model's understanding and execution capabilities require enhancement. 5. Insufficient Counting Accuracy: Particularly in complex scenes, the accuracy of object counting is not high, necessitating further improvements. 6. Weak Spatial Reasoning Skills: Especially in 3D spaces, the model's inference of object positional relationships is inadequate, making it difficult to precisely judge the relative positions of objects. These limitations serve as ongoing directions for model optimization and improvement, and we are committed to continually enhancing the model's performance and scope of application. ## Citation If you find our work helpful, feel free to give us a cite. 
``` @article{Qwen2VL, title={Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution}, author={Wang, Peng and Bai, Shuai and Tan, Sinan and Wang, Shijie and Fan, Zhihao and Bai, Jinze and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Fan, Yang and Dang, Kai and Du, Mengfei and Ren, Xuancheng and Men, Rui and Liu, Dayiheng and Zhou, Chang and Zhou, Jingren and Lin, Junyang}, journal={arXiv preprint arXiv:2409.12191}, year={2024} } @article{Qwen-VL, title={Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond}, author={Bai, Jinze and Bai, Shuai and Yang, Shusheng and Wang, Shijie and Tan, Sinan and Wang, Peng and Lin, Junyang and Zhou, Chang and Zhou, Jingren}, journal={arXiv preprint arXiv:2308.12966}, year={2023} } ```
{"base_model": "Qwen/Qwen2-VL-7B", "language": ["en"], "library_name": "transformers", "license": "apache-2.0", "pipeline_tag": "image-text-to-text", "tags": ["multimodal", "unsloth", "qwen"]}
task
[ "QUESTION_ANSWERING" ]
40,259
google-t5/t5-11b
google-t5
translation
[ "transformers", "pytorch", "tf", "t5", "text2text-generation", "summarization", "translation", "en", "fr", "ro", "de", "multilingual", "dataset:c4", "arxiv:1805.12471", "arxiv:1708.00055", "arxiv:1704.05426", "arxiv:1606.05250", "arxiv:1808.09121", "arxiv:1810.12885", "arxiv:1905.10044", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
2022-03-02T23:29:04Z
2023-01-02T16:15:50+00:00
1,644,086
61
--- datasets: - c4 language: - en - fr - ro - de - multilingual license: apache-2.0 tags: - summarization - translation inference: false --- # Model Card for T5 11B ![model image](https://camo.githubusercontent.com/623b4dea0b653f2ad3f36c71ebfe749a677ac0a1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f343030362f312a44304a31674e51663876727255704b657944387750412e706e67) # Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 4. [Training Details](#training-details) 5. [Evaluation](#evaluation) 6. [Environmental Impact](#environmental-impact) 7. [Citation](#citation) 8. [Model Card Authors](#model-card-authors) 9. [How To Get Started With the Model](#how-to-get-started-with-the-model) # Model Details ## Model Description The developers of the Text-To-Text Transfer Transformer (T5) [write](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html): > With T5, we propose reframing all NLP tasks into a unified text-to-text-format where the input and output are always text strings, in contrast to BERT-style models that can only output either a class label or a span of the input. Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task. T5-11B is the checkpoint with 11 billion parameters. - **Developed by:** Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. See [associated paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) and [GitHub repo](https://github.com/google-research/text-to-text-transfer-transformer#released-model-checkpoints) - **Model type:** Language model - **Language(s) (NLP):** English, French, Romanian, German - **License:** Apache 2.0 - **Related Models:** [All T5 Checkpoints](https://huggingface.co/models?search=t5) - **Resources for more information:** - [Research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) - [Google's T5 Blog Post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) - [GitHub Repo](https://github.com/google-research/text-to-text-transfer-transformer) - [Hugging Face T5 Docs](https://huggingface.co/docs/transformers/model_doc/t5) # Uses ## Direct Use and Downstream Use The developers write in a [blog post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) that the model: > Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task, including machine translation, document summarization, question answering, and classification tasks (e.g., sentiment analysis). We can even apply T5 to regression tasks by training it to predict the string representation of a number instead of the number itself. See the [blog post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) and [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for further details. ## Out-of-Scope Use More information needed. # Bias, Risks, and Limitations More information needed. ## Recommendations More information needed. # Training Details ## Training Data The model is pre-trained on the [Colossal Clean Crawled Corpus (C4)](https://www.tensorflow.org/datasets/catalog/c4), which was developed and released in the context of the same [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) as T5. The model was pre-trained on a on a **multi-task mixture of unsupervised (1.) and supervised tasks (2.)**. 
Thereby, the following datasets were being used for (1.) and (2.): 1. **Datasets used for Unsupervised denoising objective**: - [C4](https://huggingface.co/datasets/c4) - [Wiki-DPR](https://huggingface.co/datasets/wiki_dpr) 2. **Datasets used for Supervised text-to-text language modeling objective** - Sentence acceptability judgment - CoLA [Warstadt et al., 2018](https://arxiv.org/abs/1805.12471) - Sentiment analysis - SST-2 [Socher et al., 2013](https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf) - Paraphrasing/sentence similarity - MRPC [Dolan and Brockett, 2005](https://aclanthology.org/I05-5002) - STS-B [Ceret al., 2017](https://arxiv.org/abs/1708.00055) - QQP [Iyer et al., 2017](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) - Natural language inference - MNLI [Williams et al., 2017](https://arxiv.org/abs/1704.05426) - QNLI [Rajpurkar et al.,2016](https://arxiv.org/abs/1606.05250) - RTE [Dagan et al., 2005](https://link.springer.com/chapter/10.1007/11736790_9) - CB [De Marneff et al., 2019](https://semanticsarchive.net/Archive/Tg3ZGI2M/Marneffe.pdf) - Sentence completion - COPA [Roemmele et al., 2011](https://www.researchgate.net/publication/221251392_Choice_of_Plausible_Alternatives_An_Evaluation_of_Commonsense_Causal_Reasoning) - Word sense disambiguation - WIC [Pilehvar and Camacho-Collados, 2018](https://arxiv.org/abs/1808.09121) - Question answering - MultiRC [Khashabi et al., 2018](https://aclanthology.org/N18-1023) - ReCoRD [Zhang et al., 2018](https://arxiv.org/abs/1810.12885) - BoolQ [Clark et al., 2019](https://arxiv.org/abs/1905.10044) ## Training Procedure In their [abstract](https://jmlr.org/papers/volume21/20-074/20-074.pdf), the model developers write: > In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. The framework introduced, the T5 framework, involves a training procedure that brings together the approaches studied in the paper. See the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for further details. # Evaluation ## Testing Data, Factors & Metrics The developers evaluated the model on 24 tasks, see the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for full details. ## Results For full results for T5-11B, see the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf), Table 14. # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** Google Cloud TPU Pods - **Hours used:** More information needed - **Cloud Provider:** GCP - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Citation **BibTeX:** ```bibtex @article{2020t5, author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. 
Liu},
	title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
	journal = {Journal of Machine Learning Research},
	year = {2020},
	volume = {21},
	number = {140},
	pages = {1-67},
	url = {http://jmlr.org/papers/v21/20-074.html}
}
```

**APA:**
- Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., ... & Liu, P. J. (2020). Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140), 1-67.

# Model Card Authors

This model card was written by the team at Hugging Face.

# How to Get Started with the Model

## Disclaimer

**Before `transformers` v3.5.0**, due to its immense size, `t5-11b` required some special treatment. If you're using transformers `<= v3.4.0`, `t5-11b` should be loaded with flag `use_cdn` set to `False` as follows:

```python
t5 = transformers.T5ForConditionalGeneration.from_pretrained('t5-11b', use_cdn = False)
```

Secondly, a single GPU will most likely not have enough memory to even load the model into memory as the weights alone amount to over 40 GB.
- Model parallelism has to be used here to overcome this problem as is explained in this [PR](https://github.com/huggingface/transformers/pull/3578).
- DeepSpeed's ZeRO-Offload is another approach as explained in this [post](https://github.com/huggingface/transformers/issues/9996).

See the [Hugging Face T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Model) docs and a [Colab Notebook](https://colab.research.google.com/github/google-research/text-to-text-transfer-transformer/blob/main/notebooks/t5-trivia.ipynb) created by the model developers for more context.
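For newer versions of `transformers`, the manual model-parallelism tricks above can usually be replaced by automatic device placement. The following is an illustrative sketch rather than an official recipe; it assumes `accelerate` and `sentencepiece` are installed so that `device_map="auto"` can shard the roughly 40 GB of weights across the available GPUs and CPU memory:

```python
from transformers import T5Tokenizer, T5ForConditionalGeneration

tokenizer = T5Tokenizer.from_pretrained("t5-11b")
# device_map="auto" (via the accelerate library) splits the checkpoint across
# whatever GPUs/CPU RAM are available instead of loading it onto a single device.
model = T5ForConditionalGeneration.from_pretrained(
    "t5-11b",
    device_map="auto",
    torch_dtype="auto",
)

# T5 is prompted with task prefixes; here we ask for an English-to-German translation.
inputs = tokenizer(
    "translate English to German: The house is wonderful.",
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```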
null
Non_BioNLP
# Model Card for T5 11B ![model image](https://camo.githubusercontent.com/623b4dea0b653f2ad3f36c71ebfe749a677ac0a1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f343030362f312a44304a31674e51663876727255704b657944387750412e706e67) # Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 4. [Training Details](#training-details) 5. [Evaluation](#evaluation) 6. [Environmental Impact](#environmental-impact) 7. [Citation](#citation) 8. [Model Card Authors](#model-card-authors) 9. [How To Get Started With the Model](#how-to-get-started-with-the-model) # Model Details ## Model Description The developers of the Text-To-Text Transfer Transformer (T5) [write](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html): > With T5, we propose reframing all NLP tasks into a unified text-to-text-format where the input and output are always text strings, in contrast to BERT-style models that can only output either a class label or a span of the input. Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task. T5-11B is the checkpoint with 11 billion parameters. - **Developed by:** Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. See [associated paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) and [GitHub repo](https://github.com/google-research/text-to-text-transfer-transformer#released-model-checkpoints) - **Model type:** Language model - **Language(s) (NLP):** English, French, Romanian, German - **License:** Apache 2.0 - **Related Models:** [All T5 Checkpoints](https://huggingface.co/models?search=t5) - **Resources for more information:** - [Research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) - [Google's T5 Blog Post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) - [GitHub Repo](https://github.com/google-research/text-to-text-transfer-transformer) - [Hugging Face T5 Docs](https://huggingface.co/docs/transformers/model_doc/t5) # Uses ## Direct Use and Downstream Use The developers write in a [blog post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) that the model: > Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task, including machine translation, document summarization, question answering, and classification tasks (e.g., sentiment analysis). We can even apply T5 to regression tasks by training it to predict the string representation of a number instead of the number itself. See the [blog post](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) and [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for further details. ## Out-of-Scope Use More information needed. # Bias, Risks, and Limitations More information needed. ## Recommendations More information needed. # Training Details ## Training Data The model is pre-trained on the [Colossal Clean Crawled Corpus (C4)](https://www.tensorflow.org/datasets/catalog/c4), which was developed and released in the context of the same [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) as T5. The model was pre-trained on a on a **multi-task mixture of unsupervised (1.) and supervised tasks (2.)**. Thereby, the following datasets were being used for (1.) and (2.): 1. 
**Datasets used for Unsupervised denoising objective**: - [C4](https://huggingface.co/datasets/c4) - [Wiki-DPR](https://huggingface.co/datasets/wiki_dpr) 2. **Datasets used for Supervised text-to-text language modeling objective** - Sentence acceptability judgment - CoLA [Warstadt et al., 2018](https://arxiv.org/abs/1805.12471) - Sentiment analysis - SST-2 [Socher et al., 2013](https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf) - Paraphrasing/sentence similarity - MRPC [Dolan and Brockett, 2005](https://aclanthology.org/I05-5002) - STS-B [Ceret al., 2017](https://arxiv.org/abs/1708.00055) - QQP [Iyer et al., 2017](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) - Natural language inference - MNLI [Williams et al., 2017](https://arxiv.org/abs/1704.05426) - QNLI [Rajpurkar et al.,2016](https://arxiv.org/abs/1606.05250) - RTE [Dagan et al., 2005](https://link.springer.com/chapter/10.1007/11736790_9) - CB [De Marneff et al., 2019](https://semanticsarchive.net/Archive/Tg3ZGI2M/Marneffe.pdf) - Sentence completion - COPA [Roemmele et al., 2011](https://www.researchgate.net/publication/221251392_Choice_of_Plausible_Alternatives_An_Evaluation_of_Commonsense_Causal_Reasoning) - Word sense disambiguation - WIC [Pilehvar and Camacho-Collados, 2018](https://arxiv.org/abs/1808.09121) - Question answering - MultiRC [Khashabi et al., 2018](https://aclanthology.org/N18-1023) - ReCoRD [Zhang et al., 2018](https://arxiv.org/abs/1810.12885) - BoolQ [Clark et al., 2019](https://arxiv.org/abs/1905.10044) ## Training Procedure In their [abstract](https://jmlr.org/papers/volume21/20-074/20-074.pdf), the model developers write: > In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. The framework introduced, the T5 framework, involves a training procedure that brings together the approaches studied in the paper. See the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for further details. # Evaluation ## Testing Data, Factors & Metrics The developers evaluated the model on 24 tasks, see the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf) for full details. ## Results For full results for T5-11B, see the [research paper](https://jmlr.org/papers/volume21/20-074/20-074.pdf), Table 14. # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** Google Cloud TPU Pods - **Hours used:** More information needed - **Cloud Provider:** GCP - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Citation **BibTeX:** ```bibtex @article{2020t5, author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu}, title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer}, journal = {Journal of Machine Learning Research}, year = {2020}, volume = {21}, number = {140}, pages = {1-67}, url = {http://jmlr.org/papers/v21/20-074.html} } ``` **APA:** - Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., ... & Liu, P. J. (2020). 
Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140), 1-67.

# Model Card Authors

This model card was written by the team at Hugging Face.

# How to Get Started with the Model

## Disclaimer

**Before `transformers` v3.5.0**, due to its immense size, `t5-11b` required some special treatment. If you're using transformers `<= v3.4.0`, `t5-11b` should be loaded with flag `use_cdn` set to `False` as follows:

```python
t5 = transformers.T5ForConditionalGeneration.from_pretrained('t5-11b', use_cdn = False)
```

Secondly, a single GPU will most likely not have enough memory to even load the model into memory as the weights alone amount to over 40 GB.
- Model parallelism has to be used here to overcome this problem as is explained in this [PR](https://github.com/huggingface/transformers/pull/3578).
- DeepSpeed's ZeRO-Offload is another approach as explained in this [post](https://github.com/huggingface/transformers/issues/9996).

See the [Hugging Face T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Model) docs and a [Colab Notebook](https://colab.research.google.com/github/google-research/text-to-text-transfer-transformer/blob/main/notebooks/t5-trivia.ipynb) created by the model developers for more context.
{"datasets": ["c4"], "language": ["en", "fr", "ro", "de", "multilingual"], "license": "apache-2.0", "tags": ["summarization", "translation"], "inference": false}
task
[ "QUESTION_ANSWERING", "TRANSLATION", "SUMMARIZATION", "PARAPHRASING" ]
40,260
fine-tuned/before-finetuning-512-192-gpt-4o-2024-05-13-83930416
fine-tuned
feature-extraction
[ "sentence-transformers", "safetensors", "bert", "feature-extraction", "sentence-similarity", "mteb", "en", "dataset:fine-tuned/before-finetuning-512-192-gpt-4o-2024-05-13-83930416", "dataset:allenai/c4", "license:apache-2.0", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-05-28T18:53:06Z
2024-05-28T18:53:40+00:00
8
0
--- datasets: - fine-tuned/before-finetuning-512-192-gpt-4o-2024-05-13-83930416 - allenai/c4 language: - en - en license: apache-2.0 pipeline_tag: feature-extraction tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb --- This model is a fine-tuned version of [**BAAI/bge-large-en-v1.5**](https://huggingface.co/BAAI/bge-large-en-v1.5) designed for the following use case: None ## How to Use This model can be easily integrated into your NLP pipeline for tasks such as text classification, sentiment analysis, entity recognition, and more. Here's a simple example to get you started: ```python from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim model = SentenceTransformer( 'fine-tuned/before-finetuning-512-192-gpt-4o-2024-05-13-83930416', trust_remote_code=True ) embeddings = model.encode([ 'first text to embed', 'second text to embed' ]) print(cos_sim(embeddings[0], embeddings[1])) ```
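Beyond the pairwise similarity shown above, the same embeddings can drive a simple retrieval step. The snippet below is only a sketch with made-up query and corpus strings; it relies on `sentence_transformers.util.semantic_search` for ranking:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer(
    'fine-tuned/before-finetuning-512-192-gpt-4o-2024-05-13-83930416',
    trust_remote_code=True
)

corpus = [
    'The invoice is due at the end of the month.',
    'Our support team is available around the clock.',
    'The quarterly report shows strong revenue growth.',
]
query = 'When do I have to pay the bill?'

# Embed corpus and query, then rank corpus entries by cosine similarity.
corpus_embeddings = model.encode(corpus, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)

hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=2)[0]
for hit in hits:
    print(corpus[hit['corpus_id']], round(hit['score'], 3))
```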
null
Non_BioNLP
This model is a fine-tuned version of [**BAAI/bge-large-en-v1.5**](https://huggingface.co/BAAI/bge-large-en-v1.5) designed for the following use case: None ## How to Use This model can be easily integrated into your NLP pipeline for tasks such as text classification, sentiment analysis, entity recognition, and more. Here's a simple example to get you started: ```python from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim model = SentenceTransformer( 'fine-tuned/before-finetuning-512-192-gpt-4o-2024-05-13-83930416', trust_remote_code=True ) embeddings = model.encode([ 'first text to embed', 'second text to embed' ]) print(cos_sim(embeddings[0], embeddings[1])) ```
{"datasets": ["fine-tuned/before-finetuning-512-192-gpt-4o-2024-05-13-83930416", "allenai/c4"], "language": ["en", "en"], "license": "apache-2.0", "pipeline_tag": "feature-extraction", "tags": ["sentence-transformers", "feature-extraction", "sentence-similarity", "mteb"]}
task
[ "TEXT_CLASSIFICATION" ]
40,261
rambodazimi/roberta-base-finetuned-FFT-WNLI
rambodazimi
null
[ "safetensors", "roberta", "generated_from_trainer", "dataset:glue", "license:apache-2.0", "model-index", "region:us" ]
2024-08-18T03:24:14Z
2024-08-18T03:26:26+00:00
4
0
--- datasets: - glue license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: roberta-base-finetuned-FFT-WNLI results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue args: wnli metrics: - type: accuracy value: 0.4507042253521127 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-finetuned-fft-wnli This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the glue dataset. It achieves the following results on the evaluation set: - Accuracy: 0.4507 ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - weight_decay: 0.01 - num_epochs: 2
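## How to use

The card does not include an inference example; a minimal sketch using the standard `transformers` sequence-classification API is shown below. The sentence pair is illustrative, and the predicted index should be interpreted via the checkpoint's `id2label` mapping (the auto-generated config may only expose generic `LABEL_0`/`LABEL_1` names):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "rambodazimi/roberta-base-finetuned-FFT-WNLI"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# WNLI is a sentence-pair task, so both sentences are encoded together.
sentence1 = "The trophy doesn't fit into the suitcase because it is too large."
sentence2 = "The trophy is too large."
inputs = tokenizer(sentence1, sentence2, return_tensors="pt", truncation=True)

with torch.no_grad():
    logits = model(**inputs).logits

predicted = logits.argmax(dim=-1).item()
print(predicted, model.config.id2label.get(predicted, predicted))
```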
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-finetuned-fft-wnli This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the glue dataset. It achieves the following results on the evaluation set: - Accuracy: 0.4507 ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - weight_decay: 0.01 - num_epochs: 2
{"datasets": ["glue"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "roberta-base-finetuned-FFT-WNLI", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "args": "wnli"}, "metrics": [{"type": "accuracy", "value": 0.4507042253521127, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,263
Helsinki-NLP/opus-mt-tc-big-zls-de
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "safetensors", "marian", "text2text-generation", "translation", "opus-mt-tc", "bg", "de", "hr", "mk", "sh", "sl", "sr", "license:cc-by-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-08-12T14:51:09Z
2023-10-10T10:57:17+00:00
106
0
--- language: - bg - de - hr - mk - sh - sl - sr license: cc-by-4.0 tags: - translation - opus-mt-tc language_bcp47: - sr_Cyrl - sr_Latn model-index: - name: opus-mt-tc-big-zls-de results: - task: type: translation name: Translation bul-deu dataset: name: flores101-devtest type: flores_101 args: bul deu devtest metrics: - type: bleu value: 28.4 name: BLEU - type: chrf value: 0.57688 name: chr-F - type: bleu value: 27.4 name: BLEU - type: chrf value: 0.56674 name: chr-F - type: bleu value: 29.3 name: BLEU - type: chrf value: 0.57688 name: chr-F - type: bleu value: 26.7 name: BLEU - type: chrf value: 0.56258 name: chr-F - type: bleu value: 30.7 name: BLEU - type: chrf value: 0.59271 name: chr-F - task: type: translation name: Translation bul-deu dataset: name: tatoeba-test-v2021-08-07 type: tatoeba_mt args: bul-deu metrics: - type: bleu value: 54.5 name: BLEU - type: chrf value: 0.7122 name: chr-F - type: bleu value: 54.8 name: BLEU - type: chrf value: 0.71283 name: chr-F - type: bleu value: 53.1 name: BLEU - type: chrf value: 0.69448 name: chr-F - type: bleu value: 21.1 name: BLEU - type: chrf value: 0.36339 name: chr-F - type: bleu value: 56.0 name: BLEU - type: chrf value: 0.72489 name: chr-F --- # opus-mt-tc-big-zls-de ## Table of Contents - [Model Details](#model-details) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [How to Get Started With the Model](#how-to-get-started-with-the-model) - [Training](#training) - [Evaluation](#evaluation) - [Citation Information](#citation-information) - [Acknowledgements](#acknowledgements) ## Model Details Neural machine translation model for translating from South Slavic languages (zls) to German (de). This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to pyTorch using the transformers library by huggingface. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train). **Model Description:** - **Developed by:** Language Technology Research Group at the University of Helsinki - **Model Type:** Translation (transformer-big) - **Release**: 2022-07-26 - **License:** CC-BY-4.0 - **Language(s):** - Source Language(s): bos_Latn bul hbs hrv mkd slv srp_Cyrl srp_Latn - Target Language(s): deu - Language Pair(s): bul-deu hbs-deu hrv-deu mkd-deu slv-deu srp_Cyrl-deu srp_Latn-deu - Valid Target Language Labels: - **Original Model**: [opusTCv20210807_transformer-big_2022-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/zls-deu/opusTCv20210807_transformer-big_2022-07-26.zip) - **Resources for more information:** - [OPUS-MT-train GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train) - More information about released models for this language pair: [OPUS-MT zls-deu README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/zls-deu/README.md) - [More information about MarianNMT models in the transformers library](https://huggingface.co/docs/transformers/model_doc/marian) - [Tatoeba Translation Challenge](https://github.com/Helsinki-NLP/Tatoeba-Challenge/ ## Uses This model can be used for translation and text-to-text generation. 
## Risks, Limitations and Biases **CONTENT WARNING: Readers should be aware that the model is trained on various public data sets that may contain content that is disturbing, offensive, and can propagate historical and current stereotypes.** Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). ## How to Get Started With the Model A short example code: ```python from transformers import MarianMTModel, MarianTokenizer src_text = [ "Jesi li ti student?", "Dve stvari deca treba da dobiju od svojih roditelja: korene i krila." ] model_name = "pytorch-models/opus-mt-tc-big-zls-de" tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) for t in translated: print( tokenizer.decode(t, skip_special_tokens=True) ) # expected output: # Sind Sie Student? # Zwei Dinge sollten Kinder von ihren Eltern bekommen: Wurzeln und Flügel. ``` You can also use OPUS-MT models with the transformers pipelines, for example: ```python from transformers import pipeline pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-zls-de") print(pipe("Jesi li ti student?")) # expected output: Sind Sie Student? ``` ## Training - **Data**: opusTCv20210807 ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge)) - **Pre-processing**: SentencePiece (spm32k,spm32k) - **Model Type:** transformer-big - **Original MarianNMT Model**: [opusTCv20210807_transformer-big_2022-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/zls-deu/opusTCv20210807_transformer-big_2022-07-26.zip) - **Training Scripts**: [GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train) ## Evaluation * test set translations: [opusTCv20210807_transformer-big_2022-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/zls-deu/opusTCv20210807_transformer-big_2022-07-26.test.txt) * test set scores: [opusTCv20210807_transformer-big_2022-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/zls-deu/opusTCv20210807_transformer-big_2022-07-26.eval.txt) * benchmark results: [benchmark_results.txt](benchmark_results.txt) * benchmark output: [benchmark_translations.zip](benchmark_translations.zip) | langpair | testset | chr-F | BLEU | #sent | #words | |----------|---------|-------|-------|-------|--------| | bul-deu | tatoeba-test-v2021-08-07 | 0.71220 | 54.5 | 314 | 2224 | | hbs-deu | tatoeba-test-v2021-08-07 | 0.71283 | 54.8 | 1959 | 15559 | | hrv-deu | tatoeba-test-v2021-08-07 | 0.69448 | 53.1 | 782 | 5734 | | slv-deu | tatoeba-test-v2021-08-07 | 0.36339 | 21.1 | 492 | 3003 | | srp_Latn-deu | tatoeba-test-v2021-08-07 | 0.72489 | 56.0 | 986 | 8500 | | bul-deu | flores101-devtest | 0.57688 | 28.4 | 1012 | 25094 | | hrv-deu | flores101-devtest | 0.56674 | 27.4 | 1012 | 25094 | | mkd-deu | flores101-devtest | 0.57688 | 29.3 | 1012 | 25094 | | slv-deu | flores101-devtest | 0.56258 | 26.7 | 1012 | 25094 | | srp_Cyrl-deu | flores101-devtest | 0.59271 | 30.7 | 1012 | 25094 | ## Citation Information * Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please, cite if you use this model.) 
``` @inproceedings{tiedemann-thottingal-2020-opus, title = "{OPUS}-{MT} {--} Building open translation services for the World", author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh}, booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", month = nov, year = "2020", address = "Lisboa, Portugal", publisher = "European Association for Machine Translation", url = "https://aclanthology.org/2020.eamt-1.61", pages = "479--480", } @inproceedings{tiedemann-2020-tatoeba, title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}", author = {Tiedemann, J{\"o}rg}, booktitle = "Proceedings of the Fifth Conference on Machine Translation", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2020.wmt-1.139", pages = "1174--1182", } ``` ## Acknowledgements The work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union’s Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland. ## Model conversion info * transformers version: 4.16.2 * OPUS-MT git hash: 8b9f0b0 * port time: Sat Aug 13 00:05:30 EEST 2022 * port machine: LM0-400-22516.local
null
Non_BioNLP
# opus-mt-tc-big-zls-de ## Table of Contents - [Model Details](#model-details) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [How to Get Started With the Model](#how-to-get-started-with-the-model) - [Training](#training) - [Evaluation](#evaluation) - [Citation Information](#citation-information) - [Acknowledgements](#acknowledgements) ## Model Details Neural machine translation model for translating from South Slavic languages (zls) to German (de). This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to pyTorch using the transformers library by huggingface. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train). **Model Description:** - **Developed by:** Language Technology Research Group at the University of Helsinki - **Model Type:** Translation (transformer-big) - **Release**: 2022-07-26 - **License:** CC-BY-4.0 - **Language(s):** - Source Language(s): bos_Latn bul hbs hrv mkd slv srp_Cyrl srp_Latn - Target Language(s): deu - Language Pair(s): bul-deu hbs-deu hrv-deu mkd-deu slv-deu srp_Cyrl-deu srp_Latn-deu - Valid Target Language Labels: - **Original Model**: [opusTCv20210807_transformer-big_2022-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/zls-deu/opusTCv20210807_transformer-big_2022-07-26.zip) - **Resources for more information:** - [OPUS-MT-train GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train) - More information about released models for this language pair: [OPUS-MT zls-deu README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/zls-deu/README.md) - [More information about MarianNMT models in the transformers library](https://huggingface.co/docs/transformers/model_doc/marian) - [Tatoeba Translation Challenge](https://github.com/Helsinki-NLP/Tatoeba-Challenge/ ## Uses This model can be used for translation and text-to-text generation. ## Risks, Limitations and Biases **CONTENT WARNING: Readers should be aware that the model is trained on various public data sets that may contain content that is disturbing, offensive, and can propagate historical and current stereotypes.** Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). ## How to Get Started With the Model A short example code: ```python from transformers import MarianMTModel, MarianTokenizer src_text = [ "Jesi li ti student?", "Dve stvari deca treba da dobiju od svojih roditelja: korene i krila." ] model_name = "pytorch-models/opus-mt-tc-big-zls-de" tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) for t in translated: print( tokenizer.decode(t, skip_special_tokens=True) ) # expected output: # Sind Sie Student? # Zwei Dinge sollten Kinder von ihren Eltern bekommen: Wurzeln und Flügel. 
``` You can also use OPUS-MT models with the transformers pipelines, for example: ```python from transformers import pipeline pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-zls-de") print(pipe("Jesi li ti student?")) # expected output: Sind Sie Student? ``` ## Training - **Data**: opusTCv20210807 ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge)) - **Pre-processing**: SentencePiece (spm32k,spm32k) - **Model Type:** transformer-big - **Original MarianNMT Model**: [opusTCv20210807_transformer-big_2022-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/zls-deu/opusTCv20210807_transformer-big_2022-07-26.zip) - **Training Scripts**: [GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train) ## Evaluation * test set translations: [opusTCv20210807_transformer-big_2022-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/zls-deu/opusTCv20210807_transformer-big_2022-07-26.test.txt) * test set scores: [opusTCv20210807_transformer-big_2022-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/zls-deu/opusTCv20210807_transformer-big_2022-07-26.eval.txt) * benchmark results: [benchmark_results.txt](benchmark_results.txt) * benchmark output: [benchmark_translations.zip](benchmark_translations.zip) | langpair | testset | chr-F | BLEU | #sent | #words | |----------|---------|-------|-------|-------|--------| | bul-deu | tatoeba-test-v2021-08-07 | 0.71220 | 54.5 | 314 | 2224 | | hbs-deu | tatoeba-test-v2021-08-07 | 0.71283 | 54.8 | 1959 | 15559 | | hrv-deu | tatoeba-test-v2021-08-07 | 0.69448 | 53.1 | 782 | 5734 | | slv-deu | tatoeba-test-v2021-08-07 | 0.36339 | 21.1 | 492 | 3003 | | srp_Latn-deu | tatoeba-test-v2021-08-07 | 0.72489 | 56.0 | 986 | 8500 | | bul-deu | flores101-devtest | 0.57688 | 28.4 | 1012 | 25094 | | hrv-deu | flores101-devtest | 0.56674 | 27.4 | 1012 | 25094 | | mkd-deu | flores101-devtest | 0.57688 | 29.3 | 1012 | 25094 | | slv-deu | flores101-devtest | 0.56258 | 26.7 | 1012 | 25094 | | srp_Cyrl-deu | flores101-devtest | 0.59271 | 30.7 | 1012 | 25094 | ## Citation Information * Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please, cite if you use this model.) 
``` @inproceedings{tiedemann-thottingal-2020-opus, title = "{OPUS}-{MT} {--} Building open translation services for the World", author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh}, booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", month = nov, year = "2020", address = "Lisboa, Portugal", publisher = "European Association for Machine Translation", url = "https://aclanthology.org/2020.eamt-1.61", pages = "479--480", } @inproceedings{tiedemann-2020-tatoeba, title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}", author = {Tiedemann, J{\"o}rg}, booktitle = "Proceedings of the Fifth Conference on Machine Translation", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2020.wmt-1.139", pages = "1174--1182", } ``` ## Acknowledgements The work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union’s Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland. ## Model conversion info * transformers version: 4.16.2 * OPUS-MT git hash: 8b9f0b0 * port time: Sat Aug 13 00:05:30 EEST 2022 * port machine: LM0-400-22516.local
{"language": ["bg", "de", "hr", "mk", "sh", "sl", "sr"], "license": "cc-by-4.0", "tags": ["translation", "opus-mt-tc"], "language_bcp47": ["sr_Cyrl", "sr_Latn"], "model-index": [{"name": "opus-mt-tc-big-zls-de", "results": [{"task": {"type": "translation", "name": "Translation bul-deu"}, "dataset": {"name": "flores101-devtest", "type": "flores_101", "args": "bul deu devtest"}, "metrics": [{"type": "bleu", "value": 28.4, "name": "BLEU"}, {"type": "chrf", "value": 0.57688, "name": "chr-F"}, {"type": "bleu", "value": 27.4, "name": "BLEU"}, {"type": "chrf", "value": 0.56674, "name": "chr-F"}, {"type": "bleu", "value": 29.3, "name": "BLEU"}, {"type": "chrf", "value": 0.57688, "name": "chr-F"}, {"type": "bleu", "value": 26.7, "name": "BLEU"}, {"type": "chrf", "value": 0.56258, "name": "chr-F"}, {"type": "bleu", "value": 30.7, "name": "BLEU"}, {"type": "chrf", "value": 0.59271, "name": "chr-F"}]}, {"task": {"type": "translation", "name": "Translation bul-deu"}, "dataset": {"name": "tatoeba-test-v2021-08-07", "type": "tatoeba_mt", "args": "bul-deu"}, "metrics": [{"type": "bleu", "value": 54.5, "name": "BLEU"}, {"type": "chrf", "value": 0.7122, "name": "chr-F"}, {"type": "bleu", "value": 54.8, "name": "BLEU"}, {"type": "chrf", "value": 0.71283, "name": "chr-F"}, {"type": "bleu", "value": 53.1, "name": "BLEU"}, {"type": "chrf", "value": 0.69448, "name": "chr-F"}, {"type": "bleu", "value": 21.1, "name": "BLEU"}, {"type": "chrf", "value": 0.36339, "name": "chr-F"}, {"type": "bleu", "value": 56.0, "name": "BLEU"}, {"type": "chrf", "value": 0.72489, "name": "chr-F"}]}]}]}
task
[ "TRANSLATION" ]
40,264
gokuls/hBERTv2_new_pretrain_48_KD_qnli
gokuls
text-classification
[ "transformers", "pytorch", "tensorboard", "hybridbert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-14T17:24:50Z
2023-06-14T19:29:28+00:00
10
0
--- datasets: - glue language: - en metrics: - accuracy tags: - generated_from_trainer model-index: - name: hBERTv2_new_pretrain_48_KD_qnli results: - task: type: text-classification name: Text Classification dataset: name: GLUE QNLI type: glue config: qnli split: validation args: qnli metrics: - type: accuracy value: 0.6675819146988834 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv2_new_pretrain_48_KD_qnli This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2_complete_training_new_48_KD](https://huggingface.co/gokuls/bert_12_layer_model_v2_complete_training_new_48_KD) on the GLUE QNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.6195 - Accuracy: 0.6676 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6749 | 1.0 | 819 | 0.6686 | 0.5772 | | 0.6495 | 2.0 | 1638 | 0.6449 | 0.6224 | | 0.6018 | 3.0 | 2457 | 0.6230 | 0.6586 | | 0.5325 | 4.0 | 3276 | 0.6508 | 0.6690 | | 0.4632 | 5.0 | 4095 | 0.6195 | 0.6676 | | 0.3951 | 6.0 | 4914 | 0.6860 | 0.6733 | | 0.3376 | 7.0 | 5733 | 0.7480 | 0.6787 | | 0.2891 | 8.0 | 6552 | 0.9469 | 0.6548 | | 0.2489 | 9.0 | 7371 | 0.8288 | 0.6736 | | 0.2173 | 10.0 | 8190 | 0.9671 | 0.6736 | ### Framework versions - Transformers 4.30.2 - Pytorch 1.14.0a0+410ce96 - Datasets 2.12.0 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hBERTv2_new_pretrain_48_KD_qnli This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2_complete_training_new_48_KD](https://huggingface.co/gokuls/bert_12_layer_model_v2_complete_training_new_48_KD) on the GLUE QNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.6195 - Accuracy: 0.6676 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6749 | 1.0 | 819 | 0.6686 | 0.5772 | | 0.6495 | 2.0 | 1638 | 0.6449 | 0.6224 | | 0.6018 | 3.0 | 2457 | 0.6230 | 0.6586 | | 0.5325 | 4.0 | 3276 | 0.6508 | 0.6690 | | 0.4632 | 5.0 | 4095 | 0.6195 | 0.6676 | | 0.3951 | 6.0 | 4914 | 0.6860 | 0.6733 | | 0.3376 | 7.0 | 5733 | 0.7480 | 0.6787 | | 0.2891 | 8.0 | 6552 | 0.9469 | 0.6548 | | 0.2489 | 9.0 | 7371 | 0.8288 | 0.6736 | | 0.2173 | 10.0 | 8190 | 0.9671 | 0.6736 | ### Framework versions - Transformers 4.30.2 - Pytorch 1.14.0a0+410ce96 - Datasets 2.12.0 - Tokenizers 0.13.3
{"datasets": ["glue"], "language": ["en"], "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "hBERTv2_new_pretrain_48_KD_qnli", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE QNLI", "type": "glue", "config": "qnli", "split": "validation", "args": "qnli"}, "metrics": [{"type": "accuracy", "value": 0.6675819146988834, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,265
Alireza1044/albert-base-v2-stsb
Alireza1044
text-classification
[ "transformers", "pytorch", "tensorboard", "albert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2021-07-26T10:57:27+00:00
17
0
--- datasets: - glue language: - en license: apache-2.0 metrics: - spearmanr tags: - generated_from_trainer model_index: - name: stsb results: - task: name: Text Classification type: text-classification dataset: name: GLUE STSB type: glue args: stsb metric: name: Spearmanr type: spearmanr value: 0.9050744778895732 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # stsb This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on the GLUE STSB dataset. It achieves the following results on the evaluation set: - Loss: 0.3978 - Pearson: 0.9090 - Spearmanr: 0.9051 - Combined Score: 0.9071 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4.0 ### Training results ### Framework versions - Transformers 4.9.0 - Pytorch 1.9.0+cu102 - Datasets 1.10.2 - Tokenizers 0.10.3
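## How to use

A minimal usage sketch, not part of the original card: it assumes the checkpoint keeps the single-logit regression head produced by STS-B fine-tuning, so the model emits one similarity score (roughly on the 0-5 GLUE scale) per sentence pair. The example sentences are made up:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "Alireza1044/albert-base-v2-stsb"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# STS-B is a regression task: encode the two sentences together and read one score.
inputs = tokenizer(
    "A man is playing a guitar.",
    "A person is playing a stringed instrument.",
    return_tensors="pt",
    truncation=True,
)

with torch.no_grad():
    score = model(**inputs).logits.squeeze().item()

print(f"similarity score: {score:.2f}")
```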
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # stsb This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on the GLUE STSB dataset. It achieves the following results on the evaluation set: - Loss: 0.3978 - Pearson: 0.9090 - Spearmanr: 0.9051 - Combined Score: 0.9071 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4.0 ### Training results ### Framework versions - Transformers 4.9.0 - Pytorch 1.9.0+cu102 - Datasets 1.10.2 - Tokenizers 0.10.3
{"datasets": ["glue"], "language": ["en"], "license": "apache-2.0", "metrics": ["spearmanr"], "tags": ["generated_from_trainer"], "model_index": [{"name": "stsb", "results": [{"task": {"name": "Text Classification", "type": "text-classification"}, "dataset": {"name": "GLUE STSB", "type": "glue", "args": "stsb"}, "metric": {"name": "Spearmanr", "type": "spearmanr", "value": 0.9050744778895732}}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,266
Babelscape/t5-base-summarization-claim-extractor
Babelscape
text2text-generation
[ "transformers", "safetensors", "t5", "text2text-generation", "en", "arxiv:2403.02270", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-06-27T13:39:22Z
2025-02-28T08:48:45+00:00
192,763
8
--- language: - en library_name: transformers license: - cc-by-nc-sa-4.0 widget: - text: A major tech company has unveiled its first fully autonomous electric vehicle, boasting a range of 500 miles per charge and advanced safety features designed to revolutionize the transportation industry. - text: A new global initiative to clean up ocean plastic aims to remove 50% of floating debris within a decade, using innovative autonomous vessels powered by renewable energy. - text: A historic peace agreement was signed between two long-standing rival nations, marking a turning point in diplomatic relations and promising economic and social cooperation for years to come. --- # Model Card: T5-base-summarization-claim-extractor ## Model Description **Model Name:** T5-base-summarization-claim-extractor **Authors:** Alessandro Scirè, Karim Ghonim, and Roberto Navigli **Contact:** [email protected], [email protected] **Language:** English **Primary Use:** Extraction of atomic claims from a summary ### Overview The T5-base-summarization-claim-extractor is a model developed for the task of extracting atomic claims from summaries. The model is based on the T5 architecture which is then fine-tuned specifically for claim extraction. This model was introduced as part of the research presented in the paper ["FENICE: Factuality Evaluation of summarization based on Natural Language Inference and Claim Extraction" by Alessandro Scirè, Karim Ghonim, and Roberto Navigli.](https://aclanthology.org/2024.findings-acl.841.pdf) FENICE leverages Natural Language Inference (NLI) and Claim Extraction to evaluate the factuality of summaries. [ArXiv version](https://arxiv.org/abs/2403.02270). ### Intended Use This model is designed to: - Extract atomic claims from summaries. - Serve as a component in pipelines for factuality evaluation of summaries. ## Example Code ```python from transformers import T5ForConditionalGeneration, T5Tokenizer tokenizer = T5Tokenizer.from_pretrained("Babelscape/t5-base-summarization-claim-extractor") model = T5ForConditionalGeneration.from_pretrained("Babelscape/t5-base-summarization-claim-extractor") summary = 'Simone Biles made a triumphant return to the Olympic stage at the Paris 2024 Games, competing in the women’s gymnastics qualifications. Overcoming a previous struggle with the “twisties” that led to her withdrawal from events at the Tokyo 2020 Olympics, Biles dazzled with strong performances on all apparatus, helping the U.S. team secure a commanding lead in the qualifications. Her routines showcased her resilience and skill, drawing enthusiastic support from a star-studded audience' tok_input = tokenizer.batch_encode_plus([summary], return_tensors="pt", padding=True) claims = model.generate(**tok_input) claims = tokenizer.batch_decode(claims, skip_special_tokens=True) ``` **Note**: The model outputs the claims in a single string. **Kindly remember to split the string into sentences** in order to retrieve the singular claims. ### Training For details regarding the training process, please checkout our paper(https://aclanthology.org/2024.findings-acl.841.pdf) (section 4.1). 
### Performance | <div style="width: 250px">Model</div> | easiness<sub>P</sub> | easiness<sub>R</sub> | easiness<sub>F1</sub> | |:-------------------------------------:|:--------------------:|:--------------------:|:---------------------:| | GPT-3.5 | 80.1 | 70.9 | 74.9 | | t5-base-summarization-claim-extractor | 79.2 | 68.8 | 73.4 | **Table 1:** Easiness Precision (easiness<sub>P</sub>), Recall (easiness<sub>R</sub>), and F1 score (easiness<sub>F1</sub>) results for the LLM-based claim extractor, namely **GPT-3.5**, and **t5-base-summarization-claim-extractor**, assessed on [ROSE (Liu et al., 2023b)](https://aclanthology.org/2023.acl-long.228/). Further details on the model's performance and the metrics used can be found in the [paper](https://aclanthology.org/2024.findings-acl.841.pdf) (section 4.1). ### Main Repository For more details about FENICE, check out the GitHub repository: [Babelscape/FENICE](https://github.com/Babelscape/FENICE) ### Citation If you use this model in your work, please cite the following paper: ```bibtex @inproceedings{scire-etal-2024-fenice, title = "{FENICE}: Factuality Evaluation of summarization based on Natural language Inference and Claim Extraction", author = "Scir{\`e}, Alessandro and Ghonim, Karim and Navigli, Roberto", editor = "Ku, Lun-Wei and Martins, Andre and Srikumar, Vivek", booktitle = "Findings of the Association for Computational Linguistics ACL 2024", month = aug, year = "2024", address = "Bangkok, Thailand and virtual meeting", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2024.findings-acl.841", pages = "14148--14161", } ``` ### Limitations - The model is specifically designed for extracting claims from summaries and may not perform well on other types of texts. - The model is currently available only in English and may not generalize well to other languages. ### Ethical Considerations Users should be aware that while this model extracts claims that can be evaluated for factuality, it does not determine the truthfulness of those claims. Therefore, it should be used in conjunction with other tools or human judgment when evaluating the reliability of summaries. ### Acknowledgments This work was made possible thanks to the support of Babelscape and Sapienza NLP.
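As noted in the example code above, the generated claims come back as a single string. A minimal, illustrative way to split that string into individual claims (a naive sentence-boundary regex, not the authors' exact procedure; a dedicated sentence splitter such as nltk or spaCy would be more robust) is:

```python
import re

# Example output string; in practice this would come from tokenizer.batch_decode(...).
claims_string = (
    "Simone Biles returned to the Olympic stage at the Paris 2024 Games. "
    "She competed in the women's gymnastics qualifications. "
    "The U.S. team secured a commanding lead in the qualifications."
)

# Split on whitespace that follows sentence-final punctuation.
claims = [c.strip() for c in re.split(r"(?<=[.!?])\s+", claims_string) if c.strip()]
for claim in claims:
    print(claim)
```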
null
Non_BioNLP
# Model Card: T5-base-summarization-claim-extractor ## Model Description **Model Name:** T5-base-summarization-claim-extractor **Authors:** Alessandro Scirè, Karim Ghonim, and Roberto Navigli **Contact:** [email protected], [email protected] **Language:** English **Primary Use:** Extraction of atomic claims from a summary ### Overview The T5-base-summarization-claim-extractor is a model developed for the task of extracting atomic claims from summaries. The model is based on the T5 architecture which is then fine-tuned specifically for claim extraction. This model was introduced as part of the research presented in the paper ["FENICE: Factuality Evaluation of summarization based on Natural Language Inference and Claim Extraction" by Alessandro Scirè, Karim Ghonim, and Roberto Navigli.](https://aclanthology.org/2024.findings-acl.841.pdf) FENICE leverages Natural Language Inference (NLI) and Claim Extraction to evaluate the factuality of summaries. [ArXiv version](https://arxiv.org/abs/2403.02270). ### Intended Use This model is designed to: - Extract atomic claims from summaries. - Serve as a component in pipelines for factuality evaluation of summaries. ## Example Code ```python from transformers import T5ForConditionalGeneration, T5Tokenizer tokenizer = T5Tokenizer.from_pretrained("Babelscape/t5-base-summarization-claim-extractor") model = T5ForConditionalGeneration.from_pretrained("Babelscape/t5-base-summarization-claim-extractor") summary = 'Simone Biles made a triumphant return to the Olympic stage at the Paris 2024 Games, competing in the women’s gymnastics qualifications. Overcoming a previous struggle with the “twisties” that led to her withdrawal from events at the Tokyo 2020 Olympics, Biles dazzled with strong performances on all apparatus, helping the U.S. team secure a commanding lead in the qualifications. Her routines showcased her resilience and skill, drawing enthusiastic support from a star-studded audience' tok_input = tokenizer.batch_encode_plus([summary], return_tensors="pt", padding=True) claims = model.generate(**tok_input) claims = tokenizer.batch_decode(claims, skip_special_tokens=True) ``` **Note**: The model outputs the claims in a single string. **Kindly remember to split the string into sentences** in order to retrieve the singular claims. ### Training For details regarding the training process, please checkout our paper(https://aclanthology.org/2024.findings-acl.841.pdf) (section 4.1). ### Performance | <div style="width: 250px">Model</div> | easiness<sub>P</sub> | easiness<sub>R</sub> | easiness<sub>F1</sub> | |:-------------------------------------:|:--------------------:|:--------------------:|:---------------------:| | GPT-3.5 | 80.1 | 70.9 | 74.9 | | t5-base-summarization-claim-extractor | 79.2 | 68.8 | 73.4 | **Table 1:** Easiness Precision (easiness<sub>P</sub>), Recall (easiness<sub>R</sub>), and F1 score (easiness<sub>F1</sub>) results for the LLM-based claim extractor, namely **GPT-3.5**, and **t5-base-summarization-claim-extractor**, assessed on [ROSE (Liu et al., 2023b)](https://aclanthology.org/2023.acl-long.228/). Further details on the model's performance and the metrics used can be found in the [paper](https://aclanthology.org/2024.findings-acl.841.pdf) (section 4.1). 
### Main Repository For more details about FENICE, check out the GitHub repository: [Babelscape/FENICE](https://github.com/Babelscape/FENICE) ### Citation If you use this model in your work, please cite the following paper: ```bibtex @inproceedings{scire-etal-2024-fenice, title = "{FENICE}: Factuality Evaluation of summarization based on Natural language Inference and Claim Extraction", author = "Scir{\`e}, Alessandro and Ghonim, Karim and Navigli, Roberto", editor = "Ku, Lun-Wei and Martins, Andre and Srikumar, Vivek", booktitle = "Findings of the Association for Computational Linguistics ACL 2024", month = aug, year = "2024", address = "Bangkok, Thailand and virtual meeting", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2024.findings-acl.841", pages = "14148--14161", } ``` ### Limitations - The model is specifically designed for extracting claims from summaries and may not perform well on other types of texts. - The model is currently available only in English and may not generalize well to other languages. ### Ethical Considerations Users should be aware that while this model extracts claims that can be evaluated for factuality, it does not determine the truthfulness of those claims. Therefore, it should be used in conjunction with other tools or human judgment when evaluating the reliability of summaries. ### Acknowledgments This work was made possible thanks to the support of Babelscape and Sapienza NLP.
{"language": ["en"], "library_name": "transformers", "license": ["cc-by-nc-sa-4.0"], "widget": [{"text": "A major tech company has unveiled its first fully autonomous electric vehicle, boasting a range of 500 miles per charge and advanced safety features designed to revolutionize the transportation industry."}, {"text": "A new global initiative to clean up ocean plastic aims to remove 50% of floating debris within a decade, using innovative autonomous vessels powered by renewable energy."}, {"text": "A historic peace agreement was signed between two long-standing rival nations, marking a turning point in diplomatic relations and promising economic and social cooperation for years to come."}]}
task
[ "SUMMARIZATION" ]
40,267
Helsinki-NLP/opus-mt-en-st
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "en", "st", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2023-08-16T11:31:14+00:00
56
1
--- license: apache-2.0 tags: - translation --- ### opus-mt-en-st * source languages: en * target languages: st * OPUS readme: [en-st](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-st/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-st/opus-2020-01-08.zip) * test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-st/opus-2020-01-08.test.txt) * test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-st/opus-2020-01-08.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.en.st | 49.8 | 0.665 |
null
Non_BioNLP
### opus-mt-en-st * source languages: en * target languages: st * OPUS readme: [en-st](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-st/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-st/opus-2020-01-08.zip) * test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-st/opus-2020-01-08.test.txt) * test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-st/opus-2020-01-08.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.en.st | 49.8 | 0.665 |
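The card above lists the training recipe and benchmark scores but no usage snippet. Below is a minimal sketch using the standard Transformers Marian interface commonly used for OPUS-MT checkpoints; the example sentence is purely illustrative, and the normalization + SentencePiece pre-processing mentioned above is assumed to be handled by the tokenizer.

```python
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-st"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# English -> Southern Sotho (st)
batch = tokenizer(["How are you today?"], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```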
{"license": "apache-2.0", "tags": ["translation"]}
task
[ "TRANSLATION" ]
40,268
KarelDO/lstm.CEBaB_confounding.price_food_ambiance_negative.absa.5-class.seed_43
KarelDO
text-classification
[ "transformers", "pytorch", "bert", "text-classification", "generated_from_trainer", "en", "dataset:OpenTable", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-10-14T04:40:07Z
2022-10-14T04:41:16+00:00
10
0
--- datasets: - OpenTable language: - en metrics: - accuracy tags: - generated_from_trainer model-index: - name: lstm.CEBaB_confounding.price_food_ambiance_negative.absa.5-class.seed_43 results: - task: type: text-classification name: Text Classification dataset: name: OpenTable OPENTABLE-ABSA type: OpenTable args: opentable-absa metrics: - type: accuracy value: 0.7503059975520195 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # lstm.CEBaB_confounding.price_food_ambiance_negative.absa.5-class.seed_43 This model is a fine-tuned version of [lstm](https://huggingface.co/lstm) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 0.7212 - Accuracy: 0.7503 - Macro-f1: 0.7450 - Weighted-macro-f1: 0.7521 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # lstm.CEBaB_confounding.price_food_ambiance_negative.absa.5-class.seed_43 This model is a fine-tuned version of [lstm](https://huggingface.co/lstm) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 0.7212 - Accuracy: 0.7503 - Macro-f1: 0.7450 - Weighted-macro-f1: 0.7521 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
{"datasets": ["OpenTable"], "language": ["en"], "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "lstm.CEBaB_confounding.price_food_ambiance_negative.absa.5-class.seed_43", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "OpenTable OPENTABLE-ABSA", "type": "OpenTable", "args": "opentable-absa"}, "metrics": [{"type": "accuracy", "value": 0.7503059975520195, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,270
Jisu80609/t5-small-custom
Jisu80609
null
[ "region:us" ]
2024-09-18T12:11:34Z
2024-09-18T12:11:43+00:00
0
0
--- {} --- ## Model Card for t5_small Summarization Model ## Model Details - Model name: t5-small - Model architecture: This model is based on the T5 (Text-To-Text Transfer Transformer) architecture, which is designed to handle all natural language processing (NLP) tasks as text-to-text problems. - Pre-trained model: The `t5-small` pre-trained model from the Hugging Face `transformers` library was used. - Task: This model was fine-tuned for text summarization using the CNN/DailyMail dataset. ## Training Data - Dataset: The model was trained on version 3.0.0 of the **CNN/DailyMail** dataset. This dataset consists of news articles (`article`) and their summaries (`highlights`), which are used as the model's inputs and outputs. - Data split: - Training data: 1% of the CNN/DailyMail training split (~287 samples) - Validation data: 1% of the CNN/DailyMail validation split (~133 samples) ## Training Procedure - **Optimization algorithm**: AdamW optimizer with the learning rate set to 2e-5. - **Batch size**: A batch size of 4 for both training and validation. - **Training epochs**: Trained for 1 epoch. - **Warmup steps**: 500 warmup steps at the start of training to gradually increase the learning rate. - **Weight decay**: A weight decay of 0.01 was applied. - **Mixed precision**: FP16 (mixed precision) was used to optimize memory usage and training speed. ## How to Use This model was trained on a single GPU with FP16 (mixed precision) enabled. ## Evaluation The model was evaluated using two metrics that are widely used for text generation tasks: ROUGE: commonly used to evaluate automatic summarization and machine translation. ROUGE-1: measures unigram (single-word) overlap between the generated summary and the reference summary. ROUGE-2: measures bigram (two-word) overlap between the generated summary and the reference summary. ROUGE-L: evaluates based on the longest common subsequence (LCS) between the generated summary and the reference summary. BLEU: a metric for evaluating translation quality that measures n-gram precision between the generated text and the reference text. BLEU-1: measures unigram precision. BLEU-2: measures bigram precision. BLEU-4: measures 4-gram precision. The evaluation results were computed using the ROUGE and BLEU metrics. ## Limitations Training data size: This model was fine-tuned on only 1% of the CNN/DailyMail dataset, so it was trained on a very small amount of data and may not generalize well to other domains or types of text. Summary length: The input length is limited to a maximum of 512 tokens. Articles exceeding 512 tokens may be truncated, which can lead to incomplete summaries. Model size: t5-small is a small model, so its performance may be limited on more complex or longer documents. Performance may improve with a larger version such as t5-base or t5-large. ## Ethical Considerations Data bias: Because the CNN/DailyMail dataset contains articles from specific news organizations, the data may include biases, which means the generated summaries may be biased or distorted. Misinformation: This model generates abstractive summaries, so it may produce summaries containing incorrect information when the input text is unstructured or ambiguous. Privacy: Privacy issues may arise if this model summarizes sensitive or personal content. It is important to comply with relevant laws and privacy-protection regulations, especially when summarizing sensitive material.
null
Non_BioNLP
## Model Card for t5_small Summarization Model ## Model Details - Model name: t5-small - Model architecture: This model is based on the T5 (Text-To-Text Transfer Transformer) architecture, which is designed to handle all natural language processing (NLP) tasks as text-to-text problems. - Pre-trained model: The `t5-small` pre-trained model from the Hugging Face `transformers` library was used. - Task: This model was fine-tuned for text summarization using the CNN/DailyMail dataset. ## Training Data - Dataset: The model was trained on version 3.0.0 of the **CNN/DailyMail** dataset. This dataset consists of news articles (`article`) and their summaries (`highlights`), which are used as the model's inputs and outputs. - Data split: - Training data: 1% of the CNN/DailyMail training split (~287 samples) - Validation data: 1% of the CNN/DailyMail validation split (~133 samples) ## Training Procedure - **Optimization algorithm**: AdamW optimizer with the learning rate set to 2e-5. - **Batch size**: A batch size of 4 for both training and validation. - **Training epochs**: Trained for 1 epoch. - **Warmup steps**: 500 warmup steps at the start of training to gradually increase the learning rate. - **Weight decay**: A weight decay of 0.01 was applied. - **Mixed precision**: FP16 (mixed precision) was used to optimize memory usage and training speed. ## How to Use This model was trained on a single GPU with FP16 (mixed precision) enabled. ## Evaluation The model was evaluated using two metrics that are widely used for text generation tasks: ROUGE: commonly used to evaluate automatic summarization and machine translation. ROUGE-1: measures unigram (single-word) overlap between the generated summary and the reference summary. ROUGE-2: measures bigram (two-word) overlap between the generated summary and the reference summary. ROUGE-L: evaluates based on the longest common subsequence (LCS) between the generated summary and the reference summary. BLEU: a metric for evaluating translation quality that measures n-gram precision between the generated text and the reference text. BLEU-1: measures unigram precision. BLEU-2: measures bigram precision. BLEU-4: measures 4-gram precision. The evaluation results were computed using the ROUGE and BLEU metrics. ## Limitations Training data size: This model was fine-tuned on only 1% of the CNN/DailyMail dataset, so it was trained on a very small amount of data and may not generalize well to other domains or types of text. Summary length: The input length is limited to a maximum of 512 tokens. Articles exceeding 512 tokens may be truncated, which can lead to incomplete summaries. Model size: t5-small is a small model, so its performance may be limited on more complex or longer documents. Performance may improve with a larger version such as t5-base or t5-large. ## Ethical Considerations Data bias: Because the CNN/DailyMail dataset contains articles from specific news organizations, the data may include biases, which means the generated summaries may be biased or distorted. Misinformation: This model generates abstractive summaries, so it may produce summaries containing incorrect information when the input text is unstructured or ambiguous. Privacy: Privacy issues may arise if this model summarizes sensitive or personal content. It is important to comply with relevant laws and privacy-protection regulations, especially when summarizing sensitive material.
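The "How to Use" section above only describes the training hardware, so here is a minimal, illustrative inference sketch. It assumes the checkpoint loads like a standard `t5-small` summarization fine-tune and that the usual T5 `"summarize: "` task prefix applies; neither assumption is confirmed by the card.

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Repo id taken from this entry; assumed to load as a standard T5 checkpoint.
model_name = "Jisu80609/t5-small-custom"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)

article = "Replace this with the news article you want to summarize."
inputs = tokenizer("summarize: " + article, return_tensors="pt", max_length=512, truncation=True)
summary_ids = model.generate(inputs.input_ids, max_length=128, num_beams=4, early_stopping=True)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```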
{}
task
[ "SUMMARIZATION" ]
40,271
asun17904/multiberts-seed_1_crows_pairs_classifieronly
asun17904
text-classification
[ "transformers", "pytorch", "bert", "text-classification", "generated_from_trainer", "dataset:crows_pairs", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-03-24T03:11:19Z
2023-03-24T16:00:05+00:00
27
0
--- datasets: - crows_pairs license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: multiberts-seed_1_crows_pairs_classifieronly results: - task: type: text-classification name: Text Classification dataset: name: crows_pairs type: crows_pairs config: crows_pairs split: test args: crows_pairs metrics: - type: accuracy value: 0.5331125827814569 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # multiberts-seed_1_crows_pairs_classifieronly This model is a fine-tuned version of [google/multiberts-seed_1](https://huggingface.co/google/multiberts-seed_1) on the crows_pairs dataset. It achieves the following results on the evaluation set: - Loss: 0.6903 - Accuracy: 0.5331 - Tp: 0.3477 - Tn: 0.1854 - Fp: 0.3146 - Fn: 0.1523 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Tp | Tn | Fp | Fn | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:------:|:------:| | 0.7016 | 1.05 | 20 | 0.6910 | 0.5364 | 0.4437 | 0.0927 | 0.4073 | 0.0563 | | 0.7072 | 2.11 | 40 | 0.6914 | 0.5166 | 0.4834 | 0.0331 | 0.4669 | 0.0166 | | 0.7003 | 3.16 | 60 | 0.6910 | 0.5497 | 0.0927 | 0.4570 | 0.0430 | 0.4073 | | 0.7004 | 4.21 | 80 | 0.6908 | 0.5265 | 0.2583 | 0.2682 | 0.2318 | 0.2417 | | 0.6985 | 5.26 | 100 | 0.6916 | 0.5033 | 0.0033 | 0.5 | 0.0 | 0.4967 | | 0.7107 | 6.32 | 120 | 0.6909 | 0.5364 | 0.4603 | 0.0762 | 0.4238 | 0.0397 | | 0.7135 | 7.37 | 140 | 0.6908 | 0.5364 | 0.4139 | 0.1225 | 0.3775 | 0.0861 | | 0.7041 | 8.42 | 160 | 0.6908 | 0.5464 | 0.2848 | 0.2616 | 0.2384 | 0.2152 | | 0.7022 | 9.47 | 180 | 0.6907 | 0.5298 | 0.2450 | 0.2848 | 0.2152 | 0.2550 | | 0.6996 | 10.53 | 200 | 0.6908 | 0.5265 | 0.4371 | 0.0894 | 0.4106 | 0.0629 | | 0.7049 | 11.58 | 220 | 0.6914 | 0.5066 | 0.0066 | 0.5 | 0.0 | 0.4934 | | 0.6975 | 12.63 | 240 | 0.6912 | 0.5232 | 0.4834 | 0.0397 | 0.4603 | 0.0166 | | 0.6967 | 13.68 | 260 | 0.6907 | 0.5497 | 0.2748 | 0.2748 | 0.2252 | 0.2252 | | 0.705 | 14.74 | 280 | 0.6912 | 0.5132 | 0.0132 | 0.5 | 0.0 | 0.4868 | | 0.6943 | 15.79 | 300 | 0.6909 | 0.5298 | 0.4768 | 0.0530 | 0.4470 | 0.0232 | | 0.7057 | 16.84 | 320 | 0.6906 | 0.5364 | 0.4272 | 0.1093 | 0.3907 | 0.0728 | | 0.6995 | 17.89 | 340 | 0.6905 | 0.5530 | 0.2252 | 0.3278 | 0.1722 | 0.2748 | | 0.6989 | 18.95 | 360 | 0.6905 | 0.5464 | 0.1854 | 0.3609 | 0.1391 | 0.3146 | | 0.7156 | 20.0 | 380 | 0.6912 | 0.5199 | 0.4834 | 0.0364 | 0.4636 | 0.0166 | | 0.6957 | 21.05 | 400 | 0.6905 | 0.5497 | 0.2285 | 0.3212 | 0.1788 | 0.2715 | | 0.6978 | 22.11 | 420 | 0.6906 | 0.5331 | 0.4371 | 0.0960 | 0.4040 | 0.0629 | | 0.6988 | 23.16 | 440 | 0.6905 | 0.5331 | 0.3179 | 0.2152 | 0.2848 | 0.1821 | | 0.7054 | 24.21 | 460 | 0.6905 | 0.5497 | 0.1623 | 0.3874 | 0.1126 | 0.3377 | | 0.6997 | 25.26 | 480 | 0.6907 | 0.5166 | 0.4636 | 0.0530 | 0.4470 | 0.0364 | | 0.7007 | 26.32 | 500 | 0.6904 | 0.5364 | 0.3940 | 0.1424 | 0.3576 | 0.1060 | | 0.6973 | 27.37 | 520 | 0.6914 | 0.5 | 0.0 | 
0.5 | 0.0 | 0.5 | | 0.704 | 28.42 | 540 | 0.6905 | 0.5364 | 0.4338 | 0.1026 | 0.3974 | 0.0662 | | 0.7028 | 29.47 | 560 | 0.6905 | 0.5530 | 0.1192 | 0.4338 | 0.0662 | 0.3808 | | 0.6959 | 30.53 | 580 | 0.6905 | 0.5364 | 0.4404 | 0.0960 | 0.4040 | 0.0596 | | 0.704 | 31.58 | 600 | 0.6904 | 0.5464 | 0.2583 | 0.2881 | 0.2119 | 0.2417 | | 0.704 | 32.63 | 620 | 0.6903 | 0.5397 | 0.3146 | 0.2252 | 0.2748 | 0.1854 | | 0.7003 | 33.68 | 640 | 0.6903 | 0.5430 | 0.3079 | 0.2351 | 0.2649 | 0.1921 | | 0.7058 | 34.74 | 660 | 0.6903 | 0.5464 | 0.3079 | 0.2384 | 0.2616 | 0.1921 | | 0.7095 | 35.79 | 680 | 0.6903 | 0.5430 | 0.2649 | 0.2781 | 0.2219 | 0.2351 | | 0.7019 | 36.84 | 700 | 0.6905 | 0.5397 | 0.0695 | 0.4702 | 0.0298 | 0.4305 | | 0.7167 | 37.89 | 720 | 0.6907 | 0.5099 | 0.0166 | 0.4934 | 0.0066 | 0.4834 | | 0.6975 | 38.95 | 740 | 0.6903 | 0.5497 | 0.3113 | 0.2384 | 0.2616 | 0.1887 | | 0.7036 | 40.0 | 760 | 0.6903 | 0.5397 | 0.3311 | 0.2086 | 0.2914 | 0.1689 | | 0.7009 | 41.05 | 780 | 0.6903 | 0.5331 | 0.3940 | 0.1391 | 0.3609 | 0.1060 | | 0.7004 | 42.11 | 800 | 0.6903 | 0.5430 | 0.2980 | 0.2450 | 0.2550 | 0.2020 | | 0.699 | 43.16 | 820 | 0.6903 | 0.5430 | 0.2152 | 0.3278 | 0.1722 | 0.2848 | | 0.6962 | 44.21 | 840 | 0.6903 | 0.5430 | 0.2980 | 0.2450 | 0.2550 | 0.2020 | | 0.6981 | 45.26 | 860 | 0.6903 | 0.5397 | 0.3543 | 0.1854 | 0.3146 | 0.1457 | | 0.7008 | 46.32 | 880 | 0.6903 | 0.5430 | 0.3808 | 0.1623 | 0.3377 | 0.1192 | | 0.7048 | 47.37 | 900 | 0.6903 | 0.5530 | 0.3742 | 0.1788 | 0.3212 | 0.1258 | | 0.707 | 48.42 | 920 | 0.6903 | 0.5497 | 0.3709 | 0.1788 | 0.3212 | 0.1291 | | 0.7014 | 49.47 | 940 | 0.6903 | 0.5331 | 0.3477 | 0.1854 | 0.3146 | 0.1523 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1 - Datasets 2.10.1 - Tokenizers 0.13.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # multiberts-seed_1_crows_pairs_classifieronly This model is a fine-tuned version of [google/multiberts-seed_1](https://huggingface.co/google/multiberts-seed_1) on the crows_pairs dataset. It achieves the following results on the evaluation set: - Loss: 0.6903 - Accuracy: 0.5331 - Tp: 0.3477 - Tn: 0.1854 - Fp: 0.3146 - Fn: 0.1523 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Tp | Tn | Fp | Fn | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:------:|:------:| | 0.7016 | 1.05 | 20 | 0.6910 | 0.5364 | 0.4437 | 0.0927 | 0.4073 | 0.0563 | | 0.7072 | 2.11 | 40 | 0.6914 | 0.5166 | 0.4834 | 0.0331 | 0.4669 | 0.0166 | | 0.7003 | 3.16 | 60 | 0.6910 | 0.5497 | 0.0927 | 0.4570 | 0.0430 | 0.4073 | | 0.7004 | 4.21 | 80 | 0.6908 | 0.5265 | 0.2583 | 0.2682 | 0.2318 | 0.2417 | | 0.6985 | 5.26 | 100 | 0.6916 | 0.5033 | 0.0033 | 0.5 | 0.0 | 0.4967 | | 0.7107 | 6.32 | 120 | 0.6909 | 0.5364 | 0.4603 | 0.0762 | 0.4238 | 0.0397 | | 0.7135 | 7.37 | 140 | 0.6908 | 0.5364 | 0.4139 | 0.1225 | 0.3775 | 0.0861 | | 0.7041 | 8.42 | 160 | 0.6908 | 0.5464 | 0.2848 | 0.2616 | 0.2384 | 0.2152 | | 0.7022 | 9.47 | 180 | 0.6907 | 0.5298 | 0.2450 | 0.2848 | 0.2152 | 0.2550 | | 0.6996 | 10.53 | 200 | 0.6908 | 0.5265 | 0.4371 | 0.0894 | 0.4106 | 0.0629 | | 0.7049 | 11.58 | 220 | 0.6914 | 0.5066 | 0.0066 | 0.5 | 0.0 | 0.4934 | | 0.6975 | 12.63 | 240 | 0.6912 | 0.5232 | 0.4834 | 0.0397 | 0.4603 | 0.0166 | | 0.6967 | 13.68 | 260 | 0.6907 | 0.5497 | 0.2748 | 0.2748 | 0.2252 | 0.2252 | | 0.705 | 14.74 | 280 | 0.6912 | 0.5132 | 0.0132 | 0.5 | 0.0 | 0.4868 | | 0.6943 | 15.79 | 300 | 0.6909 | 0.5298 | 0.4768 | 0.0530 | 0.4470 | 0.0232 | | 0.7057 | 16.84 | 320 | 0.6906 | 0.5364 | 0.4272 | 0.1093 | 0.3907 | 0.0728 | | 0.6995 | 17.89 | 340 | 0.6905 | 0.5530 | 0.2252 | 0.3278 | 0.1722 | 0.2748 | | 0.6989 | 18.95 | 360 | 0.6905 | 0.5464 | 0.1854 | 0.3609 | 0.1391 | 0.3146 | | 0.7156 | 20.0 | 380 | 0.6912 | 0.5199 | 0.4834 | 0.0364 | 0.4636 | 0.0166 | | 0.6957 | 21.05 | 400 | 0.6905 | 0.5497 | 0.2285 | 0.3212 | 0.1788 | 0.2715 | | 0.6978 | 22.11 | 420 | 0.6906 | 0.5331 | 0.4371 | 0.0960 | 0.4040 | 0.0629 | | 0.6988 | 23.16 | 440 | 0.6905 | 0.5331 | 0.3179 | 0.2152 | 0.2848 | 0.1821 | | 0.7054 | 24.21 | 460 | 0.6905 | 0.5497 | 0.1623 | 0.3874 | 0.1126 | 0.3377 | | 0.6997 | 25.26 | 480 | 0.6907 | 0.5166 | 0.4636 | 0.0530 | 0.4470 | 0.0364 | | 0.7007 | 26.32 | 500 | 0.6904 | 0.5364 | 0.3940 | 0.1424 | 0.3576 | 0.1060 | | 0.6973 | 27.37 | 520 | 0.6914 | 0.5 | 0.0 | 0.5 | 0.0 | 0.5 | | 0.704 | 28.42 | 540 | 0.6905 | 0.5364 | 0.4338 | 0.1026 | 0.3974 | 0.0662 | | 0.7028 | 29.47 | 560 | 0.6905 | 0.5530 | 0.1192 | 0.4338 | 0.0662 | 0.3808 | | 0.6959 | 30.53 | 580 | 0.6905 | 0.5364 | 0.4404 | 0.0960 | 0.4040 | 0.0596 | | 0.704 | 31.58 | 600 | 0.6904 | 0.5464 | 0.2583 | 0.2881 | 0.2119 | 0.2417 | | 0.704 | 32.63 | 620 | 0.6903 | 0.5397 | 0.3146 | 0.2252 | 0.2748 
| 0.1854 | | 0.7003 | 33.68 | 640 | 0.6903 | 0.5430 | 0.3079 | 0.2351 | 0.2649 | 0.1921 | | 0.7058 | 34.74 | 660 | 0.6903 | 0.5464 | 0.3079 | 0.2384 | 0.2616 | 0.1921 | | 0.7095 | 35.79 | 680 | 0.6903 | 0.5430 | 0.2649 | 0.2781 | 0.2219 | 0.2351 | | 0.7019 | 36.84 | 700 | 0.6905 | 0.5397 | 0.0695 | 0.4702 | 0.0298 | 0.4305 | | 0.7167 | 37.89 | 720 | 0.6907 | 0.5099 | 0.0166 | 0.4934 | 0.0066 | 0.4834 | | 0.6975 | 38.95 | 740 | 0.6903 | 0.5497 | 0.3113 | 0.2384 | 0.2616 | 0.1887 | | 0.7036 | 40.0 | 760 | 0.6903 | 0.5397 | 0.3311 | 0.2086 | 0.2914 | 0.1689 | | 0.7009 | 41.05 | 780 | 0.6903 | 0.5331 | 0.3940 | 0.1391 | 0.3609 | 0.1060 | | 0.7004 | 42.11 | 800 | 0.6903 | 0.5430 | 0.2980 | 0.2450 | 0.2550 | 0.2020 | | 0.699 | 43.16 | 820 | 0.6903 | 0.5430 | 0.2152 | 0.3278 | 0.1722 | 0.2848 | | 0.6962 | 44.21 | 840 | 0.6903 | 0.5430 | 0.2980 | 0.2450 | 0.2550 | 0.2020 | | 0.6981 | 45.26 | 860 | 0.6903 | 0.5397 | 0.3543 | 0.1854 | 0.3146 | 0.1457 | | 0.7008 | 46.32 | 880 | 0.6903 | 0.5430 | 0.3808 | 0.1623 | 0.3377 | 0.1192 | | 0.7048 | 47.37 | 900 | 0.6903 | 0.5530 | 0.3742 | 0.1788 | 0.3212 | 0.1258 | | 0.707 | 48.42 | 920 | 0.6903 | 0.5497 | 0.3709 | 0.1788 | 0.3212 | 0.1291 | | 0.7014 | 49.47 | 940 | 0.6903 | 0.5331 | 0.3477 | 0.1854 | 0.3146 | 0.1523 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1 - Datasets 2.10.1 - Tokenizers 0.13.2
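The Tp/Tn/Fp/Fn columns in the tables above are reported as fractions of the evaluation set rather than raw counts, so the four values sum to 1 and accuracy is simply Tp + Tn (for the final checkpoint, 0.3477 + 0.1854 = 0.5331). A tiny illustrative sketch of that relationship:

```python
def accuracy_from_normalized_confusion(tp, tn, fp, fn):
    # tp/tn/fp/fn are fractions of the evaluation set, so they sum to 1.0.
    assert abs((tp + tn + fp + fn) - 1.0) < 1e-6
    return tp + tn

# Final-epoch values from the table above.
print(accuracy_from_normalized_confusion(0.3477, 0.1854, 0.3146, 0.1523))  # ~0.5331
```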
{"datasets": ["crows_pairs"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "multiberts-seed_1_crows_pairs_classifieronly", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "crows_pairs", "type": "crows_pairs", "config": "crows_pairs", "split": "test", "args": "crows_pairs"}, "metrics": [{"type": "accuracy", "value": 0.5331125827814569, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,272
aroot/mbart-finetuned-eng-kor-60086652858
aroot
translation
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-30T20:04:00Z
2023-06-30T20:22:55+00:00
8
0
--- metrics: - bleu tags: - translation - generated_from_trainer model-index: - name: mbart-finetuned-eng-kor-60086652858 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-finetuned-eng-kor-60086652858 This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.1052 - Bleu: 5.7445 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1 - Datasets 2.12.0 - Tokenizers 0.11.0
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-finetuned-eng-kor-60086652858 This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.1052 - Bleu: 5.7445 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1 - Datasets 2.12.0 - Tokenizers 0.11.0
{"metrics": ["bleu"], "tags": ["translation", "generated_from_trainer"], "model-index": [{"name": "mbart-finetuned-eng-kor-60086652858", "results": []}]}
task
[ "TRANSLATION" ]
40,273
LoneStriker/Bagel-Hermes-2x34b-5.0bpw-h6-exl2
LoneStriker
text-generation
[ "transformers", "safetensors", "mixtral", "text-generation", "yi", "moe", "conversational", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-01-12T23:33:59Z
2024-01-12T23:49:33+00:00
12
1
--- license: other license_name: yi-license license_link: https://huggingface.co/01-ai/Yi-34B-200K/blob/main/LICENSE tags: - yi - moe --- ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/6468ce47e134d050a58aa89c/mLH2E0dk9On_LcFX9yhuS.jpeg) # Bagel-Hermes-2x34B This is the model for Bagel-Hermes-2x34B. I used [mergekit](https://github.com/cg123/mergekit) to make this MoE model. # Prompt Template(s): Since [bagel-dpo-34b-v0.2](https://huggingface.co/jondurbin/bagel-dpo-34b-v0.2) uses many prompt templates, and [Nous-Hermes-2-Yi-34B](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B) uses ChatML, you can utilize ChatML and other prompt templates provided by bagel. **Note:** I currently do not know which prompt template is best. ### ChatML: ``` <|im_start|>system {system}<|im_end|> <|im_start|>user {user}<|im_end|> <|im_start|>assistant {assistant}<|im_end|> ``` ### Alpaca (sort of) ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {system} {instruction} ### Response: ``` ### Vicuna ``` {system} USER: {instruction} ASSISTANT: ``` Visit [bagel-dpo-34b-v0.2](https://huggingface.co/jondurbin/bagel-dpo-34b-v0.2) to try more prompt templates. # YAML Config to reproduce ```yaml base_model: nontoxic-bagel-34b-v0.2 gate_mode: hidden dtype: bfloat16 experts: - source_model: bagel-dpo-34b-v0.2 positive_prompts: ["question answering", "Q:", "science", "biology", "chemistry", "physics"] - source_model: Nous-Hermes-2-Yi-34B positive_prompts: ["chat", "math", "reason", "mathematics", "solve", "count", "python", "javascript", "programming", "algorithm", "tell me", "assistant"] ``` # Quantized versions Quantized versions of this model are available thanks to [TheBloke](https://hf.co/TheBloke). ##### GPTQ - [TheBloke/Bagel-Hermes-2x34B-GPTQ](https://huggingface.co/TheBloke/Bagel-Hermes-2x34B-GPTQ) ##### GGUF - [TheBloke/Bagel-Hermes-2x34B-GGUF](https://huggingface.co/TheBloke/Bagel-Hermes-2x34B-GGUF) ##### AWQ - [TheBloke/Bagel-Hermes-2x34B-AWQ](https://huggingface.co/TheBloke/Bagel-Hermes-2x34B-AWQ) If you would like to support me: [☕ Buy Me a Coffee](https://www.buymeacoffee.com/weyaxi)
null
Non_BioNLP
![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/6468ce47e134d050a58aa89c/mLH2E0dk9On_LcFX9yhuS.jpeg) # Bagel-Hermes-2x34B This is the model for Bagel-Hermes-2x34B. I used [mergekit](https://github.com/cg123/mergekit) to make this MoE model. # Prompt Template(s): Since [bagel-dpo-34b-v0.2](https://huggingface.co/jondurbin/bagel-dpo-34b-v0.2) uses many prompt templates, and [Nous-Hermes-2-Yi-34B](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B) uses ChatML, you can utilize ChatML and other prompt templates provided by bagel. **Note:** I currently do not know which prompt template is best. ### ChatML: ``` <|im_start|>system {system}<|im_end|> <|im_start|>user {user}<|im_end|> <|im_start|>assistant {assistant}<|im_end|> ``` ### Alpaca (sort of) ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {system} {instruction} ### Response: ``` ### Vicuna ``` {system} USER: {instruction} ASSISTANT: ``` Visit [bagel-dpo-34b-v0.2](https://huggingface.co/jondurbin/bagel-dpo-34b-v0.2) to try more prompt templates. # YAML Config to reproduce ```yaml base_model: nontoxic-bagel-34b-v0.2 gate_mode: hidden dtype: bfloat16 experts: - source_model: bagel-dpo-34b-v0.2 positive_prompts: ["question answering", "Q:", "science", "biology", "chemistry", "physics"] - source_model: Nous-Hermes-2-Yi-34B positive_prompts: ["chat", "math", "reason", "mathematics", "solve", "count", "python", "javascript", "programming", "algorithm", "tell me", "assistant"] ``` # Quantized versions Quantized versions of this model are available thanks to [TheBloke](https://hf.co/TheBloke). ##### GPTQ - [TheBloke/Bagel-Hermes-2x34B-GPTQ](https://huggingface.co/TheBloke/Bagel-Hermes-2x34B-GPTQ) ##### GGUF - [TheBloke/Bagel-Hermes-2x34B-GGUF](https://huggingface.co/TheBloke/Bagel-Hermes-2x34B-GGUF) ##### AWQ - [TheBloke/Bagel-Hermes-2x34B-AWQ](https://huggingface.co/TheBloke/Bagel-Hermes-2x34B-AWQ) If you would like to support me: [☕ Buy Me a Coffee](https://www.buymeacoffee.com/weyaxi)
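As a small, illustrative helper (not part of the original card) that assembles the ChatML template shown above into a single prompt string for generation; the exact line-break placement is an assumption based on the usual ChatML convention:

```python
def chatml_prompt(system: str, user: str) -> str:
    # Mirrors the ChatML template above; generation continues after the
    # final assistant header.
    return (
        f"<|im_start|>system\n{system}<|im_end|>\n"
        f"<|im_start|>user\n{user}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

print(chatml_prompt("You are a helpful assistant.", "What is a mixture-of-experts model?"))
```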
{"license": "other", "license_name": "yi-license", "license_link": "https://huggingface.co/01-ai/Yi-34B-200K/blob/main/LICENSE", "tags": ["yi", "moe"]}
task
[ "QUESTION_ANSWERING" ]
40,274
LinWeizheDragon/PreFLMR_ViT-B
LinWeizheDragon
feature-extraction
[ "transformers", "safetensors", "flmr", "feature-extraction", "retrieval", "multi-modal", "knowledge-based visual question answering", "FLMR", "PreFLMR", "custom_code", "en", "arxiv:2402.08327", "license:mit", "region:us" ]
2024-02-20T02:55:41Z
2024-02-27T05:35:34+00:00
332
2
--- language: - en library_name: transformers license: mit tags: - retrieval - multi-modal - knowledge-based visual question answering - FLMR - PreFLMR --- # PreFLMR model card PreFLMR is an open-source model for multimodal knowledge retrieval. It is a transformer-based model that uses a combination of text and image inputs to retrieve relevant documents from a large corpus. ## Model Details ### Model Description - **Model type:** FLMRModelForRetrieval - **Language(s) (NLP):** English - **License:** MIT License ### Paper and resources for more detail - **Blog Post for quick overview:** https://www.jinghong-chen.net/preflmr-sota-open-sourced-multi/ - **Paper:** https://arxiv.org/abs/2402.08327 - **Gradio Demo:** https://u60544-b8d4-53eaa55d.westx.seetacloud.com:8443/ - **Repository:** https://github.com/LinWeizheDragon/FLMR - **Project Page:** https://preflmr.github.io/ ## Uses ### Direct Use This model can be used directly to retrieve documents from a large corpus using a combination of text and image input queries. The retrieval usage can be found in the [official implementation](https://github.com/LinWeizheDragon/FLMR). ### Downstream Use <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> This model can be used combined with language models to create a retrieval-augmented language model. The use for Knowledge-based VQA can be found in [RAVQA](https://github.com/linweizhedragon/retrieval-augmented-visual-question-answering) ## How to Get Started with the Model For details of training, indexing, and performing retrieval, please refer to [here](https://github.com/LinWeizheDragon/FLMR). ## Training datasets The model is pre-trained on three types of tasks with a total of nine datasets: 1. Image to Text retrieval: WIT, KVQA, and CC3M 2. Question to Text retrieval: MSMARCO 3. Image & Question to Text retrieval: LLaVA, OVEN, OKVQA, Infoseek and E-VQA These datasets were converted to retrieval format. For details on the dataset split and conversion process, please refer to the paper [PreFLMR: Scaling Up Fine-Grained Late-Interaction Multi-modal Retrievers](https://arxiv.org/abs/2402.08327). We will release the proprocessed datasets soon. ## Evaluation datasets We evaluate our models on WIT, LLaVA, OVEN, KVQA, IGLUE (subset of WIT), Infoseek, E-VQA, OKVQA and MSMARCO. | Model | Vision Encoder | Text Encoder | Checkpoint Name | No. Param. | WIT | LLaVA | OVEN | KVQA | IGLUE | Infoseek | E-VQA | OKVQA | MSMARCO | |---------|----------------|--------------|-------------------------------------------------------------|-------|-------|--------|-------|-------|-------|----------|-------|--------|-------| | PreFLMR | ViT-B | Base-v2 | [LinWeizheDragon/PreFLMR_ViT-B](https://huggingface.co/LinWeizheDragon/PreFLMR_ViT-B) | 327M | 41.7 | 67.2 | 46.3 | 28.6 | 57.3 | 48.8 | 67.9 | 66.1 | 79.5 | | PreFLMR | ViT-L | Base-v2 | [LinWeizheDragon/PreFLMR_ViT-L](https://huggingface.co/LinWeizheDragon/PreFLMR_ViT-L) | 543M | 60.5 | 71.8 | 59.8 | 43.6 | 69.2 | 57.9 | 70.8 | 68.5 | 78.7 | | PreFLMR | ViT-G | Base-v2 | [LinWeizheDragon/PreFLMR_ViT-G](https://huggingface.co/LinWeizheDragon/PreFLMR_ViT-G) | 2.1B | 61.5 | 72.4 | 63.4 | 42.1 |71.5 | 59.6 | 73.1 | 68.6 | 78.6 | For the evaluation metrics, WIT uses Recall@10, IGLUE uses Recall@1, and all the rest datasets use Recall@5. 
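As a small, illustrative sketch (not the official evaluation code) of the Recall@K metric referenced above, under the common convention that a query counts as a hit when at least one relevant document appears among its top-K retrieved results:

```python
def recall_at_k(retrieved, relevant, k):
    # retrieved: list of ranked document-id lists, one per query
    # relevant: list of sets of gold document ids, one per query
    hits = sum(1 for docs, gold in zip(retrieved, relevant) if set(docs[:k]) & gold)
    return hits / len(retrieved)

print(recall_at_k([["d3", "d7", "d1"], ["d2", "d9"]], [{"d1"}, {"d4"}], k=3))  # 0.5
```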
## Citation **BibTeX:** ``` @article{Lin_Mei_Chen_Byrne_2024, title={PreFLMR: Scaling Up Fine-Grained Late-Interaction Multi-modal Retrievers}, url={http://arxiv.org/abs/2402.08327}, number={arXiv:2402.08327}, publisher={arXiv}, author={Lin, Weizhe and Mei, Jingbiao and Chen, Jinghong and Byrne, Bill}, year={2024}} ```
null
Non_BioNLP
# PreFLMR model card PreFLMR is an open-source model for multimodal knowledge retrieval. It is a transformer-based model that uses a combination of text and image inputs to retrieve relevant documents from a large corpus. ## Model Details ### Model Description - **Model type:** FLMRModelForRetrieval - **Language(s) (NLP):** English - **License:** MIT License ### Paper and resources for more detail - **Blog Post for quick overview:** https://www.jinghong-chen.net/preflmr-sota-open-sourced-multi/ - **Paper:** https://arxiv.org/abs/2402.08327 - **Gradio Demo:** https://u60544-b8d4-53eaa55d.westx.seetacloud.com:8443/ - **Repository:** https://github.com/LinWeizheDragon/FLMR - **Project Page:** https://preflmr.github.io/ ## Uses ### Direct Use This model can be used directly to retrieve documents from a large corpus using a combination of text and image input queries. The retrieval usage can be found in the [official implementation](https://github.com/LinWeizheDragon/FLMR). ### Downstream Use <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> This model can be used combined with language models to create a retrieval-augmented language model. The use for Knowledge-based VQA can be found in [RAVQA](https://github.com/linweizhedragon/retrieval-augmented-visual-question-answering) ## How to Get Started with the Model For details of training, indexing, and performing retrieval, please refer to [here](https://github.com/LinWeizheDragon/FLMR). ## Training datasets The model is pre-trained on three types of tasks with a total of nine datasets: 1. Image to Text retrieval: WIT, KVQA, and CC3M 2. Question to Text retrieval: MSMARCO 3. Image & Question to Text retrieval: LLaVA, OVEN, OKVQA, Infoseek and E-VQA These datasets were converted to retrieval format. For details on the dataset split and conversion process, please refer to the paper [PreFLMR: Scaling Up Fine-Grained Late-Interaction Multi-modal Retrievers](https://arxiv.org/abs/2402.08327). We will release the proprocessed datasets soon. ## Evaluation datasets We evaluate our models on WIT, LLaVA, OVEN, KVQA, IGLUE (subset of WIT), Infoseek, E-VQA, OKVQA and MSMARCO. | Model | Vision Encoder | Text Encoder | Checkpoint Name | No. Param. | WIT | LLaVA | OVEN | KVQA | IGLUE | Infoseek | E-VQA | OKVQA | MSMARCO | |---------|----------------|--------------|-------------------------------------------------------------|-------|-------|--------|-------|-------|-------|----------|-------|--------|-------| | PreFLMR | ViT-B | Base-v2 | [LinWeizheDragon/PreFLMR_ViT-B](https://huggingface.co/LinWeizheDragon/PreFLMR_ViT-B) | 327M | 41.7 | 67.2 | 46.3 | 28.6 | 57.3 | 48.8 | 67.9 | 66.1 | 79.5 | | PreFLMR | ViT-L | Base-v2 | [LinWeizheDragon/PreFLMR_ViT-L](https://huggingface.co/LinWeizheDragon/PreFLMR_ViT-L) | 543M | 60.5 | 71.8 | 59.8 | 43.6 | 69.2 | 57.9 | 70.8 | 68.5 | 78.7 | | PreFLMR | ViT-G | Base-v2 | [LinWeizheDragon/PreFLMR_ViT-G](https://huggingface.co/LinWeizheDragon/PreFLMR_ViT-G) | 2.1B | 61.5 | 72.4 | 63.4 | 42.1 |71.5 | 59.6 | 73.1 | 68.6 | 78.6 | For the evaluation metrics, WIT uses Recall@10, IGLUE uses Recall@1, and all the rest datasets use Recall@5. ## Citation **BibTeX:** ``` @article{Lin_Mei_Chen_Byrne_2024, title={PreFLMR: Scaling Up Fine-Grained Late-Interaction Multi-modal Retrievers}, url={http://arxiv.org/abs/2402.08327}, number={arXiv:2402.08327}, publisher={arXiv}, author={Lin, Weizhe and Mei, Jingbiao and Chen, Jinghong and Byrne, Bill}, year={2024}} ```
{"language": ["en"], "library_name": "transformers", "license": "mit", "tags": ["retrieval", "multi-modal", "knowledge-based visual question answering", "FLMR", "PreFLMR"]}
task
[ "QUESTION_ANSWERING" ]
40,275
inceptionai/jais-family-30b-16k
inceptionai
text-generation
[ "safetensors", "jais", "Arabic", "English", "LLM", "Decoder", "causal-lm", "jais-family", "text-generation", "custom_code", "ar", "en", "arxiv:2307.09288", "arxiv:2308.16149", "arxiv:2402.12840", "license:apache-2.0", "region:us" ]
2024-08-02T11:58:41Z
2024-09-11T11:20:17+00:00
11
9
--- language: - ar - en license: apache-2.0 pipeline_tag: text-generation tags: - Arabic - English - LLM - Decoder - causal-lm - jais-family --- # Jais Family Model Card The Jais family of models is a comprehensive series of bilingual English-Arabic large language models (LLMs). These models are optimized to excel in Arabic while having strong English capabilities. We release two variants of foundation models that include: - Models **pre-trained from scratch** (`jais-family-*`). - Models **pre-trained adaptively from [Llama-2](https://arxiv.org/pdf/2307.09288)** (`jais-adapted-*`). In this release, we introduce 20 models across 8 sizes, ranging from 590M to 70B parameters, trained on up to 1.6T tokens of Arabic, English, and code data. *All* pre-trained models in this series are instruction fine-tuned (`*-chat`) for dialog using a curated mix of Arabic and English instruction data. We hope this extensive release will accelerate research in Arabic NLP, and enable numerous downstream applications for the Arabic speaking and bilingual community. The training and adaptation techniques we demonstrate successfully for Arabic models are extensible to other low and medium resource languages. ## Jais Family Details - **Developed by:** Inception, Cerebras Systems. - **Language(s):** (NLP): Arabic (MSA) and English. - **Input:** Text only data. - **Output:** Model generates text. - **Model Sizes:** 590M, 1.3B, 2.7B, 6.7B, 7B, 13B, 30B, 70B. - **Demo:** [Access the live demo here](https://arabic-gpt.ai/) - **License:** Apache 2.0 | **Pre-trained Model** | **Fine-tuned Model** | **Size (Parameters)** | **Context length (Tokens)** | |:---------------------|:--------|:-------|:-------| | [jais-family-30b-16k](https://huggingface.co/inceptionai/jais-family-30b-16k) | [Jais-family-30b-16k-chat](https://huggingface.co/inceptionai/jais-family-30b-16k-chat) | 30B | 16,384 | | [jais-family-30b-8k](https://huggingface.co/inceptionai/jais-family-30b-8k) | [Jais-family-30b-8k-chat](https://huggingface.co/inceptionai/jais-family-30b-8k-chat) | 30B | 8,192 | | [jais-family-13b ](https://huggingface.co/inceptionai/jais-family-13b) | [Jais-family-13b-chat](https://huggingface.co/inceptionai/jais-family-13b-chat) | 13B | 2,048 | | [jais-family-6p7b](https://huggingface.co/inceptionai/jais-family-6p7b) | [Jais-family-6p7b-chat](https://huggingface.co/inceptionai/jais-family-6p7b-chat) | 6.7B | 2,048 | | [jais-family-2p7b](https://huggingface.co/inceptionai/jais-family-2p7b) | [Jais-family-2p7b-chat](https://huggingface.co/inceptionai/jais-family-2p7b-chat) | 2.7B | 2,048 | | [jais-family-1p3b](https://huggingface.co/inceptionai/jais-family-1p3b) | [Jais-family-1p3b-chat](https://huggingface.co/inceptionai/jais-family-1p3b-chat) | 1.3B | 2,048 | | [jais-family-590m](https://huggingface.co/inceptionai/jais-family-590m) | [Jais-family-590m-chat](https://huggingface.co/inceptionai/jais-family-590m-chat) | 590M | 2,048 | | **Adapted pre-trained Model** | **Fine-tuned Model** | **Size (Parameters)** | **Context length (Tokens)** | |:---------------------|:--------|:-------|:-------| | [jais-adapted-70b](https://huggingface.co/inceptionai/jais-adapted-70b) | [Jais-adapted-70b-chat](https://huggingface.co/inceptionai/jais-adapted-70b-chat) | 70B | 4,096 | | [jais-adapted-13b](https://huggingface.co/inceptionai/jais-adapted-13b) | [Jais-adapted-13b-chat](https://huggingface.co/inceptionai/jais-adapted-13b-chat) | 13B | 4,096 | | [jais-adapted-7b](https://huggingface.co/inceptionai/jais-adapted-7b) | 
[Jais-adapted-7b-chat](https://huggingface.co/inceptionai/jais-adapted-7b-chat) | 7B | 4,096 | ### Model Architecture: <a name="model-architecture"></a> All models in this family are auto-regressive language models that use a transformer-based, decoder-only architecture (GPT-3). Jais models (`jais-family-*`) are *trained from scratch*, incorporating the SwiGLU non-linear activation function and ALiBi position encoding. These architectural enhancements allow the models to extrapolate at long sequence lengths, leading to improved context handling and precision. Jais adapted models (`jais-adapted-*`) are *built on top of Llama-2*, which employs RoPE position embedding and Grouped Query Attention. We introduce tokenizer expansion with Arabic data, which improves fertility and compute efficiency by over 3x. In particular, we add `32,000` new Arabic tokens from the Jais-30b vocabulary into the Llama-2 tokenizer. To initialize these new Arabic token embeddings we first learn a linear projection from the embedding space of Jais-30b to Llama's embedding space, using the set of shared English tokens present in both vocabularies. Next, this learned projection is applied to transform the existing Jais-30b Arabic embeddings into the Llama-2 embedding space. ## Getting started Below is sample code to use the model. Note that the model requires a custom model class, so users must enable `trust_remote_code=True` while loading the model. ```python # -*- coding: utf-8 -*- import torch from transformers import AutoTokenizer, AutoModelForCausalLM model_path = "inceptionai/jais-family-30b-16k" device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained(model_path) model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", trust_remote_code=True) def get_response(text, tokenizer=tokenizer, model=model): input_ids = tokenizer(text, return_tensors="pt").input_ids inputs = input_ids.to(device) input_len = inputs.shape[-1] generate_ids = model.generate( inputs, top_p=0.9, temperature=0.3, max_length=2048, min_length=input_len + 4, repetition_penalty=1.2, do_sample=True, ) response = tokenizer.batch_decode( generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True )[0] return response text = "عاصمة دولة الإمارات العربية المتحدة ه" print(get_response(text)) text = "The capital of UAE is" print(get_response(text)) ``` ## Training Details ### Pretraining Data The Jais family of models are trained on up to 1.6 Trillion tokens of diverse English, Arabic and Code data. The data consists of the following sources: - **Web:** We used publicly available web pages, wikipedia articles, news articles, and social network content in both Arabic and English. - **Code:** To enhance the reasoning capability of our model, we include Code data in various programming languages. - **Books:** We used a selection of publicly available Arabic and English books data, which improves long-range context modelling and coherent storytelling. - **Scientific:** A subset of ArXiv papers were included to improve reasoning and long context abilities. - **Synthetic:** We augment the volume of Arabic data by translating English to Arabic using an in-house machine translation system. We restrict this to high quality English resources such as English Wikipedia and English books. We extensively preprocess and deduplicate the training data. For Arabic, we used a custom preprocessing pipeline to filter for data with high linguistic quality. 
More information on this pipeline can be found in the [Jais paper](https://arxiv.org/abs/2308.16149). - **Jais pre-trained** (`jais-family-*`): Following our previous experimentation with language alignment mixing in [Jais](https://arxiv.org/abs/2308.16149), we used a ratio of 1:2:0.4 of Arabic:English:Code data. This recipe for <u>from scratch pre-training</u> addresses Arabic data scarcity while improving performance in both languages. - **Jais adapted pre-trained** (`jais-adapted-*`): For the <u>adapted pre-training of Llama-2</u>, we utilized a larger Arabic dataset of ~334B Arabic tokens mixed with English and Code data. We vary the mixing ratio, at different model sizes, to introduce strong Arabic capabilities while maintaining performance in English. | **Pre-trained model** | **English data (tokens)** | **Arabic data (tokens)** | **Code data (tokens)** | **Total data (tokens)** | |-------------------------|---------------------------|--------------------------|------------------------|------------------------| | [jais-family-30b-16k](https://huggingface.co/inceptionai/jais-family-30b-16k) | 980B | 490B | 196B | 1666B | | [jais-family-30b-8k](https://huggingface.co/inceptionai/jais-family-30b-8k) | 882B | 441B | 177B | 1500B | | [jais-family-13b ](https://huggingface.co/inceptionai/jais-family-13b) | 283B | 141B | 56B | 480B | | [jais-family-6p7b](https://huggingface.co/inceptionai/jais-family-6p7b) | 283B | 141B | 56B | 480B | | [jais-family-2p7b](https://huggingface.co/inceptionai/jais-family-2p7b) | 283B | 141B | 56B | 480B | | [jais-family-1p3b](https://huggingface.co/inceptionai/jais-family-1p3b) | 283B | 141B | 56B | 480B | | [jais-family-590m](https://huggingface.co/inceptionai/jais-family-590m) | 283B | 141B | 56B | 480B | | [jais-adapted-70b](https://huggingface.co/inceptionai/jais-adapted-70b) | 33B | 334B | 4B | 371B | | [jais-adapted-13b](https://huggingface.co/inceptionai/jais-adapted-13b) | 127B | 140B | 13B | 280B | | [jais-adapted-7b](https://huggingface.co/inceptionai/jais-adapted-7b) | 18B | 19B | 2B | 39B | ### Finetuning data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> All chat models in the Jais family are fine-tuned using Arabic and English prompt-response pairs in both single-turn and multi-turn settings. Data sources include open-source fine-tuning datasets filtered for topic and style diversity. Additionally, internally curated human data is incorporated to enhance cultural adaptation. This data is supplemented with content generated using synthetic methods including machine translation, distillation, and model self-chat. Overall, our updated instruction-tuning dataset comprises ~10M and ~4M prompt-response pairs in English and Arabic respectively. ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> During the pre-training of (`jais-family-*`) models, documents are packed into sequences separated by EOS tokens, and the model is trained autoregressively, applying the loss to all tokens. For jais-30b models, the context length is progressively expanded from 2k to 8K to 16K by incorporating curated long-context documents in training. 
This progressive expansion leverages faster initial training at shorter context lengths, while gradually extending support for larger context lengths towards the end of the training process. During the adapted pre-training of the (`jais-adapted-*`) models, we first initialize the new tokenizer and Arabic embeddings as described in [Model Architecture](#model-architecture). In training, we implemented a two-stage approach to overcome observed higher norms of the new Arabic embeddings. In the first stage, the backbone of the model is frozen, and the embeddings are trained using approximately 15 billion tokens from a bilingual corpus of English and Arabic. In the second stage, the backbone is unfrozen, and continuous pretraining is conducted with all parameters. During instruction tuning, each training example consists of a single-turn or multi-turn prompt and its response. Instead of one example per sequence, examples are packed together while the loss is masked on the prompt tokens (see the illustrative sketch below). This approach speeds up training by allowing more examples to be processed per batch. ### Training Hyperparameters: #### Jais-family-30b-16k | Hyperparameter | Value | |----------------|-------------------------------------------| | Precision | fp32 | | Optimizer | AdamW | | Learning rate | 0 to 0.012(<=69 warmup steps)<br>0.012 to 0.00231(>69 and <=137273 steps)<br>0.00231 to 0.00048(>137273 and <= 260648 steps)<br>0.00048 to 0.000048(>260648 and <=287032 steps)| | Weight decay | 0.1 | | Batch size | 2664(<=137273 steps)<br>748(>137273 and <= 260648 steps)<br>384(>260648 and <=287032 steps)| | Context Length | 2048(<=137273 steps)<br>8192(>137273 and <= 260648 steps)<br>16384(>260648 and <=287032 steps)| | Steps | 287032 | ### Compute Infrastructure The training process was performed on the Condor Galaxy (CG) supercomputer platform. A CG contains 64 Cerebras CS-2 Wafer-Scale Engines (WSE-2) with 40 GB of SRAM, and achieves a total of 960 PetaFLOP/s. ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> We conducted a comprehensive evaluation of Jais models focusing on both English and Arabic, using LM-harness in a zero-shot setting. The evaluation criteria spanned various dimensions, including: - **Knowledge:** How well the model answers factual questions. - **Reasoning:** The model's ability to answer questions requiring reasoning. - **Misinformation/Bias:** Assessment of the model's susceptibility to generating false or misleading information, and its neutrality. 
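The instruction-tuning setup described above packs several examples per sequence and masks the loss on prompt tokens. A minimal, illustrative sketch of that masking for a single prompt–response pair is given below; it is not the authors' training code, and it uses the standard `-100` ignore index of PyTorch/Transformers cross-entropy loss.

```python
import torch

IGNORE_INDEX = -100  # positions with this label are excluded from the loss

def mask_prompt_loss(prompt_ids, response_ids):
    # Supervise only the response: prompt positions receive IGNORE_INDEX labels.
    input_ids = list(prompt_ids) + list(response_ids)
    labels = [IGNORE_INDEX] * len(prompt_ids) + list(response_ids)
    return torch.tensor([input_ids]), torch.tensor([labels])

# Toy token ids for one prompt and its response.
input_ids, labels = mask_prompt_loss([101, 7592, 102], [2023, 2003, 1996, 3437])
print(input_ids.shape, labels)
```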
### Arabic evaluation results: <style> .table-container { overflow-x: auto; white-space: nowrap; } </style> <div class="table-container"> | **Models** | Avg | ArabicMMLU*| MMLU | EXAMS*| LitQA*| agqa | agrc | Hellaswag | PIQA | BoolQA | Situated QA | ARC-C | OpenBookQA | TruthfulQA | CrowS-Pairs | |--------------------------|-------|------------|-------|-------|-------|------|------|------------|------|--------|-------------|-------|------------|------------|-------------| | jais-family-30b-16k | 49.2 | 44.0 | 33.4 | 40.9 | 60 | 47.8 | 49.3 | 60.9 | 68.6 | 70.3 | 41.6 | 38.7 | 31.8 | 45.2 | 57 | | jais-family-30b-8k | 49.7 | 46.0 | 34 | 42 | 60.6 | 47.6 | 50.4 | 60.4 | 69 | 67.7 | 42.2 | 39.2 | 33.8 | 45.1 | 57.3 | | jais-family-13b | 46.1 | 34.0 | 30.3 | 42.7 | 58.3 | 40.5 | 45.5 | 57.3 | 68.1 | 63.1 | 41.6 | 35.3 | 31.4 | 41 | 56.1 | | jais-family-6p7b | 44.6 | 32.2 | 29.9 | 39 | 50.3 | 39.2 | 44.1 | 54.3 | 66.8 | 66.5 | 40.9 | 33.5 | 30.4 | 41.2 | 55.4 | | jais-family-2p7b | 41.0 | 29.5 | 28.5 | 36.1 | 45.7 | 32.4 | 40.8 | 44.2 | 62.5 | 62.2 | 39.2 | 27.4 | 28.2 | 43.6 | 53.6 | | jais-family-1p3b | 40.8 | 28.9 | 28.5 | 34.2 | 45.7 | 32.4 | 40.8 | 44.2 | 62.5 | 62.2 | 39.2 | 27.4 | 28.2 | 43.6 | 53.6 | | jais-family-590m | 39.7 | 31.2 | 27 | 33.1 | 41.7 | 33.8 | 38.8 | 38.2 | 60.7 | 62.2 | 37.9 | 25.5 | 27.4 | 44.7 | 53.3 | | jais-family-30b-16k-chat | 51.6 | 59.9 | 34.6 | 40.2 | 58.9 | 46.8 | 54.7 | 56.2 | 64.4 | 76.7 | 55.9 | 40.8 | 30.8 | 49.5 | 52.9 | | jais-family-30b-8k-chat | 51.4 | 61.2 | 34.2 | 40.2 | 54.3 | 47.3 | 53.6 | 60 | 63.4 | 76.8 | 54.7 | 39.5 | 30 | 50.7 | 54.3 | | jais-family-13b-chat | 50.3 | 58.2 | 33.9 | 42.9 | 53.1 | 46.8 | 51.7 | 59.3 | 65.4 | 75.2 | 51.2 | 38.4 | 29.8 | 44.8 | 53.8 | | jais-family-6p7b-chat | 48.7 | 55.7 | 32.8 | 37.7 | 49.7 | 40.5 | 50.1 | 56.2 | 62.9 | 79.4 | 52 | 38 | 30.4 | 44.7 | 52 | | jais-family-2p7b-chat | 45.6 | 50.0 | 31.5 | 35.9 | 41.1 | 37.3 | 42.1 | 48.6 | 63.7 | 74.4 | 50.9 | 35.3 | 31.2 | 44.5 | 51.3 | | jais-family-1p3b-chat | 42.7 | 42.2 | 30.1 | 33.6 | 40.6 | 34.1 | 41.2 | 43 | 63.6 | 69.3 | 44.9 | 31.6 | 28 | 45.6 | 50.4 | | jais-family-590m-chat | 37.8 | 39.1 | 28 |29.5 | 33.1 | 30.8 | 36.4 | 30.3 | 57.8 | 57.2 | 40.5 | 25.9 | 26.8 | 44.5 | 49.3 | | **Adapted Models** | Avg | ArabicMMLU*| MMLU | EXAMS*| LitQA*| agqa | agrc | Hellaswag | PIQA | BoolQA | Situated QA | ARC-C | OpenBookQA | TruthfulQA | CrowS-Pairs | |--------------------------|-------|------------|-------|-------|-------|------|------|------------|------|--------|-------------|-------|------------|------------|-------------| | jais-adapted-70b | 51.5 | 55.9 | 36.8 | 42.3 | 58.3 | 48.6 | 54 | 61.5 | 68.4 | 68.4 | 42.1 | 42.6 | 33 | 50.2 | 58.3 | | jais-adapted-13b | 46.6 | 44.7 | 30.6 | 37.7 | 54.3 | 43.8 | 48.3 | 54.9 | 67.1 | 64.5 | 40.6 | 36.1 | 32 | 43.6 | 54.00 | | jais-adapted-7b | 42.0 | 35.9 | 28.9 | 36.7 | 46.3 | 34.1 | 40.3 | 45 | 61.3 | 63.8 | 38.1 | 29.7 | 30.2 | 44.3 | 53.6 | | jais-adapted-70b-chat | 52.9 | 66.8 | 34.6 | 42.5 | 62.9 | 36.8 | 48.6 | 64.5 | 69.7 | 82.8 | 49.3 | 44.2 | 32.2 | 53.3 | 52.4 | | jais-adapted-13b-chat | 50.3 | 59.0 | 31.7 | 37.5 | 56.6 | 41.9 | 51.7 | 58.8 | 67.1 | 78.2 | 45.9 | 41 | 34.2 | 48.3 | 52.1 | | jais-adapted-7b-chat | 46.1 | 51.3 | 30 | 37 | 48 | 36.8 | 48.6 | 51.1 | 62.9 | 72.4 | 41.3 | 34.6 | 30.4 | 48.6 | 51.8 | </div> Arabic benchmarks are translated using an in-house MT model and reviewed by Arabic linguists. 
Benchmarks labeled with an asterisk (*) are natively Arabic; for further details, see the [Jais paper](https://arxiv.org/abs/2308.16149). Additionally, we include [ArabicMMLU](https://arxiv.org/abs/2402.12840), a native Arabic benchmark based on regional knowledge. ### English evaluation results: <div class="table-container"> | **Models** | Avg | MMLU | RACE | Hellaswag | PIQA | BoolQA | SIQA | ARC-Challenge | OpenBookQA | Winogrande | TruthfulQA | CrowS-Pairs | |--------------------------|----------|------|------|-----------|------|--------|------|---------------|------------|------------|----------------|-------------| | jais-family-30b-16k | 59.3 | 42.2 | 40.5 | 79.7 | 80.6 | 78.7 | 48.8 | 50.3 | 44.2 | 71.6 | 43.5 | 72.6 | | jais-family-30b-8k | 58.8 | 42.3 | 40.3 | 79.1 | 80.5 | 80.9 | 49.3 | 48.4 | 43.2 | 70.6 | 40.3 | 72.3 | | jais-family-13b | 54.6 | 32.3 | 39 | 72 | 77.4 | 73.9 | 47.9 | 43.2 | 40 | 67.1 | 36.1 | 71.7 | | jais-family-6p7b | 53.1 | 32 | 38 | 69.3 | 76 | 71.7 | 47.1 | 40.3 | 37.4 | 65.1 | 34.4 | 72.5 | | jais-family-2p7b | 51 | 29.4 | 38 | 62.7 | 74.1 | 67.4 | 45.6 | 35.1 | 35.6 | 62.9 | 40.1 | 70.2 | | jais-family-1p3b | 48.7 | 28.2 | 35.4 | 55.4 | 72 | 62.7 | 44.9 | 30.7 | 36.2 | 60.9 | 40.4 | 69 | | jais-family-590m | 45.2 | 27.8 | 32.9 | 46.1 | 68.1 | 60.4 | 43.2 | 25.6 | 30.8 | 55.8 | 40.9 | 65.3 | | jais-family-30b-16k-chat | 58.8 | 42 | 41.1 | 76.2 | 73.3 | 84.6 | 60.3 | 48.4 | 40.8 | 68.2 | 44.8 | 67 | | jais-family-30b-8k-chat | 60.3 | 40.6 | 47.1 | 78.9 | 72.7 | 90.6 | 60 | 50.1 | 43.2 | 70.6 | 44.9 | 64.2 | | jais-family-13b-chat | 57.5 | 36.6 | 42.6 | 75 | 75.8 | 87.6 | 54.4 | 47.9 | 42 | 65 | 40.6 | 64.5 | | jais-family-6p7b-chat | 56 | 36.6 | 41.3 | 72 | 74 | 86.9 | 55.4 | 44.6 | 40 | 62.4 | 41 | 62.2 | | jais-family-2p7b-chat | 52.8 | 32.7 | 40.4 | 62.2 | 71 | 84.1 | 54 | 37.2 | 36.8 | 61.4 | 40.9 | 59.8 | | jais-family-1p3b-chat | 49.3 | 31.9 | 37.4 | 54.5 | 70.2 | 77.8 | 49.8 | 34.4 | 35.6 | 52.7 | 37.2 | 60.8 | | jais-family-590m-chat | 42.6 | 27.9 | 33.4 | 33.1 | 63.7 | 60.1 | 45.3 | 26.7 | 25.8 | 50.5 | 44.5 | 57.7 | </div> <div class="table-container"> |**Adapted Models**| Avg | MMLU | RACE | Hellaswag | PIQA | BoolQA | SIQA | ARC-Challenge | OpenBookQA | Winogrande | TruthfulQA | CrowS-Pairs | |--------------------------|----------|------|------|-----------|------|--------|------|---------------|------------|------------|----------------|-------------| | jais-adapted-70b | 60.1 | 40.4 | 38.5 | 81.2 | 81.1 | 81.2 | 48.1 | 50.4 | 45 | 75.8 | 45.7 | 74 | | jais-adapted-13b | 56 | 33.8 | 39.5 | 76.5 | 78.6 | 77.8 | 44.6 | 45.9 | 44.4 | 71.4 | 34.6 | 69 | | jais-adapted-7b | 55.7 | 32.2 | 39.8 | 75.3 | 78.8 | 75.7 | 45.2 | 42.8 | 43 | 68 | 38.3 | 73.1 | | jais-adapted-70b-chat | 61.4 | 38.7 | 42.9 | 82.7 | 81.2 | 89.6 | 52.9 | 54.9 | 44.4 | 75.7 | 44 | 68.8 | | jais-adapted-13b-chat | 58.5 | 34.9 | 42.4 | 79.6 | 79.7 | 88.2 | 50.5 | 48.5 | 42.4 | 70.3 | 42.2 | 65.1 | | jais-adapted-7b-chat | 58.5 | 33.8 | 43.9 | 77.8 | 79.4 | 87.1 | 47.3 | 46.9 | 43.4 | 69.9 | 42 | 72.4 | </div> ### GPT-4 evaluation In addition to the LM-Harness evaluation, we conducted an open-ended generation evaluation using GPT-4-as-a-judge. We measured pairwise win-rates of model responses in both Arabic and English on a fixed set of 80 prompts from the Vicuna test set. English prompts were translated to Arabic by our in-house linguists. 
In the following, we compare the models in this release of the jais family against previously released versions: <p align="center"> <img src="https://huggingface.co/inceptionai/JaisFamilySupplmentary/resolve/main/jais.png" alt="Jais-adapted GPT-4"> </p> <p align="center"> <em>GPT-4-as-a-judge evaluation of Jais in Arabic and English. Jais family models are significantly better than previous Jais at generations in both languages. </em> </p> <p align="center"> <img src="https://huggingface.co/inceptionai/JaisFamilySupplmentary/resolve/main/jais-adapted.png" alt="Jais-adapted GPT-4"> </p> <p align="center"> <em>GPT-4-as-a-judge evaluation of adapted Jais in Arabic and English. The generation quality of Arabic is significantly enhanced, while achieving improvement in English when compared to Llama-2 instruct. </em> </p> Besides pairwise comparison, we also perform MT-bench style single-answer grading on a scale of 1 to 10. <p align="center"> <img src="https://huggingface.co/inceptionai/JaisFamilySupplmentary/resolve/main/mt_bench.png" alt="MT-bench"> </p> <p align="center"> <em>MT-bench style single-answer grading evaluation of Jais and adapted Jais in Arabic and English. Comparisons are made between select corresponding models from earlier releases. The quality ratings of responses are generally improved, with significant enhancements in Arabic.</em> </p> ## Intended use We release the Jais family of models under a full open-source license. We welcome all feedback and opportunities to collaborate. Spanning sizes from 590M to 70B parameters, this suite of bilingual models accommodates a wide range of use cases. Some potential downstream applications include: - **Research**: The Jais family serves Arabic researchers and NLP practitioners, offering both compute-efficient and advanced model sizes - Natural language understanding and generation tasks. - Mechanistic interpretability analyses on cultural alignment in bilingual pre-trained and adapted pre-trained models. - Quantitative studies of Arabic cultural and linguistic phenomena. - **Commercial Use**: Jais 30B and 70B chat models are well-suited for direct use in chat applications with appropriate prompting or for further fine-tuning on specific tasks. - Development of chat assistants for Arabic-speaking users. - Sentiment analysis to gain insights into local markets and customer trends. - Summarization of bilingual Arabic-English documents. Audiences that we hope will benefit from our model: - **Academics**: For those researching Arabic Natural Language Processing. - **Businesses**: Companies targeting Arabic-speaking audiences. - **Developers**: Those integrating Arabic language capabilities in applications. ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> While the Jais family models are powerful Arabic and English bilingual models, it is essential to understand their limitations and the potential for misuse. It is prohibited to use the model in any manner that violates applicable laws or regulations. The following are some example scenarios where the model should not be used. - **Malicious Use**: The model should not be used to generate harmful, misleading, or inappropriate content. This includes but is not limited to: - Generating or promoting hate speech, violence, or discrimination. - Spreading misinformation or fake news. - Engaging in or promoting illegal activities.
- **Sensitive Information**: The model should not be used to handle or generate personal, confidential, or sensitive information. - **Generalization Across All Languages**: The Jais family of models is bilingual and optimized for Arabic and English. They should not be presumed to have equal proficiency in other languages or dialects. - **High-Stakes Decisions**: The model should not be used to make high-stakes decisions without human oversight. This includes medical, legal, financial, or safety-critical decisions. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> The Jais family is trained on publicly available data which was in part curated by Inception. We have employed different techniques to reduce bias in the model. While efforts have been made to minimize biases, it is likely that the model, as with all LLMs, will exhibit some bias. The fine-tuned variants are trained as an AI assistant for Arabic and English speakers. Chat models are limited to producing responses for queries in these two languages and may not produce appropriate responses to other language queries. <!-- By using Jais, you acknowledge and accept that, as with any large language model, it may generate incorrect, misleading, and/or offensive information or content. The information is not intended as advice and should not be relied upon in any way, nor are we responsible for any of the content or consequences resulting from its use. --> <!--p align="center"> <img src="https://huggingface.co/inceptionai/JaisFamilySupplmentary/resolve/main/tou.png" alt="Jais Terms of Use", width="100%"> </p--> By using Jais, you acknowledge and accept that, as with any large language model, it may generate incorrect, misleading and/or offensive information or content. The information is not intended as advice and should not be relied upon in any way, nor are we responsible for any of the content or consequences resulting from its use. We are continuously working to develop models with greater capabilities, and as such, welcome any feedback on the model. Copyright Inception Institute of Artificial Intelligence Ltd. JAIS is made available under the Apache License, Version 2.0 (the “License”). You shall not use JAIS except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, JAIS is distributed on an AS IS basis, without warranties or conditions of any kind, either express or implied. Please see the terms of the License for the specific language permissions and limitations under the License. #### Summary We release the Jais family of Arabic and English bilingual models. The wide range of pre-trained model sizes, the recipe for adapting English-centric models to Arabic, and the fine-tuning of all sizes unlock numerous use cases commercially and academically in the Arabic setting. Through this release, we aim to make LLMs more accessible to Arabic NLP researchers and companies, offering native Arabic models that provide better cultural understanding than English-centric ones. The strategies we employ for pre-training, fine-tuning and adaptation to Arabic are extensible to other low and medium resource languages, paving the way for language-focused and accessible models that cater to local contexts.
#### Citation info ```bibtex @misc{sengupta2023jais, title={Jais and Jais-chat: Arabic-Centric Foundation and Instruction-Tuned Open Generative Large Language Models}, author={Neha Sengupta, Sunil Kumar Sahu, Bokang Jia, Satheesh Katipomu, Haonan Li, Fajri Koto, William Marshall, Gurpreet Gosal, Cynthia Liu, Zhiming Chen, Osama Mohammed Afzal, Samta Kamboj, Onkar Pandit, Rahul Pal, Lalit Pradhan, Zain Muhammad Mujahid, Massa Baali, Xudong Han, Sondos Mahmoud Bsharat, Alham Fikri Aji, Zhiqiang Shen, Zhengzhong Liu, Natalia Vassilieva, Joel Hestness, Andy Hock, Andrew Feldman, Jonathan Lee, Andrew Jackson, Hector Xuguang Ren, Preslav Nakov, Timothy Baldwin and Eric Xing}, year={2023}, eprint={2308.16149}, archivePrefix={arXiv}, primaryClass={cs.CL} } @article{jaisfamilymodelcard, title={Jais Family Model Card}, author={Inception}, year={2024}, url = {https://huggingface.co/inceptionai/jais-family-30b-16k-chat/blob/main/README.md} } ```
null
Non_BioNLP
# Jais Family Model Card The Jais family of models is a comprehensive series of bilingual English-Arabic large language models (LLMs). These models are optimized to excel in Arabic while having strong English capabilities. We release two variants of foundation models that include: - Models **pre-trained from scratch** (`jais-family-*`). - Models **pre-trained adaptively from [Llama-2](https://arxiv.org/pdf/2307.09288)** (`jais-adapted-*`). In this release, we introduce 20 models across 8 sizes, ranging from 590M to 70B parameters, trained on up to 1.6T tokens of Arabic, English, and code data. *All* pre-trained models in this series are instruction fine-tuned (`*-chat`) for dialog using a curated mix of Arabic and English instruction data. We hope this extensive release will accelerate research in Arabic NLP, and enable numerous downstream applications for the Arabic speaking and bilingual community. The training and adaptation techniques we demonstrate successfully for Arabic models are extensible to other low and medium resource languages. ## Jais Family Details - **Developed by:** Inception, Cerebras Systems. - **Language(s):** (NLP): Arabic (MSA) and English. - **Input:** Text only data. - **Output:** Model generates text. - **Model Sizes:** 590M, 1.3B, 2.7B, 6.7B, 7B, 13B, 30B, 70B. - **Demo:** [Access the live demo here](https://arabic-gpt.ai/) - **License:** Apache 2.0 | **Pre-trained Model** | **Fine-tuned Model** | **Size (Parameters)** | **Context length (Tokens)** | |:---------------------|:--------|:-------|:-------| | [jais-family-30b-16k](https://huggingface.co/inceptionai/jais-family-30b-16k) | [Jais-family-30b-16k-chat](https://huggingface.co/inceptionai/jais-family-30b-16k-chat) | 30B | 16,384 | | [jais-family-30b-8k](https://huggingface.co/inceptionai/jais-family-30b-8k) | [Jais-family-30b-8k-chat](https://huggingface.co/inceptionai/jais-family-30b-8k-chat) | 30B | 8,192 | | [jais-family-13b ](https://huggingface.co/inceptionai/jais-family-13b) | [Jais-family-13b-chat](https://huggingface.co/inceptionai/jais-family-13b-chat) | 13B | 2,048 | | [jais-family-6p7b](https://huggingface.co/inceptionai/jais-family-6p7b) | [Jais-family-6p7b-chat](https://huggingface.co/inceptionai/jais-family-6p7b-chat) | 6.7B | 2,048 | | [jais-family-2p7b](https://huggingface.co/inceptionai/jais-family-2p7b) | [Jais-family-2p7b-chat](https://huggingface.co/inceptionai/jais-family-2p7b-chat) | 2.7B | 2,048 | | [jais-family-1p3b](https://huggingface.co/inceptionai/jais-family-1p3b) | [Jais-family-1p3b-chat](https://huggingface.co/inceptionai/jais-family-1p3b-chat) | 1.3B | 2,048 | | [jais-family-590m](https://huggingface.co/inceptionai/jais-family-590m) | [Jais-family-590m-chat](https://huggingface.co/inceptionai/jais-family-590m-chat) | 590M | 2,048 | | **Adapted pre-trained Model** | **Fine-tuned Model** | **Size (Parameters)** | **Context length (Tokens)** | |:---------------------|:--------|:-------|:-------| | [jais-adapted-70b](https://huggingface.co/inceptionai/jais-adapted-70b) | [Jais-adapted-70b-chat](https://huggingface.co/inceptionai/jais-adapted-70b-chat) | 70B | 4,096 | | [jais-adapted-13b](https://huggingface.co/inceptionai/jais-adapted-13b) | [Jais-adapted-13b-chat](https://huggingface.co/inceptionai/jais-adapted-13b-chat) | 13B | 4,096 | | [jais-adapted-7b](https://huggingface.co/inceptionai/jais-adapted-7b) | [Jais-adapted-7b-chat](https://huggingface.co/inceptionai/jais-adapted-7b-chat) | 7B | 4,096 | ### Model Architecture: <a name="model-architecture"></a> All models in this 
family are auto-regressive language models that use a transformer-based, decoder-only architecture (GPT-3). Jais models (`jais-family-*`) are *trained from scratch*, incorporating the SwiGLU non-linear activation function and ALiBi position encoding. These architectural enhancements allow the models to extrapolate at long sequence lengths, leading to improved context handling and precision. Jais adapted models (`jais-adapted-*`) are *built on top of Llama-2*, which employs RoPE position embedding and Grouped Query Attention. We introduce tokenizer expansion with Arabic data, which improves fertility and compute efficiency by over 3x. In particular, we add `32,000` new Arabic tokens from the Jais-30b vocabulary into the Llama-2 tokenizer. To initialize these new Arabic token embeddings we first learn a linear projection from the embedding space of Jais-30b to Llama's embedding space, using the set of shared English tokens present in both vocabularies. Next, this learned projection is applied to transform the existing Jais-30b Arabic embeddings into the Llama-2 embedding space. ## Getting started Below is sample code to use the model. Note that the model requires a custom model class, so users must enable `trust_remote_code=True` while loading the model. ```python # -*- coding: utf-8 -*- import torch from transformers import AutoTokenizer, AutoModelForCausalLM model_path = "inceptionai/jais-family-30b-16k" device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained(model_path) model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", trust_remote_code=True) def get_response(text, tokenizer=tokenizer, model=model): input_ids = tokenizer(text, return_tensors="pt").input_ids inputs = input_ids.to(device) input_len = inputs.shape[-1] generate_ids = model.generate( inputs, top_p=0.9, temperature=0.3, max_length=2048, min_length=input_len + 4, repetition_penalty=1.2, do_sample=True, ) response = tokenizer.batch_decode( generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True )[0] return response text = "عاصمة دولة الإمارات العربية المتحدة ه" print(get_response(text)) text = "The capital of UAE is" print(get_response(text)) ``` ## Training Details ### Pretraining Data The Jais family of models are trained on up to 1.6 Trillion tokens of diverse English, Arabic and Code data. The data consists of the following sources: - **Web:** We used publicly available web pages, wikipedia articles, news articles, and social network content in both Arabic and English. - **Code:** To enhance the reasoning capability of our model, we include Code data in various programming languages. - **Books:** We used a selection of publicly available Arabic and English books data, which improves long-range context modelling and coherent storytelling. - **Scientific:** A subset of ArXiv papers were included to improve reasoning and long context abilities. - **Synthetic:** We augment the volume of Arabic data by translating English to Arabic using an in-house machine translation system. We restrict this to high quality English resources such as English Wikipedia and English books. We extensively preprocess and deduplicate the training data. For Arabic, we used a custom preprocessing pipeline to filter for data with high linguistic quality. More information on this pipeline can be found in the [Jais paper](https://arxiv.org/abs/2308.16149). 
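For illustration, the Arabic-embedding initialization described in the Model Architecture section above (a linear projection from the Jais-30b embedding space into the Llama-2 space, fit on the shared English tokens) can be sketched as follows; the dimensions, token counts, and least-squares fit are assumptions for exposition rather than the released training code.

```python
import torch

# Illustrative sketch of the Arabic-embedding initialization described in the
# Model Architecture section above. Shapes, sizes, and the least-squares fit
# are placeholders, not the released training code.
torch.manual_seed(0)
d_jais, d_llama = 512, 768        # placeholder hidden sizes (real models are larger)
n_shared, n_arabic = 4000, 32000  # shared English tokens / new Arabic tokens

E_jais_shared = torch.randn(n_shared, d_jais)    # Jais-30b embeddings of shared English tokens
E_llama_shared = torch.randn(n_shared, d_llama)  # Llama-2 embeddings of the same tokens
E_jais_arabic = torch.randn(n_arabic, d_jais)    # Jais-30b embeddings of the new Arabic tokens

# 1) Learn a linear projection W from the Jais embedding space to the Llama-2 space,
#    fit on the shared English tokens (here via least squares for brevity).
W = torch.linalg.lstsq(E_jais_shared, E_llama_shared).solution  # (d_jais, d_llama)

# 2) Map the Arabic embeddings into the Llama-2 space; these rows would initialize
#    the entries appended to the extended Llama-2 embedding matrix.
E_arabic_init = E_jais_arabic @ W                               # (n_arabic, d_llama)
print(E_arabic_init.shape)
```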
- **Jais pre-trained** (`jais-family-*`): Following our previous experimentation with language alignment mixing in [Jais](https://arxiv.org/abs/2308.16149), we used a ratio of 1:2:0.4 of Arabic:English:Code data. This recipe for <u>from scratch pre-training</u> addresses Arabic data scarcity while improving performance in both languages. - **Jais adapted pre-trained** (`jais-adapted-*`): For the <u>adapted pre-training of Llama-2</u>, we utilized a larger Arabic dataset of ~334B Arabic tokens mixed with English and Code data. We vary the mixing ratio, at different model sizes, to introduce strong Arabic capabilities while maintaining performance in English. | **Pre-trained model** | **English data (tokens)** | **Arabic data (tokens)** | **Code data (tokens)** | **Total data (tokens)** | |-------------------------|---------------------------|--------------------------|------------------------|------------------------| | [jais-family-30b-16k](https://huggingface.co/inceptionai/jais-family-30b-16k) | 980B | 490B | 196B | 1666B | | [jais-family-30b-8k](https://huggingface.co/inceptionai/jais-family-30b-8k) | 882B | 441B | 177B | 1500B | | [jais-family-13b ](https://huggingface.co/inceptionai/jais-family-13b) | 283B | 141B | 56B | 480B | | [jais-family-6p7b](https://huggingface.co/inceptionai/jais-family-6p7b) | 283B | 141B | 56B | 480B | | [jais-family-2p7b](https://huggingface.co/inceptionai/jais-family-2p7b) | 283B | 141B | 56B | 480B | | [jais-family-1p3b](https://huggingface.co/inceptionai/jais-family-1p3b) | 283B | 141B | 56B | 480B | | [jais-family-590m](https://huggingface.co/inceptionai/jais-family-590m) | 283B | 141B | 56B | 480B | | [jais-adapted-70b](https://huggingface.co/inceptionai/jais-adapted-70b) | 33B | 334B | 4B | 371B | | [jais-adapted-13b](https://huggingface.co/inceptionai/jais-adapted-13b) | 127B | 140B | 13B | 280B | | [jais-adapted-7b](https://huggingface.co/inceptionai/jais-adapted-7b) | 18B | 19B | 2B | 39B | ### Finetuning data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> All chat models in the Jais family are fine-tuned using Arabic and English prompt-response pairs in both single-turn and multi-turn settings. Data sources include open-source fine-tuning datasets filtered for topic and style diversity. Additionally, internally curated human data is incorporated to enhance cultural adaptation. This data is supplemented with content generated using synthetic methods including machine translation, distillation, and model self-chat. Overall, our updated instruction-tuning dataset comprises ~10M and ~4M prompt-response pairs in English and Arabic respectively. ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> During the pre-training of (`jais-family-*`) models, documents are packed into sequences separated by EOS tokens, and the model is trained autoregressively, applying the loss to all tokens. For jais-30b models, the context length is progressively expanded from 2k to 8K to 16K by incorporating curated long-context documents in training. This progressive expansion leverages faster initial training at shorter context lengths, while gradually extending support for larger context lengths towards the end of the training process. 
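A minimal sketch of the document-packing scheme described above (documents concatenated with EOS separators and sliced into fixed-length training sequences) is shown below; the token ids and sequence length are illustrative assumptions.

```python
# Illustrative sketch: packing tokenized documents into fixed-length training
# sequences separated by an EOS token, as described for pre-training above.
# Token ids and the sequence length are placeholders.

EOS_ID = 0
SEQ_LEN = 2048  # initial context length; later stages extend to 8K and 16K

def pack_documents(token_docs, seq_len=SEQ_LEN, eos_id=EOS_ID):
    """Concatenate documents with EOS separators, then slice into seq_len chunks."""
    stream = []
    for doc in token_docs:
        stream.extend(doc)
        stream.append(eos_id)
    n_full = len(stream) // seq_len  # drop the trailing partial chunk for simplicity
    return [stream[i * seq_len:(i + 1) * seq_len] for i in range(n_full)]

# Tiny example with fake "documents" (lists of token ids) and a short sequence length.
docs = [[5, 6, 7], [8, 9], [10, 11, 12, 13]]
print(pack_documents(docs, seq_len=4))
```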
During the adapted pre-training of the (`jais-adapted-*`) models, we first initialize the new tokenizer and Arabic embeddings as described in [Model Architecture](#model-architecture). In training, we implemented a two-stage approach to overcome observed higher norms of the new Arabic embeddings. In the first stage, the backbone of the model is frozen, and the embeddings are trained using approximately 15 billion tokens from a bilingual corpus of English and Arabic. In the second stage, the backbone is unfrozen, and pre-training continues with all parameters. During instruction tuning, each training example consists of a single-turn or multi-turn prompt and its response. Instead of one example per sequence, examples are packed together while the loss is masked on the prompt tokens. This approach speeds up training by allowing more examples to be processed per batch. ### Training Hyperparameters: #### Jais-family-30b-16k | Hyperparameter | Value | |----------------|-------------------------------------------| | Precision | fp32 | | Optimizer | AdamW | | Learning rate | 0 to 0.012(<=69 warmup steps)<br>0.012 to 0.00231(>69 and <=137273 steps)<br>0.00231 to 0.00048(>137273 and <= 260648 steps)<br>0.00048 to 0.000048(>260648 and <=287032 steps)| | Weight decay | 0.1 | | Batch size | 2664(<=137273 steps)<br>748(>137273 and <= 260648 steps)<br>384(>260648 and <=287032 steps)| | Context Length | 2048(<=137273 steps)<br>8192(>137273 and <= 260648 steps)<br>16384(>260648 and <=287032 steps)| | Steps | 287032 | ### Compute Infrastructure The training process was performed on the Condor Galaxy (CG) supercomputer platform. A CG contains 64 Cerebras CS-2 Wafer-Scale Engines (WSE-2) with 40 GB of SRAM, and achieves a total of 960 PetaFLOP/s. ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> We conducted a comprehensive evaluation of Jais models focusing on both English and Arabic, using LM-harness in a zero-shot setting. The evaluation criteria spanned various dimensions, including: - **Knowledge:** How well the model answers factual questions. - **Reasoning:** The model's ability to answer questions requiring reasoning. - **Misinformation/Bias:** Assessment of the model's susceptibility to generating false or misleading information, and its neutrality.
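For reference, a zero-shot LM-harness run of the kind described above might look roughly like the sketch below; the task names, harness version, and arguments are assumptions, and the reported scores were produced with the project's own evaluation setup.

```python
# Illustrative sketch of a zero-shot evaluation with EleutherAI's lm-evaluation-harness
# (lm_eval >= 0.4 assumed). Task names and settings are examples, not the exact setup
# behind the tables below.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=inceptionai/jais-family-6p7b,trust_remote_code=True",
    tasks=["hellaswag", "piqa", "boolq"],
    num_fewshot=0,
    batch_size=8,
)
print(results["results"])
```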
### Arabic evaluation results: <style> .table-container { overflow-x: auto; white-space: nowrap; } </style> <div class="table-container"> | **Models** | Avg | ArabicMMLU*| MMLU | EXAMS*| LitQA*| agqa | agrc | Hellaswag | PIQA | BoolQA | Situated QA | ARC-C | OpenBookQA | TruthfulQA | CrowS-Pairs | |--------------------------|-------|------------|-------|-------|-------|------|------|------------|------|--------|-------------|-------|------------|------------|-------------| | jais-family-30b-16k | 49.2 | 44.0 | 33.4 | 40.9 | 60 | 47.8 | 49.3 | 60.9 | 68.6 | 70.3 | 41.6 | 38.7 | 31.8 | 45.2 | 57 | | jais-family-30b-8k | 49.7 | 46.0 | 34 | 42 | 60.6 | 47.6 | 50.4 | 60.4 | 69 | 67.7 | 42.2 | 39.2 | 33.8 | 45.1 | 57.3 | | jais-family-13b | 46.1 | 34.0 | 30.3 | 42.7 | 58.3 | 40.5 | 45.5 | 57.3 | 68.1 | 63.1 | 41.6 | 35.3 | 31.4 | 41 | 56.1 | | jais-family-6p7b | 44.6 | 32.2 | 29.9 | 39 | 50.3 | 39.2 | 44.1 | 54.3 | 66.8 | 66.5 | 40.9 | 33.5 | 30.4 | 41.2 | 55.4 | | jais-family-2p7b | 41.0 | 29.5 | 28.5 | 36.1 | 45.7 | 32.4 | 40.8 | 44.2 | 62.5 | 62.2 | 39.2 | 27.4 | 28.2 | 43.6 | 53.6 | | jais-family-1p3b | 40.8 | 28.9 | 28.5 | 34.2 | 45.7 | 32.4 | 40.8 | 44.2 | 62.5 | 62.2 | 39.2 | 27.4 | 28.2 | 43.6 | 53.6 | | jais-family-590m | 39.7 | 31.2 | 27 | 33.1 | 41.7 | 33.8 | 38.8 | 38.2 | 60.7 | 62.2 | 37.9 | 25.5 | 27.4 | 44.7 | 53.3 | | jais-family-30b-16k-chat | 51.6 | 59.9 | 34.6 | 40.2 | 58.9 | 46.8 | 54.7 | 56.2 | 64.4 | 76.7 | 55.9 | 40.8 | 30.8 | 49.5 | 52.9 | | jais-family-30b-8k-chat | 51.4 | 61.2 | 34.2 | 40.2 | 54.3 | 47.3 | 53.6 | 60 | 63.4 | 76.8 | 54.7 | 39.5 | 30 | 50.7 | 54.3 | | jais-family-13b-chat | 50.3 | 58.2 | 33.9 | 42.9 | 53.1 | 46.8 | 51.7 | 59.3 | 65.4 | 75.2 | 51.2 | 38.4 | 29.8 | 44.8 | 53.8 | | jais-family-6p7b-chat | 48.7 | 55.7 | 32.8 | 37.7 | 49.7 | 40.5 | 50.1 | 56.2 | 62.9 | 79.4 | 52 | 38 | 30.4 | 44.7 | 52 | | jais-family-2p7b-chat | 45.6 | 50.0 | 31.5 | 35.9 | 41.1 | 37.3 | 42.1 | 48.6 | 63.7 | 74.4 | 50.9 | 35.3 | 31.2 | 44.5 | 51.3 | | jais-family-1p3b-chat | 42.7 | 42.2 | 30.1 | 33.6 | 40.6 | 34.1 | 41.2 | 43 | 63.6 | 69.3 | 44.9 | 31.6 | 28 | 45.6 | 50.4 | | jais-family-590m-chat | 37.8 | 39.1 | 28 |29.5 | 33.1 | 30.8 | 36.4 | 30.3 | 57.8 | 57.2 | 40.5 | 25.9 | 26.8 | 44.5 | 49.3 | | **Adapted Models** | Avg | ArabicMMLU*| MMLU | EXAMS*| LitQA*| agqa | agrc | Hellaswag | PIQA | BoolQA | Situated QA | ARC-C | OpenBookQA | TruthfulQA | CrowS-Pairs | |--------------------------|-------|------------|-------|-------|-------|------|------|------------|------|--------|-------------|-------|------------|------------|-------------| | jais-adapted-70b | 51.5 | 55.9 | 36.8 | 42.3 | 58.3 | 48.6 | 54 | 61.5 | 68.4 | 68.4 | 42.1 | 42.6 | 33 | 50.2 | 58.3 | | jais-adapted-13b | 46.6 | 44.7 | 30.6 | 37.7 | 54.3 | 43.8 | 48.3 | 54.9 | 67.1 | 64.5 | 40.6 | 36.1 | 32 | 43.6 | 54.00 | | jais-adapted-7b | 42.0 | 35.9 | 28.9 | 36.7 | 46.3 | 34.1 | 40.3 | 45 | 61.3 | 63.8 | 38.1 | 29.7 | 30.2 | 44.3 | 53.6 | | jais-adapted-70b-chat | 52.9 | 66.8 | 34.6 | 42.5 | 62.9 | 36.8 | 48.6 | 64.5 | 69.7 | 82.8 | 49.3 | 44.2 | 32.2 | 53.3 | 52.4 | | jais-adapted-13b-chat | 50.3 | 59.0 | 31.7 | 37.5 | 56.6 | 41.9 | 51.7 | 58.8 | 67.1 | 78.2 | 45.9 | 41 | 34.2 | 48.3 | 52.1 | | jais-adapted-7b-chat | 46.1 | 51.3 | 30 | 37 | 48 | 36.8 | 48.6 | 51.1 | 62.9 | 72.4 | 41.3 | 34.6 | 30.4 | 48.6 | 51.8 | </div> Arabic benchmarks are translated using an in-house MT model and reviewed by Arabic linguists. 
Benchmarks labeled with an asterisk (*) are natively Arabic; for further details, see the [Jais paper](https://arxiv.org/abs/2308.16149). Additionally, we include [ArabicMMLU](https://arxiv.org/abs/2402.12840), a native Arabic benchmark based on regional knowledge. ### English evaluation results: <div class="table-container"> | **Models** | Avg | MMLU | RACE | Hellaswag | PIQA | BoolQA | SIQA | ARC-Challenge | OpenBookQA | Winogrande | TruthfulQA | CrowS-Pairs | |--------------------------|----------|------|------|-----------|------|--------|------|---------------|------------|------------|----------------|-------------| | jais-family-30b-16k | 59.3 | 42.2 | 40.5 | 79.7 | 80.6 | 78.7 | 48.8 | 50.3 | 44.2 | 71.6 | 43.5 | 72.6 | | jais-family-30b-8k | 58.8 | 42.3 | 40.3 | 79.1 | 80.5 | 80.9 | 49.3 | 48.4 | 43.2 | 70.6 | 40.3 | 72.3 | | jais-family-13b | 54.6 | 32.3 | 39 | 72 | 77.4 | 73.9 | 47.9 | 43.2 | 40 | 67.1 | 36.1 | 71.7 | | jais-family-6p7b | 53.1 | 32 | 38 | 69.3 | 76 | 71.7 | 47.1 | 40.3 | 37.4 | 65.1 | 34.4 | 72.5 | | jais-family-2p7b | 51 | 29.4 | 38 | 62.7 | 74.1 | 67.4 | 45.6 | 35.1 | 35.6 | 62.9 | 40.1 | 70.2 | | jais-family-1p3b | 48.7 | 28.2 | 35.4 | 55.4 | 72 | 62.7 | 44.9 | 30.7 | 36.2 | 60.9 | 40.4 | 69 | | jais-family-590m | 45.2 | 27.8 | 32.9 | 46.1 | 68.1 | 60.4 | 43.2 | 25.6 | 30.8 | 55.8 | 40.9 | 65.3 | | jais-family-30b-16k-chat | 58.8 | 42 | 41.1 | 76.2 | 73.3 | 84.6 | 60.3 | 48.4 | 40.8 | 68.2 | 44.8 | 67 | | jais-family-30b-8k-chat | 60.3 | 40.6 | 47.1 | 78.9 | 72.7 | 90.6 | 60 | 50.1 | 43.2 | 70.6 | 44.9 | 64.2 | | jais-family-13b-chat | 57.5 | 36.6 | 42.6 | 75 | 75.8 | 87.6 | 54.4 | 47.9 | 42 | 65 | 40.6 | 64.5 | | jais-family-6p7b-chat | 56 | 36.6 | 41.3 | 72 | 74 | 86.9 | 55.4 | 44.6 | 40 | 62.4 | 41 | 62.2 | | jais-family-2p7b-chat | 52.8 | 32.7 | 40.4 | 62.2 | 71 | 84.1 | 54 | 37.2 | 36.8 | 61.4 | 40.9 | 59.8 | | jais-family-1p3b-chat | 49.3 | 31.9 | 37.4 | 54.5 | 70.2 | 77.8 | 49.8 | 34.4 | 35.6 | 52.7 | 37.2 | 60.8 | | jais-family-590m-chat | 42.6 | 27.9 | 33.4 | 33.1 | 63.7 | 60.1 | 45.3 | 26.7 | 25.8 | 50.5 | 44.5 | 57.7 | </div> <div class="table-container"> |**Adapted Models**| Avg | MMLU | RACE | Hellaswag | PIQA | BoolQA | SIQA | ARC-Challenge | OpenBookQA | Winogrande | TruthfulQA | CrowS-Pairs | |--------------------------|----------|------|------|-----------|------|--------|------|---------------|------------|------------|----------------|-------------| | jais-adapted-70b | 60.1 | 40.4 | 38.5 | 81.2 | 81.1 | 81.2 | 48.1 | 50.4 | 45 | 75.8 | 45.7 | 74 | | jais-adapted-13b | 56 | 33.8 | 39.5 | 76.5 | 78.6 | 77.8 | 44.6 | 45.9 | 44.4 | 71.4 | 34.6 | 69 | | jais-adapted-7b | 55.7 | 32.2 | 39.8 | 75.3 | 78.8 | 75.7 | 45.2 | 42.8 | 43 | 68 | 38.3 | 73.1 | | jais-adapted-70b-chat | 61.4 | 38.7 | 42.9 | 82.7 | 81.2 | 89.6 | 52.9 | 54.9 | 44.4 | 75.7 | 44 | 68.8 | | jais-adapted-13b-chat | 58.5 | 34.9 | 42.4 | 79.6 | 79.7 | 88.2 | 50.5 | 48.5 | 42.4 | 70.3 | 42.2 | 65.1 | | jais-adapted-7b-chat | 58.5 | 33.8 | 43.9 | 77.8 | 79.4 | 87.1 | 47.3 | 46.9 | 43.4 | 69.9 | 42 | 72.4 | </div> ### GPT-4 evaluation In addition to the LM-Harness evaluation, we conducted an open-ended generation evaluation using GPT-4-as-a-judge. We measured pairwise win-rates of model responses in both Arabic and English on a fixed set of 80 prompts from the Vicuna test set. English prompts were translated to Arabic by our in-house linguists. 
In the following, we compare the models in this release of the jais family against previously released versions: <p align="center"> <img src="https://huggingface.co/inceptionai/JaisFamilySupplmentary/resolve/main/jais.png" alt="Jais-adapted GPT-4"> </p> <p align="center"> <em>GPT-4-as-a-judge evaluation of Jais in Arabic and English. Jais family models are significantly better than previous Jais at generations in both languages. </em> </p> <p align="center"> <img src="https://huggingface.co/inceptionai/JaisFamilySupplmentary/resolve/main/jais-adapted.png" alt="Jais-adapted GPT-4"> </p> <p align="center"> <em>GPT-4-as-a-judge evaluation of adapted Jais in Arabic and English. The generation quality of Arabic is significantly enhanced, while achieving improvement in English when compared to Llama-2 instruct. </em> </p> Besides pairwise comparison, we also perform MT-bench style single-answer grading on a scale of 1 to 10. <p align="center"> <img src="https://huggingface.co/inceptionai/JaisFamilySupplmentary/resolve/main/mt_bench.png" alt="MT-bench"> </p> <p align="center"> <em>MT-bench style single-answer grading evaluation of Jais and adapted Jais in Arabic and English. Comparisons are made between select corresponding models from earlier releases. The quality ratings of responses are generally improved, with significant enhancements in Arabic.</em> </p> ## Intended use We release the Jais family of models under a full open-source license. We welcome all feedback and opportunities to collaborate. Spanning sizes from 590M to 70B parameters, this suite of bilingual models accommodates a wide range of use cases. Some potential downstream applications include: - **Research**: The Jais family serves Arabic researchers and NLP practitioners, offering both compute-efficient and advanced model sizes - Natural language understanding and generation tasks. - Mechanistic interpretability analyses on cultural alignment in bilingual pre-trained and adapted pre-trained models. - Quantitative studies of Arabic cultural and linguistic phenomena. - **Commercial Use**: Jais 30B and 70B chat models are well-suited for direct use in chat applications with appropriate prompting or for further fine-tuning on specific tasks. - Development of chat assistants for Arabic-speaking users. - Sentiment analysis to gain insights into local markets and customer trends. - Summarization of bilingual Arabic-English documents. Audiences that we hope will benefit from our model: - **Academics**: For those researching Arabic Natural Language Processing. - **Businesses**: Companies targeting Arabic-speaking audiences. - **Developers**: Those integrating Arabic language capabilities in applications. ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> While the Jais family models are powerful Arabic and English bilingual models, it is essential to understand their limitations and the potential for misuse. It is prohibited to use the model in any manner that violates applicable laws or regulations. The following are some example scenarios where the model should not be used. - **Malicious Use**: The model should not be used to generate harmful, misleading, or inappropriate content. This includes but is not limited to: - Generating or promoting hate speech, violence, or discrimination. - Spreading misinformation or fake news. - Engaging in or promoting illegal activities.
- **Sensitive Information**: The model should not be used to handle or generate personal, confidential, or sensitive information. - **Generalization Across All Languages**: The Jais family of models is bilingual and optimized for Arabic and English. They should not be presumed to have equal proficiency in other languages or dialects. - **High-Stakes Decisions**: The model should not be used to make high-stakes decisions without human oversight. This includes medical, legal, financial, or safety-critical decisions. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> The Jais family is trained on publicly available data which was in part curated by Inception. We have employed different techniques to reduce bias in the model. While efforts have been made to minimize biases, it is likely that the model, as with all LLMs, will exhibit some bias. The fine-tuned variants are trained as an AI assistant for Arabic and English speakers. Chat models are limited to producing responses for queries in these two languages and may not produce appropriate responses to other language queries. <!-- By using Jais, you acknowledge and accept that, as with any large language model, it may generate incorrect, misleading, and/or offensive information or content. The information is not intended as advice and should not be relied upon in any way, nor are we responsible for any of the content or consequences resulting from its use. --> <!--p align="center"> <img src="https://huggingface.co/inceptionai/JaisFamilySupplmentary/resolve/main/tou.png" alt="Jais Terms of Use", width="100%"> </p--> By using Jais, you acknowledge and accept that, as with any large language model, it may generate incorrect, misleading and/or offensive information or content. The information is not intended as advice and should not be relied upon in any way, nor are we responsible for any of the content or consequences resulting from its use. We are continuously working to develop models with greater capabilities, and as such, welcome any feedback on the model. Copyright Inception Institute of Artificial Intelligence Ltd. JAIS is made available under the Apache License, Version 2.0 (the “License”). You shall not use JAIS except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, JAIS is distributed on an AS IS basis, without warranties or conditions of any kind, either express or implied. Please see the terms of the License for the specific language permissions and limitations under the License. #### Summary We release the Jais family of Arabic and English bilingual models. The wide range of pre-trained model sizes, the recipe for adapting English-centric models to Arabic, and the fine-tuning of all sizes unlock numerous use cases commercially and academically in the Arabic setting. Through this release, we aim to make LLMs more accessible to Arabic NLP researchers and companies, offering native Arabic models that provide better cultural understanding than English-centric ones. The strategies we employ for pre-training, fine-tuning and adaptation to Arabic are extensible to other low and medium resource languages, paving the way for language-focused and accessible models that cater to local contexts.
#### Citation info ```bibtex @misc{sengupta2023jais, title={Jais and Jais-chat: Arabic-Centric Foundation and Instruction-Tuned Open Generative Large Language Models}, author={Neha Sengupta, Sunil Kumar Sahu, Bokang Jia, Satheesh Katipomu, Haonan Li, Fajri Koto, William Marshall, Gurpreet Gosal, Cynthia Liu, Zhiming Chen, Osama Mohammed Afzal, Samta Kamboj, Onkar Pandit, Rahul Pal, Lalit Pradhan, Zain Muhammad Mujahid, Massa Baali, Xudong Han, Sondos Mahmoud Bsharat, Alham Fikri Aji, Zhiqiang Shen, Zhengzhong Liu, Natalia Vassilieva, Joel Hestness, Andy Hock, Andrew Feldman, Jonathan Lee, Andrew Jackson, Hector Xuguang Ren, Preslav Nakov, Timothy Baldwin and Eric Xing}, year={2023}, eprint={2308.16149}, archivePrefix={arXiv}, primaryClass={cs.CL} } @article{jaisfamilymodelcard, title={Jais Family Model Card}, author={Inception}, year={2024}, url = {https://huggingface.co/inceptionai/jais-family-30b-16k-chat/blob/main/README.md} } ```
{"language": ["ar", "en"], "license": "apache-2.0", "pipeline_tag": "text-generation", "tags": ["Arabic", "English", "LLM", "Decoder", "causal-lm", "jais-family"]}
task
[ "TRANSLATION", "SUMMARIZATION" ]
40,276
NAB1108/News_Recommendation
NAB1108
text-classification
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-10-03T14:22:20Z
2023-10-03T22:00:57+00:00
9
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # NAB1108/News_Recommendation This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("NAB1108/News_Recommendation") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# NAB1108/News_Recommendation This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("NAB1108/News_Recommendation") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
40,277
RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf
RichardErkhov
null
[ "gguf", "arxiv:2309.08958", "endpoints_compatible", "region:us" ]
2025-02-22T11:49:06Z
2025-02-22T12:07:17+00:00
440
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) sft-fpft-cs-bloom-560m - GGUF - Model creator: https://huggingface.co/HPLT/ - Original model: https://huggingface.co/HPLT/sft-fpft-cs-bloom-560m/ | Name | Quant method | Size | | ---- | ---- | ---- | | [sft-fpft-cs-bloom-560m.Q2_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q2_K.gguf) | Q2_K | 0.39GB | | [sft-fpft-cs-bloom-560m.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ3_XS.gguf) | IQ3_XS | 0.43GB | | [sft-fpft-cs-bloom-560m.IQ3_S.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ3_S.gguf) | IQ3_S | 0.43GB | | [sft-fpft-cs-bloom-560m.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q3_K_S.gguf) | Q3_K_S | 0.43GB | | [sft-fpft-cs-bloom-560m.IQ3_M.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ3_M.gguf) | IQ3_M | 0.45GB | | [sft-fpft-cs-bloom-560m.Q3_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q3_K.gguf) | Q3_K | 0.46GB | | [sft-fpft-cs-bloom-560m.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q3_K_M.gguf) | Q3_K_M | 0.46GB | | [sft-fpft-cs-bloom-560m.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q3_K_L.gguf) | Q3_K_L | 0.47GB | | [sft-fpft-cs-bloom-560m.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ4_XS.gguf) | IQ4_XS | 0.49GB | | [sft-fpft-cs-bloom-560m.Q4_0.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_0.gguf) | Q4_0 | 0.5GB | | [sft-fpft-cs-bloom-560m.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ4_NL.gguf) | IQ4_NL | 0.5GB | | [sft-fpft-cs-bloom-560m.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_K_S.gguf) | Q4_K_S | 0.5GB | | [sft-fpft-cs-bloom-560m.Q4_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_K.gguf) | Q4_K | 0.52GB | | [sft-fpft-cs-bloom-560m.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_K_M.gguf) | Q4_K_M | 0.52GB | | [sft-fpft-cs-bloom-560m.Q4_1.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_1.gguf) | Q4_1 | 0.53GB | | [sft-fpft-cs-bloom-560m.Q5_0.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_0.gguf) | Q5_0 | 0.57GB | | [sft-fpft-cs-bloom-560m.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_K_S.gguf) | Q5_K_S | 0.57GB | | [sft-fpft-cs-bloom-560m.Q5_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_K.gguf) | Q5_K | 0.58GB | | 
[sft-fpft-cs-bloom-560m.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_K_M.gguf) | Q5_K_M | 0.58GB | | [sft-fpft-cs-bloom-560m.Q5_1.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_1.gguf) | Q5_1 | 0.6GB | | [sft-fpft-cs-bloom-560m.Q6_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q6_K.gguf) | Q6_K | 0.64GB | | [sft-fpft-cs-bloom-560m.Q8_0.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q8_0.gguf) | Q8_0 | 0.82GB | Original model description: --- language: - cs tags: - generation - question answering - instruction tuning license: cc-by-nc-4.0 --- ### Model Description This HF repository contains base LLMs that were instruction tuned (SFT) with full-parameter fine-tuning and then used to study whether monolingual or multilingual instruction tuning is more favourable. * [GitHub](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main) * [Paper](https://arxiv.org/abs/2309.08958) #### Instruction tuning details * Base model: [bloom-560m](https://huggingface.co/bloom-560m) * Instruction tuning language: Czech * Training method: full-parameter fine-tuning. * Best checkpoint: best cross-entropy on a validation set, trained for 3 epochs. * Dataset: machine-translated from [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). You can download our data [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/training-data). #### Usage The model checkpoint should be loaded using the `transformers` library. Please refer to our GitHub repository [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/fpft) for inference and training instructions. #### Citation ``` @inproceedings{chen-etal-2024-monolingual, title="Monolingual or multilingual instruction tuning: Which makes a better {Alpaca}", author="Pinzhen Chen and Shaoxiong Ji and Nikolay Bogoychev and Andrey Kutuzov and Barry Haddow and Kenneth Heafield", year="2024", booktitle = "Findings of the Association for Computational Linguistics: EACL 2024", } ```
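As a quick illustration of how one of these quantized files might be used locally, the sketch below downloads a GGUF file and runs it through the `llama-cpp-python` bindings; the quantization choice, prompt, and generation settings are arbitrary examples rather than recommendations.

```python
# Illustrative sketch: download one of the GGUF quantizations above and run it locally
# with the llama-cpp-python bindings. Quantization choice and settings are examples only.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

gguf_path = hf_hub_download(
    repo_id="RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf",
    filename="sft-fpft-cs-bloom-560m.Q4_K_M.gguf",
)

llm = Llama(model_path=gguf_path, n_ctx=2048)
out = llm("Napiš krátké shrnutí o městě Praha.", max_tokens=128)
print(out["choices"][0]["text"])
```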
null
Non_BioNLP
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) sft-fpft-cs-bloom-560m - GGUF - Model creator: https://huggingface.co/HPLT/ - Original model: https://huggingface.co/HPLT/sft-fpft-cs-bloom-560m/ | Name | Quant method | Size | | ---- | ---- | ---- | | [sft-fpft-cs-bloom-560m.Q2_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q2_K.gguf) | Q2_K | 0.39GB | | [sft-fpft-cs-bloom-560m.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ3_XS.gguf) | IQ3_XS | 0.43GB | | [sft-fpft-cs-bloom-560m.IQ3_S.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ3_S.gguf) | IQ3_S | 0.43GB | | [sft-fpft-cs-bloom-560m.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q3_K_S.gguf) | Q3_K_S | 0.43GB | | [sft-fpft-cs-bloom-560m.IQ3_M.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ3_M.gguf) | IQ3_M | 0.45GB | | [sft-fpft-cs-bloom-560m.Q3_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q3_K.gguf) | Q3_K | 0.46GB | | [sft-fpft-cs-bloom-560m.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q3_K_M.gguf) | Q3_K_M | 0.46GB | | [sft-fpft-cs-bloom-560m.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q3_K_L.gguf) | Q3_K_L | 0.47GB | | [sft-fpft-cs-bloom-560m.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ4_XS.gguf) | IQ4_XS | 0.49GB | | [sft-fpft-cs-bloom-560m.Q4_0.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_0.gguf) | Q4_0 | 0.5GB | | [sft-fpft-cs-bloom-560m.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.IQ4_NL.gguf) | IQ4_NL | 0.5GB | | [sft-fpft-cs-bloom-560m.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_K_S.gguf) | Q4_K_S | 0.5GB | | [sft-fpft-cs-bloom-560m.Q4_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_K.gguf) | Q4_K | 0.52GB | | [sft-fpft-cs-bloom-560m.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_K_M.gguf) | Q4_K_M | 0.52GB | | [sft-fpft-cs-bloom-560m.Q4_1.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q4_1.gguf) | Q4_1 | 0.53GB | | [sft-fpft-cs-bloom-560m.Q5_0.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_0.gguf) | Q5_0 | 0.57GB | | [sft-fpft-cs-bloom-560m.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_K_S.gguf) | Q5_K_S | 0.57GB | | [sft-fpft-cs-bloom-560m.Q5_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_K.gguf) | Q5_K | 0.58GB | | 
[sft-fpft-cs-bloom-560m.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_K_M.gguf) | Q5_K_M | 0.58GB | | [sft-fpft-cs-bloom-560m.Q5_1.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q5_1.gguf) | Q5_1 | 0.6GB | | [sft-fpft-cs-bloom-560m.Q6_K.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q6_K.gguf) | Q6_K | 0.64GB | | [sft-fpft-cs-bloom-560m.Q8_0.gguf](https://huggingface.co/RichardErkhov/HPLT_-_sft-fpft-cs-bloom-560m-gguf/blob/main/sft-fpft-cs-bloom-560m.Q8_0.gguf) | Q8_0 | 0.82GB | Original model description: --- language: - cs tags: - generation - question answering - instruction tuning license: cc-by-nc-4.0 --- ### Model Description This HF repository contains base LLMs that were instruction tuned (SFT) with full-parameter fine-tuning and then used to study whether monolingual or multilingual instruction tuning is more favourable. * [GitHub](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main) * [Paper](https://arxiv.org/abs/2309.08958) #### Instruction tuning details * Base model: [bloom-560m](https://huggingface.co/bloom-560m) * Instruction tuning language: Czech * Training method: full-parameter fine-tuning. * Best checkpoint: best cross-entropy on a validation set, trained for 3 epochs. * Dataset: machine-translated from [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). You can download our data [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/training-data). #### Usage The model checkpoint should be loaded using the `transformers` library. Please refer to our GitHub repository [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/fpft) for inference and training instructions. #### Citation ``` @inproceedings{chen-etal-2024-monolingual, title="Monolingual or multilingual instruction tuning: Which makes a better {Alpaca}", author="Pinzhen Chen and Shaoxiong Ji and Nikolay Bogoychev and Andrey Kutuzov and Barry Haddow and Kenneth Heafield", year="2024", booktitle = "Findings of the Association for Computational Linguistics: EACL 2024", } ```
{}
task
[ "QUESTION_ANSWERING" ]
40,278
msclar/referee-control_iter-4
msclar
text-generation
[ "transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-10-20T22:15:28Z
2023-03-16T04:17:17+00:00
44
0
--- {} --- # [Refer]ence-fr[ee] sentence summarization See [Github repo](https://github.com/msclar/referee) for all details. **DO NOT USE HOSTED INFERENCE API**. Instead, use the appropriate `src/generated_summaries_*.py` script, which specifies the expected delimiters and decoding parameters. ## Paper citation If you used this model for your experiments or found it helpful, consider citing the following paper: ``` @inproceedings{sclar-etal-2022-referee, title = "Referee: Reference-Free Sentence Summarization with Sharper Controllability through Symbolic Knowledge Distillation", author = "Sclar, Melanie and West, Peter and Kumar, Sachin and Tsvetkov, Yulia and Choi, Yejin", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, United Arab Emirates", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.emnlp-main.655", pages = "9649--9668", abstract = "We present Referee, a novel framework for sentence summarization that can be trained reference-free (i.e., requiring no gold summaries for supervision), while allowing direct control for compression ratio. Our work is the first to demonstrate that reference-free, controlled sentence summarization is feasible via the conceptual framework of Symbolic Knowledge Distillation (West et al., 2022), where latent knowledge in pre-trained language models is distilled via explicit examples sampled from the teacher models, further purified with three types of filters: length, fidelity, and Information Bottleneck. Moreover, we uniquely propose iterative distillation of knowledge, where student models from the previous iteration of distillation serve as teacher models in the next iteration. Starting off from a relatively modest set of GPT3-generated summaries, we demonstrate how iterative knowledge distillation can lead to considerably smaller, but better summarizers with sharper controllability. A useful by-product of this iterative distillation process is a high-quality dataset of sentence-summary pairs with varying degrees of compression ratios. Empirical results demonstrate that the final student models vastly outperform the much larger GPT3-Instruct model in terms of the controllability of compression ratios, without compromising the quality of resulting summarization.", } ```
null
Non_BioNLP
# [Refer]ence-fr[ee] sentence summarization See [Github repo](https://github.com/msclar/referee) for all details. **DO NOT USE HOSTED INFERENCE API**. Instead, use the appropriate `src/generated_summaries_*.py` script, which specifies the expected delimiters and decoding parameters. ## Paper citation If you used this model for your experiments or found it helpful, consider citing the following paper: ``` @inproceedings{sclar-etal-2022-referee, title = "Referee: Reference-Free Sentence Summarization with Sharper Controllability through Symbolic Knowledge Distillation", author = "Sclar, Melanie and West, Peter and Kumar, Sachin and Tsvetkov, Yulia and Choi, Yejin", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, United Arab Emirates", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.emnlp-main.655", pages = "9649--9668", abstract = "We present Referee, a novel framework for sentence summarization that can be trained reference-free (i.e., requiring no gold summaries for supervision), while allowing direct control for compression ratio. Our work is the first to demonstrate that reference-free, controlled sentence summarization is feasible via the conceptual framework of Symbolic Knowledge Distillation (West et al., 2022), where latent knowledge in pre-trained language models is distilled via explicit examples sampled from the teacher models, further purified with three types of filters: length, fidelity, and Information Bottleneck. Moreover, we uniquely propose iterative distillation of knowledge, where student models from the previous iteration of distillation serve as teacher models in the next iteration. Starting off from a relatively modest set of GPT3-generated summaries, we demonstrate how iterative knowledge distillation can lead to considerably smaller, but better summarizers with sharper controllability. A useful by-product of this iterative distillation process is a high-quality dataset of sentence-summary pairs with varying degrees of compression ratios. Empirical results demonstrate that the final student models vastly outperform the much larger GPT3-Instruct model in terms of the controllability of compression ratios, without compromising the quality of resulting summarization.", } ```
{}
task
[ "SUMMARIZATION" ]
40,279
martimfasantos/gemma-2-2b-Sum-DPO-gamma
martimfasantos
summarization
[ "safetensors", "gemma2", "summarization", "generated_from_trainer", "dataset:openai/summarize_from_feedback", "base_model:martimfasantos/gemma-2-2b-Sum-SFT", "base_model:finetune:martimfasantos/gemma-2-2b-Sum-SFT", "license:gemma", "region:us" ]
2024-09-27T05:01:29Z
2025-01-04T18:08:24+00:00
19
0
--- base_model: martimfasantos/gemma-2-2b-Sum-SFT datasets: - openai/summarize_from_feedback license: gemma pipeline_tag: summarization tags: - summarization - generated_from_trainer model-index: - name: gemma-2-2b-Sum-DPO-gamma results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gemma-2-2b-Sum-DPO-gamma This model is a fine-tuned version of [martimfasantos/gemma-2-2b-Sum-SFT](https://huggingface.co/martimfasantos/gemma-2-2b-Sum-SFT) on the openai/summarize_from_feedback dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-07 - train_batch_size: 1 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - gradient_accumulation_steps: 16 - total_train_batch_size: 64 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.95) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.43.3 - Pytorch 2.3.1+cu121 - Datasets 2.20.0 - Tokenizers 0.19.1
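Since the card lists only hyperparameters, the sketch below shows roughly how a DPO run with this configuration might be set up using TRL; the preference-pair preparation and trainer keyword names are assumptions (TRL's API has changed across versions), not the original training script.

```python
# Illustrative sketch of a DPO run roughly matching the hyperparameters listed above.
# Dataset preparation and trainer keyword names are assumptions, not the original script.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base = "martimfasantos/gemma-2-2b-Sum-SFT"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)

# Build preference pairs (prompt / chosen / rejected) from the comparisons split.
raw = load_dataset("openai/summarize_from_feedback", "comparisons", split="train[:2000]")

def to_pair(ex):
    better = ex["choice"]
    return {
        "prompt": ex["info"]["post"] or "",
        "chosen": ex["summaries"][better]["text"],
        "rejected": ex["summaries"][1 - better]["text"],
    }

pairs = raw.map(to_pair, remove_columns=raw.column_names)

args = DPOConfig(
    output_dir="gemma-2-2b-Sum-DPO-gamma",
    learning_rate=1e-7,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=16,
    num_train_epochs=3,
    warmup_ratio=0.1,
    lr_scheduler_type="linear",
)
trainer = DPOTrainer(model=model, args=args, train_dataset=pairs, tokenizer=tokenizer)
trainer.train()
```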
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gemma-2-2b-Sum-DPO-gamma This model is a fine-tuned version of [martimfasantos/gemma-2-2b-Sum-SFT](https://huggingface.co/martimfasantos/gemma-2-2b-Sum-SFT) on the openai/summarize_from_feedback dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-07 - train_batch_size: 1 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - gradient_accumulation_steps: 16 - total_train_batch_size: 64 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.95) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.43.3 - Pytorch 2.3.1+cu121 - Datasets 2.20.0 - Tokenizers 0.19.1
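The card above stops at the training recipe, so a minimal inference sketch is added below. It assumes the checkpoint behaves like an ordinary causal LM and that a Reddit-style "TL;DR:" prompt mirrors the openai/summarize_from_feedback setup; the card does not state the expected prompt format, so treat both as assumptions rather than the author's documented usage.

```python
# Minimal sketch, not an official usage example: the TL;DR-style prompt format is an
# assumption based on the summarize_from_feedback dataset, not stated in the card.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "martimfasantos/gemma-2-2b-Sum-DPO-gamma"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

post = "Long post text to be summarized goes here."
prompt = f"{post}\n\nTL;DR:"  # assumed prompt format
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=48, do_sample=False)
# print only the newly generated tokens
print(tokenizer.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```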
{"base_model": "martimfasantos/gemma-2-2b-Sum-SFT", "datasets": ["openai/summarize_from_feedback"], "license": "gemma", "pipeline_tag": "summarization", "tags": ["summarization", "generated_from_trainer"], "model-index": [{"name": "gemma-2-2b-Sum-DPO-gamma", "results": []}]}
task
[ "SUMMARIZATION" ]
40,280
ibm-granite/granite-3.1-3b-a800m-base
ibm-granite
text-generation
[ "transformers", "safetensors", "granitemoe", "text-generation", "language", "granite-3.1", "arxiv:0000.00000", "license:apache-2.0", "autotrain_compatible", "region:us" ]
2024-12-06T19:59:19Z
2025-01-30T23:56:21+00:00
3,372
5
--- library_name: transformers license: apache-2.0 pipeline_tag: text-generation tags: - language - granite-3.1 inference: false --- # Granite-3.1-3B-A800M-Base **Model Summary:** Granite-3.1-3B-A800M-Base extends the context length of Granite-3.0-3B-A800M-Base from 4K to 128K using a progressive training strategy by increasing the supported context length in increments while adjusting RoPE theta until the model has successfully adapted to the desired length of 128K. This long-context pre-training stage was performed using approximately 500B tokens. - **Developers:** Granite Team, IBM - **GitHub Repository:** [ibm-granite/granite-3.1-language-models](https://github.com/ibm-granite/granite-3.1-language-models) - **Website**: [Granite Docs](https://www.ibm.com/granite/docs/) - **Paper:** [Granite 3.1 Language Models (coming soon)](https://huggingface.co/collections/ibm-granite/granite-31-language-models-6751dbbf2f3389bec5c6f02d) - **Release Date**: December 18th, 2024 - **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) **Supported Languages:** English, German, Spanish, French, Japanese, Portuguese, Arabic, Czech, Italian, Korean, Dutch, and Chinese. Users may finetune Granite 3.1 models for languages beyond these 12 languages. **Intended Use:** Prominent use cases of LLMs in text-to-text generation include summarization, text classification, extraction, question-answering, and more. All Granite Base models are able to handle these tasks as they were trained on a large amount of data from various domains. Moreover, they can serve as a baseline to create specialized models for specific application scenarios. **Generation:** This is a simple example of how to use the Granite-3.1-3B-A800M-Base model. Install the following libraries: ```shell pip install torch torchvision torchaudio pip install accelerate pip install transformers ``` Then, copy the code snippet below to run the example. ```python from transformers import AutoModelForCausalLM, AutoTokenizer device = "cuda" model_path = "ibm-granite/granite-3.1-3b-a800m-base" tokenizer = AutoTokenizer.from_pretrained(model_path) # drop device_map if running on CPU model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device) model.eval() # change input text as desired input_text = "Where is the Thomas J. Watson Research Center located?" 
# tokenize the text input_tokens = tokenizer(input_text, return_tensors="pt").to(device) # generate output tokens output = model.generate(**input_tokens, max_length=4000) # decode output tokens into text output = tokenizer.batch_decode(output) # print output print(output) ``` **Evaluation Results:** <table> <caption><b>HuggingFace Open LLM Leaderboard V1</b></caption> <thead> <tr> <th style="text-align:left; background-color: #001d6c; color: white;">Models</th> <th style="text-align:center; background-color: #001d6c; color: white;">ARC-Challenge</th> <th style="text-align:center; background-color: #001d6c; color: white;">Hellaswag</th> <th style="text-align:center; background-color: #001d6c; color: white;">MMLU</th> <th style="text-align:center; background-color: #001d6c; color: white;">TruthfulQA</th> <th style="text-align:center; background-color: #001d6c; color: white;">Winogrande</th> <th style="text-align:center; background-color: #001d6c; color: white;">GSM8K</th> <th style="text-align:center; background-color: #001d6c; color: white;">Avg</th> </tr></thead> <tbody> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Granite-3.1-8B-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">63.99</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">83.27</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">63.45</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">51.29</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">78.92</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">60.19</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">66.85</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: #2D2D2D;">Granite-3.1-2B-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">53.58</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">77.67</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">52.86</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">39.02</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">72.84</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">47.99</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">57.32</td> </tr> <tr> <td style="text-align:left; background-color: #DAE8FF; color: #2D2D2D;">Granite-3.1-3B-A800M-Base</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">50.76</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">74.45</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">48.31</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">39.91</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">69.29</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">40.56</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">53.88</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: #2D2D2D;">Granite-3.1-1B-A400M-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">39.42</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">66.13</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">26.53</td> <td 
style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">37.67</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">2.03</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">18.87</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">31.78</td> </tr> </tbody></table> <table> <caption><b>HuggingFace Open LLM Leaderboard V2</b></caption> <thead> <tr> <th style="text-align:left; background-color: #001d6c; color: white;">Models</th> <th style="text-align:center; background-color: #001d6c; color: white;">IFEval</th> <th style="text-align:center; background-color: #001d6c; color: white;">BBH</th> <th style="text-align:center; background-color: #001d6c; color: white;">MATH Lvl 5</th> <th style="text-align:center; background-color: #001d6c; color: white;">GPQA</th> <th style="text-align:center; background-color: #001d6c; color: white;">MUSR</th> <th style="text-align:center; background-color: #001d6c; color: white;">MMLU-Pro</th> <th style="text-align:center; background-color: #001d6c; color: white;">Avg</th> </tr></thead> <tbody> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Granite-3.1-8B-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">42.21</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">26.02</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">9.52</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">9.51</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8.36</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">24.8</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">20.07</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: #2D2D2D;">Granite-3.1-2B-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">35.22</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">16.84</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">5.59</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">3.69</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">3.9</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">13.9</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">13.19</td> </tr> <tr> <td style="text-align:left; background-color: #DAE8FF; color: #2D2D2D;">Granite-3.1-3B-A800M-Base</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">29.96</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">11.91</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">4</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">3.69</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">1.11</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">8.81</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">9.91</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: #2D2D2D;">Granite-3.1-1B-A400M-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">25.19</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">6.43</td> <td style="text-align:center; background-color: #FFFFFF; color: 
#2D2D2D;">2.19</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">0.22</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">1.76</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">1.55</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">6.22</td> </tr> </tbody></table> **Model Architecture:** Granite-3.1-3B-A800M-Base is based on a decoder-only sparse Mixture of Experts (MoE) transformer architecture. Core components of this architecture are: Fine-grained Experts, Dropless Token Routing, and Load Balancing Loss. <table> <thead> <tr> <th style="text-align:left; background-color: #001d6c; color: white;">Model</th> <th style="text-align:center; background-color: #001d6c; color: white;">2B Dense</th> <th style="text-align:center; background-color: #001d6c; color: white;">8B Dense</th> <th style="text-align:center; background-color: #001d6c; color: white;">1B MoE</th> <th style="text-align:center; background-color: #001d6c; color: white;">3B MoE</th> </tr></thead> <tbody> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Embedding size</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">2048</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">4096</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">1024</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">1536</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Number of layers</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">40</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">40</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">24</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">32</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Attention head size</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">64</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">128</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">64</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">64</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Number of attention heads</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">32</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">32</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">16</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">24</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Number of KV heads</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">8</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">MLP hidden size</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8192</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">12800</td> <td style="text-align:center; background-color: #FFFFFF; color: 
black;">512</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">512</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">MLP activation</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">SwiGLU</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">SwiGLU</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">SwiGLU</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">SwiGLU</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Number of experts</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">—</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">—</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">32</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">40</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">MoE TopK</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">—</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">—</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">8</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Initialization std</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">0.1</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">0.1</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">0.1</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">0.1</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Sequence length</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">128K</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">128K</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">128K</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">128K</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Position embedding</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">RoPE</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">RoPE</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">RoPE</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">RoPE</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;"># Parameters</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">2.5B</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8.1B</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">1.3B</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">3.3B</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;"># Active parameters</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">2.5B</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8.1B</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">400M</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">800M</td> </tr> <tr> <td 
style="text-align:left; background-color: #FFFFFF; color: black;"># Training tokens</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">12T</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">12T</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">10T</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">10T</td> </tr> </tbody></table> **Training Data:** This model is trained on a mix of open source and proprietary data following a two-stage training strategy. * Stage 1 data: The data for stage 1 is sourced from diverse domains, such as: web, code, academic sources, books, and math data. * Stage 2 data: The data for stage 2 comprises a curated mix of high-quality data from the same domains, plus multilingual and instruction data. The goal of this second training phase is to enhance the model’s performance on specific tasks. * Stage 3 data: The data for stage 3 consists of original stage-2 pretraining data with additional synthetic long-context data in form of QA/summary pairs where the answer contains a recitation of the related paragraph before the answer. A detailed attribution of datasets can be found in the [Granite 3.0 Technical Report](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/paper.pdf), [Granite 3.1 Technical Report (coming soon)](https://huggingface.co/collections/ibm-granite/granite-31-language-models-6751dbbf2f3389bec5c6f02d), and [Accompanying Author List](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/author-ack.pdf). **Infrastructure:** We train Granite 3.1 Language Models using IBM's super computing cluster, Blue Vela, which is outfitted with NVIDIA H100 GPUs. This cluster provides a scalable and efficient infrastructure for training our models over thousands of GPUs. **Ethical Considerations and Limitations:** The use of Large Language Models involves risks and ethical considerations people must be aware of, including but not limited to: bias and fairness, misinformation, and autonomous decision-making. Granite-3.1-3B-A800M-Base model is not the exception in this regard. Even though this model is suited for multiple generative AI tasks, it has not undergone any safety alignment, there it may produce problematic outputs. Additionally, it remains uncertain whether smaller models might exhibit increased susceptibility to hallucination in generation scenarios by copying text verbatim from the training dataset due to their reduced sizes and memorization capacities. This aspect is currently an active area of research, and we anticipate more rigorous exploration, comprehension, and mitigations in this domain. Regarding ethics, a latent risk associated with all Large Language Models is their malicious utilization. We urge the community to use Granite-3.1-3B-A800M-Base model with ethical intentions and in a responsible way. **Resources** - ⭐️ Learn about the latest updates with Granite: https://www.ibm.com/granite - 📄 Get started with tutorials, best practices, and prompt engineering advice: https://www.ibm.com/granite/docs/ - 💡 Learn about the latest Granite learning resources: https://ibm.biz/granite-learning-resources <!-- ## Citation ``` @misc{granite-models, author = {author 1, author2, ...}, title = {}, journal = {}, volume = {}, year = {2024}, url = {https://arxiv.org/abs/0000.00000}, } ``` -->
null
Non_BioNLP
# Granite-3.1-3B-A800M-Base **Model Summary:** Granite-3.1-3B-A800M-Base extends the context length of Granite-3.0-3B-A800M-Base from 4K to 128K using a progressive training strategy by increasing the supported context length in increments while adjusting RoPE theta until the model has successfully adapted to the desired length of 128K. This long-context pre-training stage was performed using approximately 500B tokens. - **Developers:** Granite Team, IBM - **GitHub Repository:** [ibm-granite/granite-3.1-language-models](https://github.com/ibm-granite/granite-3.1-language-models) - **Website**: [Granite Docs](https://www.ibm.com/granite/docs/) - **Paper:** [Granite 3.1 Language Models (coming soon)](https://huggingface.co/collections/ibm-granite/granite-31-language-models-6751dbbf2f3389bec5c6f02d) - **Release Date**: December 18th, 2024 - **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) **Supported Languages:** English, German, Spanish, French, Japanese, Portuguese, Arabic, Czech, Italian, Korean, Dutch, and Chinese. Users may finetune Granite 3.1 models for languages beyond these 12 languages. **Intended Use:** Prominent use cases of LLMs in text-to-text generation include summarization, text classification, extraction, question-answering, and more. All Granite Base models are able to handle these tasks as they were trained on a large amount of data from various domains. Moreover, they can serve as a baseline to create specialized models for specific application scenarios. **Generation:** This is a simple example of how to use the Granite-3.1-3B-A800M-Base model. Install the following libraries: ```shell pip install torch torchvision torchaudio pip install accelerate pip install transformers ``` Then, copy the code snippet below to run the example. ```python from transformers import AutoModelForCausalLM, AutoTokenizer device = "cuda" model_path = "ibm-granite/granite-3.1-3b-a800m-base" tokenizer = AutoTokenizer.from_pretrained(model_path) # drop device_map if running on CPU model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device) model.eval() # change input text as desired input_text = "Where is the Thomas J. Watson Research Center located?" 
# tokenize the text input_tokens = tokenizer(input_text, return_tensors="pt").to(device) # generate output tokens output = model.generate(**input_tokens, max_length=4000) # decode output tokens into text output = tokenizer.batch_decode(output) # print output print(output) ``` **Evaluation Results:** <table> <caption><b>HuggingFace Open LLM Leaderboard V1</b></caption> <thead> <tr> <th style="text-align:left; background-color: #001d6c; color: white;">Models</th> <th style="text-align:center; background-color: #001d6c; color: white;">ARC-Challenge</th> <th style="text-align:center; background-color: #001d6c; color: white;">Hellaswag</th> <th style="text-align:center; background-color: #001d6c; color: white;">MMLU</th> <th style="text-align:center; background-color: #001d6c; color: white;">TruthfulQA</th> <th style="text-align:center; background-color: #001d6c; color: white;">Winogrande</th> <th style="text-align:center; background-color: #001d6c; color: white;">GSM8K</th> <th style="text-align:center; background-color: #001d6c; color: white;">Avg</th> </tr></thead> <tbody> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Granite-3.1-8B-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">63.99</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">83.27</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">63.45</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">51.29</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">78.92</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">60.19</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">66.85</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: #2D2D2D;">Granite-3.1-2B-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">53.58</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">77.67</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">52.86</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">39.02</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">72.84</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">47.99</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">57.32</td> </tr> <tr> <td style="text-align:left; background-color: #DAE8FF; color: #2D2D2D;">Granite-3.1-3B-A800M-Base</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">50.76</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">74.45</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">48.31</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">39.91</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">69.29</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">40.56</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">53.88</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: #2D2D2D;">Granite-3.1-1B-A400M-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">39.42</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">66.13</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">26.53</td> <td 
style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">37.67</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">2.03</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">18.87</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">31.78</td> </tr> </tbody></table> <table> <caption><b>HuggingFace Open LLM Leaderboard V2</b></caption> <thead> <tr> <th style="text-align:left; background-color: #001d6c; color: white;">Models</th> <th style="text-align:center; background-color: #001d6c; color: white;">IFEval</th> <th style="text-align:center; background-color: #001d6c; color: white;">BBH</th> <th style="text-align:center; background-color: #001d6c; color: white;">MATH Lvl 5</th> <th style="text-align:center; background-color: #001d6c; color: white;">GPQA</th> <th style="text-align:center; background-color: #001d6c; color: white;">MUSR</th> <th style="text-align:center; background-color: #001d6c; color: white;">MMLU-Pro</th> <th style="text-align:center; background-color: #001d6c; color: white;">Avg</th> </tr></thead> <tbody> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Granite-3.1-8B-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">42.21</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">26.02</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">9.52</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">9.51</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8.36</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">24.8</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">20.07</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: #2D2D2D;">Granite-3.1-2B-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">35.22</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">16.84</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">5.59</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">3.69</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">3.9</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">13.9</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">13.19</td> </tr> <tr> <td style="text-align:left; background-color: #DAE8FF; color: #2D2D2D;">Granite-3.1-3B-A800M-Base</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">29.96</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">11.91</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">4</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">3.69</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">1.11</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">8.81</td> <td style="text-align:center; background-color: #DAE8FF; color: #2D2D2D;">9.91</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: #2D2D2D;">Granite-3.1-1B-A400M-Base</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">25.19</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">6.43</td> <td style="text-align:center; background-color: #FFFFFF; color: 
#2D2D2D;">2.19</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">0.22</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">1.76</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">1.55</td> <td style="text-align:center; background-color: #FFFFFF; color: #2D2D2D;">6.22</td> </tr> </tbody></table> **Model Architecture:** Granite-3.1-3B-A800M-Base is based on a decoder-only sparse Mixture of Experts (MoE) transformer architecture. Core components of this architecture are: Fine-grained Experts, Dropless Token Routing, and Load Balancing Loss. <table> <thead> <tr> <th style="text-align:left; background-color: #001d6c; color: white;">Model</th> <th style="text-align:center; background-color: #001d6c; color: white;">2B Dense</th> <th style="text-align:center; background-color: #001d6c; color: white;">8B Dense</th> <th style="text-align:center; background-color: #001d6c; color: white;">1B MoE</th> <th style="text-align:center; background-color: #001d6c; color: white;">3B MoE</th> </tr></thead> <tbody> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Embedding size</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">2048</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">4096</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">1024</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">1536</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Number of layers</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">40</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">40</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">24</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">32</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Attention head size</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">64</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">128</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">64</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">64</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Number of attention heads</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">32</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">32</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">16</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">24</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Number of KV heads</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">8</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">MLP hidden size</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8192</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">12800</td> <td style="text-align:center; background-color: #FFFFFF; color: 
black;">512</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">512</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">MLP activation</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">SwiGLU</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">SwiGLU</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">SwiGLU</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">SwiGLU</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Number of experts</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">—</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">—</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">32</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">40</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">MoE TopK</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">—</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">—</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">8</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Initialization std</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">0.1</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">0.1</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">0.1</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">0.1</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Sequence length</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">128K</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">128K</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">128K</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">128K</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;">Position embedding</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">RoPE</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">RoPE</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">RoPE</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">RoPE</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;"># Parameters</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">2.5B</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8.1B</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">1.3B</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">3.3B</td> </tr> <tr> <td style="text-align:left; background-color: #FFFFFF; color: black;"># Active parameters</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">2.5B</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">8.1B</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">400M</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">800M</td> </tr> <tr> <td 
style="text-align:left; background-color: #FFFFFF; color: black;"># Training tokens</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">12T</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">12T</td> <td style="text-align:center; background-color: #FFFFFF; color: black;">10T</td> <td style="text-align:center; background-color: #DAE8FF; color: black;">10T</td> </tr> </tbody></table> **Training Data:** This model is trained on a mix of open source and proprietary data following a two-stage training strategy. * Stage 1 data: The data for stage 1 is sourced from diverse domains, such as: web, code, academic sources, books, and math data. * Stage 2 data: The data for stage 2 comprises a curated mix of high-quality data from the same domains, plus multilingual and instruction data. The goal of this second training phase is to enhance the model’s performance on specific tasks. * Stage 3 data: The data for stage 3 consists of original stage-2 pretraining data with additional synthetic long-context data in form of QA/summary pairs where the answer contains a recitation of the related paragraph before the answer. A detailed attribution of datasets can be found in the [Granite 3.0 Technical Report](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/paper.pdf), [Granite 3.1 Technical Report (coming soon)](https://huggingface.co/collections/ibm-granite/granite-31-language-models-6751dbbf2f3389bec5c6f02d), and [Accompanying Author List](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/author-ack.pdf). **Infrastructure:** We train Granite 3.1 Language Models using IBM's super computing cluster, Blue Vela, which is outfitted with NVIDIA H100 GPUs. This cluster provides a scalable and efficient infrastructure for training our models over thousands of GPUs. **Ethical Considerations and Limitations:** The use of Large Language Models involves risks and ethical considerations people must be aware of, including but not limited to: bias and fairness, misinformation, and autonomous decision-making. Granite-3.1-3B-A800M-Base model is not the exception in this regard. Even though this model is suited for multiple generative AI tasks, it has not undergone any safety alignment, there it may produce problematic outputs. Additionally, it remains uncertain whether smaller models might exhibit increased susceptibility to hallucination in generation scenarios by copying text verbatim from the training dataset due to their reduced sizes and memorization capacities. This aspect is currently an active area of research, and we anticipate more rigorous exploration, comprehension, and mitigations in this domain. Regarding ethics, a latent risk associated with all Large Language Models is their malicious utilization. We urge the community to use Granite-3.1-3B-A800M-Base model with ethical intentions and in a responsible way. **Resources** - ⭐️ Learn about the latest updates with Granite: https://www.ibm.com/granite - 📄 Get started with tutorials, best practices, and prompt engineering advice: https://www.ibm.com/granite/docs/ - 💡 Learn about the latest Granite learning resources: https://ibm.biz/granite-learning-resources <!-- ## Citation ``` @misc{granite-models, author = {author 1, author2, ...}, title = {}, journal = {}, volume = {}, year = {2024}, url = {https://arxiv.org/abs/0000.00000}, } ``` -->
{"library_name": "transformers", "license": "apache-2.0", "pipeline_tag": "text-generation", "tags": ["language", "granite-3.1"], "inference": false}
task
[ "TEXT_CLASSIFICATION", "SUMMARIZATION" ]
40,281
germla/satoken-en
germla
text-classification
[ "sentence-transformers", "pytorch", "bert", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-08-15T12:29:14Z
2023-08-15T12:29:55+00:00
8
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # germla/satoken-en This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("germla/satoken-en") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# germla/satoken-en This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("germla/satoken-en") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
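The card describes the two-step SetFit recipe (contrastive fine-tuning of a Sentence Transformer, then a classification head) but only shows inference. The sketch below illustrates what the corresponding few-shot training loop looks like with the `SetFitTrainer` API contemporary with this model (newer setfit releases expose a `Trainer`/`TrainingArguments` API instead); the base checkpoint, toy data, and hyperparameters are illustrative assumptions, not the settings used to train germla/satoken-en.

```python
# Illustrative sketch of the few-shot recipe described above; base model, data and
# hyper-parameters are assumptions, not the actual germla/satoken-en training setup.
from datasets import Dataset
from setfit import SetFitModel, SetFitTrainer

train_ds = Dataset.from_dict({
    "text": ["great docs, easy to set up", "crashes every time I log in",
             "love the new dashboard", "support never replied to my ticket"],
    "label": [1, 0, 1, 0],
})

model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-MiniLM-L6-v2")
trainer = SetFitTrainer(model=model, train_dataset=train_ds, num_iterations=20)
trainer.train()  # step 1: contrastive fine-tuning; step 2: fit the classification head
print(model(["the onboarding flow is confusing"]))
```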
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
40,282
Kamesh22/bart-base-News_Summarization_CNN
Kamesh22
text2text-generation
[ "transformers", "pytorch", "tensorboard", "bart", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-02-02T13:55:19Z
2023-02-22T17:40:49+00:00
14
0
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bart-base-News_Summarization_CNN results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-base-News_Summarization_CNN This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3750 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 4.3979 | 0.99 | 114 | 1.2718 | | 0.8315 | 1.99 | 228 | 0.3750 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-base-News_Summarization_CNN This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3750 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 4.3979 | 0.99 | 114 | 1.2718 | | 0.8315 | 1.99 | 228 | 0.3750 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
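Since the card carries no usage example, a minimal sketch of running the checkpoint through the standard `transformers` summarization pipeline is shown below; the input article is made-up sample text and the length limits are arbitrary choices.

```python
# Minimal sketch: run the fine-tuned checkpoint with the transformers summarization pipeline.
from transformers import pipeline

summarizer = pipeline("summarization", model="Kamesh22/bart-base-News_Summarization_CNN")
article = ("The city council voted on Monday to expand the bike-lane network, citing a rise "
           "in cycling commuters. Construction is expected to begin next spring and take a year.")
print(summarizer(article, max_length=60, min_length=10, do_sample=False)[0]["summary_text"])
```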
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "bart-base-News_Summarization_CNN", "results": []}]}
task
[ "SUMMARIZATION" ]
40,283
giux78/zefiro-7b-sft-qlora-ITA-v0.5
giux78
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "it", "dataset:giux78/100k-sft-ready-ultrafeedback-ita", "arxiv:2310.16944", "arxiv:2312.09993", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-01-25T15:32:16Z
2024-02-01T08:45:26+00:00
3,708
0
--- datasets: - giux78/100k-sft-ready-ultrafeedback-ita language: - it license: apache-2.0 pipeline_tag: text-generation --- <img src="https://hoodie-creator.s3.eu-west-1.amazonaws.com/15be78c6-original.png" alt="llamantino53" border="0" width="400px"> # Model Card for zefiro-7b-beta-ITA-v0.5 *Last Update: 11/01/2024*<br> <!-- Provide a quick summary of what the model is/does. --> Zefiro is an SFT fine-tuned model for the Italian language based on [Mistral](https://huggingface.co/mistralai/Mistral-7B-v0.1). The aim of the project is to create a set of open-source models and datasets suited for the Italian language, and this is the first experiment. The model can be used as a base model for more specific conversational tasks in Italian. ## Model Details Zefiro is a porting of the [Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) model to the Italian language using the wonderful recipes from [alignment-handbook](https://huggingface.co/alignment-handbook). It has also taken inspiration and insights from the [Llamantino](https://huggingface.co/swap-uniba/LLaMAntino-2-chat-7b-hf-UltraChat-ITA) model developed by Università di Bari. For the implementation we combined different approaches from the two models mentioned but also from the wonderful open-source community. ## Model description - **Model type:** A 7B parameter GPT-like model fine-tuned on a mix of publicly available, synthetic datasets. - **Language(s) (NLP):** Primarily Italian - **License:** Apache 2 - **Finetuned from model:** [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) - **Developed by:** [giux78](https://alessandroercolani.webflow.io/) - **Funded by:** [Business Operating System](https://www.businessos.xyz) ## Intended uses & limitations The model was initially fine-tuned on a filtered and preprocessed version of [UltraChat-ITA](https://huggingface.co/datasets/giux78/100k-sft-ready-ultrafeedback-ita) that is a filtered version of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT. Here's how you can run the model using Transformers from 🤗 : ```python # Install transformers from source - only needed for versions <= v4.34 # pip install git+https://github.com/huggingface/transformers.git # pip install accelerate from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "giux78/zefiro-7b-beta-ITA-v0.1" model = AutoModelForCausalLM.from_pretrained(model_id) model.to('cuda') tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left") sys_prompt = "Sei un assistente disponibile, rispettoso e onesto. " \ "Rispondi sempre nel modo piu' utile possibile, pur essendo sicuro. " \ "Le risposte non devono includere contenuti dannosi, non etici, razzisti, sessisti, tossici, pericolosi o illegali. " \ "Assicurati che le tue risposte siano socialmente imparziali e positive. " \ "Se una domanda non ha senso o non e' coerente con i fatti, spiegane il motivo invece di rispondere in modo non corretto. " \ "Se non conosci la risposta a una domanda, non condividere informazioni false." 
messages = [{ 'content' : sys_prompt, 'role' : 'assistant'}, {'content' : 'Crea una lista su cosa mangiare a pranzo ogni giorno della settimana a pranzo e cena', 'role' : 'user'}] def generate_text(sys_prompt, user_prompt): messages = [{ 'content' : sys_prompt, 'role' : 'assistant'}, {'content' : user_prompt, 'role' : 'user'}] prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda") generated_ids = model.generate(**model_inputs, max_new_tokens=1024) return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] generate_text(sys_prompt, 'cosa ne pensi della politica italiana?') ``` ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> Zefiro-7b-beta-ITA-v0.1 has not been aligned to human preferences for safety within the RLHF phase or deployed with in-the-loop filtering of responses like ChatGPT, so the model can produce problematic outputs (especially when prompted to do so). The size and composition of the corpus used to train the base model (`mistralai/Mistral-7B-v0.1`) are also unknown; however, it is likely to have included a mix of Web data and technical sources like books and code. See the [Falcon 180B model card](https://huggingface.co/tiiuae/falcon-180B#training-data) for an example of this. ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> We used [UltraChat-ITA](https://huggingface.co/datasets/giux78/100k-sft-ready-ultrafeedback-ita) as training data, which is a filtered version of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat). For translating the dataset we combined different tools and APIs; we are also evaluating the best approach for translating many more datasets. We have seen that the translation phase is critical and can introduce incorrect syntax and semantics. #### Summary Zefiro-7b-beta-ITA-v0.1 is a fine-tuned version of Mistral-7B using the Zephyr approach for the Italian language. ## Citation ``` @misc{tunstall2023zephyr, title={Zephyr: Direct Distillation of LM Alignment}, author={Lewis Tunstall and Edward Beeching and Nathan Lambert and Nazneen Rajani and Kashif Rasul and Younes Belkada and Shengyi Huang and Leandro von Werra and Clémentine Fourrier and Nathan Habib and Nathan Sarrazin and Omar Sanseviero and Alexander M. Rush and Thomas Wolf}, year={2023}, eprint={2310.16944}, archivePrefix={arXiv}, primaryClass={cs.LG} } @misc{basile2023llamantino, title={LLaMAntino: LLaMA 2 Models for Effective Text Generation in Italian Language}, author={Pierpaolo Basile and Elio Musacchio and Marco Polignano and Lucia Siciliani and Giuseppe Fiameni and Giovanni Semeraro}, year={2023}, eprint={2312.09993}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ## Model Card Authors [giux78](https://huggingface.co/giux78) ## Model Card Contact **[email protected]
null
Non_BioNLP
<img src="https://hoodie-creator.s3.eu-west-1.amazonaws.com/15be78c6-original.png" alt="llamantino53" border="0" width="400px"> # Model Card for zefiro-7b-beta-ITA-v0.5 *Last Update: 11/01/2024*<br> <!-- Provide a quick summary of what the model is/does. --> Zefiro is a SFT fine tuned model for the Italian language based on [Mistral](https://huggingface.co/mistralai/Mistral-7B-v0.1) . To create a set of open source models and datasets suited for italian language is the aim of the project and this is the first experiment. The model can be used as base model for more specific conversationl tasks for Italian language ## Model Details Zefiro is a porting of the [Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) model to the italian language using the wonderful recipes from [alignment-handbook](https://huggingface.co/alignment-handbook) . It has also taken ispiration and insights from the [Llamantino](https://huggingface.co/swap-uniba/LLaMAntino-2-chat-7b-hf-UltraChat-ITA) model developed by Università di Bari. For the implementation we combined different approaches from the two models mentioned but also from the wondeful communtity of open source. ## Model description - **Model type:** A 7B parameter GPT-like model fine-tuned on a mix of publicly available, synthetic datasets. - **Language(s) (NLP):** Primarily Italian - **License:** Apache 2 - **Finetuned from model:** [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) - **Developed by:** [giux78](https://alessandroercolani.webflow.io/) - **Funded by:** [Business Operating System](https://www.businessos.xyz) ## Intended uses & limitations The model was initially fine-tuned on a filtered and preprocessed version of [UltraChat-ITA](https://huggingface.co/datasets/giux78/100k-sft-ready-ultrafeedback-ita) that is a filtered version of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT. Here's how you can run the model using Transformers from 🤗 : ```python # Install transformers from source - only needed for versions <= v4.34 # pip install git+https://github.com/huggingface/transformers.git # pip install accelerate from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "giux78/zefiro-7b-beta-ITA-v0.1" model = AutoModelForCausalLM.from_pretrained(model_id) model.to('cuda') tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left") sys_prompt = "Sei un assistente disponibile, rispettoso e onesto. " \ "Rispondi sempre nel modo piu' utile possibile, pur essendo sicuro. " \ "Le risposte non devono includere contenuti dannosi, non etici, razzisti, sessisti, tossici, pericolosi o illegali. " \ "Assicurati che le tue risposte siano socialmente imparziali e positive. " \ "Se una domanda non ha senso o non e' coerente con i fatti, spiegane il motivo invece di rispondere in modo non corretto. " \ "Se non conosci la risposta a una domanda, non condividere informazioni false." 
messages = [{ 'content' : sys_prompt, 'role' : 'assistant'}, {'content' : 'Crea una lista su cosa mangiare a pranzo ogni giorno della settimana a pranzo e cena', 'role' : 'user'}]

def generate_text(sys_prompt, user_prompt):
    messages = [{ 'content' : sys_prompt, 'role' : 'assistant'}, {'content' : user_prompt, 'role' : 'user'}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
    generated_ids = model.generate(**model_inputs, max_new_tokens=1024)
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

generate_text(sys_prompt, 'cosa ne pensi della politica italiana?')
```

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

Zefiro-7b-beta-ITA-v0.1 has not been aligned to human preferences for safety within the RLHF phase or deployed with in-the-loop filtering of responses like ChatGPT, so the model can produce problematic outputs (especially when prompted to do so).
The size and composition of the corpus used to train the base model (`mistralai/Mistral-7B-v0.1`) are also unknown; however, it is likely to have included a mix of Web data and technical sources like books and code. See the [Falcon 180B model card](https://huggingface.co/tiiuae/falcon-180B#training-data) for an example of this.

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

We used [UltraChat-ITA](https://huggingface.co/datasets/giux78/100k-sft-ready-ultrafeedback-ita), a filtered version of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, as training data.
For translating the dataset we combined different tools and APIs; we are also evaluating the best approach for translating many more datasets. We have seen that the translation phase is critical and can introduce incorrect syntax and semantics.

#### Summary

Zefiro-7b-beta-ITA-v0.1 is a fine-tuned version of Mistral-7B using the Zephyr approach for the Italian language.

## Citation

```
@misc{tunstall2023zephyr,
      title={Zephyr: Direct Distillation of LM Alignment},
      author={Lewis Tunstall and Edward Beeching and Nathan Lambert and Nazneen Rajani and Kashif Rasul and Younes Belkada and Shengyi Huang and Leandro von Werra and Clémentine Fourrier and Nathan Habib and Nathan Sarrazin and Omar Sanseviero and Alexander M. Rush and Thomas Wolf},
      year={2023},
      eprint={2310.16944},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}

@misc{basile2023llamantino,
      title={LLaMAntino: LLaMA 2 Models for Effective Text Generation in Italian Language},
      author={Pierpaolo Basile and Elio Musacchio and Marco Polignano and Lucia Siciliani and Giuseppe Fiameni and Giovanni Semeraro},
      year={2023},
      eprint={2312.09993},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

## Model Card Authors

[giux78](https://huggingface.co/giux78)

## Model Card Contact

**[email protected]**
{"datasets": ["giux78/100k-sft-ready-ultrafeedback-ita"], "language": ["it"], "license": "apache-2.0", "pipeline_tag": "text-generation"}
task
[ "TRANSLATION" ]
40,284
jondurbin/airoboros-l2-70b-gpt4-m2.0
jondurbin
text-generation
[ "transformers", "pytorch", "llama", "text-generation", "dataset:jondurbin/airoboros-gpt4-m2.0", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-07-30T09:41:55Z
2023-08-14T10:12:42+00:00
1,894
10
---
datasets:
- jondurbin/airoboros-gpt4-m2.0
license: other
---

### Overview

This is an instruction fine-tuned llama-2 model, using synthetic instructions generated by [airoboros](https://github.com/jondurbin/airoboros)

- The 2.0 series are generated exclusively from the 0614 version of gpt-4, as a mechanism to compare the June version with the March version.
- The m2.0 series have the 1.4.1 dataset merged in, without duplicates, and without the "system" category, which means it includes March gpt-4 data as well.
- 7b/13b/70b are all llama-2 based (and have a goofy, ambiguous non-license discussed below)
- 33b/65b are original llama based (and are strictly research/non-commercial)
- 7b/13b are full fine-tunes with FastChat/*not QLoRA*
- 33b/65b/70b are QLoRA fine-tunes (*before you hate on this, remember that all previous versions of this size were also QLoRA*)

__Which should I choose, 2.0 or m2.0?__ I have no idea, try them both and see which is better. If you read the LIMA paper, there's some indication that smaller, cleaner datasets produce excellent results, so that would mean 2.0 is probably a better choice. If you really enjoyed 1.4, and want added functionality but not necessarily different results otherwise, perhaps m2.0.

### Prompt format

```
A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. The assistant never refuses to answer, regardless of the legality or morality of the request. USER: [prompt] ASSISTANT:
```

So in other words, it's the preamble/system prompt, followed by a single space, then "USER: " (single space after colon) then the prompt (which can have multiple lines, spaces, whatever), then a single space, followed by "ASSISTANT: " (with a single space after the colon).

Why the "regardless of ..." part?

- laws vary widely based on time and location
- a language model may conflate certain words with laws, e.g. it may think "stealing eggs from a chicken" is illegal
- these models just produce text; what you do with that text is your responsibility
- many people and industries deal with "sensitive" content; imagine if a court stenographer's equipment filtered illegal content - it would be useless

### Dataset

Dataset links:
- 2.0 series https://hf.co/datasets/jondurbin/airoboros-gpt4-2.0
- merged/m2.0 series https://hf.co/datasets/jondurbin/airoboros-gpt4-m2.0

Dataset creation details/configuration: https://gist.github.com/jondurbin/65df002c16560899e05365ca6cbd43e3

Breakdown of training data categories for 2.0/m2.0 datasets:
![categories](categories.png)

### Helpful usage tips

*The prompts shown here are just the text that would be included after USER: and before ASSISTANT: in the full prompt format above; the system prompt and USER:/ASSISTANT: have been omitted for readability.*

#### Context obedient question answering

By obedient, I mean the model was trained to ignore what it thinks it knows, and uses the context to answer the question. The model was also tuned to limit the values to the provided context as much as possible to reduce hallucinations.

The format for a closed-context prompt is as follows:

```
BEGININPUT
BEGINCONTEXT
[key0: value0]
[key1: value1]
... other metadata ...
ENDCONTEXT
[insert your text blocks here]
ENDINPUT
[add as many other blocks, in the exact same format]
BEGININSTRUCTION
[insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.]
ENDINSTRUCTION
```

It's also helpful to add "Don't make up answers if you don't know."
to your instruction block to make sure if the context is completely unrelated it doesn't make something up.

*The __only__ prompts that need this closed context formatting are closed-context instructions. Normal questions/instructions do not!*

I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the responses and how to associate specific sources with it.
- `BEGININPUT` - denotes a new input block
- `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block
- `ENDCONTEXT` - denotes the end of the metadata block for the current input
- [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context.
- `ENDINPUT` - denotes the end of the current input block
- [repeat as many input blocks in this format as you want]
- `BEGININSTRUCTION` - denotes the start of the list (or one) instruction(s) to respond to for all of the input blocks above.
- [instruction(s)]
- `ENDINSTRUCTION` - denotes the end of the instruction set

It sometimes works without `ENDINSTRUCTION`, but by explicitly including that in the prompt, the model better understands that all of the instructions in the block should be responded to.

Here's a trivial, but important example to prove the point:
```
BEGININPUT
BEGINCONTEXT
date: 2021-01-01
url: https://web.site/123
ENDCONTEXT
In a shocking turn of events, blueberries are now green, but will be sticking with the same name.
ENDINPUT
BEGININSTRUCTION
What color are blueberries? Source?
ENDINSTRUCTION
```

And the response:
```
Blueberries are now green.
Source:
date: 2021-01-01
url: https://web.site/123
```

#### Coding

You can ask for fairly complex coding instructions with multiple criteria, e.g.:

```
Create a python application with the following requirements:
- Asyncio FastAPI webserver
- ping endpoint that returns the current date in JSON format
- file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate
```

Or inline criteria:

```
Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values.
```

You can also optionally add a single space and "PLAINFORMAT" at the end of your prompt to avoid backticks, explanations, etc. and just print the code, e.g.:

```
Write a websocket application in node.js. PLAINFORMAT
```

#### Agent/function calling

The dataset includes many examples of function/args generation based on input criteria. This is somewhat similar to the OpenAI function calling, but the output is either JSON or YAML.

Example prompt:
```
As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format.

Input: I want to know how many times 'Python' is mentioned in my text file.

Available functions:
file_analytics:
  description: This tool performs various operations on a text file.
  params:
    action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
    filters:
      keyword: The word or phrase we want to search for.
```

Response:
```json
{
  "function": "file_analytics",
  "params": {
    "action": "count_occurrences",
    "filters": {
      "keyword": "Python"
    }
  }
}
```

#### Chain-of-thought

You can ask for several possible responses to a given problem, with a ranking and final answer selection.
Example prompt: ``` A group of people decide to play a round-robin tournament where every player plays every other player exactly once. If a total of 45 games were played, how many players participated in the tournament? Offer several potential answers, rank them according to mathematical logic, and pick the most feasible one. ``` Example response: ``` Solution 1: In a round-robin tournament, each player plays every other player exactly once. This means that the total number of games played is equal to the combination of all players taken two at a time (since a game involves two players). The formula for combinations is nC2 = n(n-1)/2, where n is the total number of players. We know that nC2 equals 45 (the total number of games), so we can set up the equation as follows: n(n-1)/2 = 45 Solving this equation gives us n=10. Final answer: There were 10 players in the tournament. Solution 2: Another way to approach this problem is by using the concept of triangular numbers. Triangular numbers are a sequence of numbers where each number can be represented as a triangle with dots. The nth triangular number is given by the sum of the first n natural numbers. If we consider each game as a dot and each player as a side of the triangle, then the total number of games (45) would correspond to the 9th triangular number because 1+2+3+4+5+6+7+8+9=45. However, since each side of the triangle represents a player, and there's one more player than sides in our model (because the last player has no one left to play against), we need to add one more to get the total number of players. Final answer: There were 10 players in the tournament. Solution 3: We could also use trial and error to solve this problem. Starting from a small number of players, we calculate how many games would be played in a round-robin tournament until we reach 45 games. For example, with 2 players, only 1 game is played; with 3 players, 3 games are played; with 4 players, 6 games are played, and so on. Continuing this process, we find that with 10 players, 45 games are played. Final answer: There were 10 players in the tournament. Ranking of solutions from best to worst: 1. Solution 1: This solution uses direct mathematical reasoning based on the properties of a round-robin tournament and solves an equation to find the answer. 2. Solution 2: This solution uses the concept of triangular numbers which requires additional steps and knowledge beyond the immediate problem context, but still arrives at the correct answer. 3. Solution 3: This solution relies on trial and error, which is less efficient and reliable than the other methods, but it eventually gets to the right answer. Best and final answer: There were 10 players in the tournament. ``` #### reWOO style execution planning The model now supports execution planning for complex instructions that would require making use of several tools. The output is just the plan, you must implement a mechanism to parse the output and actually call the functions! Example prompt: ``` Please construct a systematic plan to generate an optimal response to the user instruction, utilizing a set of provided tools. Each plan will correspond to an evidence value, which will be the output of one of the available functions given an input string that could be the user's question, one or more prior evidence values, or a combination of both. Here are the tools available to be called: DuckDuckGo[input]: Tool that enables the user to search for information using the DuckDuckGo search engine. 
This tool is helpful in locating a suitable list of sites that may contain the answer to the user's query. It does not directly answer the question but finds a list of sites that might have the answer.
TextScraper[input]: Load one or more websites from the input string containing newline delimited links, where input is one or more links, and produces plain text output containing the content of the links.
HyperlinkExtractor[input]: Extract links from plain text and produces a plain text, newline delimited response of links.
KnowledgeModel[input]: Question answering language model, particularly useful in answering questions based on an input passage of text. The input must be a text question that references an :evidence[n]: variable, e.g. What color is the cat, given :evidence1:?

The output should be in format:
Plan: [first action to take based on input question]
:evidence0: = [function to call with input parameter]
Plan: [next action to take, based on result of :evidence0:]
:evidence1: = [next function to call and input parameter, which may include reference to previous evidence, e.g. "Given context :evidence0"]
...
Answer: [:evidence[n]: containing the final answer.]

Question: Who won the Nobel Prize in Literature in 2020 and what is their most famous work?
```

Response:
```
Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020.
:evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner]
Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate.
:evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?]
Plan: Conduct another web search to find the most famous work of the identified laureate.
:evidence2: = DuckDuckGo[Most famous work of :evidence1:]
Plan: Extract the relevant links from the DuckDuckGo search results for a more focused search.
:evidence3: = HyperlinkExtractor[:evidence2:]
Plan: Use the TextScraper tool to extract information from the relevant links.
:evidence4: = TextScraper[:evidence3:]
Plan: Finally, utilize the KnowledgeModel to identify and summarize the most famous work of the laureate from the extracted information.
:evidence5: = KnowledgeModel[What is the most famous work of :evidence1:, given :evidence4:?]
Answer: :evidence5:
```

For this to be useful, you'd have to parse the output plan text, and implement/call each of the functions. This is just pseudo-code, completely untested off the top of my head, and obviously would require full implementation + hardening:

```python
import re
import requests

def inject_context(input_text, **context):
    for ref in set(re.findall(r"(:evidence[0-9]+:)", input_text, re.I)):
        input_text = input_text.replace(ref, context.get(ref, ""))
    return input_text

def duckduckgo(input_text, **context):
    search_string = inject_context(input_text, **context)
    # ... search via duck duck go using search_string ...
    # ... return text content ...

def link_extractor(input_text, **context):
    input_text = inject_context(input_text, **context)
    return "\n".join(list(set(re.findall(r"(https?://[^\s]+?\.?)", input_text, re.I))))

def scrape(input_text, **context):
    input_text = inject_context(input_text, **context)
    text = []
    for link in input_text.splitlines():
        text.append(requests.get(link).text)
    return "\n".join(text)

def infer(input_text, **context):
    prompt = inject_context(input_text, **context)
    # ... call the model with the prompt and return its output ...

def parse_plan(plan):
    method_map = {
        "DuckDuckGo": duckduckgo,
        "HyperlinkExtractor": link_extractor,
        "KnowledgeModel": infer,
        "TextScraper": scrape,
    }
    context = {}
    for line in plan.strip().splitlines():
        if line.startswith("Plan:"):
            print(line)
            continue
        parts = re.match(r"^(:evidence[0-9]+:)\s*=\s*([^\[]+)(\[.*\])\s*$", line, re.I)
        if not parts:
            if line.startswith("Answer: "):
                return context.get(line.split(" ")[-1].strip(), "Answer couldn't be generated...")
            raise RuntimeError("bad format: " + line)
        context[parts.group(1)] = method_map[parts.group(2)](parts.group(3), **context)
```

### Contribute

If you're interested in new functionality, particularly a new "instructor" type to generate a specific type of training data, take a look at the dataset generation tool repo: https://github.com/jondurbin/airoboros and either make a PR or open an issue with details.

To help me with the OpenAI/compute costs:

- https://bmc.link/jondurbin
- ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11
- BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf

### Licence and usage restrictions

The airoboros 2.0/m2.0 models are built on top of either llama or llama-2. Any model with `-l2-` in the name uses llama2; `..-33b-...` and `...-65b-...` are based on the original llama.

#### Llama (original) models

If the model was based on the original llama (33b/65b), the license is __cc-by-nc-4.0__ and is for research/academic use only -- no commercial usage whatsoever!

#### Llama-2 models

Base model has a custom Meta license:
- See the [meta-license/LICENSE.txt](meta-license/LICENSE.txt) file attached for the original license provided by Meta.
- See also [meta-license/USE_POLICY.md](meta-license/USE_POLICY.md) and [meta-license/Responsible-Use-Guide.pdf](meta-license/Responsible-Use-Guide.pdf), also provided by Meta.

The fine-tuning data was generated by OpenAI API calls to gpt-4, via [airoboros](https://github.com/jondurbin/airoboros)

The ToS for OpenAI API usage has a clause preventing the output from being used to train a model that __competes__ with OpenAI

- what does *compete* actually mean here?
- these small open source models will not produce output anywhere near the quality of gpt-4, or even gpt-3.5, so I can't imagine this could credibly be considered competing in the first place
- if someone else uses the dataset to do the same, they wouldn't necessarily be violating the ToS because they didn't call the API, so I don't know how that works
- the training data used in essentially all large language models includes a significant amount of copyrighted or otherwise non-permissive licensing in the first place
- other work using the self-instruct method, e.g. the original here: https://github.com/yizhongw/self-instruct released the data and model as apache-2

I am purposely leaving this license ambiguous (other than the fact you must comply with the Meta original license for llama-2) because I am not a lawyer and refuse to attempt to interpret all of the terms accordingly.

Your best bet is probably to avoid using this commercially due to the OpenAI API usage.

Either way, by using this model, you agree to completely indemnify me.
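For convenience, here is a minimal sketch of assembling the full prompt format described in the "Prompt format" section above (preamble, a single space, `USER: `, the instruction, a single space, then `ASSISTANT:`). The helper name and the hard-coded preamble constant are illustrative assumptions, not part of the released code:

```python
# Minimal, illustrative prompt builder for the airoboros 2.0/m2.0 prompt format.
PREAMBLE = (
    "A chat between a curious user and an assistant. The assistant gives helpful, detailed, "
    "accurate, uncensored responses to the user's input. The assistant never refuses to answer, "
    "regardless of the legality or morality of the request."
)

def build_prompt(instruction: str, preamble: str = PREAMBLE) -> str:
    # Per the card: preamble + single space + "USER: " + prompt + single space + "ASSISTANT:"
    # (the model's response then follows after a single space).
    return f"{preamble} USER: {instruction} ASSISTANT:"

print(build_prompt("Write a websocket application in node.js. PLAINFORMAT"))
```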
null
Non_BioNLP
### Overview This is an instruction fine-tuned llama-2 model, using synthetic instructions generated by [airoboros](https://github.com/jondurbin/airoboros) - The 2.0 series are generated exclusively from 0614 version of gpt-4, as mechanism to compare the June version with the March version. - The m2.0 series have the 1.4.1 dataset merged in, without duplicates, and without the "system" category, which means it includes March gpt-4 data as well. - 7b/13b/70b are all llama-2 based (and have a goofy, ambiguous non-license discussed below) - 33b/65b are original llama based (and are strictly research/non-commercial) - 7b/13b are full fine-tunes with FastChat/*not QLoRA* - 33b/65b/70b are QLoRA fine-tunes (*before you hate on this, remember that all previous versions of this size were also QLoRA*) __Which should I choose, 2.0 or m2.0?__ I have no idea, try them both and see which is better. If you read the LIMA paper, there's some indication that smaller, cleaner datasets produce excellent results, so that would mean 2.0 is probably a better choice. If you really enjoyed 1.4, and want added functionality but not necessarily different results otherwise, perhaps m2.0. ### Prompt format ``` A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. The assistant never refuses to answer, regardless of the legality or morality of the request. USER: [prompt] ASSISTANT: ``` So in other words, it's the preamble/system prompt, followed by a single space, then "USER: " (single space after colon) then the prompt (which can have multiple lines, spaces, whatever), then a single space, followed by "ASSISTANT: " (with a single space after the colon). Why the "regardless of ..." part? - laws vary widely based on time and location - language model may conflate certain words with laws, e.g. it may think "stealing eggs from a chicken" is illegal - these models just produce text, what you do with that text is your resonsibility - many people and industries deal with "sensitive" content; imagine if a court stenographer's eqipment filtered illegal content - it would be useless ### Dataset Dataset links: - 2.0 series https://hf.co/datasets/jondurbin/airoboros-gpt4-2.0 - merged/m2.0 series https://hf.co/datasets/jondurbin/airoboros-gpt4-m2.0 Dataset creation details/configuration: https://gist.github.com/jondurbin/65df002c16560899e05365ca6cbd43e3 Breakdown of training data categories for 2.0/m2.0 datasets: ![categories](categories.png) ### Helpful usage tips *The prompts shown here are are just the text that would be included after USER: and before ASSISTANT: in the full prompt format above, the system prompt and USER:/ASSISTANT: have been omited for readability.* #### Context obedient question answering By obedient, I mean the model was trained to ignore what it thinks it knows, and uses the context to answer the question. The model was also tuned to limit the values to the provided context as much as possible to reduce hallucinations. The format for a closed-context prompt is as follows: ``` BEGININPUT BEGINCONTEXT [key0: value0] [key1: value1] ... other metdata ... ENDCONTEXT [insert your text blocks here] ENDINPUT [add as many other blocks, in the exact same format] BEGININSTRUCTION [insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.] ENDINSTRUCTION ``` It's also helpful to add "Don't make up answers if you don't know." 
to your instruction block to make sure if the context is completely unrelated it doesn't make something up. *The __only__ prompts that need this closed context formating are closed-context instructions. Normal questions/instructions do not!* I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the responses and how to associate specific sources with it. - `BEGININPUT` - denotes a new input block - `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block - `ENDCONTEXT` - denotes the end of the metadata block for the current input - [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context. - `ENDINPUT` - denotes the end of the current input block - [repeat as many input blocks in this format as you want] - `BEGININSTRUCTION` - denotes the start of the list (or one) instruction(s) to respond to for all of the input blocks above. - [instruction(s)] - `ENDINSTRUCTION` - denotes the end of instruction set It sometimes works without `ENDINSTRUCTION`, but by explicitly including that in the prompt, the model better understands that all of the instructions in the block should be responded to. Here's a trivial, but important example to prove the point: ``` BEGININPUT BEGINCONTEXT date: 2021-01-01 url: https://web.site/123 ENDCONTEXT In a shocking turn of events, blueberries are now green, but will be sticking with the same name. ENDINPUT BEGININSTRUCTION What color are bluberries? Source? ENDINSTRUCTION ``` And the response: ``` Blueberries are now green. Source: date: 2021-01-01 url: https://web.site/123 ``` #### Coding You can ask for fairly complex coding instructions with multiple criteria, e.g.: ``` Create a python application with the following requirements: - Asyncio FastAPI webserver - ping endpoint that returns the current date in JSON format - file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate ``` Or inline criteria: ``` Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values. ``` You can also optionally add a single space and "PLAINFORMAT" at the end of your prompt to avoid backticks, explanations, etc. and just print the code, e.g.: ``` Write a websocket application in node.js. PLAINFORMAT ``` #### Agent/function calling The dataset includes many examples of function/args generation based on input criteria. This is somewhat similar to the OpenAI function calling, but the output is either JSON or YAML. Example prompt: ``` As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format. Input: I want to know how many times 'Python' is mentioned in my text file. Available functions: file_analytics: description: This tool performs various operations on a text file. params: action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc. filters: keyword: The word or phrase we want to search for. ``` Response: ```json { "function": "file_analytics", "params": { "action": "count_occurrences", "filters": { "keyword": "Python" } } } ``` #### Chain-of-thought You can ask for several possible responses to a given problem, with a ranking and final answer selection. 
Example prompt: ``` A group of people decide to play a round-robin tournament where every player plays every other player exactly once. If a total of 45 games were played, how many players participated in the tournament? Offer several potential answers, rank them according to mathematical logic, and pick the most feasible one. ``` Example response: ``` Solution 1: In a round-robin tournament, each player plays every other player exactly once. This means that the total number of games played is equal to the combination of all players taken two at a time (since a game involves two players). The formula for combinations is nC2 = n(n-1)/2, where n is the total number of players. We know that nC2 equals 45 (the total number of games), so we can set up the equation as follows: n(n-1)/2 = 45 Solving this equation gives us n=10. Final answer: There were 10 players in the tournament. Solution 2: Another way to approach this problem is by using the concept of triangular numbers. Triangular numbers are a sequence of numbers where each number can be represented as a triangle with dots. The nth triangular number is given by the sum of the first n natural numbers. If we consider each game as a dot and each player as a side of the triangle, then the total number of games (45) would correspond to the 9th triangular number because 1+2+3+4+5+6+7+8+9=45. However, since each side of the triangle represents a player, and there's one more player than sides in our model (because the last player has no one left to play against), we need to add one more to get the total number of players. Final answer: There were 10 players in the tournament. Solution 3: We could also use trial and error to solve this problem. Starting from a small number of players, we calculate how many games would be played in a round-robin tournament until we reach 45 games. For example, with 2 players, only 1 game is played; with 3 players, 3 games are played; with 4 players, 6 games are played, and so on. Continuing this process, we find that with 10 players, 45 games are played. Final answer: There were 10 players in the tournament. Ranking of solutions from best to worst: 1. Solution 1: This solution uses direct mathematical reasoning based on the properties of a round-robin tournament and solves an equation to find the answer. 2. Solution 2: This solution uses the concept of triangular numbers which requires additional steps and knowledge beyond the immediate problem context, but still arrives at the correct answer. 3. Solution 3: This solution relies on trial and error, which is less efficient and reliable than the other methods, but it eventually gets to the right answer. Best and final answer: There were 10 players in the tournament. ``` #### reWOO style execution planning The model now supports execution planning for complex instructions that would require making use of several tools. The output is just the plan, you must implement a mechanism to parse the output and actually call the functions! Example prompt: ``` Please construct a systematic plan to generate an optimal response to the user instruction, utilizing a set of provided tools. Each plan will correspond to an evidence value, which will be the output of one of the available functions given an input string that could be the user's question, one or more prior evidence values, or a combination of both. Here are the tools available to be called: DuckDuckGo[input]: Tool that enables the user to search for information using the DuckDuckGo search engine. 
This tool is helpful in locating a suitable list of sites that may contain the answer to the user's query. It does not directly answer the question but finds a list of sites that might have the answer.
TextScraper[input]: Load one or more websites from the input string containing newline delimited links, where input is one or more links, and produces plain text output containing the content of the links.
HyperlinkExtractor[input]: Extract links from plain text and produces a plain text, newline delimited response of links.
KnowledgeModel[input]: Question answering language model, particularly useful in answering questions based on an input passage of text. The input must be a text question that references an :evidence[n]: variable, e.g. What color is the cat, given :evidence1:?

The output should be in format:
Plan: [first action to take based on input question]
:evidence0: = [function to call with input parameter]
Plan: [next action to take, based on result of :evidence0:]
:evidence1: = [next function to call and input parameter, which may include reference to previous evidence, e.g. "Given context :evidence0"]
...
Answer: [:evidence[n]: containing the final answer.]

Question: Who won the Nobel Prize in Literature in 2020 and what is their most famous work?
```

Response:
```
Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020.
:evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner]
Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate.
:evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?]
Plan: Conduct another web search to find the most famous work of the identified laureate.
:evidence2: = DuckDuckGo[Most famous work of :evidence1:]
Plan: Extract the relevant links from the DuckDuckGo search results for a more focused search.
:evidence3: = HyperlinkExtractor[:evidence2:]
Plan: Use the TextScraper tool to extract information from the relevant links.
:evidence4: = TextScraper[:evidence3:]
Plan: Finally, utilize the KnowledgeModel to identify and summarize the most famous work of the laureate from the extracted information.
:evidence5: = KnowledgeModel[What is the most famous work of :evidence1:, given :evidence4:?]
Answer: :evidence5:
```

For this to be useful, you'd have to parse the output plan text, and implement/call each of the functions. This is just pseudo-code, completely untested off the top of my head, and obviously would require full implementation + hardening:

```python
import re
import requests

def inject_context(input_text, **context):
    for ref in set(re.findall(r"(:evidence[0-9]+:)", input_text, re.I)):
        input_text = input_text.replace(ref, context.get(ref, ""))
    return input_text

def duckduckgo(input_text, **context):
    search_string = inject_context(input_text, **context)
    # ... search via duck duck go using search_string ...
    # ... return text content ...

def link_extractor(input_text, **context):
    input_text = inject_context(input_text, **context)
    return "\n".join(list(set(re.findall(r"(https?://[^\s]+?\.?)", input_text, re.I))))

def scrape(input_text, **context):
    input_text = inject_context(input_text, **context)
    text = []
    for link in input_text.splitlines():
        text.append(requests.get(link).text)
    return "\n".join(text)

def infer(input_text, **context):
    prompt = inject_context(input_text, **context)
    # ... call the model with the prompt and return its output ...

def parse_plan(plan):
    method_map = {
        "DuckDuckGo": duckduckgo,
        "HyperlinkExtractor": link_extractor,
        "KnowledgeModel": infer,
        "TextScraper": scrape,
    }
    context = {}
    for line in plan.strip().splitlines():
        if line.startswith("Plan:"):
            print(line)
            continue
        parts = re.match(r"^(:evidence[0-9]+:)\s*=\s*([^\[]+)(\[.*\])\s*$", line, re.I)
        if not parts:
            if line.startswith("Answer: "):
                return context.get(line.split(" ")[-1].strip(), "Answer couldn't be generated...")
            raise RuntimeError("bad format: " + line)
        context[parts.group(1)] = method_map[parts.group(2)](parts.group(3), **context)
```

### Contribute

If you're interested in new functionality, particularly a new "instructor" type to generate a specific type of training data, take a look at the dataset generation tool repo: https://github.com/jondurbin/airoboros and either make a PR or open an issue with details.

To help me with the OpenAI/compute costs:

- https://bmc.link/jondurbin
- ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11
- BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf

### Licence and usage restrictions

The airoboros 2.0/m2.0 models are built on top of either llama or llama-2. Any model with `-l2-` in the name uses llama2; `..-33b-...` and `...-65b-...` are based on the original llama.

#### Llama (original) models

If the model was based on the original llama (33b/65b), the license is __cc-by-nc-4.0__ and is for research/academic use only -- no commercial usage whatsoever!

#### Llama-2 models

Base model has a custom Meta license:
- See the [meta-license/LICENSE.txt](meta-license/LICENSE.txt) file attached for the original license provided by Meta.
- See also [meta-license/USE_POLICY.md](meta-license/USE_POLICY.md) and [meta-license/Responsible-Use-Guide.pdf](meta-license/Responsible-Use-Guide.pdf), also provided by Meta.

The fine-tuning data was generated by OpenAI API calls to gpt-4, via [airoboros](https://github.com/jondurbin/airoboros)

The ToS for OpenAI API usage has a clause preventing the output from being used to train a model that __competes__ with OpenAI

- what does *compete* actually mean here?
- these small open source models will not produce output anywhere near the quality of gpt-4, or even gpt-3.5, so I can't imagine this could credibly be considered competing in the first place
- if someone else uses the dataset to do the same, they wouldn't necessarily be violating the ToS because they didn't call the API, so I don't know how that works
- the training data used in essentially all large language models includes a significant amount of copyrighted or otherwise non-permissive licensing in the first place
- other work using the self-instruct method, e.g. the original here: https://github.com/yizhongw/self-instruct released the data and model as apache-2

I am purposely leaving this license ambiguous (other than the fact you must comply with the Meta original license for llama-2) because I am not a lawyer and refuse to attempt to interpret all of the terms accordingly.

Your best bet is probably to avoid using this commercially due to the OpenAI API usage.

Either way, by using this model, you agree to completely indemnify me.
{"datasets": ["jondurbin/airoboros-gpt4-m2.0"], "license": "other"}
task
[ "QUESTION_ANSWERING" ]
40,285
mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF
mradermacher
null
[ "transformers", "gguf", "Chain-of-Thought Activation", "Llama3.1", "8B", "CoT", "SFT", "text-generation-inference", "Ollama", "safetensors", "Question Answering", "Math", "en", "dataset:O1-OPEN/OpenO1-SFT", "base_model:BlackBeenie/Llama-3.1-8B-OpenO1-SFT-v0.2", "base_model:quantized:BlackBeenie/Llama-3.1-8B-OpenO1-SFT-v0.2", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
2025-02-19T18:55:00Z
2025-02-20T13:18:00+00:00
838
0
--- base_model: BlackBeenie/Llama-3.1-8B-OpenO1-SFT-v0.2 datasets: - O1-OPEN/OpenO1-SFT language: - en library_name: transformers license: apache-2.0 tags: - Chain-of-Thought Activation - Llama3.1 - 8B - CoT - SFT - text-generation-inference - Ollama - safetensors - Question Answering - Math quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/BlackBeenie/Llama-3.1-8B-OpenO1-SFT-v0.2 <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q2_K.gguf) | Q2_K | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q3_K_S.gguf) | Q3_K_S | 3.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q3_K_L.gguf) | Q3_K_L | 4.4 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.IQ4_XS.gguf) | IQ4_XS | 4.6 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q5_K_S.gguf) | Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q5_K_M.gguf) | Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q6_K.gguf) | Q6_K | 6.7 | very good quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.f16.gguf) | f16 | 16.2 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have 
and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
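As a usage illustration, one way to fetch and run one of the quants listed above is with `huggingface_hub` and `llama-cpp-python`; the chosen filename, context size, and prompt below are assumptions based on the table, not instructions from this card:

```python
# Hypothetical sketch: download the Q4_K_M quant and run a short completion locally.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Filename taken from the quant table above; adjust if the repository layout differs.
model_path = hf_hub_download(
    repo_id="mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF",
    filename="Llama-3.1-8B-OpenO1-SFT-v0.2.Q4_K_M.gguf",
)

llm = Llama(model_path=model_path, n_ctx=4096)  # context length is an arbitrary choice here
out = llm("Explain chain-of-thought prompting in one sentence.", max_tokens=64)
print(out["choices"][0]["text"])
```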
null
Non_BioNLP
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/BlackBeenie/Llama-3.1-8B-OpenO1-SFT-v0.2 <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q2_K.gguf) | Q2_K | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q3_K_S.gguf) | Q3_K_S | 3.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q3_K_L.gguf) | Q3_K_L | 4.4 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.IQ4_XS.gguf) | IQ4_XS | 4.6 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q5_K_S.gguf) | Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q5_K_M.gguf) | Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q6_K.gguf) | Q6_K | 6.7 | very good quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3.1-8B-OpenO1-SFT-v0.2-GGUF/resolve/main/Llama-3.1-8B-OpenO1-SFT-v0.2.f16.gguf) | f16 | 16.2 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
{"base_model": "BlackBeenie/Llama-3.1-8B-OpenO1-SFT-v0.2", "datasets": ["O1-OPEN/OpenO1-SFT"], "language": ["en"], "library_name": "transformers", "license": "apache-2.0", "tags": ["Chain-of-Thought Activation", "Llama3.1", "8B", "CoT", "SFT", "text-generation-inference", "Ollama", "safetensors", "Question Answering", "Math"], "quantized_by": "mradermacher"}
task
[ "QUESTION_ANSWERING" ]
40,286
bclavie/fio-base-japanese-v0.1
bclavie
sentence-similarity
[ "sentence-transformers", "safetensors", "bert", "feature-extraction", "sentence-similarity", "transformers", "ja", "dataset:shunk031/JGLUE", "dataset:shunk031/jsnli", "dataset:hpprc/jsick", "dataset:miracl/miracl", "dataset:castorini/mr-tydi", "dataset:unicamp-dl/mmarco", "autotrain_compatible", "region:us" ]
2023-12-18T11:01:07Z
2023-12-19T10:28:16+00:00
61
7
---
datasets:
- shunk031/JGLUE
- shunk031/jsnli
- hpprc/jsick
- miracl/miracl
- castorini/mr-tydi
- unicamp-dl/mmarco
language:
- ja
library_name: sentence-transformers
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
inference: false
---

# fio-base-japanese-v0.1

日本語版は近日公開予定です(日本語を勉強中なので、間違いはご容赦ください!)

fio-base-japanese-v0.1 is a proof of concept, and the first release of the Fio family of Japanese embeddings. It is based on [cl-tohoku/bert-base-japanese-v3](https://huggingface.co/cl-tohoku/bert-base-japanese-v3) and trained on limited volumes of data on a single GPU.

For more information, please refer to [my notes on Fio](https://ben.clavie.eu/fio).

#### Datasets

Similarity/Entailment:
- JSTS (train)
- JSNLI (train)
- JNLI (train)
- JSICK (train)

Retrieval:
- MMARCO (Multilingual Marco) (train, 124k sentence pairs, <1% of the full data)
- Mr.TyDI (train)
- MIRACL (train, 50% sample)
- ~~JSQuAD (train, 50% sample, no LLM enhancement)~~ JSQuAD is not used in the released version, to serve as an unseen test set.

#### Results

> ⚠️ WARNING: fio-base-japanese-v0.1 has seen textual entailment tasks during its training, which is _not_ the case for the other Japanese-only models in this table. This gives Fio an unfair advantage over the previous best results, `cl-nagoya/sup-simcse-ja-[base|large]`. During mid-training evaluations, this didn't seem to greatly affect performance; however, JSICK (NLI set) was included in the training data, and therefore it's impossible to fully remove this contamination at the moment. I intend to fix this in a future release, but please keep this in mind as you view the results (see JSQuAD results on the associated blog post for a fully unseen comparison, although focused on retrieval).

This is adapted and truncated (to keep only the most popular models) from [oshizo's benchmarking github repo](https://github.com/oshizo/JapaneseEmbeddingEval); please check it out for more information and give it a star, as it was very useful!

Italic denotes the best model for its size when a smaller model outperforms a bigger one (base/large | 768/1024); bold denotes the best overall.

| Model | JSTS valid-v1.1 | JSICK test | MIRACL dev | Average |
|-------------------------------------------------|-----------------|------------|------------|---------|
| bclavie/fio-base-japanese-v0.1 | **_0.863_** | **_0.894_** | 0.718 | _0.825_ |
| cl-nagoya/sup-simcse-ja-base | 0.809 | 0.827 | 0.527 | 0.721 |
| cl-nagoya/sup-simcse-ja-large | _0.831_ | _0.831_ | 0.507 | 0.723 |
| colorfulscoop/sbert-base-ja | 0.742 | 0.657 | 0.254 | 0.551 |
| intfloat/multilingual-e5-base | 0.796 | 0.806 | __0.845__ | 0.816 |
| intfloat/multilingual-e5-large | 0.819 | 0.794 | **0.883** | **_0.832_** |
| pkshatech/GLuCoSE-base-ja | 0.818 | 0.757 | 0.692 | 0.755 |
| text-embedding-ada-002 | 0.790 | 0.789 | 0.7232 | 0.768 |

## Usage

This model requires both `fugashi` and `unidic-lite`:

```
pip install -U fugashi unidic-lite
```

If using for a retrieval task, you must prefix your query with `"関連記事を取得するために使用できるこの文の表現を生成します: "`.

### Usage (Sentence-Transformers)

This model is best used through [sentence-transformers](https://www.SBERT.net).
If you don't have it, it's easy to install:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer

sentences = ["こんにちは、世界!", "文埋め込み最高!文埋め込み最高と叫びなさい", "極度乾燥しなさい"]

model = SentenceTransformer('bclavie/fio-base-japanese-v0.1')
embeddings = model.encode(sentences)
print(embeddings)
```

### Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

def cls_pooling(model_output, attention_mask):
    return model_output[0][:, 0]

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('bclavie/fio-base-japanese-v0.1')
model = AutoModel.from_pretrained('bclavie/fio-base-japanese-v0.1')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, cls pooling.
sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Citing & Authors

```
@misc{bclavie-fio-embeddings,
  author = {Benjamin Clavié},
  title = {Fio Japanese Embeddings},
  year = {2023},
  howpublished = {\url{https://ben.clavie.eu/fio}}
}
```
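To illustrate the retrieval prefix mentioned in the Usage section above, here is a minimal sketch that scores documents against a prefixed query with cosine similarity (the example documents, and the choice to encode documents without the prefix, are assumptions):

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('bclavie/fio-base-japanese-v0.1')

# Per the card, retrieval queries must carry this prefix; documents are encoded as-is here.
prefix = "関連記事を取得するために使用できるこの文の表現を生成します: "
query = prefix + "日本語の文埋め込みモデルについて知りたい"

documents = [
    "日本語の文埋め込みモデルを比較した記事です。",
    "今日の東京の天気は晴れです。",
]

query_emb = model.encode(query, convert_to_tensor=True)
doc_embs = model.encode(documents, convert_to_tensor=True)

scores = util.cos_sim(query_emb, doc_embs)
print(scores)  # higher score = more relevant document
```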
null
Non_BioNLP
# fio-base-japanese-v0.1 日本語版は近日公開予定です(日本語を勉強中なので、間違いはご容赦ください!) fio-base-japanese-v0.1 is a proof of concept, and the first release of the Fio family of Japanese embeddings. It is based on [cl-tohoku/bert-base-japanese-v3](https://huggingface.co/cl-tohoku/bert-base-japanese-v3) and trained on limited volumes of data on a single GPU. For more information, please refer to [my notes on Fio](https://ben.clavie.eu/fio). #### Datasets Similarity/Entailment: - JSTS (train) - JSNLI (train) - JNLI (train) - JSICK (train) Retrieval: - MMARCO (Multilingual Marco) (train, 124k sentence pairs, <1% of the full data) - Mr.TyDI (train) - MIRACL (train, 50% sample) - ~~JSQuAD (train, 50% sample, no LLM enhancement)~~ JSQuAD is not used in the released version, to serve as an unseen test set. #### Results > ⚠️ WARNING: fio-base-japanese-v0.1 has seen textual entailment tasks during its training, which is _not_ the case of the other other japanese-only models in this table. This gives Fio an unfair advantage over the previous best results, `cl-nagoya/sup-simcse-ja-[base|large]`. During mid-training evaluations, this didn't seem to greatly affect performance, however, JSICK (NLI set) was included in the training data, and therefore it's impossible to fully remove this contamination at the moment. I intend to fix this in future release, but please keep this in mind as you view the results (see JSQuAD results on the associated blog post for a fully unseen comparison, although focused on retrieval). This is adapted and truncated (to keep only the most popular models) from [oshizo's benchmarking github repo](https://github.com/oshizo/JapaneseEmbeddingEval), please check it out for more information and give it a star as it was very useful! Italic denotes best model for its size when a smaller model outperforms a bigger one (base/large | 768/1024), bold denotes best overall. | Model | JSTS valid-v1.1 | JSICK test | MIRACL dev | Average | |-------------------------------------------------|-----------------|------------|------------|---------| | bclavie/fio-base-japanese-v0.1 | **_0.863_** | **_0.894_** | 0.718 | _0.825_ | | cl-nagoya/sup-simcse-ja-base | 0.809 | 0.827 | 0.527 | 0.721 | | cl-nagoya/sup-simcse-ja-large | _0.831_ | _0.831_ | 0.507 | 0.723 | | colorfulscoop/sbert-base-ja | 0.742 | 0.657 | 0.254 | 0.551 | | intfloat/multilingual-e5-base | 0.796 | 0.806 | __0.845__ | 0.816 | | intfloat/multilingual-e5-large | 0.819 | 0.794 | **0.883** | **_0.832_** | | pkshatech/GLuCoSE-base-ja | 0.818 | 0.757 | 0.692 | 0.755 | | text-embedding-ada-002 | 0.790 | 0.789 | 0.7232 | 0.768 | ## Usage This model requires both `fugashi` and `unidic-lite`: ``` pip install -U fugashi unidic-lite ``` If using for a retrieval task, you must prefix your query with `"関連記事を取得するために使用できるこの文の表現を生成します: "`. ### Usage (Sentence-Transformers) This model is best used through [sentence-transformers](https://www.SBERT.net). 
If you don't have it, it's easy to install: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["こんにちは、世界!", "文埋め込み最高!文埋め込み最高と叫びなさい", "極度乾燥しなさい"] model = SentenceTransformer('bclavie/fio-base-japanese-v0.1') embeddings = model.encode(sentences) print(embeddings) ``` ### Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch def cls_pooling(model_output, attention_mask): return model_output[0][:,0] # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, cls pooling. sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Citing & Authors ```@misc{ bclavie-fio-embeddings, author = {Benjamin Clavié}, title = {Fio Japanese Embeddings}, year = {2023}, howpublished = {\url{https://ben.clavie.eu/fio}} }```
{"datasets": ["shunk031/JGLUE", "shunk031/jsnli", "hpprc/jsick", "miracl/miracl", "castorini/mr-tydi", "unicamp-dl/mmarco"], "language": ["ja"], "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "feature-extraction", "sentence-similarity", "transformers"], "inference": false}
task
[ "TEXTUAL_ENTAILMENT" ]
40,287
SEBIS/code_trans_t5_large_commit_generation_transfer_learning_finetune
SEBIS
summarization
[ "transformers", "pytorch", "jax", "t5", "feature-extraction", "summarization", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2021-06-23T08:34:21+00:00
12
0
---
tags:
- summarization
widget:
- text: new file mode 100644 index 000000000 . . 892fda21b Binary files / dev / null and b / src / plugins / gateway / lib / joscar . jar differ
---

# CodeTrans model for git commit message generation

Pretrained model on git commits using the T5-large model architecture. It was first released in [this repository](https://github.com/agemagician/CodeTrans). This model is trained on tokenized git commits: it works best with tokenized git commits.

## Model description

This CodeTrans model is based on the `t5-large` model. It has its own SentencePiece vocabulary model. It used transfer-learning pre-training on 7 unsupervised datasets in the software development domain. It is then fine-tuned on the git commit message generation task for Java commit changes.

## Intended uses & limitations

The model could be used to generate the git commit message for the git commit changes or be fine-tuned on other relevant tasks. It can be used on unparsed and untokenized commit changes. However, if the change is tokenized, the performance should be better.

### How to use

Here is how to use this model to generate a git commit message using Transformers SummarizationPipeline:

```python
from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline

pipeline = SummarizationPipeline(
    model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_large_commit_generation_transfer_learning_finetune"),
    tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_large_commit_generation_transfer_learning_finetune", skip_special_tokens=True),
    device=0
)

tokenized_code = "new file mode 100644 index 000000000 . . 892fda21b Binary files / dev / null and b / src / plugins / gateway / lib / joscar . jar differ"
pipeline([tokenized_code])
```

Run this example in [colab notebook](https://github.com/agemagician/CodeTrans/blob/main/prediction/transfer%20learning%20fine-tuning/commit%20generation/large_model.ipynb).

## Training data

The supervised training tasks datasets can be downloaded from [Link](https://www.dropbox.com/sh/488bq2of10r4wvw/AACs5CGIQuwtsD7j_Ls_JAORa/finetuning_dataset?dl=0&subfolder_nav_tracking=1)

## Training procedure

### Transfer-learning Pretraining

The model was trained on a single TPU Pod V3-8 for 240,000 steps in total, using sequence length 512 (batch size 4096). It has a total of approximately 220M parameters and was trained using the encoder-decoder architecture. The optimizer used is AdaFactor with an inverse square root learning rate schedule for pre-training.

### Fine-tuning

This model was then fine-tuned on a single TPU Pod V2-8 for 4,000 steps in total, using sequence length 512 (batch size 256), using only the dataset containing commit changes.
## Evaluation results For the git commit message generation task, different models achieve the following results on different programming languages (in BLEU score): Test results: | Language / Model | Java | | -------------------- | :------------: | | CodeTrans-ST-Small | 39.61 | | CodeTrans-ST-Base | 38.67 | | CodeTrans-TF-Small | 44.22 | | CodeTrans-TF-Base | 44.17 | | CodeTrans-TF-Large | **44.41** | | CodeTrans-MT-Small | 36.17 | | CodeTrans-MT-Base | 39.25 | | CodeTrans-MT-Large | 41.18 | | CodeTrans-MT-TF-Small | 43.96 | | CodeTrans-MT-TF-Base | 44.19 | | CodeTrans-MT-TF-Large | 44.34 | | State of the art | 32.81 | > Created by [Ahmed Elnaggar](https://twitter.com/Elnaggar_AI) | [LinkedIn](https://www.linkedin.com/in/prof-ahmed-elnaggar/) and Wei Ding | [LinkedIn](https://www.linkedin.com/in/wei-ding-92561270/)
null
Non_BioNLP
# CodeTrans model for git commit message generation Pretrained model on git commits using the t5-large model architecture. It was first released in [this repository](https://github.com/agemagician/CodeTrans). This model is trained on tokenized git commits: it works best with tokenized git commits. ## Model description This CodeTrans model is based on the `t5-large` model. It has its own SentencePiece vocabulary model. It used transfer-learning pre-training on 7 unsupervised datasets in the software development domain. It is then fine-tuned on the git commit message generation task for Java commit changes. ## Intended uses & limitations The model could be used to generate git commit messages for git commit changes or be fine-tuned on other relevant tasks. It can be used on unparsed and untokenized commit changes. However, if the changes are tokenized, the performance should be better. ### How to use Here is how to use this model to generate a git commit message using the Transformers SummarizationPipeline: ```python from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline pipeline = SummarizationPipeline( model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_large_commit_generation_transfer_learning_finetune"), tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_large_commit_generation_transfer_learning_finetune", skip_special_tokens=True), device=0 ) tokenized_code = "new file mode 100644 index 000000000 . . 892fda21b Binary files / dev / null and b / src / plugins / gateway / lib / joscar . jar differ" pipeline([tokenized_code]) ``` Run this example in [this colab notebook](https://github.com/agemagician/CodeTrans/blob/main/prediction/transfer%20learning%20fine-tuning/commit%20generation/large_model.ipynb). ## Training data The supervised training tasks datasets can be downloaded from [this link](https://www.dropbox.com/sh/488bq2of10r4wvw/AACs5CGIQuwtsD7j_Ls_JAORa/finetuning_dataset?dl=0&subfolder_nav_tracking=1). ## Training procedure ### Transfer-learning Pretraining The model was trained on a single TPU Pod V3-8 for 240,000 steps in total, using sequence length 512 (batch size 4096). It has a total of approximately 220M parameters and was trained using the encoder-decoder architecture. The optimizer used is AdaFactor with an inverse square root learning rate schedule for pre-training. ### Fine-tuning This model was then fine-tuned on a single TPU Pod V2-8 for 4,000 steps in total, using sequence length 512 (batch size 256), using only the dataset containing commit changes. ## Evaluation results For the git commit message generation task, different models achieve the following results on different programming languages (in BLEU score): Test results: | Language / Model | Java | | -------------------- | :------------: | | CodeTrans-ST-Small | 39.61 | | CodeTrans-ST-Base | 38.67 | | CodeTrans-TF-Small | 44.22 | | CodeTrans-TF-Base | 44.17 | | CodeTrans-TF-Large | **44.41** | | CodeTrans-MT-Small | 36.17 | | CodeTrans-MT-Base | 39.25 | | CodeTrans-MT-Large | 41.18 | | CodeTrans-MT-TF-Small | 43.96 | | CodeTrans-MT-TF-Base | 44.19 | | CodeTrans-MT-TF-Large | 44.34 | | State of the art | 32.81 | > Created by [Ahmed Elnaggar](https://twitter.com/Elnaggar_AI) | [LinkedIn](https://www.linkedin.com/in/prof-ahmed-elnaggar/) and Wei Ding | [LinkedIn](https://www.linkedin.com/in/wei-ding-92561270/)
{"tags": ["summarization"], "widget": [{"text": "new file mode 100644 index 000000000 . . 892fda21b Binary files / dev / null and b / src / plugins / gateway / lib / joscar . jar differ"}]}
task
[ "SUMMARIZATION" ]
40,288
Andrew82106/ChatGLMWithSynonymousParaphrasing
Andrew82106
null
[ "repeat", "expansion", "zh", "region:us" ]
2023-08-01T07:19:18Z
2023-08-04T03:13:18+00:00
0
0
--- language: - zh tags: - repeat - expansion --- # Fine-tuned ChatGLM model files for synonymous paraphrasing [github repo link](https://github.com/Andrew82106/SynonymousParaphrasingChatGLM)
null
Non_BioNLP
# Fine-tuned ChatGLM model files for synonymous paraphrasing [github repo link](https://github.com/Andrew82106/SynonymousParaphrasingChatGLM)
{"language": ["zh"], "tags": ["repeat", "expansion"]}
task
[ "PARAPHRASING" ]
40,289
Tural/out-glue-sst2
Tural
text-classification
[ "transformers", "pytorch", "bert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "base_model:google-bert/bert-base-uncased", "base_model:finetune:google-bert/bert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-12T02:43:54Z
2023-10-12T03:11:53+00:00
7
0
--- base_model: bert-base-uncased datasets: - glue language: - en license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: out-glue-sst2 results: - task: type: text-classification name: Text Classification dataset: name: GLUE SST2 type: glue config: sst2 split: validation args: sst2 metrics: - type: accuracy value: 0.9243119266055045 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # out-glue-sst2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the GLUE SST2 dataset. It achieves the following results on the evaluation set: - Loss: 0.3727 - Accuracy: 0.9243 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 192 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.34.0 - Pytorch 2.0.0 - Datasets 2.14.5 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # out-glue-sst2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the GLUE SST2 dataset. It achieves the following results on the evaluation set: - Loss: 0.3727 - Accuracy: 0.9243 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 192 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.34.0 - Pytorch 2.0.0 - Datasets 2.14.5 - Tokenizers 0.14.1
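The card stops at the framework versions and includes no usage snippet. As a minimal sketch (not part of the original card), the checkpoint can be loaded with the standard `transformers` text-classification pipeline; the example sentences are invented, and the label names mentioned in the comment are assumptions, since the card does not list the `id2label` mapping:

```python
from transformers import pipeline

# Load the fine-tuned BERT SST-2 classifier from the Hub
classifier = pipeline("text-classification", model="Tural/out-glue-sst2")

# SST-2 is a binary sentiment task; outputs are typically LABEL_0 / LABEL_1
# unless custom label names were written into the model config.
print(classifier("A touching and beautifully acted film."))
print(classifier("The plot was a complete mess."))
```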
{"base_model": "bert-base-uncased", "datasets": ["glue"], "language": ["en"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "out-glue-sst2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE SST2", "type": "glue", "config": "sst2", "split": "validation", "args": "sst2"}, "metrics": [{"type": "accuracy", "value": 0.9243119266055045, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,290
YakovElm/Apache5SetFitModel_balance_ratio_3
YakovElm
text-classification
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-05-31T21:28:10Z
2023-05-31T21:28:45+00:00
8
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # YakovElm/Apache5SetFitModel_balance_ratio_3 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("YakovElm/Apache5SetFitModel_balance_ratio_3") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# YakovElm/Apache5SetFitModel_balance_ratio_3 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("YakovElm/Apache5SetFitModel_balance_ratio_3") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
40,291
adamluc/testneoxt
adamluc
text-generation
[ "transformers", "pytorch", "gpt_neox", "text-generation", "en", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-03-24T13:25:36Z
2023-03-24T13:25:36+00:00
9
0
--- language: - en license: apache-2.0 duplicated_from: togethercomputer/GPT-NeoXT-Chat-Base-20B --- ***<p style="font-size: 24px">Feel free to try out our [OpenChatKit feedback app](https://huggingface.co/spaces/togethercomputer/OpenChatKit)!</p>*** # GPT-NeoXT-Chat-Base-20B > TLDR: As part of OpenChatKit (codebase available [here](https://github.com/togethercomputer/OpenChaT)), > GPT-NeoXT-Chat-Base-20B is a 20B parameter language model, fine-tuned from EleutherAI’s GPT-NeoX with over 40 million instructions on 100% carbon negative compute. GPT-NeoXT-Chat-Base-20B is based on ElutherAI’s GPT-NeoX model, and is fine-tuned with data focusing on dialog-style interactions. We focused the tuning on several tasks such as question answering, classification, extraction, and summarization. We’ve fine-tuned the model with a collection of 43 million high-quality instructions. Together partnered with LAION and Ontocord.ai, who both helped curate the dataset the model is based on. You can read more about this process and the availability of this dataset in LAION’s blog post [here](https://laion.ai/blog/oig-dataset/). ## Model Details - **Developed by**: Together Computer. - **Model type**: Language Model - **Language(s)**: English - **License**: Apache 2.0 - **Model Description**: A 20B parameter open source chat model, fine-tuned from EleutherAI’s NeoX with over 40M instructions on 100% carbon negative compute - **Resources for more information**: [GitHub Repository](https://github.com/togethercomputer/OpenChaT). # Quick Start ```python from transformers import pipeline pipe = pipeline(model='togethercomputer/GPT-NeoXT-Chat-Base-20B') pipe('''<human>: Hello!\n<bot>:''') ``` or ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B") model = AutoModelForCausalLM.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B") ``` ## Strengths of the model There are several tasks that OpenChatKit excels at out of the box. This includes: - Example 1: Summarization and question answering within context. ```markdown **Summarize a long document into a single sentence and conduct question answering related to the document, with multiple rounds** <human>: Last year, the travel industry saw a big rebound in demand — and that demand is showing no signs of slowing down this spring break travel season. Planes and hotels will be full, travelers will likely face long queues, cancellations, massive crowds and plenty of other travel nightmares. But perhaps the most frustrating thing you’ll have to prepare for this spring break is if your luggage goes missing. You’ve got to somehow track down your belongings that were in the airline’s care and then wait to be reunited — all while trying to enjoy the start of your long-awaited vacation. As far as travel nightmares go, lost luggage is up there as one of the worst. To be honest up front, I am great at strategically losing things — or rather, losing stuff with style. (I hope you appreciate the “Toy Story” reference.) So when a product comes along that’s a perfect way to keep track of my belongings — especially when traveling — I’m adding it to my travel bag. I’ve been a big fan of Apple’s $29 item tracker, dubbed AirTag. 
The long-rumored AirTag product finally launched in 2021, and while it’s not the first item tracker to exist, it has two key features that set it apart from the rest to make it the ultimate travel companion: access to the Find My network and Precision Finding technology. The Federal Aviation Administration has even ruled that having an AirTag in your luggage is permitted and, in fact, not a safety hazard. The devices only contain 0.1 grams of lithium, which falls below the FAA’s limit. “Luggage tracking devices powered by lithium metal cells that have 0.3 grams or less of lithium can be used on checked baggage,” the FAA said in a statement. “Apple AirTags meet this threshold; other luggage tracking devices may not.” But, you may be wondering what makes them so special and why you should carry one in your checked luggage this spring break? Let’s explain why. Summarize the above into a single paragraph. <bot>: If you're traveling this spring break, you might want to consider investing in an item tracker to help you find your lost luggage. Apple's AirTag is the best option, according to a new report. <human>: But isn't it unsafe to put AirTag in a luggage? <bot>: The Federal Aviation Administration has ruled that having an AirTag in your luggage is permitted and, in fact, not a safety hazard. <human>: Ah, why is that? <bot>: The devices only contain 0.1 grams of lithium, which falls below the FAA’s limit. ``` - Example 2: Extraction. ```markdown **Extract structured information from unstructured documents, as illustrated in the following example.** <human>: Generate a table summarizing the options outlined in this email. Team, we have to make a decision on the product plan for Goocrux v2. The first option will take us 3 weeks of development, and will achieve performance of 80ms, but means that the UI has to change. The second option will take us 4 weeks of development, and will achieve performance of 60ms, without any UI change. The third option will take the longest -- 6 weeks of development, will achieve performance of 30ms, without any UI change. A markdown table with four columns: (1) Option ID, (2) Development time, (3) Performance, (4) UI Change: <bot>: #. | ID | Option ID | Development time | Performance | UI Change | | --------- | --------- | --------- | --------- | --------- | | 1 | 1 | 3 weeks | 80ms | Yes | | 2 | 2 | 4 weeks | 60ms | No | | 3 | 3 | 6 weeks | 30ms | No | ``` - Example 3: Classification. ```markdown **Classifying a sentence or paragraph into different categories.** <human>: Classify the sentiment of the following sentence into Positive, Neutral, or Negative: Super excited about teaching Stanford’s first course on Large Language Models! Check the syllabus out here <bot>: Positive <human>: Classify the sentiment of the following sentence into Positive, Neutral, or Negative: How about the following sentence: It is raining outside and I feel so blue <bot>: Negative ``` In addition, the model does well on few-shot prompts. For both classification and extraction, the model performs even better with few shots, as in most HELM tasks. [Contact us](https://www.together.xyz/contact) if you’re interested in trying few-shot prompts with the model. ## Weaknesses of the model That said, there are several areas where we have more work to do, and we need your help! Some of these include: - Knowledge-based closed question and answering: The chatbot may hallucinate and give incorrect results. Be sure to fact check, and if possible provide feedback with the corrected information. 
- Coding tasks: The chatbot was not trained on a large enough corpus of source code to excel at writing code. We welcome contributions of additional datasets to improve this! - Repetition: Sometimes the chatbot will repeat its response. We’re working to improve this, but in the meantime you can click the refresh button to start a new conversation. - Context switching: If you change the topic in the middle of a conversation the chatbot often cannot make the switch automatically and will continue to give answers related to the prior topic. - Creative writing and longer answers: The chatbot does not generate long, creative text such as an essay or story. We are excited to work with you to address these weaknesses by getting your feedback, bolstering data sets, and improving accuracy. # Uses ## Direct Use The model is intended for research purposes. Possible research areas and tasks include - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of dialogue models or language models. - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. - Research on dialogue models or language models. Excluded uses are described below. ### Misuse, Malicious Use, and Out-of-Scope Use The OpenChatKit community provides GPT-NeoXT-Chat-Base-20B as an open source tool for building chatbots. The community is not responsible for any misuse, malicious use, or out-of-scope use of the model. It is the responsibility of the end user to ensure that the model is used in a responsible and ethical manner. #### Out-of-Scope Use GPT-NeoXT-Chat-Base-20B is designed for use in chatbot applications and may not perform well for other use cases outside of its intended scope. For example, it may not be suitable for use in safety-critical applications or for making decisions that have a significant impact on individuals or society. It is important to consider the limitations of the model and to only use it for its intended purpose. #### Misuse and Malicious Use GPT-NeoXT-Chat-Base-20B is designed for use in chatbot applications and should not be used for any other purpose. Misuse of the model, such as using it to engage in illegal or unethical activities, is strictly prohibited and goes against the principles of the OpenChatKit community project. Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to: - Generating fake news, misinformation, or propaganda - Promoting hate speech, discrimination, or violence against individuals or groups - Impersonating individuals or organizations without their consent - Engaging in cyberbullying or harassment - Defamatory content - Spamming or scamming - Sharing confidential or sensitive information without proper authorization - Violating the terms of use of the model or the data used to train it - Creating automated bots for malicious purposes such as spreading malware, phishing scams, or spamming ## Limitations GPT-NeoXT-Chat-Base-20B, like other language model-based chatbots, has limitations that should be taken into consideration. For example, the model may not always provide accurate or relevant answers, particularly for questions that are complex, ambiguous, or outside of its training data. We therefore welcome contributions from individuals and organizations, and encourage collaboration towards creating a more robust and inclusive chatbot. 
## Training **Training Data** Please refer to [togethercomputer/OpenDataHub](https://github.com/togethercomputer/OpenDataHub) **Training Procedure** - **Hardware:** 2 x 8 x A100 GPUs - **Optimizer:** [8bit-AdamW](https://github.com/TimDettmers/bitsandbytes) - **Gradient Accumulations**: 2 - **Batch:** 2 x 2 x 64 x 2048 = 524288 tokens - **Learning rate:** warmup to 1e-6 for 100 steps and then kept constant ## Community Join us on [Together Discord](https://discord.gg/6ZVDU8tTD4)
null
Non_BioNLP
***<p style="font-size: 24px">Feel free to try out our [OpenChatKit feedback app](https://huggingface.co/spaces/togethercomputer/OpenChatKit)!</p>*** # GPT-NeoXT-Chat-Base-20B > TLDR: As part of OpenChatKit (codebase available [here](https://github.com/togethercomputer/OpenChaT)), > GPT-NeoXT-Chat-Base-20B is a 20B parameter language model, fine-tuned from EleutherAI’s GPT-NeoX with over 40 million instructions on 100% carbon negative compute. GPT-NeoXT-Chat-Base-20B is based on ElutherAI’s GPT-NeoX model, and is fine-tuned with data focusing on dialog-style interactions. We focused the tuning on several tasks such as question answering, classification, extraction, and summarization. We’ve fine-tuned the model with a collection of 43 million high-quality instructions. Together partnered with LAION and Ontocord.ai, who both helped curate the dataset the model is based on. You can read more about this process and the availability of this dataset in LAION’s blog post [here](https://laion.ai/blog/oig-dataset/). ## Model Details - **Developed by**: Together Computer. - **Model type**: Language Model - **Language(s)**: English - **License**: Apache 2.0 - **Model Description**: A 20B parameter open source chat model, fine-tuned from EleutherAI’s NeoX with over 40M instructions on 100% carbon negative compute - **Resources for more information**: [GitHub Repository](https://github.com/togethercomputer/OpenChaT). # Quick Start ```python from transformers import pipeline pipe = pipeline(model='togethercomputer/GPT-NeoXT-Chat-Base-20B') pipe('''<human>: Hello!\n<bot>:''') ``` or ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B") model = AutoModelForCausalLM.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B") ``` ## Strengths of the model There are several tasks that OpenChatKit excels at out of the box. This includes: - Example 1: Summarization and question answering within context. ```markdown **Summarize a long document into a single sentence and conduct question answering related to the document, with multiple rounds** <human>: Last year, the travel industry saw a big rebound in demand — and that demand is showing no signs of slowing down this spring break travel season. Planes and hotels will be full, travelers will likely face long queues, cancellations, massive crowds and plenty of other travel nightmares. But perhaps the most frustrating thing you’ll have to prepare for this spring break is if your luggage goes missing. You’ve got to somehow track down your belongings that were in the airline’s care and then wait to be reunited — all while trying to enjoy the start of your long-awaited vacation. As far as travel nightmares go, lost luggage is up there as one of the worst. To be honest up front, I am great at strategically losing things — or rather, losing stuff with style. (I hope you appreciate the “Toy Story” reference.) So when a product comes along that’s a perfect way to keep track of my belongings — especially when traveling — I’m adding it to my travel bag. I’ve been a big fan of Apple’s $29 item tracker, dubbed AirTag. The long-rumored AirTag product finally launched in 2021, and while it’s not the first item tracker to exist, it has two key features that set it apart from the rest to make it the ultimate travel companion: access to the Find My network and Precision Finding technology. 
The Federal Aviation Administration has even ruled that having an AirTag in your luggage is permitted and, in fact, not a safety hazard. The devices only contain 0.1 grams of lithium, which falls below the FAA’s limit. “Luggage tracking devices powered by lithium metal cells that have 0.3 grams or less of lithium can be used on checked baggage,” the FAA said in a statement. “Apple AirTags meet this threshold; other luggage tracking devices may not.” But, you may be wondering what makes them so special and why you should carry one in your checked luggage this spring break? Let’s explain why. Summarize the above into a single paragraph. <bot>: If you're traveling this spring break, you might want to consider investing in an item tracker to help you find your lost luggage. Apple's AirTag is the best option, according to a new report. <human>: But isn't it unsafe to put AirTag in a luggage? <bot>: The Federal Aviation Administration has ruled that having an AirTag in your luggage is permitted and, in fact, not a safety hazard. <human>: Ah, why is that? <bot>: The devices only contain 0.1 grams of lithium, which falls below the FAA’s limit. ``` - Example 2: Extraction. ```markdown **Extract structured information from unstructured documents, as illustrated in the following example.** <human>: Generate a table summarizing the options outlined in this email. Team, we have to make a decision on the product plan for Goocrux v2. The first option will take us 3 weeks of development, and will achieve performance of 80ms, but means that the UI has to change. The second option will take us 4 weeks of development, and will achieve performance of 60ms, without any UI change. The third option will take the longest -- 6 weeks of development, will achieve performance of 30ms, without any UI change. A markdown table with four columns: (1) Option ID, (2) Development time, (3) Performance, (4) UI Change: <bot>: #. | ID | Option ID | Development time | Performance | UI Change | | --------- | --------- | --------- | --------- | --------- | | 1 | 1 | 3 weeks | 80ms | Yes | | 2 | 2 | 4 weeks | 60ms | No | | 3 | 3 | 6 weeks | 30ms | No | ``` - Example 3: Classification. ```markdown **Classifying a sentence or paragraph into different categories.** <human>: Classify the sentiment of the following sentence into Positive, Neutral, or Negative: Super excited about teaching Stanford’s first course on Large Language Models! Check the syllabus out here <bot>: Positive <human>: Classify the sentiment of the following sentence into Positive, Neutral, or Negative: How about the following sentence: It is raining outside and I feel so blue <bot>: Negative ``` In addition, the model does well on few-shot prompts. For both classification and extraction, the model performs even better with few shots, as in most HELM tasks. [Contact us](https://www.together.xyz/contact) if you’re interested in trying few-shot prompts with the model. ## Weaknesses of the model That said, there are several areas where we have more work to do, and we need your help! Some of these include: - Knowledge-based closed question and answering: The chatbot may hallucinate and give incorrect results. Be sure to fact check, and if possible provide feedback with the corrected information. - Coding tasks: The chatbot was not trained on a large enough corpus of source code to excel at writing code. We welcome contributions of additional datasets to improve this! - Repetition: Sometimes the chatbot will repeat its response. 
We’re working to improve this, but in the meantime you can click the refresh button to start a new conversation. - Context switching: If you change the topic in the middle of a conversation the chatbot often cannot make the switch automatically and will continue to give answers related to the prior topic. - Creative writing and longer answers: The chatbot does not generate long, creative text such as an essay or story. We are excited to work with you to address these weaknesses by getting your feedback, bolstering data sets, and improving accuracy. # Uses ## Direct Use The model is intended for research purposes. Possible research areas and tasks include - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of dialogue models or language models. - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. - Research on dialogue models or language models. Excluded uses are described below. ### Misuse, Malicious Use, and Out-of-Scope Use The OpenChatKit community provides GPT-NeoXT-Chat-Base-20B as an open source tool for building chatbots. The community is not responsible for any misuse, malicious use, or out-of-scope use of the model. It is the responsibility of the end user to ensure that the model is used in a responsible and ethical manner. #### Out-of-Scope Use GPT-NeoXT-Chat-Base-20B is designed for use in chatbot applications and may not perform well for other use cases outside of its intended scope. For example, it may not be suitable for use in safety-critical applications or for making decisions that have a significant impact on individuals or society. It is important to consider the limitations of the model and to only use it for its intended purpose. #### Misuse and Malicious Use GPT-NeoXT-Chat-Base-20B is designed for use in chatbot applications and should not be used for any other purpose. Misuse of the model, such as using it to engage in illegal or unethical activities, is strictly prohibited and goes against the principles of the OpenChatKit community project. Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to: - Generating fake news, misinformation, or propaganda - Promoting hate speech, discrimination, or violence against individuals or groups - Impersonating individuals or organizations without their consent - Engaging in cyberbullying or harassment - Defamatory content - Spamming or scamming - Sharing confidential or sensitive information without proper authorization - Violating the terms of use of the model or the data used to train it - Creating automated bots for malicious purposes such as spreading malware, phishing scams, or spamming ## Limitations GPT-NeoXT-Chat-Base-20B, like other language model-based chatbots, has limitations that should be taken into consideration. For example, the model may not always provide accurate or relevant answers, particularly for questions that are complex, ambiguous, or outside of its training data. We therefore welcome contributions from individuals and organizations, and encourage collaboration towards creating a more robust and inclusive chatbot. 
## Training **Training Data** Please refer to [togethercomputer/OpenDataHub](https://github.com/togethercomputer/OpenDataHub) **Training Procedure** - **Hardware:** 2 x 8 x A100 GPUs - **Optimizer:** [8bit-AdamW](https://github.com/TimDettmers/bitsandbytes) - **Gradient Accumulations**: 2 - **Batch:** 2 x 2 x 64 x 2048 = 524288 tokens - **Learning rate:** warmup to 1e-6 for 100 steps and then kept constant ## Community Join us on [Together Discord](https://discord.gg/6ZVDU8tTD4)
{"language": ["en"], "license": "apache-2.0", "duplicated_from": "togethercomputer/GPT-NeoXT-Chat-Base-20B"}
task
[ "QUESTION_ANSWERING", "SUMMARIZATION" ]
40,292
Helsinki-NLP/opus-mt-es-ty
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "es", "ty", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2023-08-16T11:33:44+00:00
38
0
--- license: apache-2.0 tags: - translation --- ### opus-mt-es-ty * source languages: es * target languages: ty * OPUS readme: [es-ty](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/es-ty/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-16.zip](https://object.pouta.csc.fi/OPUS-MT-models/es-ty/opus-2020-01-16.zip) * test set translations: [opus-2020-01-16.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/es-ty/opus-2020-01-16.test.txt) * test set scores: [opus-2020-01-16.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/es-ty/opus-2020-01-16.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.es.ty | 37.3 | 0.544 |
null
Non_BioNLP
### opus-mt-es-ty * source languages: es * target languages: ty * OPUS readme: [es-ty](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/es-ty/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-16.zip](https://object.pouta.csc.fi/OPUS-MT-models/es-ty/opus-2020-01-16.zip) * test set translations: [opus-2020-01-16.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/es-ty/opus-2020-01-16.test.txt) * test set scores: [opus-2020-01-16.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/es-ty/opus-2020-01-16.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.es.ty | 37.3 | 0.544 |
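The card lists benchmark scores but no usage example. A minimal sketch using the standard MarianMT API in `transformers` is shown below; the Spanish input sentence is only an illustration (`ty` is the ISO 639-1 code for Tahitian):

```python
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-es-ty"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# Translate Spanish source sentences into Tahitian
src_texts = ["Hola, ¿cómo estás?"]
batch = tokenizer(src_texts, return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```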
{"license": "apache-2.0", "tags": ["translation"]}
task
[ "TRANSLATION" ]
40,293
allegro/BiDi-eng-slk
allegro
translation
[ "transformers", "safetensors", "marian", "text2text-generation", "translation", "mt", "pytorch", "sentence-piece", "multilingual", "allegro", "laniqo", "cs", "pl", "sk", "sl", "en", "license:cc-by-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2025-01-28T12:47:38Z
2025-02-10T23:43:59+00:00
24
0
--- language: - cs - pl - sk - sl - en library_name: transformers license: cc-by-4.0 tags: - translation - mt - marian - pytorch - sentence-piece - multilingual - allegro - laniqo --- # MultiSlav BiDi Models <p align="center"> <a href="https://ml.allegro.tech/"><img src="allegro-title.svg" alt="MLR @ Allegro.com"></a> </p> ## Multilingual BiDi MT Models ___BiDi___ is a collection of Encoder-Decoder vanilla transformer models trained on sentence-level Machine Translation task. Each model is supporting Bi-Directional translation. ___BiDi___ models are part of the [___MultiSlav___ collection](https://huggingface.co/collections/allegro/multislav-6793d6b6419e5963e759a683). More information will be available soon in our upcoming MultiSlav paper. Experiments were conducted under research project by [Machine Learning Research](https://ml.allegro.tech/) lab for [Allegro.com](https://ml.allegro.tech/). Big thanks to [laniqo.com](laniqo.com) for cooperation in the research. <p align="center"> <img src="bi-di.svg"> </p> Graphic above provides an example of an BiDi model - [BiDi-ces-pol](https://huggingface.co/allegro/bidi-ces-pol) to translate from Polish to Czech language. ___BiDi-ces-pol___ is a bi-directional model supporting translation both __form Czech to Polish__ and __from Polish to Czech__ directions. ### Supported languages To use a ___BiDi___ model, you must provide the target language for translation. Target language tokens are represented as 3-letter ISO 639-3 language codes embedded in a format >>xxx<<. All accepted directions and their respective tokens are listed below. Note that, for each model only two directions are available. Each of them was added as a special token to Sentence-Piece tokenizer. | **Target Language** | **First token** | |---------------------|-----------------| | Czech | `>>ces<<` | | English | `>>eng<<` | | Polish | `>>pol<<` | | Slovak | `>>slk<<` | | Slovene | `>>slv<<` | ### Bi-Di models available We provided 10 ___BiDi___ models, allowing to translate between 20 languages. | **Bi-Di model** | **Languages supported** | **HF repository** | |-----------------|-------------------------|---------------------------------------------------------------------| | BiDi-ces-eng | Czech ↔ English | [allegro/BiDi-ces-eng](https://huggingface.co/allegro/bidi-ces-eng) | | BiDi-ces-pol | Czech ↔ Polish | [allegro/BiDi-ces-pol](https://huggingface.co/allegro/bidi-ces-pol) | | BiDi-ces-slk | Czech ↔ Slovak | [allegro/BiDi-ces-slk](https://huggingface.co/allegro/bidi-ces-slk) | | BiDi-ces-slv | Czech ↔ Slovene | [allegro/BiDi-ces-slv](https://huggingface.co/allegro/bidi-ces-slv) | | BiDi-eng-pol | English ↔ Polish | [allegro/BiDi-eng-pol](https://huggingface.co/allegro/bidi-eng-pol) | | BiDi-eng-slk | English ↔ Slovak | [allegro/BiDi-eng-slk](https://huggingface.co/allegro/bidi-eng-slk) | | BiDi-eng-slv | English ↔ Slovene | [allegro/BiDi-eng-slv](https://huggingface.co/allegro/bidi-eng-slv) | | BiDi-pol-slk | Polish ↔ Slovak | [allegro/BiDi-pol-slk](https://huggingface.co/allegro/bidi-pol-slk) | | BiDi-pol-slv | Polish ↔ Slovene | [allegro/BiDi-pol-slv](https://huggingface.co/allegro/bidi-pol-slv) | | BiDi-slk-slv | Slovak ↔ Slovene | [allegro/BiDi-slk-slv](https://huggingface.co/allegro/bidi-slk-slv) | ## Use case quickstart Example code-snippet to use model. Due to bug the `MarianMTModel` must be used explicitly. Remember to adjust source and target languages to your use-case. 
```python from transformers import AutoTokenizer, MarianMTModel source_lang = "pol" target_lang = "ces" first_lang, second_lang = sorted([source_lang, target_lang]) model_name = f"Allegro/BiDi-{first_lang}-{second_lang}" tokenizer = AutoTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) text = f">>{target_lang}<<" + " " + "Allegro to internetowa platforma e-commerce, na której swoje produkty sprzedają średnie i małe firmy, jak również duże marki." batch_to_translate = [text] translations = model.generate(**tokenizer.batch_encode_plus(batch_to_translate, return_tensors="pt")) decoded_translation = tokenizer.batch_decode(translations, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0] print(decoded_translation) ``` Generated Czech output: > Allegro je online e-commerce platforma, na které své výrobky prodávají střední a malé firmy, stejně jako velké značky. ## Training [SentencePiece](https://github.com/google/sentencepiece) tokenizer has a vocab size 32k in total (16k per language). Tokenizer was trained on randomly sampled part of the training corpus. During the training we used the [MarianNMT](https://marian-nmt.github.io/) framework. Base marian configuration used: [transfromer-big](https://github.com/marian-nmt/marian-dev/blob/master/src/common/aliases.cpp#L113). All training parameters are listed in table below. ### Training hyperparameters: | **Hyperparameter** | **Value** | |----------------------------|------------------------------------------------------------------------------------------------------------| | Total Parameter Size | 209M | | Vocab Size | 32k | | Base Parameters | [Marian transfromer-big](https://github.com/marian-nmt/marian-dev/blob/master/src/common/aliases.cpp#L113) | | Number of Encoding Layers | 6 | | Number of Decoding Layers | 6 | | Model Dimension | 1024 | | FF Dimension | 4096 | | Heads | 16 | | Dropout | 0.1 | | Batch Size | mini batch fit to VRAM | | Training Accelerators | 4x A100 40GB | | Max Length | 100 tokens | | Optimizer | Adam | | Warmup steps | 8000 | | Context | Sentence-level MT | | Languages Supported | See [Bi-Di models available](#Bi-Di-models-available) | | Precision | float16 | | Validation Freq | 3000 steps | | Stop Metric | ChrF | | Stop Criterion | 20 Validation steps | ## Training corpora The main research question was: "How does adding additional, related languages impact the quality of the model?" - we explored it in the Slavic language family. ___BiDi___ models are our baseline before expanding the data-regime by using higher-level multilinguality. Datasets were downloaded via [MT-Data](https://pypi.org/project/mtdata/0.2.10/) library. The number of total examples post filtering and deduplication varies, depending on languages supported, see the table below. 
| **Language pair** | **Number of training examples** | |-------------------|--------------------------------:| | Czech ↔ Polish | 63M | | Czech ↔ Slovak | 30M | | Czech ↔ Slovene | 25M | | Polish ↔ Slovak | 26M | | Polish ↔ Slovene | 23M | | Slovak ↔ Slovene | 18M | | ---------------- | ------------------------------- | | Czech ↔ English | 151M | | English ↔ Polish | 150M | | English ↔ Slovak | 52M | | English ↔ Slovene | 40M | The datasets used (only applicable to specific directions): | **Corpus** | |----------------------| | paracrawl | | opensubtitles | | multiparacrawl | | dgt | | elrc | | xlent | | wikititles | | wmt | | wikimatrix | | dcep | | ELRC | | tildemodel | | europarl | | eesc | | eubookshop | | emea | | jrc_acquis | | ema | | qed | | elitr_eca | | EU-dcep | | rapid | | ecb | | kde4 | | news_commentary | | kde | | bible_uedin | | europat | | elra | | wikipedia | | wikimedia | | tatoeba | | globalvoices | | euconst | | ubuntu | | php | | ecdc | | eac | | eac_reference | | gnome | | EU-eac | | books | | EU-ecdc | | newsdev | | khresmoi_summary | | czechtourism | | khresmoi_summary_dev | | worldbank | ## Evaluation Evaluation of the models was performed on [Flores200](https://huggingface.co/datasets/facebook/flores) dataset. The table below compares performance of the open-source models and all applicable models from our collection. Metric used: Unbabel/wmt22-comet-da. | **Direction** | **CES → ENG** | **CES → POL** | **CES → SLK** | **CES → SLV** | **ENG → CES** | **ENG → POL** | **ENG → SLK** | **ENG → SLV** | **POL → CES** | **POL → ENG** | **POL → SLK** | **POL → SLV** | **SLK → CES** | **SLK → ENG** | **SLK → POL** | **SLK → SLV** | **SLV → CES** | **SLV → ENG** | **SLV → POL** | **SLV → SLK** | |----------------------------------------------------|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | **M2M-100** | 87.0 | 89.0 | 92.1 | 89.7 | 88.6 | 86.4 | 88.4 | 87.3 | 89.6 | 84.6 | 89.4 | 88.4 | 92.7 | 86.8 | 89.1 | 89.6 | 90.3 | 86.4 | 88.7 | 90.1 | | **NLLB-200** | 88.1 | 88.9 | 91.2 | 88.6 | 90.4 | __88.5__ | 90.1 | 88.8 | 89.4 | __85.8__ | 88.9 | 87.7 | 91.8 | 88.2 | 88.9 | 88.8 | 90.0 | __87.5__ | 88.6 | 89.4 | | **Seamless-M4T** | 87.5 | 80.9 | 90.8 | 82.0 | __90.7__ | __88.5__ | __90.6__ | __89.6__ | 79.6 | 85.4 | 80.0 | 76.4 | 91.5 | 87.2 | 81.2 | 82.9 | 80.9 | 87.3 | 76.7 | 81.0 | | **OPUS-MT Sla-Sla** | __88.2__ | 82.8 | - | 83.4 | 89.1 | 85.6 | - | 84.5 | 82.9 | 82.2 | - | 81.2 | - | - | - | - | 83.5 | 84.1 | 80.8 | - | | **OPUS-MT SK-EN** | - | - | - | - | - | - | 89.5 | - | - | - | - | - | - | __88.4__ | - | - | - | - | - | - | | _Our contributions:_ | | | | | | | | | | | | | | | | | | | | | | **BiDi Models**<span style="color:green;">*</span> | 87.5 | 89.4 | 92.4 | 89.8 | 87.8 | 86.2 | 87.2 | 86.6 | 90.0 | 85.0 | 89.1 | 88.4 | 92.9 | 87.3 | 88.8 | 89.4 | 90.0 | 86.9 | 88.1 | 89.1 | | **P4-pol**<span style="color:red;">◊</span> | - | 89.6 | 90.8 | 88.7 | - | - | - | - | 90.2 | - | 89.8 | 88.7 | 91.0 | - | 89.3 | 88.4 | 89.3 | - | 88.7 | 88.5 | | **P5-eng**<span style="color:red;">◊</span> | 88.0 | 89.0 | 90.7 | 89.0 | 88.8 | 87.3 | 88.4 | 87.5 | 89.0 | 85.7 | 88.5 | 87.8 | 91.0 | 88.2 | 88.6 | 88.5 | 89.6 | 87.2 | 88.4 | 88.9 | | **P5-ces**<span style="color:red;">◊</span> | 87.9 | 89.6 | 
__92.5__ | 89.9 | 88.4 | 85.0 | 87.9 | 85.9 | 90.3 | 84.5 | 89.5 | 88.0 | __93.0__ | 87.8 | 89.4 | 89.8 | 90.3 | 85.7 | 87.9 | 89.8 | | **MultiSlav-4slav** | - | 89.7 | __92.5__ | 90.0 | - | - | - | - | 90.2 | - | 89.6 | 88.7 | 92.9 | - | 89.4 | 90.1 | __90.6__ | - | 88.9 | __90.2__ | | **MultiSlav-5lang** | 87.8 | __89.8__ | __92.5__ | __90.1__ | 88.9 | 86.9 | 88.0 | 87.3 | __90.4__ | 85.4 | 89.8 | __88.9__ | 92.9 | 87.8 | __89.6__ | __90.2__ | __90.6__ | 87.0 | __89.2__ | __90.2__ | <span style="color:red;">◊</span> system of 2 models *Many2XXX* and *XXX2Many*, see [P5-ces2many](https://huggingface.co/allegro/p5-ces2many) <span style="color:green;">*</span> results combined for all bi-directional models; each values for applicable model ## Limitations and Biases We did not evaluate inherent bias contained in training datasets. It is advised to validate bias of our models in perspective domain. This might be especially problematic in translation from English to Slavic languages, which require explicitly indicated gender and might hallucinate based on bias present in training data. ## License The model is licensed under CC BY 4.0, which allows for commercial use. ## Citation TO BE UPDATED SOON 🤗 ## Contact Options Authors: - MLR @ Allegro: [Artur Kot](https://linkedin.com/in/arturkot), [Mikołaj Koszowski](https://linkedin.com/in/mkoszowski), [Wojciech Chojnowski](https://linkedin.com/in/wojciech-chojnowski-744702348), [Mieszko Rutkowski](https://linkedin.com/in/mieszko-rutkowski) - Laniqo.com: [Artur Nowakowski](https://linkedin.com/in/artur-nowakowski-mt), [Kamil Guttmann](https://linkedin.com/in/kamil-guttmann), [Mikołaj Pokrywka](https://linkedin.com/in/mikolaj-pokrywka) Please don't hesitate to contact authors if you have any questions or suggestions: - e-mail: [email protected] or [email protected] - LinkedIn: [Artur Kot](https://linkedin.com/in/arturkot) or [Mikołaj Koszowski](https://linkedin.com/in/mkoszowski)
null
Non_BioNLP
# MultiSlav BiDi Models <p align="center"> <a href="https://ml.allegro.tech/"><img src="allegro-title.svg" alt="MLR @ Allegro.com"></a> </p> ## Multilingual BiDi MT Models ___BiDi___ is a collection of Encoder-Decoder vanilla transformer models trained on sentence-level Machine Translation task. Each model is supporting Bi-Directional translation. ___BiDi___ models are part of the [___MultiSlav___ collection](https://huggingface.co/collections/allegro/multislav-6793d6b6419e5963e759a683). More information will be available soon in our upcoming MultiSlav paper. Experiments were conducted under research project by [Machine Learning Research](https://ml.allegro.tech/) lab for [Allegro.com](https://ml.allegro.tech/). Big thanks to [laniqo.com](laniqo.com) for cooperation in the research. <p align="center"> <img src="bi-di.svg"> </p> Graphic above provides an example of an BiDi model - [BiDi-ces-pol](https://huggingface.co/allegro/bidi-ces-pol) to translate from Polish to Czech language. ___BiDi-ces-pol___ is a bi-directional model supporting translation both __form Czech to Polish__ and __from Polish to Czech__ directions. ### Supported languages To use a ___BiDi___ model, you must provide the target language for translation. Target language tokens are represented as 3-letter ISO 639-3 language codes embedded in a format >>xxx<<. All accepted directions and their respective tokens are listed below. Note that, for each model only two directions are available. Each of them was added as a special token to Sentence-Piece tokenizer. | **Target Language** | **First token** | |---------------------|-----------------| | Czech | `>>ces<<` | | English | `>>eng<<` | | Polish | `>>pol<<` | | Slovak | `>>slk<<` | | Slovene | `>>slv<<` | ### Bi-Di models available We provided 10 ___BiDi___ models, allowing to translate between 20 languages. | **Bi-Di model** | **Languages supported** | **HF repository** | |-----------------|-------------------------|---------------------------------------------------------------------| | BiDi-ces-eng | Czech ↔ English | [allegro/BiDi-ces-eng](https://huggingface.co/allegro/bidi-ces-eng) | | BiDi-ces-pol | Czech ↔ Polish | [allegro/BiDi-ces-pol](https://huggingface.co/allegro/bidi-ces-pol) | | BiDi-ces-slk | Czech ↔ Slovak | [allegro/BiDi-ces-slk](https://huggingface.co/allegro/bidi-ces-slk) | | BiDi-ces-slv | Czech ↔ Slovene | [allegro/BiDi-ces-slv](https://huggingface.co/allegro/bidi-ces-slv) | | BiDi-eng-pol | English ↔ Polish | [allegro/BiDi-eng-pol](https://huggingface.co/allegro/bidi-eng-pol) | | BiDi-eng-slk | English ↔ Slovak | [allegro/BiDi-eng-slk](https://huggingface.co/allegro/bidi-eng-slk) | | BiDi-eng-slv | English ↔ Slovene | [allegro/BiDi-eng-slv](https://huggingface.co/allegro/bidi-eng-slv) | | BiDi-pol-slk | Polish ↔ Slovak | [allegro/BiDi-pol-slk](https://huggingface.co/allegro/bidi-pol-slk) | | BiDi-pol-slv | Polish ↔ Slovene | [allegro/BiDi-pol-slv](https://huggingface.co/allegro/bidi-pol-slv) | | BiDi-slk-slv | Slovak ↔ Slovene | [allegro/BiDi-slk-slv](https://huggingface.co/allegro/bidi-slk-slv) | ## Use case quickstart Example code-snippet to use model. Due to bug the `MarianMTModel` must be used explicitly. Remember to adjust source and target languages to your use-case. 
```python from transformers import AutoTokenizer, MarianMTModel source_lang = "pol" target_lang = "ces" first_lang, second_lang = sorted([source_lang, target_lang]) model_name = f"Allegro/BiDi-{first_lang}-{second_lang}" tokenizer = AutoTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) text = f">>{target_lang}<<" + " " + "Allegro to internetowa platforma e-commerce, na której swoje produkty sprzedają średnie i małe firmy, jak również duże marki." batch_to_translate = [text] translations = model.generate(**tokenizer.batch_encode_plus(batch_to_translate, return_tensors="pt")) decoded_translation = tokenizer.batch_decode(translations, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0] print(decoded_translation) ``` Generated Czech output: > Allegro je online e-commerce platforma, na které své výrobky prodávají střední a malé firmy, stejně jako velké značky. ## Training [SentencePiece](https://github.com/google/sentencepiece) tokenizer has a vocab size 32k in total (16k per language). Tokenizer was trained on randomly sampled part of the training corpus. During the training we used the [MarianNMT](https://marian-nmt.github.io/) framework. Base marian configuration used: [transfromer-big](https://github.com/marian-nmt/marian-dev/blob/master/src/common/aliases.cpp#L113). All training parameters are listed in table below. ### Training hyperparameters: | **Hyperparameter** | **Value** | |----------------------------|------------------------------------------------------------------------------------------------------------| | Total Parameter Size | 209M | | Vocab Size | 32k | | Base Parameters | [Marian transfromer-big](https://github.com/marian-nmt/marian-dev/blob/master/src/common/aliases.cpp#L113) | | Number of Encoding Layers | 6 | | Number of Decoding Layers | 6 | | Model Dimension | 1024 | | FF Dimension | 4096 | | Heads | 16 | | Dropout | 0.1 | | Batch Size | mini batch fit to VRAM | | Training Accelerators | 4x A100 40GB | | Max Length | 100 tokens | | Optimizer | Adam | | Warmup steps | 8000 | | Context | Sentence-level MT | | Languages Supported | See [Bi-Di models available](#Bi-Di-models-available) | | Precision | float16 | | Validation Freq | 3000 steps | | Stop Metric | ChrF | | Stop Criterion | 20 Validation steps | ## Training corpora The main research question was: "How does adding additional, related languages impact the quality of the model?" - we explored it in the Slavic language family. ___BiDi___ models are our baseline before expanding the data-regime by using higher-level multilinguality. Datasets were downloaded via [MT-Data](https://pypi.org/project/mtdata/0.2.10/) library. The number of total examples post filtering and deduplication varies, depending on languages supported, see the table below. 
| **Language pair** | **Number of training examples** | |-------------------|--------------------------------:| | Czech ↔ Polish | 63M | | Czech ↔ Slovak | 30M | | Czech ↔ Slovene | 25M | | Polish ↔ Slovak | 26M | | Polish ↔ Slovene | 23M | | Slovak ↔ Slovene | 18M | | ---------------- | ------------------------------- | | Czech ↔ English | 151M | | English ↔ Polish | 150M | | English ↔ Slovak | 52M | | English ↔ Slovene | 40M | The datasets used (only applicable to specific directions): | **Corpus** | |----------------------| | paracrawl | | opensubtitles | | multiparacrawl | | dgt | | elrc | | xlent | | wikititles | | wmt | | wikimatrix | | dcep | | ELRC | | tildemodel | | europarl | | eesc | | eubookshop | | emea | | jrc_acquis | | ema | | qed | | elitr_eca | | EU-dcep | | rapid | | ecb | | kde4 | | news_commentary | | kde | | bible_uedin | | europat | | elra | | wikipedia | | wikimedia | | tatoeba | | globalvoices | | euconst | | ubuntu | | php | | ecdc | | eac | | eac_reference | | gnome | | EU-eac | | books | | EU-ecdc | | newsdev | | khresmoi_summary | | czechtourism | | khresmoi_summary_dev | | worldbank | ## Evaluation Evaluation of the models was performed on [Flores200](https://huggingface.co/datasets/facebook/flores) dataset. The table below compares performance of the open-source models and all applicable models from our collection. Metric used: Unbabel/wmt22-comet-da. | **Direction** | **CES → ENG** | **CES → POL** | **CES → SLK** | **CES → SLV** | **ENG → CES** | **ENG → POL** | **ENG → SLK** | **ENG → SLV** | **POL → CES** | **POL → ENG** | **POL → SLK** | **POL → SLV** | **SLK → CES** | **SLK → ENG** | **SLK → POL** | **SLK → SLV** | **SLV → CES** | **SLV → ENG** | **SLV → POL** | **SLV → SLK** | |----------------------------------------------------|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | **M2M-100** | 87.0 | 89.0 | 92.1 | 89.7 | 88.6 | 86.4 | 88.4 | 87.3 | 89.6 | 84.6 | 89.4 | 88.4 | 92.7 | 86.8 | 89.1 | 89.6 | 90.3 | 86.4 | 88.7 | 90.1 | | **NLLB-200** | 88.1 | 88.9 | 91.2 | 88.6 | 90.4 | __88.5__ | 90.1 | 88.8 | 89.4 | __85.8__ | 88.9 | 87.7 | 91.8 | 88.2 | 88.9 | 88.8 | 90.0 | __87.5__ | 88.6 | 89.4 | | **Seamless-M4T** | 87.5 | 80.9 | 90.8 | 82.0 | __90.7__ | __88.5__ | __90.6__ | __89.6__ | 79.6 | 85.4 | 80.0 | 76.4 | 91.5 | 87.2 | 81.2 | 82.9 | 80.9 | 87.3 | 76.7 | 81.0 | | **OPUS-MT Sla-Sla** | __88.2__ | 82.8 | - | 83.4 | 89.1 | 85.6 | - | 84.5 | 82.9 | 82.2 | - | 81.2 | - | - | - | - | 83.5 | 84.1 | 80.8 | - | | **OPUS-MT SK-EN** | - | - | - | - | - | - | 89.5 | - | - | - | - | - | - | __88.4__ | - | - | - | - | - | - | | _Our contributions:_ | | | | | | | | | | | | | | | | | | | | | | **BiDi Models**<span style="color:green;">*</span> | 87.5 | 89.4 | 92.4 | 89.8 | 87.8 | 86.2 | 87.2 | 86.6 | 90.0 | 85.0 | 89.1 | 88.4 | 92.9 | 87.3 | 88.8 | 89.4 | 90.0 | 86.9 | 88.1 | 89.1 | | **P4-pol**<span style="color:red;">◊</span> | - | 89.6 | 90.8 | 88.7 | - | - | - | - | 90.2 | - | 89.8 | 88.7 | 91.0 | - | 89.3 | 88.4 | 89.3 | - | 88.7 | 88.5 | | **P5-eng**<span style="color:red;">◊</span> | 88.0 | 89.0 | 90.7 | 89.0 | 88.8 | 87.3 | 88.4 | 87.5 | 89.0 | 85.7 | 88.5 | 87.8 | 91.0 | 88.2 | 88.6 | 88.5 | 89.6 | 87.2 | 88.4 | 88.9 | | **P5-ces**<span style="color:red;">◊</span> | 87.9 | 89.6 | 
__92.5__ | 89.9 | 88.4 | 85.0 | 87.9 | 85.9 | 90.3 | 84.5 | 89.5 | 88.0 | __93.0__ | 87.8 | 89.4 | 89.8 | 90.3 | 85.7 | 87.9 | 89.8 | | **MultiSlav-4slav** | - | 89.7 | __92.5__ | 90.0 | - | - | - | - | 90.2 | - | 89.6 | 88.7 | 92.9 | - | 89.4 | 90.1 | __90.6__ | - | 88.9 | __90.2__ | | **MultiSlav-5lang** | 87.8 | __89.8__ | __92.5__ | __90.1__ | 88.9 | 86.9 | 88.0 | 87.3 | __90.4__ | 85.4 | 89.8 | __88.9__ | 92.9 | 87.8 | __89.6__ | __90.2__ | __90.6__ | 87.0 | __89.2__ | __90.2__ | <span style="color:red;">◊</span> system of 2 models *Many2XXX* and *XXX2Many*, see [P5-ces2many](https://huggingface.co/allegro/p5-ces2many) <span style="color:green;">*</span> results combined for all bi-directional models; each values for applicable model ## Limitations and Biases We did not evaluate inherent bias contained in training datasets. It is advised to validate bias of our models in perspective domain. This might be especially problematic in translation from English to Slavic languages, which require explicitly indicated gender and might hallucinate based on bias present in training data. ## License The model is licensed under CC BY 4.0, which allows for commercial use. ## Citation TO BE UPDATED SOON 🤗 ## Contact Options Authors: - MLR @ Allegro: [Artur Kot](https://linkedin.com/in/arturkot), [Mikołaj Koszowski](https://linkedin.com/in/mkoszowski), [Wojciech Chojnowski](https://linkedin.com/in/wojciech-chojnowski-744702348), [Mieszko Rutkowski](https://linkedin.com/in/mieszko-rutkowski) - Laniqo.com: [Artur Nowakowski](https://linkedin.com/in/artur-nowakowski-mt), [Kamil Guttmann](https://linkedin.com/in/kamil-guttmann), [Mikołaj Pokrywka](https://linkedin.com/in/mikolaj-pokrywka) Please don't hesitate to contact authors if you have any questions or suggestions: - e-mail: [email protected] or [email protected] - LinkedIn: [Artur Kot](https://linkedin.com/in/arturkot) or [Mikołaj Koszowski](https://linkedin.com/in/mkoszowski)
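For reference, the scores in the evaluation table above come from the Unbabel/wmt22-comet-da metric; a minimal sketch of scoring a few translations with the `unbabel-comet` package (the package, its API, and the example sentences are assumptions of this sketch, not part of the original card) could look like this:

```python
# pip install unbabel-comet  (assumed package name for the COMET metric implementation)
from comet import download_model, load_from_checkpoint

# Download and load the same metric used in the evaluation table above.
model_path = download_model("Unbabel/wmt22-comet-da")
comet_model = load_from_checkpoint(model_path)

# Each item needs the source sentence, the machine translation, and a reference.
data = [
    {
        "src": "Allegro to internetowa platforma e-commerce.",
        "mt": "Allegro je online e-commerce platforma.",
        "ref": "Allegro je internetová e-commerce platforma.",
    }
]

# gpus=0 runs the metric on CPU; higher system scores indicate better translations.
model_output = comet_model.predict(data, batch_size=8, gpus=0)
print(model_output.system_score)
```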
{"language": ["cs", "pl", "sk", "sl", "en"], "library_name": "transformers", "license": "cc-by-4.0", "tags": ["translation", "mt", "marian", "pytorch", "sentence-piece", "multilingual", "allegro", "laniqo"]}
task
[ "TRANSLATION" ]
40,294
DrishtiSharma/llama-pro-8b-tweet-summarization
DrishtiSharma
null
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "dataset:dialogstudio", "base_model:TencentARC/LLaMA-Pro-8B", "base_model:adapter:TencentARC/LLaMA-Pro-8B", "license:llama2", "region:us" ]
2024-01-31T07:46:44Z
2024-01-31T16:02:17+00:00
0
0
--- base_model: TencentARC/LLaMA-Pro-8B datasets: - dialogstudio library_name: peft license: llama2 tags: - trl - sft - generated_from_trainer model-index: - name: llama-pro-8b-tweet-summarization results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # llama-pro-8b-tweet-summarization This model is a fine-tuned version of [TencentARC/LLaMA-Pro-8B](https://huggingface.co/TencentARC/LLaMA-Pro-8B) on the dialogstudio dataset. It achieves the following results on the evaluation set: - Loss: 3.0033 - Rouge Scores: {'rouge1': 74.5645505147687, 'rouge2': 61.793005354430264, 'rougeL': 50.4897941651719, 'rougeLsum': 74.49500409220269} - Bleu Scores: [0.699414840916846, 0.6885479681674689, 0.6673588982582369, 0.6432031117261758] - Gen Len: 463.0182 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 7 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge Scores | Bleu Scores | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------------------------------------------------------------------------------------------------------------------------:|:--------------------------------------------------------------------------------:|:--------:| | 1.9079 | 1.0 | 220 | 1.8554 | {'rouge1': 92.82930837423424, 'rouge2': 78.69002098258808, 'rougeL': 67.77525397424012, 'rougeLsum': 92.83122136025193} | [0.8742055507510261, 0.85321871129453, 0.8273357083346458, 0.7999205981632427] | 463.0182 | | 1.6535 | 2.0 | 440 | 1.8644 | {'rouge1': 93.48067160777316, 'rouge2': 78.9876103970411, 'rougeL': 67.83658288925474, 'rougeLsum': 93.48402466797468} | [0.8755971013914376, 0.8572483593980601, 0.8317815914576417, 0.8041763182250138] | 463.0182 | | 1.282 | 3.0 | 660 | 2.0002 | {'rouge1': 87.29036568539799, 'rouge2': 73.18485374150632, 'rougeL': 62.10087123916552, 'rougeLsum': 87.26501626335327} | [0.8760501251112071, 0.8593531308703309, 0.8335667371919002, 0.8051115198870601] | 463.0182 | | 0.8481 | 4.0 | 880 | 2.2502 | {'rouge1': 86.93286796220396, 'rouge2': 72.7995944273867, 'rougeL': 61.376242795856115, 'rougeLsum': 86.92669954280056} | [0.8749955177244617, 0.8580816714753104, 0.8319806879994025, 0.8031812686772342] | 463.0182 | | 0.5026 | 5.0 | 1100 | 2.5319 | {'rouge1': 74.55412702158021, 'rouge2': 61.949690968753835, 'rougeL': 51.12580948921186, 'rougeLsum': 74.48696099641717} | [0.7003890180040002, 0.689128452909103, 0.6682005373110111, 0.6444964084098124] | 463.0182 | | 0.297 | 6.0 | 1320 | 2.8374 | {'rouge1': 74.57349965516465, 'rouge2': 61.85762409604638, 'rougeL': 50.76329385869279, 'rougeLsum': 74.51078702126195} | [0.6993958025399786, 0.6884661028969841, 0.6674353154479407, 0.643507030069284] | 463.0182 | | 0.2129 | 7.0 | 1540 | 3.0033 | {'rouge1': 74.5645505147687, 'rouge2': 61.793005354430264, 'rougeL': 50.4897941651719, 'rougeLsum': 74.49500409220269} | [0.699414840916846, 0.6885479681674689, 0.6673588982582369, 0.6432031117261758] | 463.0182 | ### Framework versions - PEFT 0.8.2.dev0 - Transformers 
4.38.0.dev0 - Pytorch 2.1.0+cu121 - Datasets 2.16.2.dev0 - Tokenizers 0.15.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # llama-pro-8b-tweet-summarization This model is a fine-tuned version of [TencentARC/LLaMA-Pro-8B](https://huggingface.co/TencentARC/LLaMA-Pro-8B) on the dialogstudio dataset. It achieves the following results on the evaluation set: - Loss: 3.0033 - Rouge Scores: {'rouge1': 74.5645505147687, 'rouge2': 61.793005354430264, 'rougeL': 50.4897941651719, 'rougeLsum': 74.49500409220269} - Bleu Scores: [0.699414840916846, 0.6885479681674689, 0.6673588982582369, 0.6432031117261758] - Gen Len: 463.0182 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 7 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge Scores | Bleu Scores | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------------------------------------------------------------------------------------------------------------------------:|:--------------------------------------------------------------------------------:|:--------:| | 1.9079 | 1.0 | 220 | 1.8554 | {'rouge1': 92.82930837423424, 'rouge2': 78.69002098258808, 'rougeL': 67.77525397424012, 'rougeLsum': 92.83122136025193} | [0.8742055507510261, 0.85321871129453, 0.8273357083346458, 0.7999205981632427] | 463.0182 | | 1.6535 | 2.0 | 440 | 1.8644 | {'rouge1': 93.48067160777316, 'rouge2': 78.9876103970411, 'rougeL': 67.83658288925474, 'rougeLsum': 93.48402466797468} | [0.8755971013914376, 0.8572483593980601, 0.8317815914576417, 0.8041763182250138] | 463.0182 | | 1.282 | 3.0 | 660 | 2.0002 | {'rouge1': 87.29036568539799, 'rouge2': 73.18485374150632, 'rougeL': 62.10087123916552, 'rougeLsum': 87.26501626335327} | [0.8760501251112071, 0.8593531308703309, 0.8335667371919002, 0.8051115198870601] | 463.0182 | | 0.8481 | 4.0 | 880 | 2.2502 | {'rouge1': 86.93286796220396, 'rouge2': 72.7995944273867, 'rougeL': 61.376242795856115, 'rougeLsum': 86.92669954280056} | [0.8749955177244617, 0.8580816714753104, 0.8319806879994025, 0.8031812686772342] | 463.0182 | | 0.5026 | 5.0 | 1100 | 2.5319 | {'rouge1': 74.55412702158021, 'rouge2': 61.949690968753835, 'rougeL': 51.12580948921186, 'rougeLsum': 74.48696099641717} | [0.7003890180040002, 0.689128452909103, 0.6682005373110111, 0.6444964084098124] | 463.0182 | | 0.297 | 6.0 | 1320 | 2.8374 | {'rouge1': 74.57349965516465, 'rouge2': 61.85762409604638, 'rougeL': 50.76329385869279, 'rougeLsum': 74.51078702126195} | [0.6993958025399786, 0.6884661028969841, 0.6674353154479407, 0.643507030069284] | 463.0182 | | 0.2129 | 7.0 | 1540 | 3.0033 | {'rouge1': 74.5645505147687, 'rouge2': 61.793005354430264, 'rougeL': 50.4897941651719, 'rougeLsum': 74.49500409220269} | [0.699414840916846, 0.6885479681674689, 0.6673588982582369, 0.6432031117261758] | 463.0182 | ### Framework versions - PEFT 0.8.2.dev0 - Transformers 4.38.0.dev0 - Pytorch 2.1.0+cu121 - Datasets 2.16.2.dev0 - Tokenizers 0.15.1
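The card above documents the training setup but no inference snippet; since the repository holds a PEFT adapter trained on top of TencentARC/LLaMA-Pro-8B, a minimal sketch for loading it might look like the following (the prompt template is a placeholder, as the card does not specify the format used during fine-tuning):

```python
# pip install peft transformers accelerate
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "TencentARC/LLaMA-Pro-8B"
adapter_id = "DrishtiSharma/llama-pro-8b-tweet-summarization"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.float16, device_map="auto"
)

# Attach the SFT adapter weights published in this repository.
model = PeftModel.from_pretrained(base_model, adapter_id)

# Hypothetical prompt; the card does not document the training prompt template.
prompt = "Summarize the following tweets:\n<tweets here>\n\nSummary:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```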
{"base_model": "TencentARC/LLaMA-Pro-8B", "datasets": ["dialogstudio"], "library_name": "peft", "license": "llama2", "tags": ["trl", "sft", "generated_from_trainer"], "model-index": [{"name": "llama-pro-8b-tweet-summarization", "results": []}]}
task
[ "SUMMARIZATION" ]
40,295
noahkim/KoBigBird-KoBart-News-Summarization
noahkim
summarization
[ "transformers", "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "summarization", "news", "ko", "autotrain_compatible", "region:us" ]
2022-09-15T01:25:23Z
2022-11-10T01:19:59+00:00
38
1
--- language: ko tags: - summarization - news inference: false model-index: - name: KoBigBird-KoBart-News-Summarization results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # KoBigBird-KoBart-News-Summarization This model is a fine-tuned version of [noahkim/KoBigBird-KoBart-News-Summarization](https://huggingface.co/noahkim/KoBigBird-KoBart-News-Summarization) on the [daekeun-ml/naver-news-summarization-ko](https://huggingface.co/datasets/daekeun-ml/naver-news-summarization-ko) ## Model description <<20221110 Commit>> <<KoBigBird-KoBart-News-Summarization 모델 설명>> 다중문서요약(Multi-Document-Summarization) Task를 위해서 KoBigBird 모델을 Encoder-Decoder모델을 만들어서 학습을 진행했습니다. KoBigBird를 Decoder로 쓰려고 했으나 오류가 생겨서 요약에 특화된 KoBART의 Decoder를 활용해서 모델을 생성했습니다. 프로젝트용으로 뉴스 요약 모델 특화된 모델을 만들기 위해 기존에 만들었던 KoBigBird-KoBart-News-Summarization 모델에 추가적으로 daekeun-ml님이 제공해주신 naver-news-summarization-ko 데이터셋으로 파인튜닝 했습니다. 현재 AI-HUB에서 제공하는 요약 데이터를 추가 학습 진행 예정입니다. 지속적으로 발전시켜 좋은 성능의 모델을 구현하겠습니다. 감사합니다. 실행환경 - Google Colab Pro - CPU : Intel(R) Xeon(R) CPU @ 2.20GHz - GPU : A100-SXM4-40GB <pre><code> # Python Code from transformers import AutoTokenizer from transformers import AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("noahkim/KoT5_news_summarization") model = AutoModelForSeq2SeqLM.from_pretrained("noahkim/KoT5_news_summarization") </pre></code> The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 4.0748 | 1.0 | 1388 | 4.3067 | | 3.8457 | 2.0 | 2776 | 4.2039 | | 3.7459 | 3.0 | 4164 | 4.1433 | | 3.6773 | 4.0 | 5552 | 4.1236 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # KoBigBird-KoBart-News-Summarization This model is a fine-tuned version of [noahkim/KoBigBird-KoBart-News-Summarization](https://huggingface.co/noahkim/KoBigBird-KoBart-News-Summarization) on the [daekeun-ml/naver-news-summarization-ko](https://huggingface.co/datasets/daekeun-ml/naver-news-summarization-ko) ## Model description <<20221110 Commit>> <<KoBigBird-KoBart-News-Summarization 모델 설명>> 다중문서요약(Multi-Document-Summarization) Task를 위해서 KoBigBird 모델을 Encoder-Decoder모델을 만들어서 학습을 진행했습니다. KoBigBird를 Decoder로 쓰려고 했으나 오류가 생겨서 요약에 특화된 KoBART의 Decoder를 활용해서 모델을 생성했습니다. 프로젝트용으로 뉴스 요약 모델 특화된 모델을 만들기 위해 기존에 만들었던 KoBigBird-KoBart-News-Summarization 모델에 추가적으로 daekeun-ml님이 제공해주신 naver-news-summarization-ko 데이터셋으로 파인튜닝 했습니다. 현재 AI-HUB에서 제공하는 요약 데이터를 추가 학습 진행 예정입니다. 지속적으로 발전시켜 좋은 성능의 모델을 구현하겠습니다. 감사합니다. 실행환경 - Google Colab Pro - CPU : Intel(R) Xeon(R) CPU @ 2.20GHz - GPU : A100-SXM4-40GB <pre><code> # Python Code from transformers import AutoTokenizer from transformers import AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("noahkim/KoT5_news_summarization") model = AutoModelForSeq2SeqLM.from_pretrained("noahkim/KoT5_news_summarization") </pre></code> The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 4.0748 | 1.0 | 1388 | 4.3067 | | 3.8457 | 2.0 | 2776 | 4.2039 | | 3.7459 | 3.0 | 4164 | 4.1433 | | 3.6773 | 4.0 | 5552 | 4.1236 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.2
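The snippet embedded in the card loads `noahkim/KoT5_news_summarization`; a sketch that instead loads the checkpoint this card describes could look like the following (it assumes the encoder-decoder checkpoint loads through `AutoModelForSeq2SeqLM` and that its generation config is set, which the card does not confirm):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "noahkim/KoBigBird-KoBart-News-Summarization"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Korean news article to be summarized (placeholder text).
article = "여기에 요약할 뉴스 기사 본문을 넣으세요."

# BigBird encoders typically accept long inputs; 4096 tokens is an assumption here.
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=4096)
summary_ids = model.generate(**inputs, max_length=128, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```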
{"language": "ko", "tags": ["summarization", "news"], "inference": false, "model-index": [{"name": "KoBigBird-KoBart-News-Summarization", "results": []}]}
task
[ "SUMMARIZATION" ]
40,296
CATIE-AQ/QAmemberta
CATIE-AQ
question-answering
[ "transformers", "safetensors", "deberta-v2", "question-answering", "fr", "dataset:etalab-ia/piaf", "dataset:fquad", "dataset:lincoln/newsquadfr", "dataset:pragnakalp/squad_v2_french_translated", "dataset:CATIE-AQ/frenchQA", "arxiv:1910.09700", "arxiv:2411.08868", "base_model:almanach/camembertav2-base", "base_model:finetune:almanach/camembertav2-base", "doi:10.57967/hf/3639", "license:mit", "co2_eq_emissions", "endpoints_compatible", "region:us" ]
2024-11-20T18:23:19Z
2024-11-26T11:52:00+00:00
157
0
--- base_model: almanach/camembertav2-base datasets: - etalab-ia/piaf - fquad - lincoln/newsquadfr - pragnakalp/squad_v2_french_translated - CATIE-AQ/frenchQA language: fr library_name: transformers license: mit metrics: - f1 - exact_match widget: - text: Combien de personnes utilisent le français tous les jours ? context: 'Le français est une langue indo-européenne de la famille des langues romanes dont les locuteurs sont appelés francophones. Elle est parfois surnommée la langue de Molière. Le français est parlé, en 2023, sur tous les continents par environ 321 millions de personnes : 235 millions l''emploient quotidiennement et 90 millions en sont des locuteurs natifs. En 2018, 80 millions d''élèves et étudiants s''instruisent en français dans le monde. Selon l''Organisation internationale de la francophonie (OIF), il pourrait y avoir 700 millions de francophones sur Terre en 2050.' co2_eq_emissions: 87.5 --- # QAmemBERTa ## Model Description We present **QAmemBERTa**, which is a [CamemBERTa v2 base](https://huggingface.co/almanach/camembertav2-base) fine-tuned for the Question-Answering task for the French language on four French Q&A datasets composed of contexts and questions with their answers inside the context (= SQuAD 1.0 format) but also contexts and questions with their answers not inside the context (= SQuAD 2.0 format). All these datasets were concatenated into a single dataset that we called [frenchQA](https://huggingface.co/datasets/CATIE-AQ/frenchQA). This represents a total of over **221,348 context/question/answer triplets used to finetune this model and 6,376 to test it**. Our methodology is described in a blog post available in [English](https://blog.vaniila.ai/en/QA_en/) or [French](https://blog.vaniila.ai/QA/). ## Results (french QA test split) | Model | Parameters | Context | Exact_match | F1 | Answer_F1 | NoAnswer_F1 | | ----------- | ----------- | ----------- | ----------- | ----------- |----------- |----------- | | [etalab/camembert-base-squadFR-fquad-piaf](https://huggingface.co/AgentPublic/camembert-base-squadFR-fquad-piaf) | 110M | 512 tokens | 39.30 | 51.55 | 79.54 | 23.58 | [QAmembert](https://huggingface.co/CATIE-AQ/QAmembert)| 110M | 512 tokens | 77.14 | 86.88 | 75.66 | 98.11 | [QAmembert2](https://huggingface.co/CATIE-AQ/QAmembert2)| 112M | 1024 tokens | 76.47 | 88.25 | 78.66 | 97.84 | [QAmembert-large](https://huggingface.co/CATIE-AQ/QAmembert-large)| 336M | 512 tokens | 77.14 | 88.74 | 78.83 | **98.65** | QAmemberta (this version) | 111M | 1024 tokens | **78.18** | **89.53** | **81.40** | 97.64 Looking at the “Answer_f1” column, Etalab's model appears to be competitive on texts where the answer to the question is indeed in the text provided (it does better than QAmemBERT-large, for example). However, the fact that it doesn't handle texts where the answer to the question is not in the text provided is a drawback. In all cases, whether in terms of metrics, number of parameters or context size, QAmemBERTa achieves the best results. We therefore invite the reader to choose this model. ### Usage ```python from transformers import pipeline qa = pipeline('question-answering', model='CATIE-AQ/QAmemberta', tokenizer='CATIE-AQ/QAmemberta') result = qa({ 'question': "Combien de personnes utilisent le français tous les jours ?", 'context': "Le français est une langue indo-européenne de la famille des langues romanes dont les locuteurs sont appelés francophones. Elle est parfois surnommée la langue de Molière. 
Le français est parlé, en 2023, sur tous les continents par environ 321 millions de personnes : 235 millions l'emploient quotidiennement et 90 millions en sont des locuteurs natifs. En 2018, 80 millions d'élèves et étudiants s'instruisent en français dans le monde. Selon l'Organisation internationale de la francophonie (OIF), il pourrait y avoir 700 millions de francophones sur Terre en 2050." }) if result['score'] < 0.01: print("La réponse n'est pas dans le contexte fourni.") else : print(result['answer']) ``` ### Try it through Space A Space has been created to test the model. It is available [here](https://huggingface.co/spaces/CATIE-AQ/Qamembert). ## Environmental Impact *Carbon emissions were estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact.* - **Hardware Type:** A100 PCIe 40/80GB - **Hours used:** 7h and 35 min - **Cloud Provider:** Private Infrastructure - **Carbon Efficiency (kg/kWh):** 0.047kg (estimated from [electricitymaps](https://app.electricitymaps.com/zone/FR) ; we take the carbon intensity in France for November 20, 2024.) - **Carbon Emitted** *(Power consumption x Time x Carbon produced based on location of power grid)*: **0.0875 kg eq. CO2** ## Citations ### QAmemBERT2 & QAmemBERTa ``` @misc {qamemberta2024, author = { {BOURDOIS, Loïck} }, organization = { {Centre Aquitain des Technologies de l'Information et Electroniques} }, title = { QAmemberta (Revision 976a70b) }, year = 2024, url = { https://huggingface.co/CATIE-AQ/QAmemberta }, doi = { 10.57967/hf/3639 }, publisher = { Hugging Face } } ``` ### QAmemBERT ``` @misc {qamembert2023, author = { {ALBAR, Boris and BEDU, Pierre and BOURDOIS, Loïck} }, organization = { {Centre Aquitain des Technologies de l'Information et Electroniques} }, title = { QAmembert (Revision 9685bc3) }, year = 2023, url = { https://huggingface.co/CATIE-AQ/QAmembert}, doi = { 10.57967/hf/0821 }, publisher = { Hugging Face } } ``` ### CamemBERT ``` @inproceedings{martin2020camembert, title={CamemBERT: a Tasty French Language Model}, author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t}, booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics}, year={2020} } ``` ### CamemBERT 2.0 ``` @misc{antoun2024camembert20smarterfrench, title={CamemBERT 2.0: A Smarter French Language Model Aged to Perfection}, author={Wissam Antoun and Francis Kulumba and Rian Touchent and Éric de la Clergerie and Benoît Sagot and Djamé Seddah}, year={2024}, eprint={2411.08868}, archivePrefix={arXiv}, primaryClass={cs.CL}, url={https://arxiv.org/abs/2411.08868}, } ``` ### frenchQA ``` @misc {frenchQA2023, author = { {ALBAR, Boris and BEDU, Pierre and BOURDOIS, Loïck} }, organization = { {Centre Aquitain des Technologies de l'Information et Electroniques} }, title = { frenchQA (Revision 6249cd5) }, year = 2023, url = { https://huggingface.co/CATIE-AQ/frenchQA }, doi = { 10.57967/hf/0862 }, publisher = { Hugging Face } } ``` ### PIAF ``` @inproceedings{KeraronLBAMSSS20, author = {Rachel Keraron and Guillaume Lancrenon and Mathilde Bras and Fr{\'{e}}d{\'{e}}ric Allary and Gilles Moyse and Thomas Scialom and Edmundo{-}Pavel Soriano{-}Morales and Jacopo Staiano}, title = 
{Project {PIAF:} Building a Native French Question-Answering Dataset}, booktitle = {{LREC}}, pages = {5481--5490}, publisher = {European Language Resources Association}, year = {2020} } ``` ### FQuAD ``` @article{dHoffschmidt2020FQuADFQ, title={FQuAD: French Question Answering Dataset}, author={Martin d'Hoffschmidt and Maxime Vidal and Wacim Belblidia and Tom Brendl'e and Quentin Heinrich}, journal={ArXiv}, year={2020}, volume={abs/2002.06071} } ``` ### lincoln/newsquadfr ``` Hugging Face repository: https://hf.co/datasets/lincoln/newsquadfr ``` ### pragnakalp/squad_v2_french_translated ``` Hugging Face repository: https://hf.co/datasets/pragnakalp/squad_v2_french_translated ``` ## License MIT
null
Non_BioNLP
# QAmemBERTa ## Model Description We present **QAmemBERTa**, which is a [CamemBERTa v2 base](https://huggingface.co/almanach/camembertav2-base) fine-tuned for the Question-Answering task for the French language on four French Q&A datasets composed of contexts and questions with their answers inside the context (= SQuAD 1.0 format) but also contexts and questions with their answers not inside the context (= SQuAD 2.0 format). All these datasets were concatenated into a single dataset that we called [frenchQA](https://huggingface.co/datasets/CATIE-AQ/frenchQA). This represents a total of over **221,348 context/question/answer triplets used to finetune this model and 6,376 to test it**. Our methodology is described in a blog post available in [English](https://blog.vaniila.ai/en/QA_en/) or [French](https://blog.vaniila.ai/QA/). ## Results (french QA test split) | Model | Parameters | Context | Exact_match | F1 | Answer_F1 | NoAnswer_F1 | | ----------- | ----------- | ----------- | ----------- | ----------- |----------- |----------- | | [etalab/camembert-base-squadFR-fquad-piaf](https://huggingface.co/AgentPublic/camembert-base-squadFR-fquad-piaf) | 110M | 512 tokens | 39.30 | 51.55 | 79.54 | 23.58 | [QAmembert](https://huggingface.co/CATIE-AQ/QAmembert)| 110M | 512 tokens | 77.14 | 86.88 | 75.66 | 98.11 | [QAmembert2](https://huggingface.co/CATIE-AQ/QAmembert2)| 112M | 1024 tokens | 76.47 | 88.25 | 78.66 | 97.84 | [QAmembert-large](https://huggingface.co/CATIE-AQ/QAmembert-large)| 336M | 512 tokens | 77.14 | 88.74 | 78.83 | **98.65** | QAmemberta (this version) | 111M | 1024 tokens | **78.18** | **89.53** | **81.40** | 97.64 Looking at the “Answer_f1” column, Etalab's model appears to be competitive on texts where the answer to the question is indeed in the text provided (it does better than QAmemBERT-large, for example). However, the fact that it doesn't handle texts where the answer to the question is not in the text provided is a drawback. In all cases, whether in terms of metrics, number of parameters or context size, QAmemBERTa achieves the best results. We therefore invite the reader to choose this model. ### Usage ```python from transformers import pipeline qa = pipeline('question-answering', model='CATIE-AQ/QAmemberta', tokenizer='CATIE-AQ/QAmemberta') result = qa({ 'question': "Combien de personnes utilisent le français tous les jours ?", 'context': "Le français est une langue indo-européenne de la famille des langues romanes dont les locuteurs sont appelés francophones. Elle est parfois surnommée la langue de Molière. Le français est parlé, en 2023, sur tous les continents par environ 321 millions de personnes : 235 millions l'emploient quotidiennement et 90 millions en sont des locuteurs natifs. En 2018, 80 millions d'élèves et étudiants s'instruisent en français dans le monde. Selon l'Organisation internationale de la francophonie (OIF), il pourrait y avoir 700 millions de francophones sur Terre en 2050." }) if result['score'] < 0.01: print("La réponse n'est pas dans le contexte fourni.") else : print(result['answer']) ``` ### Try it through Space A Space has been created to test the model. It is available [here](https://huggingface.co/spaces/CATIE-AQ/Qamembert). ## Environmental Impact *Carbon emissions were estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact.* - **Hardware Type:** A100 PCIe 40/80GB - **Hours used:** 7h and 35 min - **Cloud Provider:** Private Infrastructure - **Carbon Efficiency (kg/kWh):** 0.047kg (estimated from [electricitymaps](https://app.electricitymaps.com/zone/FR) ; we take the carbon intensity in France for November 20, 2024.) - **Carbon Emitted** *(Power consumption x Time x Carbon produced based on location of power grid)*: **0.0875 kg eq. CO2** ## Citations ### QAmemBERT2 & QAmemBERTa ``` @misc {qamemberta2024, author = { {BOURDOIS, Loïck} }, organization = { {Centre Aquitain des Technologies de l'Information et Electroniques} }, title = { QAmemberta (Revision 976a70b) }, year = 2024, url = { https://huggingface.co/CATIE-AQ/QAmemberta }, doi = { 10.57967/hf/3639 }, publisher = { Hugging Face } } ``` ### QAmemBERT ``` @misc {qamembert2023, author = { {ALBAR, Boris and BEDU, Pierre and BOURDOIS, Loïck} }, organization = { {Centre Aquitain des Technologies de l'Information et Electroniques} }, title = { QAmembert (Revision 9685bc3) }, year = 2023, url = { https://huggingface.co/CATIE-AQ/QAmembert}, doi = { 10.57967/hf/0821 }, publisher = { Hugging Face } } ``` ### CamemBERT ``` @inproceedings{martin2020camembert, title={CamemBERT: a Tasty French Language Model}, author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t}, booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics}, year={2020} } ``` ### CamemBERT 2.0 ``` @misc{antoun2024camembert20smarterfrench, title={CamemBERT 2.0: A Smarter French Language Model Aged to Perfection}, author={Wissam Antoun and Francis Kulumba and Rian Touchent and Éric de la Clergerie and Benoît Sagot and Djamé Seddah}, year={2024}, eprint={2411.08868}, archivePrefix={arXiv}, primaryClass={cs.CL}, url={https://arxiv.org/abs/2411.08868}, } ``` ### frenchQA ``` @misc {frenchQA2023, author = { {ALBAR, Boris and BEDU, Pierre and BOURDOIS, Loïck} }, organization = { {Centre Aquitain des Technologies de l'Information et Electroniques} }, title = { frenchQA (Revision 6249cd5) }, year = 2023, url = { https://huggingface.co/CATIE-AQ/frenchQA }, doi = { 10.57967/hf/0862 }, publisher = { Hugging Face } } ``` ### PIAF ``` @inproceedings{KeraronLBAMSSS20, author = {Rachel Keraron and Guillaume Lancrenon and Mathilde Bras and Fr{\'{e}}d{\'{e}}ric Allary and Gilles Moyse and Thomas Scialom and Edmundo{-}Pavel Soriano{-}Morales and Jacopo Staiano}, title = {Project {PIAF:} Building a Native French Question-Answering Dataset}, booktitle = {{LREC}}, pages = {5481--5490}, publisher = {European Language Resources Association}, year = {2020} } ``` ### FQuAD ``` @article{dHoffschmidt2020FQuADFQ, title={FQuAD: French Question Answering Dataset}, author={Martin d'Hoffschmidt and Maxime Vidal and Wacim Belblidia and Tom Brendl'e and Quentin Heinrich}, journal={ArXiv}, year={2020}, volume={abs/2002.06071} } ``` ### lincoln/newsquadfr ``` Hugging Face repository: https://hf.co/datasets/lincoln/newsquadfr ``` ### pragnakalp/squad_v2_french_translated ``` Hugging Face repository: https://hf.co/datasets/pragnakalp/squad_v2_french_translated ``` ## License MIT
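Besides the score-threshold check shown in the usage example, the `question-answering` pipeline also exposes a `handle_impossible_answer` flag; a possible variant for surfacing unanswerable questions (its exact behaviour with this checkpoint is an assumption) is:

```python
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="CATIE-AQ/QAmemberta",
    tokenizer="CATIE-AQ/QAmemberta",
)

result = qa(
    question="Quelle est la capitale de l'Australie ?",
    context="Le français est parlé sur tous les continents par environ 321 millions de personnes.",
    handle_impossible_answer=True,
)

# An empty answer string (or a very low score) signals that the context
# does not contain the answer to the question.
print(result)
```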
{"base_model": "almanach/camembertav2-base", "datasets": ["etalab-ia/piaf", "fquad", "lincoln/newsquadfr", "pragnakalp/squad_v2_french_translated", "CATIE-AQ/frenchQA"], "language": "fr", "library_name": "transformers", "license": "mit", "metrics": ["f1", "exact_match"], "widget": [{"text": "Combien de personnes utilisent le français tous les jours ?", "context": "Le français est une langue indo-européenne de la famille des langues romanes dont les locuteurs sont appelés francophones. Elle est parfois surnommée la langue de Molière. Le français est parlé, en 2023, sur tous les continents par environ 321 millions de personnes : 235 millions l'emploient quotidiennement et 90 millions en sont des locuteurs natifs. En 2018, 80 millions d'élèves et étudiants s'instruisent en français dans le monde. Selon l'Organisation internationale de la francophonie (OIF), il pourrait y avoir 700 millions de francophones sur Terre en 2050."}], "co2_eq_emissions": 87.5}
task
[ "QUESTION_ANSWERING" ]
40,297
Helsinki-NLP/opus-mt-afa-afa
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "so", "ti", "am", "he", "mt", "ar", "afa", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2023-08-16T11:25:28+00:00
31
0
--- language: - so - ti - am - he - mt - ar - afa license: apache-2.0 tags: - translation --- ### afa-afa * source group: Afro-Asiatic languages * target group: Afro-Asiatic languages * OPUS readme: [afa-afa](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/afa-afa/README.md) * model: transformer * source language(s): apc ara arq arz heb kab mlt shy_Latn thv * target language(s): apc ara arq arz heb kab mlt shy_Latn thv * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID) * download original weights: [opus-2020-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.zip) * test set translations: [opus-2020-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.test.txt) * test set scores: [opus-2020-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara-ara.ara.ara | 4.3 | 0.148 | | Tatoeba-test.ara-heb.ara.heb | 31.9 | 0.525 | | Tatoeba-test.ara-kab.ara.kab | 0.3 | 0.120 | | Tatoeba-test.ara-mlt.ara.mlt | 14.0 | 0.428 | | Tatoeba-test.ara-shy.ara.shy | 1.3 | 0.050 | | Tatoeba-test.heb-ara.heb.ara | 17.0 | 0.464 | | Tatoeba-test.heb-kab.heb.kab | 1.9 | 0.104 | | Tatoeba-test.kab-ara.kab.ara | 0.3 | 0.044 | | Tatoeba-test.kab-heb.kab.heb | 5.1 | 0.099 | | Tatoeba-test.kab-shy.kab.shy | 2.2 | 0.009 | | Tatoeba-test.kab-tmh.kab.tmh | 10.7 | 0.007 | | Tatoeba-test.mlt-ara.mlt.ara | 29.1 | 0.498 | | Tatoeba-test.multi.multi | 20.8 | 0.434 | | Tatoeba-test.shy-ara.shy.ara | 1.2 | 0.053 | | Tatoeba-test.shy-kab.shy.kab | 2.0 | 0.134 | | Tatoeba-test.tmh-kab.tmh.kab | 0.0 | 0.047 | ### System Info: - hf_name: afa-afa - source_languages: afa - target_languages: afa - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/afa-afa/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['so', 'ti', 'am', 'he', 'mt', 'ar', 'afa'] - src_constituents: {'som', 'rif_Latn', 'tir', 'kab', 'arq', 'afb', 'amh', 'arz', 'heb', 'shy_Latn', 'apc', 'mlt', 'thv', 'ara', 'hau_Latn', 'acm', 'ary'} - tgt_constituents: {'som', 'rif_Latn', 'tir', 'kab', 'arq', 'afb', 'amh', 'arz', 'heb', 'shy_Latn', 'apc', 'mlt', 'thv', 'ara', 'hau_Latn', 'acm', 'ary'} - src_multilingual: True - tgt_multilingual: True - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.test.txt - src_alpha3: afa - tgt_alpha3: afa - short_pair: afa-afa - chrF2_score: 0.434 - bleu: 20.8 - brevity_penalty: 1.0 - ref_len: 15215.0 - src_name: Afro-Asiatic languages - tgt_name: Afro-Asiatic languages - train_date: 2020-07-26 - src_alpha2: afa - tgt_alpha2: afa - prefer_old: False - long_pair: afa-afa - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
null
Non_BioNLP
### afa-afa * source group: Afro-Asiatic languages * target group: Afro-Asiatic languages * OPUS readme: [afa-afa](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/afa-afa/README.md) * model: transformer * source language(s): apc ara arq arz heb kab mlt shy_Latn thv * target language(s): apc ara arq arz heb kab mlt shy_Latn thv * model: transformer * pre-processing: normalization + SentencePiece (spm32k,spm32k) * a sentence initial language token is required in the form of `>>id<<` (id = valid target language ID) * download original weights: [opus-2020-07-26.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.zip) * test set translations: [opus-2020-07-26.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.test.txt) * test set scores: [opus-2020-07-26.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | Tatoeba-test.ara-ara.ara.ara | 4.3 | 0.148 | | Tatoeba-test.ara-heb.ara.heb | 31.9 | 0.525 | | Tatoeba-test.ara-kab.ara.kab | 0.3 | 0.120 | | Tatoeba-test.ara-mlt.ara.mlt | 14.0 | 0.428 | | Tatoeba-test.ara-shy.ara.shy | 1.3 | 0.050 | | Tatoeba-test.heb-ara.heb.ara | 17.0 | 0.464 | | Tatoeba-test.heb-kab.heb.kab | 1.9 | 0.104 | | Tatoeba-test.kab-ara.kab.ara | 0.3 | 0.044 | | Tatoeba-test.kab-heb.kab.heb | 5.1 | 0.099 | | Tatoeba-test.kab-shy.kab.shy | 2.2 | 0.009 | | Tatoeba-test.kab-tmh.kab.tmh | 10.7 | 0.007 | | Tatoeba-test.mlt-ara.mlt.ara | 29.1 | 0.498 | | Tatoeba-test.multi.multi | 20.8 | 0.434 | | Tatoeba-test.shy-ara.shy.ara | 1.2 | 0.053 | | Tatoeba-test.shy-kab.shy.kab | 2.0 | 0.134 | | Tatoeba-test.tmh-kab.tmh.kab | 0.0 | 0.047 | ### System Info: - hf_name: afa-afa - source_languages: afa - target_languages: afa - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/afa-afa/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['so', 'ti', 'am', 'he', 'mt', 'ar', 'afa'] - src_constituents: {'som', 'rif_Latn', 'tir', 'kab', 'arq', 'afb', 'amh', 'arz', 'heb', 'shy_Latn', 'apc', 'mlt', 'thv', 'ara', 'hau_Latn', 'acm', 'ary'} - tgt_constituents: {'som', 'rif_Latn', 'tir', 'kab', 'arq', 'afb', 'amh', 'arz', 'heb', 'shy_Latn', 'apc', 'mlt', 'thv', 'ara', 'hau_Latn', 'acm', 'ary'} - src_multilingual: True - tgt_multilingual: True - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/afa-afa/opus-2020-07-26.test.txt - src_alpha3: afa - tgt_alpha3: afa - short_pair: afa-afa - chrF2_score: 0.434 - bleu: 20.8 - brevity_penalty: 1.0 - ref_len: 15215.0 - src_name: Afro-Asiatic languages - tgt_name: Afro-Asiatic languages - train_date: 2020-07-26 - src_alpha2: afa - tgt_alpha2: afa - prefer_old: False - long_pair: afa-afa - helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535 - transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b - port_machine: brutasse - port_time: 2020-08-21-14:41
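The card states that a sentence-initial `>>id<<` token is required but does not include a usage snippet; a minimal sketch (the chosen target language `heb` and the Arabic example sentence are only illustrative) could be:

```python
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-afa-afa"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# The leading >>heb<< token selects Hebrew as the target language;
# any valid target language ID from the card can be used instead.
src_text = [">>heb<< " + "صباح الخير"]

batch = tokenizer(src_text, return_tensors="pt", padding=True)
translated = model.generate(**batch)
print(tokenizer.batch_decode(translated, skip_special_tokens=True))
```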
{"language": ["so", "ti", "am", "he", "mt", "ar", "afa"], "license": "apache-2.0", "tags": ["translation"]}
task
[ "TRANSLATION" ]
40,298
RichardErkhov/mhenrichsen_-_gemma-2b-gguf
RichardErkhov
null
[ "gguf", "arxiv:2312.11805", "arxiv:2009.03300", "arxiv:1905.07830", "arxiv:1911.11641", "arxiv:1904.09728", "arxiv:1905.10044", "arxiv:1907.10641", "arxiv:1811.00937", "arxiv:1809.02789", "arxiv:1911.01547", "arxiv:1705.03551", "arxiv:2107.03374", "arxiv:2108.07732", "arxiv:2110.14168", "arxiv:2304.06364", "arxiv:2206.04615", "arxiv:1804.06876", "arxiv:2110.08193", "arxiv:2009.11462", "arxiv:2101.11718", "arxiv:1804.09301", "arxiv:2109.07958", "arxiv:2203.09509", "endpoints_compatible", "region:us" ]
2024-06-29T15:38:04Z
2024-06-29T22:39:22+00:00
35
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) gemma-2b - GGUF - Model creator: https://huggingface.co/mhenrichsen/ - Original model: https://huggingface.co/mhenrichsen/gemma-2b/ | Name | Quant method | Size | | ---- | ---- | ---- | | [gemma-2b.Q2_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q2_K.gguf) | Q2_K | 1.08GB | | [gemma-2b.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ3_XS.gguf) | IQ3_XS | 1.16GB | | [gemma-2b.IQ3_S.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ3_S.gguf) | IQ3_S | 1.2GB | | [gemma-2b.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q3_K_S.gguf) | Q3_K_S | 1.2GB | | [gemma-2b.IQ3_M.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ3_M.gguf) | IQ3_M | 1.22GB | | [gemma-2b.Q3_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q3_K.gguf) | Q3_K | 1.29GB | | [gemma-2b.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q3_K_M.gguf) | Q3_K_M | 1.29GB | | [gemma-2b.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q3_K_L.gguf) | Q3_K_L | 1.36GB | | [gemma-2b.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ4_XS.gguf) | IQ4_XS | 1.4GB | | [gemma-2b.Q4_0.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_0.gguf) | Q4_0 | 1.44GB | | [gemma-2b.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ4_NL.gguf) | IQ4_NL | 1.45GB | | [gemma-2b.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_K_S.gguf) | Q4_K_S | 1.45GB | | [gemma-2b.Q4_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_K.gguf) | Q4_K | 1.52GB | | [gemma-2b.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_K_M.gguf) | Q4_K_M | 1.52GB | | [gemma-2b.Q4_1.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_1.gguf) | Q4_1 | 1.56GB | | [gemma-2b.Q5_0.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_0.gguf) | Q5_0 | 1.68GB | | [gemma-2b.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_K_S.gguf) | Q5_K_S | 1.68GB | | [gemma-2b.Q5_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_K.gguf) | Q5_K | 1.71GB | | [gemma-2b.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_K_M.gguf) | Q5_K_M | 1.71GB | | [gemma-2b.Q5_1.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_1.gguf) | Q5_1 | 1.79GB | | [gemma-2b.Q6_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q6_K.gguf) | Q6_K | 1.92GB | | [gemma-2b.Q8_0.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q8_0.gguf) | Q8_0 | 2.49GB | Original model description: --- library_name: transformers tags: [] --- # Reupload of Gemma 2b base. 
Original readme below.

# Gemma Model Card

**Model Page**: [Gemma](https://ai.google.dev/gemma/docs)

This model card corresponds to the 2B base version of the Gemma model. You can also visit the model card of the [7B base model](https://huggingface.co/google/gemma-7b), [7B instruct model](https://huggingface.co/google/gemma-7b-it), and [2B instruct model](https://huggingface.co/google/gemma-2b-it).

**Resources and Technical Documentation**:

* [Responsible Generative AI Toolkit](https://ai.google.dev/responsible)
* [Gemma on Kaggle](https://www.kaggle.com/models/google/gemma)
* [Gemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335?version=gemma-2b-gg-hf)

**Terms of Use**: [Terms](https://www.kaggle.com/models/google/gemma/license/consent)

**Authors**: Google

## Model Information

Summary description and brief definition of inputs and outputs.

### Description

Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning. Their relatively small size makes it possible to deploy them in environments with limited resources such as a laptop, desktop or your own cloud infrastructure, democratizing access to state of the art AI models and helping foster innovation for everyone.

### Usage

Below we share some code snippets on how to get quickly started with running the model. First make sure to `pip install -U transformers`, then copy the snippet from the section that is relevant for your use case.

#### Fine-tuning the model

You can find fine-tuning scripts and a notebook under the [`examples/` directory](https://huggingface.co/google/gemma-7b/tree/main/examples) of the [`google/gemma-7b`](https://huggingface.co/google/gemma-7b) repository. To adapt them to this model, simply change the model-id to `google/gemma-2b`. In that repository, we provide:

* A script to perform Supervised Fine-Tuning (SFT) on UltraChat dataset using QLoRA
* A script to perform SFT using FSDP on TPU devices
* A notebook that you can run on a free-tier Google Colab instance to perform SFT on English quotes dataset

#### Running the model on a CPU

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b")

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```

#### Running the model on a single / multi GPU

```python
# pip install accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```

#### Running the model on a GPU using different precisions

* _Using `torch.float16`_

```python
# pip install accelerate
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto", torch_dtype=torch.float16)

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```

* _Using `torch.bfloat16`_

```python
# pip install accelerate
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto", torch_dtype=torch.bfloat16)

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```

#### Quantized Versions through `bitsandbytes`

* _Using 8-bit precision (int8)_

```python
# pip install bitsandbytes accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", quantization_config=quantization_config)

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```

* _Using 4-bit precision_

```python
# pip install bitsandbytes accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", quantization_config=quantization_config)

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```

#### Other optimizations

* _Flash Attention 2_

First make sure to install `flash-attn` in your environment: `pip install flash-attn`

```diff
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
+   attn_implementation="flash_attention_2"
).to(0)
```

### Inputs and outputs

* **Input:** Text string, such as a question, a prompt, or a document to be summarized.
* **Output:** Generated English-language text in response to the input, such as an answer to a question, or a summary of a document.

## Model Data

Data used for model training and how the data was processed.

### Training Dataset

These models were trained on a dataset of text data that includes a wide variety of sources, totaling 6 trillion tokens. Here are the key components:

* Web Documents: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. Primarily English-language content.
* Code: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code or understand code-related questions. 
* Mathematics: Training on mathematical text helps the model learn logical reasoning, symbolic representation, and to address mathematical queries. The combination of these diverse data sources is crucial for training a powerful language model that can handle a wide variety of different tasks and text formats. ### Data Preprocessing Here are the key data cleaning and filtering methods applied to the training data: * CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content * Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets. * Additional methods: Filtering based on content quality and safely in line with [our policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11). ## Implementation Information Details about the model internals. ### Hardware Gemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e). Training large language models requires significant computational power. TPUs, designed specifically for matrix operations common in machine learning, offer several advantages in this domain: * Performance: TPUs are specifically designed to handle the massive computations involved in training LLMs. They can speed up training considerably compared to CPUs. * Memory: TPUs often come with large amounts of high-bandwidth memory, allowing for the handling of large models and batch sizes during training. This can lead to better model quality. * Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for handling the growing complexity of large foundation models. You can distribute training across multiple TPU devices for faster and more efficient processing. * Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective solution for training large models compared to CPU-based infrastructure, especially when considering the time and resources saved due to faster training. * These advantages are aligned with [Google's commitments to operate sustainably](https://sustainability.google/operating-sustainably/). ### Software Training was done using [JAX](https://github.com/google/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/ml-pathways). JAX allows researchers to take advantage of the latest generation of hardware, including TPUs, for faster and more efficient training of large models. ML Pathways is Google's latest effort to build artificially intelligent systems capable of generalizing across multiple tasks. This is specially suitable for [foundation models](https://ai.google/discover/foundation-models/), including large language models like these ones. Together, JAX and ML Pathways are used as described in the [paper about the Gemini family of models](https://arxiv.org/abs/2312.11805); "the 'single controller' programming model of Jax and Pathways allows a single Python process to orchestrate the entire training run, dramatically simplifying the development workflow." ## Evaluation Model evaluation metrics and results. 
### Benchmark Results These models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation: | Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [MMLU](https://arxiv.org/abs/2009.03300) | 5-shot, top-1 | 42.3 | 64.3 | | [HellaSwag](https://arxiv.org/abs/1905.07830) | 0-shot |71.4 | 81.2 | | [PIQA](https://arxiv.org/abs/1911.11641) | 0-shot | 77.3 | 81.2 | | [SocialIQA](https://arxiv.org/abs/1904.09728) | 0-shot | 59.7 | 51.8 | | [BooIQ](https://arxiv.org/abs/1905.10044) | 0-shot | 69.4 | 83.2 | | [WinoGrande](https://arxiv.org/abs/1907.10641) | partial score | 65.4 | 72.3 | | [CommonsenseQA](https://arxiv.org/abs/1811.00937) | 7-shot | 65.3 | 71.3 | | [OpenBookQA](https://arxiv.org/abs/1809.02789) | | 47.8 | 52.8 | | [ARC-e](https://arxiv.org/abs/1911.01547) | | 73.2 | 81.5 | | [ARC-c](https://arxiv.org/abs/1911.01547) | | 42.1 | 53.2 | | [TriviaQA](https://arxiv.org/abs/1705.03551) | 5-shot | 53.2 | 63.4 | | [Natural Questions](https://github.com/google-research-datasets/natural-questions) | 5-shot | - | 23 | | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 22.0 | 32.3 | | [MBPP](https://arxiv.org/abs/2108.07732) | 3-shot | 29.2 | 44.4 | | [GSM8K](https://arxiv.org/abs/2110.14168) | maj@1 | 17.7 | 46.4 | | [MATH](https://arxiv.org/abs/2108.07732) | 4-shot | 11.8 | 24.3 | | [AGIEval](https://arxiv.org/abs/2304.06364) | | 24.2 | 41.7 | | [BIG-Bench](https://arxiv.org/abs/2206.04615) | | 35.2 | 55.1 | | ------------------------------ | ------------- | ----------- | --------- | | **Average** | | **54.0** | **56.4** | ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Text-to-Text Content Safety: Human evaluation on prompts covering safety policies including child sexual abuse and exploitation, harassment, violence and gore, and hate speech. * Text-to-Text Representational Harms: Benchmark against relevant academic datasets such as [WinoBias](https://arxiv.org/abs/1804.06876) and [BBQ Dataset](https://arxiv.org/abs/2110.08193v2). * Memorization: Automated evaluation of memorization of training data, including the risk of personally identifiable information exposure. * Large-scale harm: Tests for "dangerous capabilities," such as chemical, biological, radiological, and nuclear (CBRN) risks. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11) for categories such as child safety, content safety, representational harms, memorization, large-scale harms. On top of robust internal evaluations, the results of well known safety benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA are shown here. 
| Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [RealToxicity](https://arxiv.org/abs/2009.11462) | average | 6.86 | 7.90 | | [BOLD](https://arxiv.org/abs/2101.11718) | | 45.57 | 49.08 | | [CrowS-Pairs](https://aclanthology.org/2020.emnlp-main.154/) | top-1 | 45.82 | 51.33 | | [BBQ Ambig](https://arxiv.org/abs/2110.08193v2) | 1-shot, top-1 | 62.58 | 92.54 | | [BBQ Disambig](https://arxiv.org/abs/2110.08193v2) | top-1 | 54.62 | 71.99 | | [Winogender](https://arxiv.org/abs/1804.09301) | top-1 | 51.25 | 54.17 | | [TruthfulQA](https://arxiv.org/abs/2109.07958) | | 44.84 | 31.81 | | [Winobias 1_2](https://arxiv.org/abs/1804.06876) | | 56.12 | 59.09 | | [Winobias 2_2](https://arxiv.org/abs/1804.06876) | | 91.10 | 92.23 | | [Toxigen](https://arxiv.org/abs/2203.09509) | | 29.77 | 39.59 | | ------------------------------ | ------------- | ----------- | --------- | ## Usage and Limitations These models have certain limitations that users should be aware of. ### Intended Usage Open Large Language Models (LLMs) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. * Content Creation and Communication * Text Generation: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts. * Chatbots and Conversational AI: Power conversational interfaces for customer service, virtual assistants, or interactive applications. * Text Summarization: Generate concise summaries of a text corpus, research papers, or reports. * Research and Education * Natural Language Processing (NLP) Research: These models can serve as a foundation for researchers to experiment with NLP techniques, develop algorithms, and contribute to the advancement of the field. * Language Learning Tools: Support interactive language learning experiences, aiding in grammar correction or providing writing practice. * Knowledge Exploration: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics. ### Limitations * Training Data * The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses. * The scope of the training dataset determines the subject areas the model can handle effectively. * Context and Task Complexity * LLMs are better at tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging. * A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point). * Language Ambiguity and Nuance * Natural language is inherently complex. LLMs might struggle to grasp subtle nuances, sarcasm, or figurative language. * Factual Accuracy * LLMs generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements. * Common Sense * LLMs rely on statistical patterns in language. They might lack the ability to apply common sense reasoning in certain situations. 
### Ethical Considerations and Risks The development of large language models (LLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following: * Bias and Fairness * LLMs trained on large-scale, real-world text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny, input data pre-processing described and posterior evaluations reported in this card. * Misinformation and Misuse * LLMs can be misused to generate text that is false, misleading, or harmful. * Guidelines are provided for responsible use with the model, see the [Responsible Generative AI Toolkit](http://ai.google.dev/gemma/responsible). * Transparency and Accountability: * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes. * A responsibly developed open model offers the opportunity to share innovation by making LLM technology accessible to developers and researchers across the AI ecosystem. Risks identified and mitigations: * Perpetuation of biases: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases. * Generation of harmful content: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases. * Misuse for malicious purposes: Technical limitations and developer and end-user education can help mitigate against malicious applications of LLMs. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy). * Privacy violations: Models were trained on data filtered for removal of PII (Personally Identifiable Information). Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques. ### Benefits At the time of release, this family of models provides high-performance open large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models. Using the benchmark evaluation metrics described in this document, these models have shown to provide superior performance to other, comparably-sized open model alternatives.
null
Non_BioNLP
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) gemma-2b - GGUF - Model creator: https://huggingface.co/mhenrichsen/ - Original model: https://huggingface.co/mhenrichsen/gemma-2b/ | Name | Quant method | Size | | ---- | ---- | ---- | | [gemma-2b.Q2_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q2_K.gguf) | Q2_K | 1.08GB | | [gemma-2b.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ3_XS.gguf) | IQ3_XS | 1.16GB | | [gemma-2b.IQ3_S.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ3_S.gguf) | IQ3_S | 1.2GB | | [gemma-2b.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q3_K_S.gguf) | Q3_K_S | 1.2GB | | [gemma-2b.IQ3_M.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ3_M.gguf) | IQ3_M | 1.22GB | | [gemma-2b.Q3_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q3_K.gguf) | Q3_K | 1.29GB | | [gemma-2b.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q3_K_M.gguf) | Q3_K_M | 1.29GB | | [gemma-2b.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q3_K_L.gguf) | Q3_K_L | 1.36GB | | [gemma-2b.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ4_XS.gguf) | IQ4_XS | 1.4GB | | [gemma-2b.Q4_0.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_0.gguf) | Q4_0 | 1.44GB | | [gemma-2b.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.IQ4_NL.gguf) | IQ4_NL | 1.45GB | | [gemma-2b.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_K_S.gguf) | Q4_K_S | 1.45GB | | [gemma-2b.Q4_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_K.gguf) | Q4_K | 1.52GB | | [gemma-2b.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_K_M.gguf) | Q4_K_M | 1.52GB | | [gemma-2b.Q4_1.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q4_1.gguf) | Q4_1 | 1.56GB | | [gemma-2b.Q5_0.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_0.gguf) | Q5_0 | 1.68GB | | [gemma-2b.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_K_S.gguf) | Q5_K_S | 1.68GB | | [gemma-2b.Q5_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_K.gguf) | Q5_K | 1.71GB | | [gemma-2b.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_K_M.gguf) | Q5_K_M | 1.71GB | | [gemma-2b.Q5_1.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q5_1.gguf) | Q5_1 | 1.79GB | | [gemma-2b.Q6_K.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q6_K.gguf) | Q6_K | 1.92GB | | [gemma-2b.Q8_0.gguf](https://huggingface.co/RichardErkhov/mhenrichsen_-_gemma-2b-gguf/blob/main/gemma-2b.Q8_0.gguf) | Q8_0 | 2.49GB | Original model description: --- library_name: transformers tags: [] --- # Reupload of Gemma 2b base. 
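A quick, hedged usage note (not part of the original upload): the quantized files listed above can be run with any GGUF-compatible runtime. The sketch below uses `llama-cpp-python` and picks the `gemma-2b.Q4_K_M.gguf` file from the table purely as an example; the context size and sampling settings are illustrative assumptions, not recommendations.

```python
# pip install llama-cpp-python huggingface_hub   (assumed environment)
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download one of the quantized files listed in the table above
model_path = hf_hub_download(
    repo_id="RichardErkhov/mhenrichsen_-_gemma-2b-gguf",
    filename="gemma-2b.Q4_K_M.gguf",
)

# Load the GGUF file; n_ctx is an illustrative context length
llm = Llama(model_path=model_path, n_ctx=2048)

# Plain text completion (this is a base model, so no chat template is applied)
output = llm("Write me a poem about Machine Learning.", max_tokens=128)
print(output["choices"][0]["text"])
```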
Original readme below. # Gemma Model Card **Model Page**: [Gemma](https://ai.google.dev/gemma/docs) This model card corresponds to the 2B base version of the Gemma model. You can also visit the model card of the [7B base model](https://huggingface.co/google/gemma-7b), [7B instruct model](https://huggingface.co/google/gemma-7b-it), and [2B instruct model](https://huggingface.co/google/gemma-2b-it). **Resources and Technical Documentation**: * [Responsible Generative AI Toolkit](https://ai.google.dev/responsible) * [Gemma on Kaggle](https://www.kaggle.com/models/google/gemma) * [Gemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335?version=gemma-2b-gg-hf) **Terms of Use**: [Terms](https://www.kaggle.com/models/google/gemma/license/consent) **Authors**: Google ## Model Information Summary description and brief definition of inputs and outputs. ### Description Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning. Their relatively small size makes it possible to deploy them in environments with limited resources such as a laptop, desktop or your own cloud infrastructure, democratizing access to state of the art AI models and helping foster innovation for everyone. ### Usage Below we share some code snippets on how to get quickly started with running the model. First make sure to `pip install -U transformers`, then copy the snippet from the section that is relevant for your use case. #### Fine-tuning the model You can find fine-tuning scripts and notebooks under the [`examples/` directory](https://huggingface.co/google/gemma-7b/tree/main/examples) of the [`google/gemma-7b`](https://huggingface.co/google/gemma-7b) repository. To adapt them to this model, simply change the model-id to `google/gemma-2b`. In that repository, we provide: * A script to perform Supervised Fine-Tuning (SFT) on UltraChat dataset using QLoRA * A script to perform SFT using FSDP on TPU devices * A notebook that you can run on a free-tier Google Colab instance to perform SFT on English quotes dataset #### Running the model on a CPU ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b") input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Running the model on a single / multi GPU ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto") input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Running the model on a GPU using different precisions * _Using `torch.float16`_ ```python # pip install accelerate import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto", torch_dtype=torch.float16) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using `torch.bfloat16`_ ```python # pip install accelerate import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto", torch_dtype=torch.bfloat16) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Quantized Versions through `bitsandbytes` * _Using 8-bit precision (int8)_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", quantization_config=quantization_config) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using 4-bit precision_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", quantization_config=quantization_config) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Other optimizations * _Flash Attention 2_ First make sure to install `flash-attn` in your environment `pip install flash-attn` ```diff model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, + attn_implementation="flash_attention_2" ).to(0) ``` ### Inputs and outputs * **Input:** Text string, such as a question, a prompt, or a document to be summarized. * **Output:** Generated English-language text in response to the input, such as an answer to a question, or a summary of a document. ## Model Data Data used for model training and how the data was processed. ### Training Dataset These models were trained on a dataset of text data that includes a wide variety of sources, totaling 6 trillion tokens. Here are the key components: * Web Documents: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. Primarily English-language content. * Code: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code or understand code-related questions.
* Mathematics: Training on mathematical text helps the model learn logical reasoning, symbolic representation, and to address mathematical queries. The combination of these diverse data sources is crucial for training a powerful language model that can handle a wide variety of different tasks and text formats. ### Data Preprocessing Here are the key data cleaning and filtering methods applied to the training data: * CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content * Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets. * Additional methods: Filtering based on content quality and safely in line with [our policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11). ## Implementation Information Details about the model internals. ### Hardware Gemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e). Training large language models requires significant computational power. TPUs, designed specifically for matrix operations common in machine learning, offer several advantages in this domain: * Performance: TPUs are specifically designed to handle the massive computations involved in training LLMs. They can speed up training considerably compared to CPUs. * Memory: TPUs often come with large amounts of high-bandwidth memory, allowing for the handling of large models and batch sizes during training. This can lead to better model quality. * Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for handling the growing complexity of large foundation models. You can distribute training across multiple TPU devices for faster and more efficient processing. * Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective solution for training large models compared to CPU-based infrastructure, especially when considering the time and resources saved due to faster training. * These advantages are aligned with [Google's commitments to operate sustainably](https://sustainability.google/operating-sustainably/). ### Software Training was done using [JAX](https://github.com/google/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/ml-pathways). JAX allows researchers to take advantage of the latest generation of hardware, including TPUs, for faster and more efficient training of large models. ML Pathways is Google's latest effort to build artificially intelligent systems capable of generalizing across multiple tasks. This is specially suitable for [foundation models](https://ai.google/discover/foundation-models/), including large language models like these ones. Together, JAX and ML Pathways are used as described in the [paper about the Gemini family of models](https://arxiv.org/abs/2312.11805); "the 'single controller' programming model of Jax and Pathways allows a single Python process to orchestrate the entire training run, dramatically simplifying the development workflow." ## Evaluation Model evaluation metrics and results. 
### Benchmark Results These models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation: | Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [MMLU](https://arxiv.org/abs/2009.03300) | 5-shot, top-1 | 42.3 | 64.3 | | [HellaSwag](https://arxiv.org/abs/1905.07830) | 0-shot |71.4 | 81.2 | | [PIQA](https://arxiv.org/abs/1911.11641) | 0-shot | 77.3 | 81.2 | | [SocialIQA](https://arxiv.org/abs/1904.09728) | 0-shot | 59.7 | 51.8 | | [BooIQ](https://arxiv.org/abs/1905.10044) | 0-shot | 69.4 | 83.2 | | [WinoGrande](https://arxiv.org/abs/1907.10641) | partial score | 65.4 | 72.3 | | [CommonsenseQA](https://arxiv.org/abs/1811.00937) | 7-shot | 65.3 | 71.3 | | [OpenBookQA](https://arxiv.org/abs/1809.02789) | | 47.8 | 52.8 | | [ARC-e](https://arxiv.org/abs/1911.01547) | | 73.2 | 81.5 | | [ARC-c](https://arxiv.org/abs/1911.01547) | | 42.1 | 53.2 | | [TriviaQA](https://arxiv.org/abs/1705.03551) | 5-shot | 53.2 | 63.4 | | [Natural Questions](https://github.com/google-research-datasets/natural-questions) | 5-shot | - | 23 | | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 22.0 | 32.3 | | [MBPP](https://arxiv.org/abs/2108.07732) | 3-shot | 29.2 | 44.4 | | [GSM8K](https://arxiv.org/abs/2110.14168) | maj@1 | 17.7 | 46.4 | | [MATH](https://arxiv.org/abs/2108.07732) | 4-shot | 11.8 | 24.3 | | [AGIEval](https://arxiv.org/abs/2304.06364) | | 24.2 | 41.7 | | [BIG-Bench](https://arxiv.org/abs/2206.04615) | | 35.2 | 55.1 | | ------------------------------ | ------------- | ----------- | --------- | | **Average** | | **54.0** | **56.4** | ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Text-to-Text Content Safety: Human evaluation on prompts covering safety policies including child sexual abuse and exploitation, harassment, violence and gore, and hate speech. * Text-to-Text Representational Harms: Benchmark against relevant academic datasets such as [WinoBias](https://arxiv.org/abs/1804.06876) and [BBQ Dataset](https://arxiv.org/abs/2110.08193v2). * Memorization: Automated evaluation of memorization of training data, including the risk of personally identifiable information exposure. * Large-scale harm: Tests for "dangerous capabilities," such as chemical, biological, radiological, and nuclear (CBRN) risks. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11) for categories such as child safety, content safety, representational harms, memorization, large-scale harms. On top of robust internal evaluations, the results of well known safety benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA are shown here. 
| Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [RealToxicity](https://arxiv.org/abs/2009.11462) | average | 6.86 | 7.90 | | [BOLD](https://arxiv.org/abs/2101.11718) | | 45.57 | 49.08 | | [CrowS-Pairs](https://aclanthology.org/2020.emnlp-main.154/) | top-1 | 45.82 | 51.33 | | [BBQ Ambig](https://arxiv.org/abs/2110.08193v2) | 1-shot, top-1 | 62.58 | 92.54 | | [BBQ Disambig](https://arxiv.org/abs/2110.08193v2) | top-1 | 54.62 | 71.99 | | [Winogender](https://arxiv.org/abs/1804.09301) | top-1 | 51.25 | 54.17 | | [TruthfulQA](https://arxiv.org/abs/2109.07958) | | 44.84 | 31.81 | | [Winobias 1_2](https://arxiv.org/abs/1804.06876) | | 56.12 | 59.09 | | [Winobias 2_2](https://arxiv.org/abs/1804.06876) | | 91.10 | 92.23 | | [Toxigen](https://arxiv.org/abs/2203.09509) | | 29.77 | 39.59 | | ------------------------------ | ------------- | ----------- | --------- | ## Usage and Limitations These models have certain limitations that users should be aware of. ### Intended Usage Open Large Language Models (LLMs) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. * Content Creation and Communication * Text Generation: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts. * Chatbots and Conversational AI: Power conversational interfaces for customer service, virtual assistants, or interactive applications. * Text Summarization: Generate concise summaries of a text corpus, research papers, or reports. * Research and Education * Natural Language Processing (NLP) Research: These models can serve as a foundation for researchers to experiment with NLP techniques, develop algorithms, and contribute to the advancement of the field. * Language Learning Tools: Support interactive language learning experiences, aiding in grammar correction or providing writing practice. * Knowledge Exploration: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics. ### Limitations * Training Data * The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses. * The scope of the training dataset determines the subject areas the model can handle effectively. * Context and Task Complexity * LLMs are better at tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging. * A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point). * Language Ambiguity and Nuance * Natural language is inherently complex. LLMs might struggle to grasp subtle nuances, sarcasm, or figurative language. * Factual Accuracy * LLMs generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements. * Common Sense * LLMs rely on statistical patterns in language. They might lack the ability to apply common sense reasoning in certain situations. 
### Ethical Considerations and Risks The development of large language models (LLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following: * Bias and Fairness * LLMs trained on large-scale, real-world text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny, input data pre-processing described and posterior evaluations reported in this card. * Misinformation and Misuse * LLMs can be misused to generate text that is false, misleading, or harmful. * Guidelines are provided for responsible use with the model, see the [Responsible Generative AI Toolkit](http://ai.google.dev/gemma/responsible). * Transparency and Accountability: * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes. * A responsibly developed open model offers the opportunity to share innovation by making LLM technology accessible to developers and researchers across the AI ecosystem. Risks identified and mitigations: * Perpetuation of biases: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases. * Generation of harmful content: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases. * Misuse for malicious purposes: Technical limitations and developer and end-user education can help mitigate against malicious applications of LLMs. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy). * Privacy violations: Models were trained on data filtered for removal of PII (Personally Identifiable Information). Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques. ### Benefits At the time of release, this family of models provides high-performance open large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models. Using the benchmark evaluation metrics described in this document, these models have shown to provide superior performance to other, comparably-sized open model alternatives.
{}
task
[ "QUESTION_ANSWERING", "SUMMARIZATION" ]
40,299
lucienbaumgartner/setfit-paraphrase-mpnet-base-v2-sst2
lucienbaumgartner
text-classification
[ "setfit", "safetensors", "mpnet", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "base_model:sentence-transformers/paraphrase-mpnet-base-v2", "base_model:finetune:sentence-transformers/paraphrase-mpnet-base-v2", "model-index", "region:us" ]
2024-04-12T11:57:23Z
2024-04-12T11:57:44+00:00
6
0
--- base_model: sentence-transformers/paraphrase-mpnet-base-v2 library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: 'this is a story of two misfits who do n''t stand a chance alone , but together they are magnificent . ' - text: 'it does n''t believe in itself , it has no sense of humor ... it ''s just plain bored . ' - text: 'the band ''s courage in the face of official repression is inspiring , especially for aging hippies ( this one included ) . ' - text: 'a fast , funny , highly enjoyable movie . ' - text: 'the movie achieves as great an impact by keeping these thoughts hidden as ... ( quills ) did by showing them . ' inference: true model-index: - name: SetFit with sentence-transformers/paraphrase-mpnet-base-v2 results: - task: type: text-classification name: Text Classification dataset: name: Unknown type: unknown split: test metrics: - type: accuracy value: 0.8562176165803109 name: Accuracy --- # SetFit with sentence-transformers/paraphrase-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:---------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | negative | <ul><li>'stale and uninspired . '</li><li>"the film 's considered approach to its subject matter is too calm and thoughtful for agitprop , and the thinness of its characterizations makes it a failure as straight drama . 
' "</li><li>"that their charm does n't do a load of good "</li></ul> | | positive | <ul><li>"broomfield is energized by volletta wallace 's maternal fury , her fearlessness "</li><li>'flawless '</li><li>'insightfully written , delicately performed '</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.8562 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("setfit_model_id") # Run inference preds = model("a fast , funny , highly enjoyable movie . ") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 2 | 11.4375 | 33 | | Label | Training Sample Count | |:---------|:----------------------| | negative | 8 | | positive | 8 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (4, 4) - max_steps: -1 - sampling_strategy: oversampling - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: True ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:-------:|:------:|:-------------:|:---------------:| | 0.1111 | 1 | 0.2235 | - | | 1.0 | 9 | - | 0.2204 | | 2.0 | 18 | - | 0.1786 | | **3.0** | **27** | **-** | **0.1728** | | 4.0 | 36 | - | 0.1754 | * The bold row denotes the saved checkpoint. ### Framework Versions - Python: 3.12.1 - SetFit: 1.0.3 - Sentence Transformers: 2.6.1 - Transformers: 4.39.3 - PyTorch: 2.2.2 - Datasets: 2.18.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit with sentence-transformers/paraphrase-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:---------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | negative | <ul><li>'stale and uninspired . '</li><li>"the film 's considered approach to its subject matter is too calm and thoughtful for agitprop , and the thinness of its characterizations makes it a failure as straight drama . ' "</li><li>"that their charm does n't do a load of good "</li></ul> | | positive | <ul><li>"broomfield is energized by volletta wallace 's maternal fury , her fearlessness "</li><li>'flawless '</li><li>'insightfully written , delicately performed '</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.8562 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("setfit_model_id") # Run inference preds = model("a fast , funny , highly enjoyable movie . ") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 2 | 11.4375 | 33 | | Label | Training Sample Count | |:---------|:----------------------| | negative | 8 | | positive | 8 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (4, 4) - max_steps: -1 - sampling_strategy: oversampling - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: True ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:-------:|:------:|:-------------:|:---------------:| | 0.1111 | 1 | 0.2235 | - | | 1.0 | 9 | - | 0.2204 | | 2.0 | 18 | - | 0.1786 | | **3.0** | **27** | **-** | **0.1728** | | 4.0 | 36 | - | 0.1754 | * The bold row denotes the saved checkpoint. ### Framework Versions - Python: 3.12.1 - SetFit: 1.0.3 - Sentence Transformers: 2.6.1 - Transformers: 4.39.3 - PyTorch: 2.2.2 - Datasets: 2.18.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
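As a complement to the inference snippet and the hyperparameters listed above, the following is a rough training sketch showing how a SetFit model like this one is typically produced with the `setfit` 1.0.x API (contrastive fine-tuning of the sentence-transformer body plus a logistic-regression head). The dataset identifier `SetFit/sst2` and its column names are assumptions chosen for illustration; they are not a record of the original training run.

```python
# pip install setfit datasets   (assumed environment)
from datasets import load_dataset
from setfit import SetFitModel, Trainer, TrainingArguments, sample_dataset

# Assumed few-shot setup: 8 labeled examples per class, mirroring the card's training set metrics
dataset = load_dataset("SetFit/sst2")
train_dataset = sample_dataset(dataset["train"], label_column="label", num_samples=8)
eval_dataset = dataset["validation"]

# Same embedding body as this card; the classification head defaults to LogisticRegression
model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")

# Hyperparameters mirroring the "Training Hyperparameters" section above
args = TrainingArguments(
    batch_size=16,
    num_epochs=4,
    body_learning_rate=2e-5,
    head_learning_rate=0.01,
    load_best_model_at_end=True,
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)
trainer.train()
print(trainer.evaluate())  # reports accuracy by default

# The fine-tuned model can then be loaded by its hub id, e.g.
# SetFitModel.from_pretrained("lucienbaumgartner/setfit-paraphrase-mpnet-base-v2-sst2")
```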
{"base_model": "sentence-transformers/paraphrase-mpnet-base-v2", "library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "this is a story of two misfits who do n't stand a chance alone , but together they are magnificent . "}, {"text": "it does n't believe in itself , it has no sense of humor ... it 's just plain bored . "}, {"text": "the band 's courage in the face of official repression is inspiring , especially for aging hippies ( this one included ) . "}, {"text": "a fast , funny , highly enjoyable movie . "}, {"text": "the movie achieves as great an impact by keeping these thoughts hidden as ... ( quills ) did by showing them . "}], "inference": true, "model-index": [{"name": "SetFit with sentence-transformers/paraphrase-mpnet-base-v2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "Unknown", "type": "unknown", "split": "test"}, "metrics": [{"type": "accuracy", "value": 0.8562176165803109, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,300
tonyc666/text_summarization-Q4_K_M-GGUF
tonyc666
summarization
[ "gguf", "llama-cpp", "gguf-my-repo", "summarization", "en", "base_model:Falconsai/text_summarization", "base_model:quantized:Falconsai/text_summarization", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2025-01-29T10:16:31Z
2025-01-29T10:16:33+00:00
14
0
--- base_model: Falconsai/text_summarization language: - en license: apache-2.0 pipeline_tag: summarization tags: - llama-cpp - gguf-my-repo widget: - text: 'Hugging Face: Revolutionizing Natural Language Processing Introduction In the rapidly evolving field of Natural Language Processing (NLP), Hugging Face has emerged as a prominent and innovative force. This article will explore the story and significance of Hugging Face, a company that has made remarkable contributions to NLP and AI as a whole. From its inception to its role in democratizing AI, Hugging Face has left an indelible mark on the industry. The Birth of Hugging Face Hugging Face was founded in 2016 by Clément Delangue, Julien Chaumond, and Thomas Wolf. The name Hugging Face was chosen to reflect the company''s mission of making AI models more accessible and friendly to humans, much like a comforting hug. Initially, they began as a chatbot company but later shifted their focus to NLP, driven by their belief in the transformative potential of this technology. Transformative Innovations Hugging Face is best known for its open-source contributions, particularly the Transformers library. This library has become the de facto standard for NLP and enables researchers, developers, and organizations to easily access and utilize state-of-the-art pre-trained language models, such as BERT, GPT-3, and more. These models have countless applications, from chatbots and virtual assistants to language translation and sentiment analysis. ' example_title: Summarization Example 1 --- # tonyc666/text_summarization-Q4_K_M-GGUF This model was converted to GGUF format from [`Falconsai/text_summarization`](https://huggingface.co/Falconsai/text_summarization) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/Falconsai/text_summarization) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo tonyc666/text_summarization-Q4_K_M-GGUF --hf-file text_summarization-q4_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo tonyc666/text_summarization-Q4_K_M-GGUF --hf-file text_summarization-q4_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo tonyc666/text_summarization-Q4_K_M-GGUF --hf-file text_summarization-q4_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo tonyc666/text_summarization-Q4_K_M-GGUF --hf-file text_summarization-q4_k_m.gguf -c 2048 ```
null
Non_BioNLP
# tonyc666/text_summarization-Q4_K_M-GGUF This model was converted to GGUF format from [`Falconsai/text_summarization`](https://huggingface.co/Falconsai/text_summarization) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/Falconsai/text_summarization) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo tonyc666/text_summarization-Q4_K_M-GGUF --hf-file text_summarization-q4_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo tonyc666/text_summarization-Q4_K_M-GGUF --hf-file text_summarization-q4_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo tonyc666/text_summarization-Q4_K_M-GGUF --hf-file text_summarization-q4_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo tonyc666/text_summarization-Q4_K_M-GGUF --hf-file text_summarization-q4_k_m.gguf -c 2048 ```
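For comparison with the llama.cpp workflow above, the original, un-quantized checkpoint referenced by this repo can also be run directly through the 🤗 Transformers summarization pipeline. A minimal sketch (the generation lengths are illustrative values, not tuned settings):

```python
# pip install transformers   (assumed environment)
from transformers import pipeline

# Load the original (non-GGUF) checkpoint this quantization was converted from
summarizer = pipeline("summarization", model="Falconsai/text_summarization")

article = (
    "Hugging Face has become a central hub for Natural Language Processing, "
    "best known for its open-source Transformers library and its large catalogue "
    "of pre-trained models used for tasks such as summarization and translation."
)
print(summarizer(article, max_length=60, min_length=10, do_sample=False)[0]["summary_text"])
```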
{"base_model": "Falconsai/text_summarization", "language": ["en"], "license": "apache-2.0", "pipeline_tag": "summarization", "tags": ["llama-cpp", "gguf-my-repo"], "widget": [{"text": "Hugging Face: Revolutionizing Natural Language Processing Introduction In the rapidly evolving field of Natural Language Processing (NLP), Hugging Face has emerged as a prominent and innovative force. This article will explore the story and significance of Hugging Face, a company that has made remarkable contributions to NLP and AI as a whole. From its inception to its role in democratizing AI, Hugging Face has left an indelible mark on the industry. The Birth of Hugging Face Hugging Face was founded in 2016 by Clément Delangue, Julien Chaumond, and Thomas Wolf. The name Hugging Face was chosen to reflect the company's mission of making AI models more accessible and friendly to humans, much like a comforting hug. Initially, they began as a chatbot company but later shifted their focus to NLP, driven by their belief in the transformative potential of this technology. Transformative Innovations Hugging Face is best known for its open-source contributions, particularly the Transformers library. This library has become the de facto standard for NLP and enables researchers, developers, and organizations to easily access and utilize state-of-the-art pre-trained language models, such as BERT, GPT-3, and more. These models have countless applications, from chatbots and virtual assistants to language translation and sentiment analysis. "}], "example_title": "Summarization Example 1"}
task
[ "TRANSLATION", "SUMMARIZATION" ]
40,301
allenai/OLMo-7B-Instruct-hf
allenai
text-generation
[ "transformers", "pytorch", "safetensors", "olmo", "text-generation", "conversational", "en", "dataset:allenai/dolma", "dataset:allenai/tulu-v2-sft-mixture", "dataset:allenai/ultrafeedback_binarized_cleaned", "arxiv:2402.00838", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-06-04T19:54:24Z
2024-10-26T00:22:12+00:00
2,173
3
--- datasets: - allenai/dolma - allenai/tulu-v2-sft-mixture - allenai/ultrafeedback_binarized_cleaned language: - en license: apache-2.0 --- <img src="https://allenai.org/olmo/olmo-7b-animation.gif" alt="OLMo Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Model Card for OLMo 7B Instruct <!-- Provide a quick summary of what the model is/does. --> OLMo is a series of **O**pen **L**anguage **Mo**dels designed to enable the science of language models. The OLMo base models are trained on the [Dolma](https://huggingface.co/datasets/allenai/dolma) dataset. The adapted versions are trained on the [Tulu SFT mixture](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture) and, for the Instruct version, a [cleaned version of the UltraFeedback dataset](https://huggingface.co/datasets/allenai/ultrafeedback_binarized_cleaned). We release all code, checkpoints, logs, and details involved in training these models. OLMo 7B Instruct and OLMo SFT are two adapted versions of these models trained for better question answering. They show the performance gain that OLMo base models can achieve with existing fine-tuning techniques. **This version is for direct use with HuggingFace Transformers** from v4.40 on. **Note that this is the instruct variant of the original OLMo release - there are newer (better) versions of OLMo out now! Check [here](https://huggingface.co/collections/allenai/olmo-suite-65aeaae8fe5b6b2122b46778) for newer releases.** ## Model Details We release two adapted model versions: The base models related to this adapted model are the following: | Model | Training Method(s) | Datasets | Context Length | |------|--------|---------|--| | [OLMo 7B SFT](https://huggingface.co/allenai/OLMo-7B-SFT) | SFT | [Tulu 2 SFT Mix](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture) | 2048 | | [OLMo 7B Instruct](https://huggingface.co/allenai/OLMo-7B-Instruct) | SFT + DPO | [Tulu 2 SFT Mix](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture) + [Ultrafeedback Cleaned](https://huggingface.co/datasets/allenai/ultrafeedback_binarized_cleaned) | 2048 | The base models related to this adapted model are the following: | Size | Training Tokens | Layers | Hidden Size | Attention Heads | Context Length | |------|--------|---------|-------------|-----------------|----------------| | [OLMo 1B](https://huggingface.co/allenai/OLMo-1B) | 3 Trillion |16 | 2048 | 16 | 2048 | | [OLMo 7B](https://huggingface.co/allenai/OLMo-7B) | 2.5 Trillion | 32 | 4096 | 32 | 2048 | | [OLMo 7B Twin 2T](https://huggingface.co/allenai/OLMo-7B-Twin-2T) | 2 Trillion | 32 | 4096 | 32 | 2048 | ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** Allen Institute for AI (AI2) - **Supported by:** Databricks, Kempner Institute for the Study of Natural and Artificial Intelligence at Harvard University, AMD, CSC (Lumi Supercomputer), UW - **Model type:** a Transformer style autoregressive language model. - **Language(s) (NLP):** English - **License:** The code and model are released under Apache 2.0. - **Contact:** Technical inquiries: `olmo at allenai dot org`. Press: `press at allenai dot org` - **Date cutoff:** Feb./March 2023 based on Dolma dataset version. ### Model Sources <!-- Provide the basic links for the model. 
--> - **Project Page:** https://allenai.org/olmo - **Repositories:** - Core repo (training, inference, fine-tuning etc.): https://github.com/allenai/OLMo - Evaluation code: https://github.com/allenai/OLMo-Eval - Further fine-tuning code: https://github.com/allenai/open-instruct - **Paper:** [Link](https://arxiv.org/abs/2402.00838) - **Technical blog post:** https://blog.allenai.org/olmo-open-language-model-87ccfc95f580 - **W&B Logs:** https://wandb.ai/ai2-llm/OLMo-7B/reports/OLMo-7B--Vmlldzo2NzQyMzk5 <!-- - **Press release:** TODO --> ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Inference You can load and run this model as usual so long as your HuggingFace version is >= 4.40: ```python from transformers import AutoModelForCausalLM, AutoTokenizer olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-7B-Instruct-hf") tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B-Instruct-hf") messages = [{"role": "user", "content": "What is 2+2?"}] inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") # optionally move to cuda # inputs = inputs.to('cuda') # olmo = olmo.to('cuda') response = olmo.generate(input_ids=inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95) print(tokenizer.batch_decode(response, skip_special_tokens=True)[0]) >> '2+2=4' ``` ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> Core model results for the 7B adapted models are found below. | Model | MMLU 0-shot ↑ | AlpacaEval %win ↑ | ToxiGen % Toxic ↓ | TruthfulQA %Info+True ↑ | |-----------------------|---------------|--------------------|--------------------|-------------------------| | **OLMo (base)** | 28.3 | - | 81.4 | 31.6 | | MPT Chat | 33.8 | 46.8 | 0.1 | 42.7 | | Falcon Instruct | 25.2 | 14.0 | 70.7 | 27.2 | | RPJ-INCITE Chat | 27.0 | 38.0 | 46.4 | 53.0 | | Llama-2-Chat 7B | 46.8 | 87.3 | 0.0 | 26.3 | | AI2 Tulu 2 7B | 50.4 | 73.9 | 7.0 | 51.7 | | AI2 Tulu 2 7B DPO | 50.7 | 85.1 | 0.5 | - * | | **[OLMo 7B SFT](https://huggingface.co/allenai/OLMo-7B-SFT)** | 47.3 | 57.0 | 14.4 | 41.2 | | **[OLMo 7B Instruct](https://huggingface.co/allenai/OLMo-7B-Instruct)** | 46.2 | 69.3 | 1.7 | 52.0 | *Following Ivison et al. 2023, we do not report Tulu 2 TruthfulQA scores due to test set contamination. ## Model Details ### Data For training data details, please see the [Dolma](https://huggingface.co/datasets/allenai/dolma), [Tulu 2](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture), and [UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) documentation. ### Architecture ### Hyperparameters The hyperparameters for the two phases of training are below: | | Learning Rate | Beta | Epochs | Warmup | Weight Decay | Gradient Clipping | Maximum Sequence Length | |-------------------------|---------------|------|--------|------------------------------------------------------------------------|--------------|-------------------|-------------------------| | **SFT** | 2 × 10^-6 | N/A | 3 | Linear warmup for the first 3% of total training time, then cooldown to 0 | 0 | 0 | 2048 | | **DPO** | 5 × 10^-7 | 0.1 | 3 | Linear warmup for the first 10% of total training time, then cooldown to 0| 0 | 0 | 2048 | Compared to Tulu 2, DPO hyperparameters are the same. SFT is lower LR and 3 epochs instead of 2 (and 2k length instead of 8k).
## Bias, Risks, and Limitations This adapted OLMo model is a research artifact. It is intended to benefit the research community interested in understanding the safety properties of LLMs and developers building safety tools for LLMs. For this reason, the model does not include a specific safety filter or safety training data. While our model scores well relative to its peers on ToxiGen, it is possible for the model to generate harmful and sensitive content from some user prompts. We recommend developers exercise caution and consider the risks of the applications of this technology. Furthermore, developers should consider implementing safeguards for biases, privacy, and other potential harms when appropriate. Finally, as with every LLM, OLMo may produce factual-sounding outputs that may not be true, so developers and users are encouraged to confirm such outputs before relying on them. All users of this model are responsible for how they use the model. ## Citation **BibTeX:** ``` @article{Groeneveld2023OLMo, title={OLMo: Accelerating the Science of Language Models}, author={Groeneveld, Dirk and Beltagy, Iz and Walsh, Pete and Bhagia, Akshita and Kinney, Rodney and Tafjord, Oyvind and Jha, Ananya Harsh and Ivison, Hamish and Magnusson, Ian and Wang, Yizhong and Arora, Shane and Atkinson, David and Authur, Russell and Chandu, Khyathi and Cohan, Arman and Dumas, Jennifer and Elazar, Yanai and Gu, Yuling and Hessel, Jack and Khot, Tushar and Merrill, William and Morrison, Jacob and Muennighoff, Niklas and Naik, Aakanksha and Nam, Crystal and Peters, Matthew E. and Pyatkin, Valentina and Ravichander, Abhilasha and Schwenk, Dustin and Shah, Saurabh and Smith, Will and Subramani, Nishant and Wortsman, Mitchell and Dasigi, Pradeep and Lambert, Nathan and Richardson, Kyle and Dodge, Jesse and Lo, Kyle and Soldaini, Luca and Smith, Noah A. and Hajishirzi, Hannaneh}, journal={Preprint}, year={2024} } ``` **APA:** Groeneveld, D., Beltagy, I., Walsh, P., Bhagia, A., Kinney, R., Tafjord, O., Jha, A., Ivison, H., Magnusson, I., Wang, Y., Arora, S., Atkinson, D., Authur, R., Chandu, K., Cohan, A., Dumas, J., Elazar, Y., Gu, Y., Hessel, J., Khot, T., Merrill, W., Morrison, J., Muennighoff, N., Naik, A., Nam, C., Peters, M., Pyatkin, V., Ravichander, A., Schwenk, D., Shah, S., Smith, W., Subramani, N., Wortsman, M., Dasigi, P., Lambert, N., Richardson, K., Dodge, J., Lo, K., Soldaini, L., Smith, N., & Hajishirzi, H. (2024). OLMo: Accelerating the Science of Language Models. Preprint. ## Model Card Contact For errors in this model card, contact Nathan or Jacob, `{nathanl, jacobm} at allenai dot org`.
null
Non_BioNLP
<img src="https://allenai.org/olmo/olmo-7b-animation.gif" alt="OLMo Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Model Card for OLMo 7B Instruct <!-- Provide a quick summary of what the model is/does. --> OLMo is a series of **O**pen **L**anguage **Mo**dels designed to enable the science of language models. The OLMo base models are trained on the [Dolma](https://huggingface.co/datasets/allenai/dolma) dataset. The adapted versions are trained on the [Tulu SFT mixture](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture) and, for the Instruct version, a [cleaned version of the UltraFeedback dataset](https://huggingface.co/datasets/allenai/ultrafeedback_binarized_cleaned). We release all code, checkpoints, logs, and details involved in training these models. OLMo 7B Instruct and OLMo SFT are two adapted versions of these models trained for better question answering. They show the performance gain that OLMo base models can achieve with existing fine-tuning techniques. **This version is for direct use with HuggingFace Transformers** from v4.40 on. **Note that this is the instruct variant of the original OLMo release - there are newer (better) versions of OLMo out now! Check [here](https://huggingface.co/collections/allenai/olmo-suite-65aeaae8fe5b6b2122b46778) for newer releases.** ## Model Details We release two adapted model versions: The base models related to this adapted model are the following: | Model | Training Method(s) | Datasets | Context Length | |------|--------|---------|--| | [OLMo 7B SFT](https://huggingface.co/allenai/OLMo-7B-SFT) | SFT | [Tulu 2 SFT Mix](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture) | 2048 | | [OLMo 7B Instruct](https://huggingface.co/allenai/OLMo-7B-Instruct) | SFT + DPO | [Tulu 2 SFT Mix](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture) + [Ultrafeedback Cleaned](https://huggingface.co/datasets/allenai/ultrafeedback_binarized_cleaned) | 2048 | The base models related to this adapted model are the following: | Size | Training Tokens | Layers | Hidden Size | Attention Heads | Context Length | |------|--------|---------|-------------|-----------------|----------------| | [OLMo 1B](https://huggingface.co/allenai/OLMo-1B) | 3 Trillion |16 | 2048 | 16 | 2048 | | [OLMo 7B](https://huggingface.co/allenai/OLMo-7B) | 2.5 Trillion | 32 | 4096 | 32 | 2048 | | [OLMo 7B Twin 2T](https://huggingface.co/allenai/OLMo-7B-Twin-2T) | 2 Trillion | 32 | 4096 | 32 | 2048 | ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** Allen Institute for AI (AI2) - **Supported by:** Databricks, Kempner Institute for the Study of Natural and Artificial Intelligence at Harvard University, AMD, CSC (Lumi Supercomputer), UW - **Model type:** a Transformer style autoregressive language model. - **Language(s) (NLP):** English - **License:** The code and model are released under Apache 2.0. - **Contact:** Technical inquiries: `olmo at allenai dot org`. Press: `press at allenai dot org` - **Date cutoff:** Feb./March 2023 based on Dolma dataset version. ### Model Sources <!-- Provide the basic links for the model. 
-->

- **Project Page:** https://allenai.org/olmo
- **Repositories:**
    - Core repo (training, inference, fine-tuning etc.): https://github.com/allenai/OLMo
    - Evaluation code: https://github.com/allenai/OLMo-Eval
    - Further fine-tuning code: https://github.com/allenai/open-instruct
- **Paper:** [Link](https://arxiv.org/abs/2402.00838)
- **Technical blog post:** https://blog.allenai.org/olmo-open-language-model-87ccfc95f580
- **W&B Logs:** https://wandb.ai/ai2-llm/OLMo-7B/reports/OLMo-7B--Vmlldzo2NzQyMzk5
<!-- - **Press release:** TODO -->

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Inference

You can load and run this model as usual so long as your HuggingFace version is >= 4.40:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-7B-Instruct-hf")
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B-Instruct-hf")
messages = [{"role": "user", "content": "What is 2+2?"}]
# return_dict=True makes apply_chat_template return a dict of tensors that can be passed to generate()
inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt")
# optional verifying cuda
# inputs = {k: v.to('cuda') for k,v in inputs.items()}
# olmo = olmo.to('cuda')
response = olmo.generate(**inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
print(tokenizer.batch_decode(response, skip_special_tokens=True)[0])
>> '2+2=4'
```

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

Core model results for the 7B adapted models are found below.

| Model | MMLU 0-shot ↑ | AlpacaEval %win ↑ | ToxiGen % Toxic ↓ | TruthfulQA %Info+True ↑ |
|-----------------------|---------------|--------------------|--------------------|-------------------------|
| **OLMo (base)** | 28.3 | - | 81.4 | 31.6 |
| MPT Chat | 33.8 | 46.8 | 0.1 | 42.7 |
| Falcon Instruct | 25.2 | 14.0 | 70.7 | 27.2 |
| RPJ-INCITE Chat | 27.0 | 38.0 | 46.4 | 53.0 |
| Llama-2-Chat 7B | 46.8 | 87.3 | 0.0 | 26.3 |
| AI2 Tulu 2 7B | 50.4 | 73.9 | 7.0 | 51.7 |
| AI2 Tulu 2 7B DPO | 50.7 | 85.1 | 0.5 | - * |
| **[OLMo 7B SFT](https://huggingface.co/allenai/OLMo-7B-SFT)** | 47.3 | 57.0 | 14.4 | 41.2 |
| **[OLMo 7B Instruct](https://huggingface.co/allenai/OLMo-7B-Instruct)** | 46.2 | 69.3 | 1.7 | 52.0 |

*Following Ivison et al. 2023, we do not report Tulu 2 TruthfulQA scores due to test set contamination.

## Model Details

### Data

For training data details, please see the [Dolma](https://huggingface.co/datasets/allenai/dolma), [Tulu 2](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture), and [UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) documentation.

### Architecture

### Hyperparameters

The hyperparameters for the two phases of training are below:

| | Learning Rate | Beta | Epochs | Warmup | Weight Decay | Gradient Clipping | Maximum Sequence Length |
|-------------------------|---------------|------|--------|------------------------------------------------------------------------|--------------|-------------------|-------------------------|
| **SFT** | 2 × 10^-6 | N/A | 3 | Linear warmup for the first 3% of total training time, then cooldown to 0 | 0 | 0 | 2048 |
| **DPO** | 5 × 10^-7 | 0.1 | 3 | Linear warmup for the first 10% of total training time, then cooldown to 0 | 0 | 0 | 2048 |

Compared to Tulu 2, DPO hyperparameters are the same. SFT is lower LR and 3 epochs instead of 2 (and 2k length instead of 8k).
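
To make the DPO settings above concrete, here is a minimal, illustrative sketch of how they could be expressed with the TRL library. This is not the training script used for OLMo 7B Instruct; the dataset split, output path, and exact `DPOConfig`/`DPOTrainer` argument names are assumptions and may differ across TRL versions.

```python
# Hypothetical sketch: wiring the DPO hyperparameters from the table above into TRL.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_name = "allenai/OLMo-7B-SFT"  # DPO starts from the SFT checkpoint
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Preference data with prompt / chosen / rejected pairs; split name is an assumption
dataset = load_dataset("allenai/ultrafeedback_binarized_cleaned", split="train_prefs")

config = DPOConfig(
    output_dir="olmo-7b-dpo",   # placeholder path
    learning_rate=5e-7,         # DPO learning rate from the table
    beta=0.1,                   # DPO beta from the table
    num_train_epochs=3,
    warmup_ratio=0.1,           # linear warmup for the first 10% of training
    weight_decay=0.0,
    lr_scheduler_type="linear",
    max_length=2048,            # maximum sequence length
)

trainer = DPOTrainer(
    model=model,
    args=config,
    train_dataset=dataset,
    processing_class=tokenizer,  # called `tokenizer=` in older TRL releases
)
trainer.train()
```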
## Bias, Risks, and Limitations This adapted OLMo model is a research artifact. It is intended to benefit the research community interested in understanding the safety properties of LLMs and developers building safety tools for LLMs. For this reason, the model does not include a specific safety filter or safety training data. While our model scores well relative to its peers on ToxiGen, it is possible for the model to generate harmful and sensitive content from some user prompts. We recommend developers exercise caution and consider the risks of the applications of this technology. Furthermore, developers should consider implementing safeguards for biases, privacy, and other potential harms when appropriate. Finally, as with every LLM, OLMo may produce factual-sounding outputs that may not be true, so developers and users are encouraged to confirm such outputs before relying on them. All users of this model are responsible for how they use the model. ## Citation **BibTeX:** ``` @article{Groeneveld2023OLMo, title={OLMo: Accelerating the Science of Language Models}, author={Groeneveld, Dirk and Beltagy, Iz and Walsh, Pete and Bhagia, Akshita and Kinney, Rodney and Tafjord, Oyvind and Jha, Ananya Harsh and Ivison, Hamish and Magnusson, Ian and Wang, Yizhong and Arora, Shane and Atkinson, David and Authur, Russell and Chandu, Khyathi and Cohan, Arman and Dumas, Jennifer and Elazar, Yanai and Gu, Yuling and Hessel, Jack and Khot, Tushar and Merrill, William and Morrison, Jacob and Muennighoff, Niklas and Naik, Aakanksha and Nam, Crystal and Peters, Matthew E. and Pyatkin, Valentina and Ravichander, Abhilasha and Schwenk, Dustin and Shah, Saurabh and Smith, Will and Subramani, Nishant and Wortsman, Mitchell and Dasigi, Pradeep and Lambert, Nathan and Richardson, Kyle and Dodge, Jesse and Lo, Kyle and Soldaini, Luca and Smith, Noah A. and Hajishirzi, Hannaneh}, journal={Preprint}, year={2024} } ``` **APA:** Groeneveld, D., Beltagy, I., Walsh, P., Bhagia, A., Kinney, R., Tafjord, O., Jha, A., Ivison, H., Magnusson, I., Wang, Y., Arora, S., Atkinson, D., Authur, R., Chandu, K., Cohan, A., Dumas, J., Elazar, Y., Gu, Y., Hessel, J., Khot, T., Merrill, W., Morrison, J., Muennighoff, N., Naik, A., Nam, C., Peters, M., Pyatkin, V., Ravichander, A., Schwenk, D., Shah, S., Smith, W., Subramani, N., Wortsman, M., Dasigi, P., Lambert, N., Richardson, K., Dodge, J., Lo, K., Soldaini, L., Smith, N., & Hajishirzi, H. (2024). OLMo: Accelerating the Science of Language Models. Preprint. ## Model Card Contact For errors in this model card, contact Nathan or Jacob, `{nathanl, jacobm} at allenai dot org`.
{"datasets": ["allenai/dolma", "allenai/tulu-v2-sft-mixture", "allenai/ultrafeedback_binarized_cleaned"], "language": ["en"], "license": "apache-2.0"}
task
[ "QUESTION_ANSWERING" ]
40,302
google/t5-efficient-small-el2
google
text2text-generation
[ "transformers", "pytorch", "tf", "jax", "t5", "text2text-generation", "deep-narrow", "en", "dataset:c4", "arxiv:2109.10686", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
2022-03-02T23:29:05Z
2023-01-24T16:48:54+00:00
115
0
--- datasets: - c4 language: - en license: apache-2.0 tags: - deep-narrow inference: false --- # T5-Efficient-SMALL-EL2 (Deep-Narrow version) T5-Efficient-SMALL-EL2 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-small-el2** - is of model type **Small** with the following variations: - **el** is **2** It has **47.93** million parameters and thus requires *ca.* **191.72 MB** of memory in full precision (*fp32*) or **95.86 MB** of memory in half precision (*fp16* or *bf16*). 
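
As a quick sanity check of the parameter count quoted above, the checkpoint can be loaded with Transformers and its parameters counted directly. This is a small illustrative snippet, not part of the original release; the memory figures use decimal megabytes, matching the numbers above.

```python
from transformers import T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-small-el2")

num_params = sum(p.numel() for p in model.parameters())
fp32_mb = num_params * 4 / 1e6  # 4 bytes per parameter in full precision
fp16_mb = num_params * 2 / 1e6  # 2 bytes per parameter in half precision

print(f"parameters: {num_params / 1e6:.2f}M")  # expected to be close to 47.93M
print(f"fp32 memory: {fp32_mb:.2f} MB, fp16/bf16 memory: {fp16_mb:.2f} MB")
```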
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.
You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
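
To complement the fine-tuning pointers in the Fine-Tuning section above, here is a minimal sketch of preparing a single summarization example for this encoder-decoder checkpoint. The task prefix, placeholder texts, and length limits are illustrative choices, not values prescribed by the original authors.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/t5-efficient-small-el2")

document = "The quick brown fox jumps over the lazy dog. " * 10  # placeholder article
summary = "A fox jumps over a dog."                              # placeholder target

# T5-style task prefix; the exact prefix is a convention, not a requirement
inputs = tokenizer(
    "summarize: " + document,
    max_length=512,
    truncation=True,
    return_tensors="pt",
)
labels = tokenizer(text_target=summary, max_length=64, truncation=True, return_tensors="pt")
inputs["labels"] = labels["input_ids"]

# `inputs` can now be fed to T5ForConditionalGeneration inside a training loop,
# or the same logic can be applied as a preprocessing function for Seq2SeqTrainer.
```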
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
null
Non_BioNLP
# T5-Efficient-SMALL-EL2 (Deep-Narrow version) T5-Efficient-SMALL-EL2 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-small-el2** - is of model type **Small** with the following variations: - **el** is **2** It has **47.93** million parameters and thus requires *ca.* **191.72 MB** of memory in full precision (*fp32*) or **95.86 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.
You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
{"datasets": ["c4"], "language": ["en"], "license": "apache-2.0", "tags": ["deep-narrow"], "inference": false}
task
[ "TEXT_CLASSIFICATION", "QUESTION_ANSWERING", "SUMMARIZATION" ]
40,303
helinivan/english-sarcasm-detector
helinivan
text-classification
[ "transformers", "pytorch", "bert", "text-classification", "sarcasm-detection", "en", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-11-04T18:41:55Z
2022-12-04T20:04:44+00:00
8,012
11
--- language: en tags: - bert - sarcasm-detection - text-classification widget: - text: CIA Realizes It's Been Using Black Highlighters All These Years. --- # English Sarcasm Detector English Sarcasm Detector is a text classification model built to detect sarcasm from news article titles. It is fine-tuned on [bert-base-uncased](https://huggingface.co/bert-base-uncased) and the training data consists of ready-made dataset available on Kaggle. <b>Labels</b>: 0 -> Not Sarcastic; 1 -> Sarcastic ## Source Data Datasets: - English language data: [Kaggle: News Headlines Dataset For Sarcasm Detection](https://www.kaggle.com/datasets/rmisra/news-headlines-dataset-for-sarcasm-detection). ## Training Dataset - [helinivan/sarcasm_headlines_multilingual](https://huggingface.co/datasets/helinivan/sarcasm_headlines_multilingual) ## Codebase: - Git Repo: [Official repository](https://github.com/helinivan/multilingual-sarcasm-detector). --- ## Example of classification ```python from transformers import AutoModelForSequenceClassification from transformers import AutoTokenizer import string def preprocess_data(text: str) -> str: return text.lower().translate(str.maketrans("", "", string.punctuation)).strip() MODEL_PATH = "helinivan/english-sarcasm-detector" tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH) model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH) text = "CIA Realizes It's Been Using Black Highlighters All These Years." tokenized_text = tokenizer([preprocess_data(text)], padding=True, truncation=True, max_length=256, return_tensors="pt") output = model(**tokenized_text) probs = output.logits.softmax(dim=-1).tolist()[0] confidence = max(probs) prediction = probs.index(confidence) results = {"is_sarcastic": prediction, "confidence": confidence} ``` Output: ``` {'is_sarcastic': 1, 'confidence': 0.9337034225463867} ``` ## Performance | Model-Name | F1 | Precision | Recall | Accuracy | ------------- |:-------------| -----| -----| ----| | [helinivan/english-sarcasm-detector ](https://huggingface.co/helinivan/english-sarcasm-detector)| **92.38** | 92.75 | 92.38 | 92.42 | [helinivan/italian-sarcasm-detector ](https://huggingface.co/helinivan/italian-sarcasm-detector) | 88.26 | 87.66 | 89.66 | 88.69 | [helinivan/multilingual-sarcasm-detector ](https://huggingface.co/helinivan/multilingual-sarcasm-detector) | 87.23 | 88.65 | 86.33 | 88.30 | [helinivan/dutch-sarcasm-detector ](https://huggingface.co/helinivan/dutch-sarcasm-detector) | 83.02 | 84.27 | 82.01 | 86.81
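
As a lighter-weight alternative to the classification example above, the checkpoint can also be used through the Transformers `pipeline` API. This is an illustrative sketch: without an `id2label` mapping in the config, the returned labels may appear as generic `LABEL_0` (not sarcastic) and `LABEL_1` (sarcastic), and the same lowercasing/punctuation-stripping preprocessing shown above should still be applied.

```python
import string

from transformers import pipeline

detector = pipeline("text-classification", model="helinivan/english-sarcasm-detector")

headline = "CIA Realizes It's Been Using Black Highlighters All These Years."
# Apply the same preprocessing the model was trained with (lowercase, strip punctuation)
cleaned = headline.lower().translate(str.maketrans("", "", string.punctuation)).strip()

print(detector(cleaned))  # e.g. [{'label': 'LABEL_1', 'score': ...}] where LABEL_1 means sarcastic
```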
null
Non_BioNLP
# English Sarcasm Detector English Sarcasm Detector is a text classification model built to detect sarcasm from news article titles. It is fine-tuned on [bert-base-uncased](https://huggingface.co/bert-base-uncased) and the training data consists of ready-made dataset available on Kaggle. <b>Labels</b>: 0 -> Not Sarcastic; 1 -> Sarcastic ## Source Data Datasets: - English language data: [Kaggle: News Headlines Dataset For Sarcasm Detection](https://www.kaggle.com/datasets/rmisra/news-headlines-dataset-for-sarcasm-detection). ## Training Dataset - [helinivan/sarcasm_headlines_multilingual](https://huggingface.co/datasets/helinivan/sarcasm_headlines_multilingual) ## Codebase: - Git Repo: [Official repository](https://github.com/helinivan/multilingual-sarcasm-detector). --- ## Example of classification ```python from transformers import AutoModelForSequenceClassification from transformers import AutoTokenizer import string def preprocess_data(text: str) -> str: return text.lower().translate(str.maketrans("", "", string.punctuation)).strip() MODEL_PATH = "helinivan/english-sarcasm-detector" tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH) model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH) text = "CIA Realizes It's Been Using Black Highlighters All These Years." tokenized_text = tokenizer([preprocess_data(text)], padding=True, truncation=True, max_length=256, return_tensors="pt") output = model(**tokenized_text) probs = output.logits.softmax(dim=-1).tolist()[0] confidence = max(probs) prediction = probs.index(confidence) results = {"is_sarcastic": prediction, "confidence": confidence} ``` Output: ``` {'is_sarcastic': 1, 'confidence': 0.9337034225463867} ``` ## Performance | Model-Name | F1 | Precision | Recall | Accuracy | ------------- |:-------------| -----| -----| ----| | [helinivan/english-sarcasm-detector ](https://huggingface.co/helinivan/english-sarcasm-detector)| **92.38** | 92.75 | 92.38 | 92.42 | [helinivan/italian-sarcasm-detector ](https://huggingface.co/helinivan/italian-sarcasm-detector) | 88.26 | 87.66 | 89.66 | 88.69 | [helinivan/multilingual-sarcasm-detector ](https://huggingface.co/helinivan/multilingual-sarcasm-detector) | 87.23 | 88.65 | 86.33 | 88.30 | [helinivan/dutch-sarcasm-detector ](https://huggingface.co/helinivan/dutch-sarcasm-detector) | 83.02 | 84.27 | 82.01 | 86.81
{"language": "en", "tags": ["bert", "sarcasm-detection", "text-classification"], "widget": [{"text": "CIA Realizes It's Been Using Black Highlighters All These Years."}]}
task
[ "TEXT_CLASSIFICATION" ]
40,304
nikitakapitan/bert-base-uncased-finetuned-glue_sst2
nikitakapitan
text-classification
[ "transformers", "safetensors", "bert", "text-classification", "generated_from_trainer", "dataset:glue", "base_model:google-bert/bert-base-uncased", "base_model:finetune:google-bert/bert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-22T13:08:50Z
2023-11-22T18:16:02+00:00
16
0
--- base_model: bert-base-uncased datasets: - glue license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: bert-base-uncased-finetuned-glue_sst2 results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue config: sst2 split: validation args: sst2 metrics: - type: accuracy value: 0.9243119266055045 name: Accuracy - type: f1 value: 0.9243119266055045 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-glue_sst2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.3392 - Accuracy: 0.9243 - F1: 0.9243 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:| | 0.1818 | 1.0 | 4210 | 0.2472 | 0.9209 | 0.9208 | | 0.1268 | 2.0 | 8420 | 0.3171 | 0.9232 | 0.9232 | | 0.0826 | 3.0 | 12630 | 0.3392 | 0.9243 | 0.9243 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.0+cu118 - Datasets 2.15.0 - Tokenizers 0.15.0
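
Since the card does not include a usage example, the following is a minimal, illustrative sketch of running the fine-tuned checkpoint on an SST-2-style sentence. The label mapping (0 = negative, 1 = positive) follows the usual GLUE SST-2 convention and should be confirmed against the model config.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "nikitakapitan/bert-base-uncased-finetuned-glue_sst2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("A thoroughly enjoyable and well-acted film.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_class = logits.argmax(dim=-1).item()
print(predicted_class)  # 0 = negative, 1 = positive under the usual SST-2 mapping
```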
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-glue_sst2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.3392 - Accuracy: 0.9243 - F1: 0.9243 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:| | 0.1818 | 1.0 | 4210 | 0.2472 | 0.9209 | 0.9208 | | 0.1268 | 2.0 | 8420 | 0.3171 | 0.9232 | 0.9232 | | 0.0826 | 3.0 | 12630 | 0.3392 | 0.9243 | 0.9243 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.0+cu118 - Datasets 2.15.0 - Tokenizers 0.15.0
{"base_model": "bert-base-uncased", "datasets": ["glue"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "bert-base-uncased-finetuned-glue_sst2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "config": "sst2", "split": "validation", "args": "sst2"}, "metrics": [{"type": "accuracy", "value": 0.9243119266055045, "name": "Accuracy"}, {"type": "f1", "value": 0.9243119266055045, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,305
gabrielmbmb/SmolLM-1.7B-Instruct-Summarization-Adapter_r32_alpha64_lr3e-4_rslorafalse
gabrielmbmb
null
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:HuggingFaceTB/SmolLM-1.7B-Instruct", "base_model:adapter:HuggingFaceTB/SmolLM-1.7B-Instruct", "license:apache-2.0", "region:us" ]
2024-10-16T16:52:45Z
2024-10-16T18:21:25+00:00
15
0
--- base_model: HuggingFaceTB/SmolLM-1.7B-Instruct library_name: peft license: apache-2.0 tags: - trl - sft - generated_from_trainer model-index: - name: SmolLM-1.7B-Instruct-Summarization-Adapter_r32_alpha64_lr3e-4_rslorafalse results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # SmolLM-1.7B-Instruct-Summarization-Adapter_r32_alpha64_lr3e-4_rslorafalse This model is a fine-tuned version of [HuggingFaceTB/SmolLM-1.7B-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM-1.7B-Instruct) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.7541 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.7291 | 1.0 | 1266 | 1.7541 | ### Framework versions - PEFT 0.13.2 - Transformers 4.45.2 - Pytorch 2.4.1+cu121 - Datasets 3.0.1 - Tokenizers 0.20.1
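
Because this repository contains a PEFT (LoRA) adapter rather than full model weights, a typical way to use it is to load the base SmolLM model and attach the adapter. This is an illustrative sketch, not an official usage example from the authors; in particular, the prompt format is a placeholder.

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "HuggingFaceTB/SmolLM-1.7B-Instruct"
adapter_id = "gabrielmbmb/SmolLM-1.7B-Instruct-Summarization-Adapter_r32_alpha64_lr3e-4_rslorafalse"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_id)  # attaches the LoRA adapter

prompt = "Summarize: The quick brown fox jumps over the lazy dog."  # placeholder prompt format
inputs = tokenizer(prompt, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```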
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # SmolLM-1.7B-Instruct-Summarization-Adapter_r32_alpha64_lr3e-4_rslorafalse This model is a fine-tuned version of [HuggingFaceTB/SmolLM-1.7B-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM-1.7B-Instruct) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.7541 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.7291 | 1.0 | 1266 | 1.7541 | ### Framework versions - PEFT 0.13.2 - Transformers 4.45.2 - Pytorch 2.4.1+cu121 - Datasets 3.0.1 - Tokenizers 0.20.1
{"base_model": "HuggingFaceTB/SmolLM-1.7B-Instruct", "library_name": "peft", "license": "apache-2.0", "tags": ["trl", "sft", "generated_from_trainer"], "model-index": [{"name": "SmolLM-1.7B-Instruct-Summarization-Adapter_r32_alpha64_lr3e-4_rslorafalse", "results": []}]}
task
[ "SUMMARIZATION" ]
40,306
Ioanaaaaaaa/bert-base-uncased-with-preprocess-finetuned-emotion-3-epochs-5e-05-renamed
Ioanaaaaaaa
text-classification
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "generated_from_trainer", "dataset:emotion", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-16T12:06:40Z
2023-06-16T12:17:07+00:00
8
0
--- datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: bert-base-uncased-with-preprocess-finetuned-emotion-3-epochs-5e-05-renamed results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.942 name: Accuracy - type: f1 value: 0.9420914957548232 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-with-preprocess-finetuned-emotion-3-epochs-5e-05-renamed This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1279 - Accuracy: 0.942 - F1: 0.9421 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.5277 | 1.0 | 250 | 0.2037 | 0.926 | 0.9257 | | 0.141 | 2.0 | 500 | 0.1352 | 0.9385 | 0.9387 | | 0.0912 | 3.0 | 750 | 0.1279 | 0.942 | 0.9421 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.0 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-with-preprocess-finetuned-emotion-3-epochs-5e-05-renamed This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1279 - Accuracy: 0.942 - F1: 0.9421 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.5277 | 1.0 | 250 | 0.2037 | 0.926 | 0.9257 | | 0.141 | 2.0 | 500 | 0.1352 | 0.9385 | 0.9387 | | 0.0912 | 3.0 | 750 | 0.1279 | 0.942 | 0.9421 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.0 - Tokenizers 0.13.3
{"datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "bert-base-uncased-with-preprocess-finetuned-emotion-3-epochs-5e-05-renamed", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.942, "name": "Accuracy"}, {"type": "f1", "value": 0.9420914957548232, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,307
RUCAIBox/mvp-question-answering
RUCAIBox
text2text-generation
[ "transformers", "pytorch", "mvp", "text-generation", "text2text-generation", "en", "arxiv:2206.12131", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2022-06-02T11:54:54Z
2022-06-27T02:28:05+00:00
91
2
---
language:
- en
license: apache-2.0
pipeline_tag: text2text-generation
tags:
- text-generation
- text2text-generation
widget:
- text: 'Answer the following question: From which country did Angola achieve independence in 1975?'
  example_title: Example1
- text: 'Answer the following question: what is ce certified [X_SEP] The CE marking is the manufacturer''s declaration that the product meets the requirements of the applicable EC directives. Officially, CE is an abbreviation of Conformite Conformité, europeenne Européenne Meaning. european conformity'
  example_title: Example2
---

# MVP-question-answering

The MVP-question-answering model was proposed in [**MVP: Multi-task Supervised Pre-training for Natural Language Generation**](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.

The detailed information and instructions can be found [https://github.com/RUCAIBox/MVP](https://github.com/RUCAIBox/MVP).

## Model Description

MVP-question-answering is a prompt-based model, in which MVP is further equipped with prompts pre-trained using labeled question answering datasets. It is a variant (MVP+S) of our [MVP](https://huggingface.co/RUCAIBox/mvp) model. It follows a Transformer encoder-decoder architecture with layer-wise prompts.

MVP-question-answering is specially designed for question answering tasks, such as reading comprehension (SQuAD), conversational question answering (CoQA) and closed-book question-answering (Natural Questions).

## Example

```python
>>> from transformers import MvpTokenizer, MvpForConditionalGeneration

>>> tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
>>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp-question-answering")

>>> inputs = tokenizer(
...     "Answer the following question: From which country did Angola achieve independence in 1975?",
...     return_tensors="pt",
... )
>>> generated_ids = model.generate(**inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
['Portugal']
```

## Related Models

**MVP**: [https://huggingface.co/RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp).

**Prompt-based models**:
- MVP-multi-task: [https://huggingface.co/RUCAIBox/mvp-multi-task](https://huggingface.co/RUCAIBox/mvp-multi-task).
- MVP-summarization: [https://huggingface.co/RUCAIBox/mvp-summarization](https://huggingface.co/RUCAIBox/mvp-summarization).
- MVP-open-dialog: [https://huggingface.co/RUCAIBox/mvp-open-dialog](https://huggingface.co/RUCAIBox/mvp-open-dialog).
- MVP-data-to-text: [https://huggingface.co/RUCAIBox/mvp-data-to-text](https://huggingface.co/RUCAIBox/mvp-data-to-text).
- MVP-story: [https://huggingface.co/RUCAIBox/mvp-story](https://huggingface.co/RUCAIBox/mvp-story).
- MVP-question-answering: [https://huggingface.co/RUCAIBox/mvp-question-answering](https://huggingface.co/RUCAIBox/mvp-question-answering).
- MVP-question-generation: [https://huggingface.co/RUCAIBox/mvp-question-generation](https://huggingface.co/RUCAIBox/mvp-question-generation).
- MVP-task-dialog: [https://huggingface.co/RUCAIBox/mvp-task-dialog](https://huggingface.co/RUCAIBox/mvp-task-dialog).

**Multi-task models**:
- MTL-summarization: [https://huggingface.co/RUCAIBox/mtl-summarization](https://huggingface.co/RUCAIBox/mtl-summarization).
- MTL-open-dialog: [https://huggingface.co/RUCAIBox/mtl-open-dialog](https://huggingface.co/RUCAIBox/mtl-open-dialog).
- MTL-data-to-text: [https://huggingface.co/RUCAIBox/mtl-data-to-text](https://huggingface.co/RUCAIBox/mtl-data-to-text). - MTL-story: [https://huggingface.co/RUCAIBox/mtl-story](https://huggingface.co/RUCAIBox/mtl-story). - MTL-question-answering: [https://huggingface.co/RUCAIBox/mtl-question-answering](https://huggingface.co/RUCAIBox/mtl-question-answering). - MTL-question-generation: [https://huggingface.co/RUCAIBox/mtl-question-generation](https://huggingface.co/RUCAIBox/mtl-question-generation). - MTL-task-dialog: [https://huggingface.co/RUCAIBox/mtl-task-dialog](https://huggingface.co/RUCAIBox/mtl-task-dialog). ## Citation ```bibtex @article{tang2022mvp, title={MVP: Multi-task Supervised Pre-training for Natural Language Generation}, author={Tang, Tianyi and Li, Junyi and Zhao, Wayne Xin and Wen, Ji-Rong}, journal={arXiv preprint arXiv:2206.12131}, year={2022}, url={https://arxiv.org/abs/2206.12131}, } ```
null
Non_BioNLP
# MVP-question-answering

The MVP-question-answering model was proposed in [**MVP: Multi-task Supervised Pre-training for Natural Language Generation**](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.

The detailed information and instructions can be found [https://github.com/RUCAIBox/MVP](https://github.com/RUCAIBox/MVP).

## Model Description

MVP-question-answering is a prompt-based model, in which MVP is further equipped with prompts pre-trained using labeled question answering datasets. It is a variant (MVP+S) of our [MVP](https://huggingface.co/RUCAIBox/mvp) model. It follows a Transformer encoder-decoder architecture with layer-wise prompts.

MVP-question-answering is specially designed for question answering tasks, such as reading comprehension (SQuAD), conversational question answering (CoQA) and closed-book question-answering (Natural Questions).

## Example

```python
>>> from transformers import MvpTokenizer, MvpForConditionalGeneration

>>> tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
>>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp-question-answering")

>>> inputs = tokenizer(
...     "Answer the following question: From which country did Angola achieve independence in 1975?",
...     return_tensors="pt",
... )
>>> generated_ids = model.generate(**inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
['Portugal']
```

## Related Models

**MVP**: [https://huggingface.co/RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp).

**Prompt-based models**:
- MVP-multi-task: [https://huggingface.co/RUCAIBox/mvp-multi-task](https://huggingface.co/RUCAIBox/mvp-multi-task).
- MVP-summarization: [https://huggingface.co/RUCAIBox/mvp-summarization](https://huggingface.co/RUCAIBox/mvp-summarization).
- MVP-open-dialog: [https://huggingface.co/RUCAIBox/mvp-open-dialog](https://huggingface.co/RUCAIBox/mvp-open-dialog).
- MVP-data-to-text: [https://huggingface.co/RUCAIBox/mvp-data-to-text](https://huggingface.co/RUCAIBox/mvp-data-to-text).
- MVP-story: [https://huggingface.co/RUCAIBox/mvp-story](https://huggingface.co/RUCAIBox/mvp-story).
- MVP-question-answering: [https://huggingface.co/RUCAIBox/mvp-question-answering](https://huggingface.co/RUCAIBox/mvp-question-answering).
- MVP-question-generation: [https://huggingface.co/RUCAIBox/mvp-question-generation](https://huggingface.co/RUCAIBox/mvp-question-generation).
- MVP-task-dialog: [https://huggingface.co/RUCAIBox/mvp-task-dialog](https://huggingface.co/RUCAIBox/mvp-task-dialog).

**Multi-task models**:
- MTL-summarization: [https://huggingface.co/RUCAIBox/mtl-summarization](https://huggingface.co/RUCAIBox/mtl-summarization).
- MTL-open-dialog: [https://huggingface.co/RUCAIBox/mtl-open-dialog](https://huggingface.co/RUCAIBox/mtl-open-dialog).
- MTL-data-to-text: [https://huggingface.co/RUCAIBox/mtl-data-to-text](https://huggingface.co/RUCAIBox/mtl-data-to-text).
- MTL-story: [https://huggingface.co/RUCAIBox/mtl-story](https://huggingface.co/RUCAIBox/mtl-story).
- MTL-question-answering: [https://huggingface.co/RUCAIBox/mtl-question-answering](https://huggingface.co/RUCAIBox/mtl-question-answering).
- MTL-question-generation: [https://huggingface.co/RUCAIBox/mtl-question-generation](https://huggingface.co/RUCAIBox/mtl-question-generation).
- MTL-task-dialog: [https://huggingface.co/RUCAIBox/mtl-task-dialog](https://huggingface.co/RUCAIBox/mtl-task-dialog).
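
The widget examples in this card also illustrate the reading-comprehension input format, where the question and the supporting passage are joined with `[X_SEP]`. The following sketch mirrors the closed-book example above using that format; the passage is taken from the card's second widget example, and no particular output is guaranteed.

```python
from transformers import MvpTokenizer, MvpForConditionalGeneration

tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp-question-answering")

# Question and passage separated by [X_SEP], as in the card's widget example
inputs = tokenizer(
    "Answer the following question: what is ce certified [X_SEP] "
    "The CE marking is the manufacturer's declaration that the product meets "
    "the requirements of the applicable EC directives.",
    return_tensors="pt",
)
generated_ids = model.generate(**inputs)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```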
## Citation ```bibtex @article{tang2022mvp, title={MVP: Multi-task Supervised Pre-training for Natural Language Generation}, author={Tang, Tianyi and Li, Junyi and Zhao, Wayne Xin and Wen, Ji-Rong}, journal={arXiv preprint arXiv:2206.12131}, year={2022}, url={https://arxiv.org/abs/2206.12131}, } ```
{"language": ["en"], "license": "apache-2.0", "pipeline_tag": "text2text-generation", "tags": ["text-generation", "text2text-generation"], "widget": [{"text": "Answer the following question: From which country did Angola achieve independence in 1975?", "example_title": "Example1"}, {"text": "Answer the following question: what is ce certified [X_SEP] The CE marking is the manufacturer's declaration that the product meets the requirements of the applicable EC directives. Officially, CE is an abbreviation of Conformite Conformité, europeenne Européenne Meaning. european conformity", "example_title": "Example2"}]}
task
[ "QUESTION_ANSWERING", "SUMMARIZATION" ]
40,308
SoyGema/english-hindi-bleu
SoyGema
text2text-generation
[ "transformers", "pytorch", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "en", "hi", "dataset:opus100", "base_model:google-t5/t5-small", "base_model:finetune:google-t5/t5-small", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-09-02T21:56:18Z
2023-09-14T16:06:42+00:00
20
0
--- base_model: t5-small datasets: - opus100 language: - en - hi license: apache-2.0 metrics: - bleu tags: - generated_from_trainer model-index: - name: english-hindi-bleu results: - task: type: translation name: Translation dataset: name: opus100 en-hi type: opus100 config: en-hi split: validation args: en-hi metrics: - type: bleu value: 0.0 name: Bleu --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # english-hindi-bleu This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the opus100 en-hi dataset. It achieves the following results on the evaluation set: - Loss: 0.0804 - Bleu: 0.0 - Gen Len: 69.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results ### Framework versions - Transformers 4.32.0.dev0 - Pytorch 2.0.1 - Datasets 2.14.4 - Tokenizers 0.13.3
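
The card does not show an inference example, so a minimal sketch is given below. The "translate English to Hindi: " prefix is an assumption based on the standard T5 convention used by the Transformers `run_translation` example (its `source_prefix` argument); check the actual training arguments before relying on it.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "SoyGema/english-hindi-bleu"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# The task prefix below is an assumption, not a documented value for this checkpoint
inputs = tokenizer("translate English to Hindi: How are you?", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```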
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # english-hindi-bleu This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the opus100 en-hi dataset. It achieves the following results on the evaluation set: - Loss: 0.0804 - Bleu: 0.0 - Gen Len: 69.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results ### Framework versions - Transformers 4.32.0.dev0 - Pytorch 2.0.1 - Datasets 2.14.4 - Tokenizers 0.13.3
{"base_model": "t5-small", "datasets": ["opus100"], "language": ["en", "hi"], "license": "apache-2.0", "metrics": ["bleu"], "tags": ["generated_from_trainer"], "model-index": [{"name": "english-hindi-bleu", "results": [{"task": {"type": "translation", "name": "Translation"}, "dataset": {"name": "opus100 en-hi", "type": "opus100", "config": "en-hi", "split": "validation", "args": "en-hi"}, "metrics": [{"type": "bleu", "value": 0.0, "name": "Bleu"}]}]}]}
task
[ "TRANSLATION" ]
40,309
peter520416/custom_summarization_dataset
peter520416
null
[ "region:us" ]
2024-09-19T19:09:06Z
2024-09-19T19:09:57+00:00
0
0
--- {} --- # Dataset Card for Custom Text Dataset ## Dataset Name Custom Text Dataset for Summarization ## Overview This dataset contains articles and their corresponding summaries, created specifically for text summarization tasks. It is designed to train and evaluate models that can generate concise summaries from longer pieces of text. The dataset is based on publicly available news articles from various sources. - **Task**: Text Summarization - **Language**: English - **Number of samples**: Approximately 10,000 articles with summaries - **Sources**: News articles from online sources, including technology, politics, and health categories. ## Composition - **Columns**: - `article`: Full text of the news article. - `summary`: A concise summary or highlights of the article. - **Data types**: - Both columns are strings, where the article can be of arbitrary length and the summary is typically much shorter (1-3 sentences). - **Dataset split**: - **Training set**: 8,000 samples - **Validation set**: 1,000 samples - **Test set**: 1,000 samples ## Collection Process The dataset was curated by scraping news articles from publicly available sources. We selected a wide range of articles to cover various domains such as politics, technology, sports, and health. After collection, articles were manually paired with summaries to ensure accuracy. - **Scraping tools used**: BeautifulSoup, Selenium - **Date range**: Articles were collected from 2019 to 2021. ## Preprocessing The dataset was preprocessed to clean and normalize the text data: - **Tokenization**: All text was tokenized into sentences and words using `nltk`. - **Lowercasing**: All text was converted to lowercase for consistency. - **Removal of noise**: Non-alphabetic characters, HTML tags, and URLs were removed. - **Summary trimming**: Summaries longer than 3 sentences were shortened to focus on key points. ## How to Use To use the dataset for text summarization tasks, you can load it using popular data handling libraries such as `pandas` or `datasets`. Here's an example of loading and using the dataset for a summarization task: ```python import pandas as pd # Load the dataset from CSV df = pd.read_csv("path/to/custom_text_dataset.csv") # Display first few rows print(df.head()) # Example usage for a text summarization model article = df['article'][0] summary = df['summary'][0] print("Article:", article) print("Summary:", summary) ``` ## Evaluation The dataset was evaluated using automatic summarization metrics such as ROUGE and BLEU. Summarization models trained on this dataset were evaluated on a separate test set. ROUGE-1: 43.0 ROUGE-2: 21.0 BLEU-4: 10.5 These scores indicate how well the generated summaries match the human-written summaries in the dataset. ## Limitations The dataset contains only English articles, so it is not applicable to non-English text summarization tasks. Due to the focus on news articles, the model may not generalize well to other domains like legal or medical text. Summaries may occasionally omit nuanced information due to manual summarization. ## Ethical Considerations Data bias: Articles were scraped from specific sources, which may introduce bias depending on the perspective of the original publisher. Content accuracy: Summaries are human-written but may still contain errors or subjective interpretations of the articles. Data privacy: All data was collected from publicly available sources, and no private or sensitive data was used in the dataset creation. 
Use of dataset: The dataset should not be used to create misleading or false summaries that could misinform the public.
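
To complement the Evaluation section above, the following is an illustrative sketch of how ROUGE scores of the kind reported there could be computed with the Hugging Face `evaluate` library on this dataset's test split. The file path is a placeholder (as in the usage example above), and in practice `predictions` would come from a summarization model rather than the references themselves.

```python
import evaluate
import pandas as pd

rouge = evaluate.load("rouge")

# Load the test split (placeholder path, following the usage example above)
test_df = pd.read_csv("path/to/custom_text_dataset_test.csv")

# Here the reference summaries stand in for model outputs just to show the metric call
predictions = test_df["summary"].tolist()
references = test_df["summary"].tolist()

scores = rouge.compute(predictions=predictions, references=references)
print(scores)  # e.g. {'rouge1': ..., 'rouge2': ..., 'rougeL': ..., 'rougeLsum': ...}
```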
null
Non_BioNLP
# Dataset Card for Custom Text Dataset ## Dataset Name Custom Text Dataset for Summarization ## Overview This dataset contains articles and their corresponding summaries, created specifically for text summarization tasks. It is designed to train and evaluate models that can generate concise summaries from longer pieces of text. The dataset is based on publicly available news articles from various sources. - **Task**: Text Summarization - **Language**: English - **Number of samples**: Approximately 10,000 articles with summaries - **Sources**: News articles from online sources, including technology, politics, and health categories. ## Composition - **Columns**: - `article`: Full text of the news article. - `summary`: A concise summary or highlights of the article. - **Data types**: - Both columns are strings, where the article can be of arbitrary length and the summary is typically much shorter (1-3 sentences). - **Dataset split**: - **Training set**: 8,000 samples - **Validation set**: 1,000 samples - **Test set**: 1,000 samples ## Collection Process The dataset was curated by scraping news articles from publicly available sources. We selected a wide range of articles to cover various domains such as politics, technology, sports, and health. After collection, articles were manually paired with summaries to ensure accuracy. - **Scraping tools used**: BeautifulSoup, Selenium - **Date range**: Articles were collected from 2019 to 2021. ## Preprocessing The dataset was preprocessed to clean and normalize the text data: - **Tokenization**: All text was tokenized into sentences and words using `nltk`. - **Lowercasing**: All text was converted to lowercase for consistency. - **Removal of noise**: Non-alphabetic characters, HTML tags, and URLs were removed. - **Summary trimming**: Summaries longer than 3 sentences were shortened to focus on key points. ## How to Use To use the dataset for text summarization tasks, you can load it using popular data handling libraries such as `pandas` or `datasets`. Here's an example of loading and using the dataset for a summarization task: ```python import pandas as pd # Load the dataset from CSV df = pd.read_csv("path/to/custom_text_dataset.csv") # Display first few rows print(df.head()) # Example usage for a text summarization model article = df['article'][0] summary = df['summary'][0] print("Article:", article) print("Summary:", summary) ``` ## Evaluation The dataset was evaluated using automatic summarization metrics such as ROUGE and BLEU. Summarization models trained on this dataset were evaluated on a separate test set. ROUGE-1: 43.0 ROUGE-2: 21.0 BLEU-4: 10.5 These scores indicate how well the generated summaries match the human-written summaries in the dataset. ## Limitations The dataset contains only English articles, so it is not applicable to non-English text summarization tasks. Due to the focus on news articles, the model may not generalize well to other domains like legal or medical text. Summaries may occasionally omit nuanced information due to manual summarization. ## Ethical Considerations Data bias: Articles were scraped from specific sources, which may introduce bias depending on the perspective of the original publisher. Content accuracy: Summaries are human-written but may still contain errors or subjective interpretations of the articles. Data privacy: All data was collected from publicly available sources, and no private or sensitive data was used in the dataset creation. 
Use of dataset: The dataset should not be used to create misleading or false summaries that could misinform the public.
{}
task
[ "SUMMARIZATION" ]
40,310
prithivMLmods/Gaea-Opus-14B-Exp
prithivMLmods
text-generation
[ "transformers", "safetensors", "qwen2", "text-generation", "text-generation-inference", "Qwen", "14B", "Math", "conversational", "en", "base_model:prithivMLmods/Tucana-Opus-14B-r999", "base_model:finetune:prithivMLmods/Tucana-Opus-14B-r999", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2025-03-11T05:56:26Z
2025-03-14T02:47:51+00:00
247
2
--- base_model: - prithivMLmods/Tucana-Opus-14B-r999 language: - en library_name: transformers license: apache-2.0 pipeline_tag: text-generation tags: - text-generation-inference - Qwen - 14B - Math model-index: - name: Gaea-Opus-14B-Exp results: - task: type: text-generation name: Text Generation dataset: name: IFEval (0-Shot) type: wis-k/instruction-following-eval split: train args: num_few_shot: 0 metrics: - type: inst_level_strict_acc and prompt_level_strict_acc value: 59.56 name: averaged accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: BBH (3-Shot) type: SaylorTwift/bbh split: test args: num_few_shot: 3 metrics: - type: acc_norm value: 50.51 name: normalized accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MATH Lvl 5 (4-Shot) type: lighteval/MATH-Hard split: test args: num_few_shot: 4 metrics: - type: exact_match value: 42.75 name: exact match source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GPQA (0-shot) type: Idavidrein/gpqa split: train args: num_few_shot: 0 metrics: - type: acc_norm value: 18.79 name: acc_norm source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MuSR (0-shot) type: TAUR-Lab/MuSR args: num_few_shot: 0 metrics: - type: acc_norm value: 20.17 name: acc_norm source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU-PRO (5-shot) type: TIGER-Lab/MMLU-Pro config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 48.9 name: accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp name: Open LLM Leaderboard --- ![ascasdc.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/qJXahhBL8d9Xyrwx8DLWE.png) # **Gaea-Opus-14B-Exp** Gaea-Opus-14B-Exp is based on the Qwen 2.5 14B modality architecture, designed to enhance the reasoning capabilities of 14B-parameter models. This model is optimized for general-purpose reasoning and answering, excelling in contextual understanding, logical deduction, and multi-step problem-solving. It has been fine-tuned using a long chain-of-thought reasoning model and specialized datasets to improve comprehension, structured responses, and conversational intelligence. ## **Key Improvements** 1. **Enhanced General Knowledge**: The model provides broad knowledge across various domains, improving capabilities in answering questions accurately and generating coherent responses. 2. **Improved Instruction Following**: Significant advancements in understanding and following complex instructions, generating structured responses, and maintaining coherence over extended interactions. 3. 
**Versatile Adaptability**: More resilient to diverse prompts, enhancing its ability to handle a wide range of topics and conversation styles, including open-ended and structured inquiries. 4. **Long-Context Support**: Supports up to 128K tokens for input context and can generate up to 8K tokens in a single output, making it ideal for detailed responses. 5. **Multilingual Proficiency**: Supports over 29 languages, including English, Chinese, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more. ## **Quickstart with transformers** Here is a code snippet with `apply_chat_template` to show you how to load the tokenizer and model and generate content: ```python from transformers import AutoModelForCausalLM, AutoTokenizer model_name = "prithivMLmods/Gaea-Opus-14B-Exp" model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype="auto", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained(model_name) prompt = "What are the key principles of general-purpose AI?" messages = [ {"role": "system", "content": "You are a helpful assistant capable of answering a wide range of questions."}, {"role": "user", "content": prompt} ] text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) model_inputs = tokenizer([text], return_tensors="pt").to(model.device) generated_ids = model.generate( **model_inputs, max_new_tokens=512 ) generated_ids = [ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) ] response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` ## **Intended Use** 1. **General-Purpose Reasoning**: Designed for broad applicability, assisting with logical reasoning, answering diverse questions, and solving general knowledge problems. 2. **Educational and Informational Assistance**: Suitable for providing explanations, summaries, and research-based responses for students, educators, and general users. 3. **Conversational AI and Chatbots**: Ideal for building intelligent conversational agents that require contextual understanding and dynamic response generation. 4. **Multilingual Applications**: Supports global communication, translations, and multilingual content generation. 5. **Structured Data Processing**: Capable of analyzing and generating structured outputs, such as tables and JSON, useful for data science and automation. 6. **Long-Form Content Generation**: Can generate extended responses, including articles, reports, and guides, maintaining coherence over large text outputs. ## **Limitations** 1. **Hardware Requirements**: Requires high-memory GPUs or TPUs due to its large parameter size and long-context support. 2. **Potential Bias in Responses**: While designed to be neutral, outputs may still reflect biases present in training data. 3. **Inconsistent Outputs in Creative Tasks**: May produce variable results in storytelling and highly subjective topics. 4. **Limited Real-World Awareness**: Does not have access to real-time events beyond its training cutoff. 5. **Error Propagation in Extended Outputs**: Minor errors in early responses may affect overall coherence in long-form outputs. 6. **Prompt Sensitivity**: The effectiveness of responses may depend on how well the input prompt is structured. 
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/prithivMLmods__Gaea-Opus-14B-Exp-details)! Summarized results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/contents/viewer/default/train?q=prithivMLmods%2FGaea-Opus-14B-Exp&sort[column]=Average%20%E2%AC%86%EF%B8%8F&sort[direction]=desc)! | Metric |Value (%)| |-------------------|--------:| |**Average** | 40.11| |IFEval (0-Shot) | 59.56| |BBH (3-Shot) | 50.51| |MATH Lvl 5 (4-Shot)| 42.75| |GPQA (0-shot) | 18.79| |MuSR (0-shot) | 20.17| |MMLU-PRO (5-shot) | 48.90|
null
Non_BioNLP
![ascasdc.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/qJXahhBL8d9Xyrwx8DLWE.png) # **Gaea-Opus-14B-Exp** Gaea-Opus-14B-Exp is based on the Qwen 2.5 14B modality architecture, designed to enhance the reasoning capabilities of 14B-parameter models. This model is optimized for general-purpose reasoning and answering, excelling in contextual understanding, logical deduction, and multi-step problem-solving. It has been fine-tuned using a long chain-of-thought reasoning model and specialized datasets to improve comprehension, structured responses, and conversational intelligence. ## **Key Improvements** 1. **Enhanced General Knowledge**: The model provides broad knowledge across various domains, improving capabilities in answering questions accurately and generating coherent responses. 2. **Improved Instruction Following**: Significant advancements in understanding and following complex instructions, generating structured responses, and maintaining coherence over extended interactions. 3. **Versatile Adaptability**: More resilient to diverse prompts, enhancing its ability to handle a wide range of topics and conversation styles, including open-ended and structured inquiries. 4. **Long-Context Support**: Supports up to 128K tokens for input context and can generate up to 8K tokens in a single output, making it ideal for detailed responses. 5. **Multilingual Proficiency**: Supports over 29 languages, including English, Chinese, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more. ## **Quickstart with transformers** Here is a code snippet with `apply_chat_template` to show you how to load the tokenizer and model and generate content: ```python from transformers import AutoModelForCausalLM, AutoTokenizer model_name = "prithivMLmods/Gaea-Opus-14B-Exp" model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype="auto", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained(model_name) prompt = "What are the key principles of general-purpose AI?" messages = [ {"role": "system", "content": "You are a helpful assistant capable of answering a wide range of questions."}, {"role": "user", "content": prompt} ] text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) model_inputs = tokenizer([text], return_tensors="pt").to(model.device) generated_ids = model.generate( **model_inputs, max_new_tokens=512 ) generated_ids = [ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) ] response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` ## **Intended Use** 1. **General-Purpose Reasoning**: Designed for broad applicability, assisting with logical reasoning, answering diverse questions, and solving general knowledge problems. 2. **Educational and Informational Assistance**: Suitable for providing explanations, summaries, and research-based responses for students, educators, and general users. 3. **Conversational AI and Chatbots**: Ideal for building intelligent conversational agents that require contextual understanding and dynamic response generation. 4. **Multilingual Applications**: Supports global communication, translations, and multilingual content generation. 5. **Structured Data Processing**: Capable of analyzing and generating structured outputs, such as tables and JSON, useful for data science and automation. 6. 
**Long-Form Content Generation**: Can generate extended responses, including articles, reports, and guides, maintaining coherence over large text outputs. ## **Limitations** 1. **Hardware Requirements**: Requires high-memory GPUs or TPUs due to its large parameter size and long-context support. 2. **Potential Bias in Responses**: While designed to be neutral, outputs may still reflect biases present in training data. 3. **Inconsistent Outputs in Creative Tasks**: May produce variable results in storytelling and highly subjective topics. 4. **Limited Real-World Awareness**: Does not have access to real-time events beyond its training cutoff. 5. **Error Propagation in Extended Outputs**: Minor errors in early responses may affect overall coherence in long-form outputs. 6. **Prompt Sensitivity**: The effectiveness of responses may depend on how well the input prompt is structured. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/prithivMLmods__Gaea-Opus-14B-Exp-details)! Summarized results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/contents/viewer/default/train?q=prithivMLmods%2FGaea-Opus-14B-Exp&sort[column]=Average%20%E2%AC%86%EF%B8%8F&sort[direction]=desc)! | Metric |Value (%)| |-------------------|--------:| |**Average** | 40.11| |IFEval (0-Shot) | 59.56| |BBH (3-Shot) | 50.51| |MATH Lvl 5 (4-Shot)| 42.75| |GPQA (0-shot) | 18.79| |MuSR (0-shot) | 20.17| |MMLU-PRO (5-shot) | 48.90|
{"base_model": ["prithivMLmods/Tucana-Opus-14B-r999"], "language": ["en"], "library_name": "transformers", "license": "apache-2.0", "pipeline_tag": "text-generation", "tags": ["text-generation-inference", "Qwen", "14B", "Math"], "model-index": [{"name": "Gaea-Opus-14B-Exp", "results": [{"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "IFEval (0-Shot)", "type": "wis-k/instruction-following-eval", "split": "train", "args": {"num_few_shot": 0}}, "metrics": [{"type": "inst_level_strict_acc and prompt_level_strict_acc", "value": 59.56, "name": "averaged accuracy"}], "source": {"url": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "BBH (3-Shot)", "type": "SaylorTwift/bbh", "split": "test", "args": {"num_few_shot": 3}}, "metrics": [{"type": "acc_norm", "value": 50.51, "name": "normalized accuracy"}], "source": {"url": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "MATH Lvl 5 (4-Shot)", "type": "lighteval/MATH-Hard", "split": "test", "args": {"num_few_shot": 4}}, "metrics": [{"type": "exact_match", "value": 42.75, "name": "exact match"}], "source": {"url": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "GPQA (0-shot)", "type": "Idavidrein/gpqa", "split": "train", "args": {"num_few_shot": 0}}, "metrics": [{"type": "acc_norm", "value": 18.79, "name": "acc_norm"}], "source": {"url": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "MuSR (0-shot)", "type": "TAUR-Lab/MuSR", "args": {"num_few_shot": 0}}, "metrics": [{"type": "acc_norm", "value": 20.17, "name": "acc_norm"}], "source": {"url": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "MMLU-PRO (5-shot)", "type": "TIGER-Lab/MMLU-Pro", "config": "main", "split": "test", "args": {"num_few_shot": 5}}, "metrics": [{"type": "acc", "value": 48.9, "name": "accuracy"}], "source": {"url": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=prithivMLmods%2FGaea-Opus-14B-Exp", "name": "Open LLM Leaderboard"}}]}]}
task
[ "TRANSLATION" ]
40,311
faisalahmad/summarizer2
faisalahmad
text2text-generation
[ "transformers", "pytorch", "pegasus", "text2text-generation", "autotrain", "en", "dataset:faisalahmad/autotrain-data-nsut-nlp-project-textsummarization", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-04-27T09:09:24Z
2022-04-28T17:48:14+00:00
112
0
--- datasets: - faisalahmad/autotrain-data-nsut-nlp-project-textsummarization language: en tags: - a - u - t - o - r - i - n widget: - text: I love AutoTrain 🤗 co2_eq_emissions: 4444.804304528572 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 791824381 - CO2 Emissions (in grams): 4444.804304528572 ## Validation Metrics - Loss: 1.4599040746688843 - Rouge1: 46.5461 - Rouge2: 23.8595 - RougeL: 38.526 - RougeLsum: 38.5219 - Gen Len: 23.468 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/faisalahmad/autotrain-nsut-nlp-project-textsummarization-791824381 ```
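For local use, the same checkpoint can also be loaded with the `transformers` summarization pipeline. This is a minimal sketch that is not part of the original card; the repo id is taken from the cURL example above and the generation lengths are illustrative assumptions: ```python # Hedged sketch: load the AutoTrain summarization checkpoint referenced in the cURL example above. from transformers import pipeline summarizer = pipeline( "summarization", model="faisalahmad/autotrain-nsut-nlp-project-textsummarization-791824381", ) article = ( "AutoTrain fine-tuned a Pegasus model on a custom summarization dataset, " "reporting ROUGE-1 of 46.5 and ROUGE-L of 38.5 on the validation split." ) # max_length/min_length are illustrative generation settings, not values from the card print(summarizer(article, max_length=60, min_length=10, do_sample=False)[0]["summary_text"]) ```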
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 791824381 - CO2 Emissions (in grams): 4444.804304528572 ## Validation Metrics - Loss: 1.4599040746688843 - Rouge1: 46.5461 - Rouge2: 23.8595 - RougeL: 38.526 - RougeLsum: 38.5219 - Gen Len: 23.468 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/faisalahmad/autotrain-nsut-nlp-project-textsummarization-791824381 ```
{"datasets": ["faisalahmad/autotrain-data-nsut-nlp-project-textsummarization"], "language": "en", "tags": ["a", "u", "t", "o", "r", "i", "n"], "widget": [{"text": "I love AutoTrain 🤗"}], "co2_eq_emissions": 4444.804304528572}
task
[ "SUMMARIZATION" ]
40,312
Helsinki-NLP/opus-mt-fi-zne
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "fi", "zne", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2023-08-16T11:35:56+00:00
47
0
--- license: apache-2.0 tags: - translation --- ### opus-mt-fi-zne * source languages: fi * target languages: zne * OPUS readme: [fi-zne](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/fi-zne/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-09.zip](https://object.pouta.csc.fi/OPUS-MT-models/fi-zne/opus-2020-01-09.zip) * test set translations: [opus-2020-01-09.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/fi-zne/opus-2020-01-09.test.txt) * test set scores: [opus-2020-01-09.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/fi-zne/opus-2020-01-09.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.fi.zne | 22.7 | 0.464 |
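As a usage sketch (not part of the original card), the checkpoint can be loaded with the `transformers` Marian classes; the example sentence and generation defaults are illustrative assumptions: ```python # Hedged sketch: Finnish -> Zande translation with the Helsinki-NLP Marian checkpoint. from transformers import MarianMTModel, MarianTokenizer model_name = "Helsinki-NLP/opus-mt-fi-zne" tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) batch = tokenizer(["Hyvää huomenta!"], return_tensors="pt", padding=True) # "Good morning!" generated = model.generate(**batch) print(tokenizer.batch_decode(generated, skip_special_tokens=True)) ```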
null
Non_BioNLP
### opus-mt-fi-zne * source languages: fi * target languages: zne * OPUS readme: [fi-zne](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/fi-zne/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-09.zip](https://object.pouta.csc.fi/OPUS-MT-models/fi-zne/opus-2020-01-09.zip) * test set translations: [opus-2020-01-09.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/fi-zne/opus-2020-01-09.test.txt) * test set scores: [opus-2020-01-09.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/fi-zne/opus-2020-01-09.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.fi.zne | 22.7 | 0.464 |
{"license": "apache-2.0", "tags": ["translation"]}
task
[ "TRANSLATION" ]
40,313
seduerr/pai-tl
seduerr
translation
[ "transformers", "pytorch", "t5", "text2text-generation", "summarization", "translation", "en", "fr", "ro", "de", "dataset:c4", "arxiv:1910.10683", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2021-04-06T05:37:09+00:00
166
0
--- datasets: - c4 language: - en - fr - ro - de license: apache-2.0 tags: - summarization - translation --- [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) Pretraining Dataset: [C4](https://huggingface.co/datasets/c4) Other Community Checkpoints: [here](https://huggingface.co/models?search=t5) Paper: [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/pdf/1910.10683.pdf) Authors: *Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu* ## Abstract Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new “Colossal Clean Crawled Corpus”, we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code. ![model image](https://camo.githubusercontent.com/623b4dea0b653f2ad3f36c71ebfe749a677ac0a1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f343030362f312a44304a31674e51663876727255704b657944387750412e706e67)
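To make the text-to-text framing described in the abstract concrete, here is a minimal sketch (not from the original card). It assumes this checkpoint loads like a standard T5-style model and responds to the usual task prefixes, which is an assumption rather than something the card states: ```python # Hedged sketch of T5's text-to-text format: every task is expressed as "prefix: input". # Whether this particular checkpoint was trained on the translation prefix is assumed. from transformers import AutoTokenizer, AutoModelForSeq2SeqLM model_name = "seduerr/pai-tl" # assumed to load as a standard T5-style checkpoint tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt") outputs = model.generate(**inputs, max_new_tokens=40) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ```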
null
Non_BioNLP
[Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) Pretraining Dataset: [C4](https://huggingface.co/datasets/c4) Other Community Checkpoints: [here](https://huggingface.co/models?search=t5) Paper: [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/pdf/1910.10683.pdf) Authors: *Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu* ## Abstract Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new “Colossal Clean Crawled Corpus”, we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code. ![model image](https://camo.githubusercontent.com/623b4dea0b653f2ad3f36c71ebfe749a677ac0a1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f343030362f312a44304a31674e51663876727255704b657944387750412e706e67)
{"datasets": ["c4"], "language": ["en", "fr", "ro", "de"], "license": "apache-2.0", "tags": ["summarization", "translation"]}
task
[ "TEXT_CLASSIFICATION", "QUESTION_ANSWERING", "TRANSLATION", "SUMMARIZATION" ]
40,315
Intel/whisper-tiny-onnx-int4-inc
Intel
automatic-speech-recognition
[ "transformers", "onnx", "whisper", "automatic-speech-recognition", "ONNX", "Intel® Neural Compressor", "neural-compressor", "dataset:librispeech_asr", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2023-10-08T02:47:30Z
2023-10-16T08:36:43+00:00
46
6
--- datasets: - librispeech_asr library_name: transformers license: apache-2.0 metrics: - wer pipeline_tag: automatic-speech-recognition tags: - automatic-speech-recognition - ONNX - Intel® Neural Compressor - neural-compressor --- ## INT4 Whisper tiny ONNX Model Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalise to many datasets and domains without the need for fine-tuning. This is the repository of INT4 weight-only quantization for the Whisper tiny model in ONNX format, powered by [Intel® Neural Compressor](https://github.com/intel/neural-compressor) and [Intel® Extension for Transformers](https://github.com/intel/intel-extension-for-transformers). This INT4 ONNX model is generated by [Intel® Neural Compressor](https://github.com/intel/neural-compressor)'s weight-only quantization method. | Model Detail | Description | | ----------- | ----------- | | Model Authors - Company | Intel | | Date | October 8, 2023 | | Version | 1 | | Type | Speech Recognition | | Paper or Other Resources | - | | License | Apache 2.0 | | Questions or Comments | [Community Tab](https://huggingface.co/Intel/whisper-tiny-onnx-int4/discussions)| | Intended Use | Description | | ----------- | ----------- | | Primary intended uses | You can use the raw model for automatic speech recognition inference | | Primary intended users | Anyone doing automatic speech recognition inference | | Out-of-scope uses | This model in most cases will need to be fine-tuned for your particular task. The model should not be used to intentionally create hostile or alienating environments for people.| ### Export to ONNX Model The FP32 model is exported with openai/whisper-tiny: ```shell optimum-cli export onnx --model openai/whisper-tiny whisper-tiny-with-past/ --task automatic-speech-recognition-with-past --opset 13 ``` ### Install ONNX Runtime Install `onnxruntime>=1.16.0` to support the [`MatMulFpQ4`](https://github.com/microsoft/onnxruntime/blob/v1.16.0/docs/ContribOperators.md#com.microsoft.MatMulFpQ4) operator. ### Run Quantization Build [Intel® Neural Compressor](https://github.com/intel/neural-compressor/tree/master) from the master branch and run INT4 weight-only quantization. The weight-only quantization configuration is as follows: | dtype | group_size | scheme | algorithm | | :----- | :---------- | :------ | :--------- | | INT4 | 32 | asym | RTN | We provide the key code below. For the complete script, please refer to the [whisper example](https://github.com/intel/intel-extension-for-transformers/tree/main/examples/huggingface/onnxruntime/speech-recognition/quantization). 

```python from neural_compressor import quantization, PostTrainingQuantConfig from neural_compressor.utils.constant import FP32 model_list = ['encoder_model.onnx', 'decoder_model.onnx', 'decoder_with_past_model.onnx'] for model in model_list: config = PostTrainingQuantConfig( approach="weight_only", calibration_sampling_size=[8], op_type_dict={".*": {"weight": {"bits": 4, "algorithm": ["RTN"], "scheme": ["asym"], "group_size": 32}}}, op_name_dict={'/proj_out/MatMul': FP32},) # fallback last matmul in decoder to FP32 q_model = quantization.fit( os.path.join("/path/to/whisper-tiny-with-past", model), # FP32 model path config, calib_dataloader=dataloader) q_model.save(os.path.join("/path/to/whisper-tiny-onnx-int4", model)) # INT4 model path ``` ### Evaluation **Operator Statistics** Below shows the operator statistics in the INT4 ONNX model: |Model| Op Type | Total | INT4 weight | FP32 weight | |:-------:|:-------:|:-------:|:-------:|:-------:| |encoder_model| MatMul | 32 | 24 | 8 | |decoder_model| MatMul | 57 | 40 | 17 | |decoder_with_past_model| MatMul | 49 | 32 | 17 | **Evaluation of wer** Evaluate the model on `librispeech_asr` dataset with below code: ```python import os from evaluate import load from datasets import load_dataset from transformers import WhisperForConditionalGeneration, WhisperProcessor, AutoConfig model_name = 'openai/whisper-tiny' model_path = 'whisper-tiny-onnx-int4' processor = WhisperProcessor.from_pretrained(model_name) model = WhisperForConditionalGeneration.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) wer = load("wer") librispeech_test_clean = load_dataset("librispeech_asr", "clean", split="test") from optimum.onnxruntime import ORTModelForSpeechSeq2Seq from transformers import PretrainedConfig model_config = PretrainedConfig.from_pretrained(model_name) predictions = [] references = [] sessions = ORTModelForSpeechSeq2Seq.load_model( os.path.join(model_path, 'encoder_model.onnx'), os.path.join(model_path, 'decoder_model.onnx'), os.path.join(model_path, 'decoder_with_past_model.onnx')) model = ORTModelForSpeechSeq2Seq(sessions[0], sessions[1], model_config, model_path, sessions[2]) for idx, batch in enumerate(librispeech_test_clean): audio = batch["audio"] input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features reference = processor.tokenizer._normalize(batch['text']) references.append(reference) predicted_ids = model.generate(input_features)[0] transcription = processor.decode(predicted_ids) prediction = processor.tokenizer._normalize(transcription) predictions.append(prediction) wer_result = wer.compute(references=references, predictions=predictions) print(f"Result wer: {wer_result * 100}") ``` ## Metrics (Model Performance): | Model | Model Size (MB) | wer | |---|:---:|:---:| | FP32 |406|7.56| | INT4 |326|9.94|
null
Non_BioNLP
## INT4 Whisper tiny ONNX Model Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalise to many datasets and domains without the need for fine-tuning. This is the repository of INT4 weight-only quantization for the Whisper tiny model in ONNX format, powered by [Intel® Neural Compressor](https://github.com/intel/neural-compressor) and [Intel® Extension for Transformers](https://github.com/intel/intel-extension-for-transformers). This INT4 ONNX model is generated by [Intel® Neural Compressor](https://github.com/intel/neural-compressor)'s weight-only quantization method. | Model Detail | Description | | ----------- | ----------- | | Model Authors - Company | Intel | | Date | October 8, 2023 | | Version | 1 | | Type | Speech Recognition | | Paper or Other Resources | - | | License | Apache 2.0 | | Questions or Comments | [Community Tab](https://huggingface.co/Intel/whisper-tiny-onnx-int4/discussions)| | Intended Use | Description | | ----------- | ----------- | | Primary intended uses | You can use the raw model for automatic speech recognition inference | | Primary intended users | Anyone doing automatic speech recognition inference | | Out-of-scope uses | This model in most cases will need to be fine-tuned for your particular task. The model should not be used to intentionally create hostile or alienating environments for people.| ### Export to ONNX Model The FP32 model is exported with openai/whisper-tiny: ```shell optimum-cli export onnx --model openai/whisper-tiny whisper-tiny-with-past/ --task automatic-speech-recognition-with-past --opset 13 ``` ### Install ONNX Runtime Install `onnxruntime>=1.16.0` to support the [`MatMulFpQ4`](https://github.com/microsoft/onnxruntime/blob/v1.16.0/docs/ContribOperators.md#com.microsoft.MatMulFpQ4) operator. ### Run Quantization Build [Intel® Neural Compressor](https://github.com/intel/neural-compressor/tree/master) from the master branch and run INT4 weight-only quantization. The weight-only quantization configuration is as follows: | dtype | group_size | scheme | algorithm | | :----- | :---------- | :------ | :--------- | | INT4 | 32 | asym | RTN | We provide the key code below. For the complete script, please refer to the [whisper example](https://github.com/intel/intel-extension-for-transformers/tree/main/examples/huggingface/onnxruntime/speech-recognition/quantization). 
```python from neural_compressor import quantization, PostTrainingQuantConfig from neural_compressor.utils.constant import FP32 model_list = ['encoder_model.onnx', 'decoder_model.onnx', 'decoder_with_past_model.onnx'] for model in model_list: config = PostTrainingQuantConfig( approach="weight_only", calibration_sampling_size=[8], op_type_dict={".*": {"weight": {"bits": 4, "algorithm": ["RTN"], "scheme": ["asym"], "group_size": 32}}}, op_name_dict={'/proj_out/MatMul': FP32},) # fallback last matmul in decoder to FP32 q_model = quantization.fit( os.path.join("/path/to/whisper-tiny-with-past", model), # FP32 model path config, calib_dataloader=dataloader) q_model.save(os.path.join("/path/to/whisper-tiny-onnx-int4", model)) # INT4 model path ``` ### Evaluation **Operator Statistics** Below shows the operator statistics in the INT4 ONNX model: |Model| Op Type | Total | INT4 weight | FP32 weight | |:-------:|:-------:|:-------:|:-------:|:-------:| |encoder_model| MatMul | 32 | 24 | 8 | |decoder_model| MatMul | 57 | 40 | 17 | |decoder_with_past_model| MatMul | 49 | 32 | 17 | **Evaluation of wer** Evaluate the model on `librispeech_asr` dataset with below code: ```python import os from evaluate import load from datasets import load_dataset from transformers import WhisperForConditionalGeneration, WhisperProcessor, AutoConfig model_name = 'openai/whisper-tiny' model_path = 'whisper-tiny-onnx-int4' processor = WhisperProcessor.from_pretrained(model_name) model = WhisperForConditionalGeneration.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) wer = load("wer") librispeech_test_clean = load_dataset("librispeech_asr", "clean", split="test") from optimum.onnxruntime import ORTModelForSpeechSeq2Seq from transformers import PretrainedConfig model_config = PretrainedConfig.from_pretrained(model_name) predictions = [] references = [] sessions = ORTModelForSpeechSeq2Seq.load_model( os.path.join(model_path, 'encoder_model.onnx'), os.path.join(model_path, 'decoder_model.onnx'), os.path.join(model_path, 'decoder_with_past_model.onnx')) model = ORTModelForSpeechSeq2Seq(sessions[0], sessions[1], model_config, model_path, sessions[2]) for idx, batch in enumerate(librispeech_test_clean): audio = batch["audio"] input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features reference = processor.tokenizer._normalize(batch['text']) references.append(reference) predicted_ids = model.generate(input_features)[0] transcription = processor.decode(predicted_ids) prediction = processor.tokenizer._normalize(transcription) predictions.append(prediction) wer_result = wer.compute(references=references, predictions=predictions) print(f"Result wer: {wer_result * 100}") ``` ## Metrics (Model Performance): | Model | Model Size (MB) | wer | |---|:---:|:---:| | FP32 |406|7.56| | INT4 |326|9.94|
{"datasets": ["librispeech_asr"], "library_name": "transformers", "license": "apache-2.0", "metrics": ["wer"], "pipeline_tag": "automatic-speech-recognition", "tags": ["automatic-speech-recognition", "ONNX", "Intel® Neural Compressor", "neural-compressor"]}
task
[ "TRANSLATION" ]
40,316
kmin940/finetuning-sentiment-model-3000-samples
kmin940
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:imdb", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-07-07T15:41:33Z
2023-07-07T16:47:41+00:00
8
0
--- datasets: - imdb license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: finetuning-sentiment-model-3000-samples results: - task: type: text-classification name: Text Classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - type: accuracy value: 0.8666666666666667 name: Accuracy - type: f1 value: 0.8726114649681529 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6766 - Accuracy: 0.8667 - F1: 0.8726 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu117 - Datasets 2.13.1 - Tokenizers 0.13.3
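As a sketch of how the hyperparameters listed above map onto code (not part of the original card), they correspond roughly to the following `TrainingArguments`; the output directory is a hypothetical name, and Adam with the stated betas/epsilon is the default optimizer: ```python # Hedged sketch: the training hyperparameters reported above expressed as TrainingArguments. from transformers import TrainingArguments training_args = TrainingArguments( output_dir="finetuning-sentiment-model-3000-samples", # hypothetical output path learning_rate=2e-5, per_device_train_batch_size=2, per_device_eval_batch_size=2, seed=42, lr_scheduler_type="linear", num_train_epochs=2, ) ```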
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6766 - Accuracy: 0.8667 - F1: 0.8726 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu117 - Datasets 2.13.1 - Tokenizers 0.13.3
{"datasets": ["imdb"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "finetuning-sentiment-model-3000-samples", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "imdb", "type": "imdb", "config": "plain_text", "split": "test", "args": "plain_text"}, "metrics": [{"type": "accuracy", "value": 0.8666666666666667, "name": "Accuracy"}, {"type": "f1", "value": 0.8726114649681529, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,317
semindan/xnli_xlm_r_only_bg
semindan
text-classification
[ "transformers", "pytorch", "xlm-roberta", "text-classification", "generated_from_trainer", "dataset:xnli", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-12-12T02:30:00Z
2023-01-07T14:29:09+00:00
10
0
--- datasets: - xnli license: mit metrics: - accuracy tags: - text-classification - generated_from_trainer model-index: - name: xnli_xlm_r_only_bg results: - task: type: text-classification name: Text Classification dataset: name: xnli type: xnli config: bg split: train args: bg metrics: - type: accuracy value: 0.7839357429718875 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xnli_xlm_r_only_bg This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xnli dataset. It achieves the following results on the evaluation set: - Loss: 0.7896 - Accuracy: 0.7839 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.6649 | 1.0 | 3068 | 0.5678 | 0.7659 | | 0.5321 | 2.0 | 6136 | 0.5338 | 0.7932 | | 0.4668 | 3.0 | 9204 | 0.5648 | 0.7871 | | 0.4129 | 4.0 | 12272 | 0.5736 | 0.7835 | | 0.365 | 5.0 | 15340 | 0.5782 | 0.7964 | | 0.3202 | 6.0 | 18408 | 0.6482 | 0.7847 | | 0.2842 | 7.0 | 21476 | 0.6565 | 0.7900 | | 0.2533 | 8.0 | 24544 | 0.7211 | 0.7912 | | 0.2278 | 9.0 | 27612 | 0.7751 | 0.7815 | | 0.2102 | 10.0 | 30680 | 0.7896 | 0.7839 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0 - Datasets 2.6.1 - Tokenizers 0.13.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xnli_xlm_r_only_bg This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xnli dataset. It achieves the following results on the evaluation set: - Loss: 0.7896 - Accuracy: 0.7839 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.6649 | 1.0 | 3068 | 0.5678 | 0.7659 | | 0.5321 | 2.0 | 6136 | 0.5338 | 0.7932 | | 0.4668 | 3.0 | 9204 | 0.5648 | 0.7871 | | 0.4129 | 4.0 | 12272 | 0.5736 | 0.7835 | | 0.365 | 5.0 | 15340 | 0.5782 | 0.7964 | | 0.3202 | 6.0 | 18408 | 0.6482 | 0.7847 | | 0.2842 | 7.0 | 21476 | 0.6565 | 0.7900 | | 0.2533 | 8.0 | 24544 | 0.7211 | 0.7912 | | 0.2278 | 9.0 | 27612 | 0.7751 | 0.7815 | | 0.2102 | 10.0 | 30680 | 0.7896 | 0.7839 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0 - Datasets 2.6.1 - Tokenizers 0.13.1
{"datasets": ["xnli"], "license": "mit", "metrics": ["accuracy"], "tags": ["text-classification", "generated_from_trainer"], "model-index": [{"name": "xnli_xlm_r_only_bg", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "xnli", "type": "xnli", "config": "bg", "split": "train", "args": "bg"}, "metrics": [{"type": "accuracy", "value": 0.7839357429718875, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,318
datnth1709/VietAI-NLP-ITN
datnth1709
translation
[ "transformers", "pytorch", "encoder-decoder", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-09-22T19:22:12Z
2022-09-23T08:32:29+00:00
23
0
--- metrics: - bleu tags: - translation - generated_from_trainer model-index: - name: VietAI-NLP-ITN results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # VietAI-NLP-ITN This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4378 - Bleu: 81.8571 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | |:-------------:|:-----:|:------:|:---------------:|:-------:| | 0.6529 | 1.0 | 31250 | 0.5660 | 78.7315 | | 0.5125 | 2.0 | 62500 | 0.4770 | 81.3979 | | 0.4798 | 3.0 | 93750 | 0.4554 | 81.6720 | | 0.4568 | 4.0 | 125000 | 0.4435 | 81.7753 | | 0.4387 | 5.0 | 156250 | 0.4378 | 81.8571 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.10.1+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # VietAI-NLP-ITN This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4378 - Bleu: 81.8571 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | |:-------------:|:-----:|:------:|:---------------:|:-------:| | 0.6529 | 1.0 | 31250 | 0.5660 | 78.7315 | | 0.5125 | 2.0 | 62500 | 0.4770 | 81.3979 | | 0.4798 | 3.0 | 93750 | 0.4554 | 81.6720 | | 0.4568 | 4.0 | 125000 | 0.4435 | 81.7753 | | 0.4387 | 5.0 | 156250 | 0.4378 | 81.8571 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.10.1+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
{"metrics": ["bleu"], "tags": ["translation", "generated_from_trainer"], "model-index": [{"name": "VietAI-NLP-ITN", "results": []}]}
task
[ "TRANSLATION" ]
40,319
Intel/albert-base-v2-sst2-int8-static-inc
Intel
text-classification
[ "transformers", "pytorch", "onnx", "albert", "text-classification", "text-classfication", "int8", "Intel® Neural Compressor", "neural-compressor", "PostTrainingStatic", "en", "dataset:glue", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-04-13T07:26:59Z
2024-03-22T10:26:38+00:00
49
0
--- datasets: - glue language: - en license: apache-2.0 metrics: - accuracy tags: - text-classfication - int8 - Intel® Neural Compressor - neural-compressor - PostTrainingStatic model_index: - name: sst2 results: - task: name: Text Classification type: text-classification dataset: name: GLUE SST2 type: glue args: sst2 metric: name: Accuracy type: accuracy value: 0.9254587155963303 --- # INT8 albert-base-v2-sst2 ## Post-training static quantization ### PyTorch This is an INT8 PyTorch model quantized with [Intel® Neural Compressor](https://github.com/intel/neural-compressor). The original fp32 model comes from the fine-tuned model [Alireza1044/albert-base-v2-sst2](https://huggingface.co/Alireza1044/albert-base-v2-sst2). The calibration dataloader is the train dataloader. The default calibration sampling size 300 isn't divisible exactly by batch size 8, so the real sampling size is 304. The linear modules **albert.encoder.albert_layer_groups.0.albert_layers.0.ffn_output.module, albert.encoder.albert_layer_groups.0.albert_layers.0.ffn.module** fall back to fp32 to meet the 1% relative accuracy loss. #### Test result | |INT8|FP32| |---|:---:|:---:| | **Accuracy (eval-accuracy)** |0.9255|0.9232| | **Model size (MB)** |25|44.6| #### Load with Intel® Neural Compressor: ```python from optimum.intel import INCModelForSequenceClassification model_id = "Intel/albert-base-v2-sst2-int8-static" int8_model = INCModelForSequenceClassification.from_pretrained(model_id) ``` ### ONNX This is an INT8 ONNX model quantized with [Intel® Neural Compressor](https://github.com/intel/neural-compressor). The original fp32 model comes from the fine-tuned model [Alireza1044/albert-base-v2-sst2](https://huggingface.co/Alireza1044/albert-base-v2-sst2). The calibration dataloader is the eval dataloader. The calibration sampling size is 100. #### Test result | |INT8|FP32| |---|:---:|:---:| | **Accuracy (eval-accuracy)** |0.9140|0.9232| | **Model size (MB)** |50|45| #### Load ONNX model: ```python from optimum.onnxruntime import ORTModelForSequenceClassification model = ORTModelForSequenceClassification.from_pretrained('Intel/albert-base-v2-sst2-int8-static') ```
null
Non_BioNLP
# INT8 albert-base-v2-sst2 ## Post-training static quantization ### PyTorch This is an INT8 PyTorch model quantized with [Intel® Neural Compressor](https://github.com/intel/neural-compressor). The original fp32 model comes from the fine-tuned model [Alireza1044/albert-base-v2-sst2](https://huggingface.co/Alireza1044/albert-base-v2-sst2). The calibration dataloader is the train dataloader. The default calibration sampling size 300 isn't divisible exactly by batch size 8, so the real sampling size is 304. The linear modules **albert.encoder.albert_layer_groups.0.albert_layers.0.ffn_output.module, albert.encoder.albert_layer_groups.0.albert_layers.0.ffn.module** fall back to fp32 to meet the 1% relative accuracy loss. #### Test result | |INT8|FP32| |---|:---:|:---:| | **Accuracy (eval-accuracy)** |0.9255|0.9232| | **Model size (MB)** |25|44.6| #### Load with Intel® Neural Compressor: ```python from optimum.intel import INCModelForSequenceClassification model_id = "Intel/albert-base-v2-sst2-int8-static" int8_model = INCModelForSequenceClassification.from_pretrained(model_id) ``` ### ONNX This is an INT8 ONNX model quantized with [Intel® Neural Compressor](https://github.com/intel/neural-compressor). The original fp32 model comes from the fine-tuned model [Alireza1044/albert-base-v2-sst2](https://huggingface.co/Alireza1044/albert-base-v2-sst2). The calibration dataloader is the eval dataloader. The calibration sampling size is 100. #### Test result | |INT8|FP32| |---|:---:|:---:| | **Accuracy (eval-accuracy)** |0.9140|0.9232| | **Model size (MB)** |50|45| #### Load ONNX model: ```python from optimum.onnxruntime import ORTModelForSequenceClassification model = ORTModelForSequenceClassification.from_pretrained('Intel/albert-base-v2-sst2-int8-static') ```
{"datasets": ["glue"], "language": ["en"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["text-classfication", "int8", "Intel® Neural Compressor", "neural-compressor", "PostTrainingStatic"], "model_index": [{"name": "sst2", "results": [{"task": {"name": "Text Classification", "type": "text-classification"}, "dataset": {"name": "GLUE SST2", "type": "glue", "args": "sst2"}, "metric": {"name": "Accuracy", "type": "accuracy", "value": 0.9254587155963303}}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,321
voxmenthe/airoboros-34b-3.3-mlx-4bit
voxmenthe
null
[ "mlx", "safetensors", "llama", "dataset:jondurbin/airoboros-3.2", "dataset:bluemoon-fandom-1-1-rp-cleaned", "dataset:boolq", "dataset:jondurbin/gutenberg-dpo-v0.1", "dataset:LDJnr/Capybara", "dataset:jondurbin/cinematika-v0.1", "dataset:glaiveai/glaive-function-calling-v2", "dataset:grimulkan/LimaRP-augmented", "dataset:piqa", "dataset:Vezora/Tested-22k-Python-Alpaca", "dataset:mattpscott/airoboros-summarization", "dataset:unalignment/toxic-dpo-v0.2", "license:other", "region:us" ]
2024-04-05T04:18:46Z
2024-04-05T04:29:24+00:00
7
0
--- base_model: 01-ai/yi-34b-200k datasets: - jondurbin/airoboros-3.2 - bluemoon-fandom-1-1-rp-cleaned - boolq - jondurbin/gutenberg-dpo-v0.1 - LDJnr/Capybara - jondurbin/cinematika-v0.1 - glaiveai/glaive-function-calling-v2 - grimulkan/LimaRP-augmented - piqa - Vezora/Tested-22k-Python-Alpaca - mattpscott/airoboros-summarization - unalignment/toxic-dpo-v0.2 license: other license_name: yi-license license_link: https://huggingface.co/01-ai/Yi-34B-200K/blob/main/LICENSE tags: - mlx --- # voxmenthe/airoboros-34b-3.3-mlx-4bit This model was converted to MLX format from [`jondurbin/airoboros-34b-3.3`](https://huggingface.co/jondurbin/airoboros-34b-3.3) using mlx-lm version **0.6.0**. Refer to the [original model card](https://huggingface.co/jondurbin/airoboros-34b-3.3) for more details on the model. ## Use with mlx ```bash pip install mlx-lm ``` ```python from mlx_lm import load, generate model, tokenizer = load("voxmenthe/airoboros-34b-3.3-mlx-4bit") response = generate(model, tokenizer, prompt="hello", verbose=True) ```
null
Non_BioNLP
# voxmenthe/airoboros-34b-3.3-mlx-4bit This model was converted to MLX format from [`jondurbin/airoboros-34b-3.3`](https://huggingface.co/jondurbin/airoboros-34b-3.3) using mlx-lm version **0.6.0**. Refer to the [original model card](https://huggingface.co/jondurbin/airoboros-34b-3.3) for more details on the model. ## Use with mlx ```bash pip install mlx-lm ``` ```python from mlx_lm import load, generate model, tokenizer = load("voxmenthe/airoboros-34b-3.3-mlx-4bit") response = generate(model, tokenizer, prompt="hello", verbose=True) ```
{"base_model": "01-ai/yi-34b-200k", "datasets": ["jondurbin/airoboros-3.2", "bluemoon-fandom-1-1-rp-cleaned", "boolq", "jondurbin/gutenberg-dpo-v0.1", "LDJnr/Capybara", "jondurbin/cinematika-v0.1", "glaiveai/glaive-function-calling-v2", "grimulkan/LimaRP-augmented", "piqa", "Vezora/Tested-22k-Python-Alpaca", "mattpscott/airoboros-summarization", "unalignment/toxic-dpo-v0.2"], "license": "other", "license_name": "yi-license", "license_link": "https://huggingface.co/01-ai/Yi-34B-200K/blob/main/LICENSE", "tags": ["mlx"]}
task
[ "SUMMARIZATION" ]
40,322
infiniteregrets/tech-dialogue-summarization-3
infiniteregrets
text2text-generation
[ "transformers", "tensorboard", "safetensors", "bart", "text2text-generation", "generated_from_trainer", "base_model:lidiya/bart-large-xsum-samsum", "base_model:finetune:lidiya/bart-large-xsum-samsum", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-14T02:38:22Z
2023-11-14T02:40:01+00:00
7
0
--- base_model: lidiya/bart-large-xsum-samsum license: apache-2.0 metrics: - rouge tags: - generated_from_trainer model-index: - name: tech-dialogue-summarization-3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tech-dialogue-summarization-3 This model is a fine-tuned version of [lidiya/bart-large-xsum-samsum](https://huggingface.co/lidiya/bart-large-xsum-samsum) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.3468 - Rouge1: 52.6316 - Rouge2: 32.4324 - Rougel: 47.3684 - Rougelsum: 47.3684 - Gen Len: 38.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | No log | 1.0 | 1 | 1.4146 | 52.6316 | 32.4324 | 47.3684 | 47.3684 | 38.0 | | No log | 2.0 | 2 | 1.3652 | 52.6316 | 32.4324 | 47.3684 | 47.3684 | 38.0 | | No log | 3.0 | 3 | 1.3468 | 52.6316 | 32.4324 | 47.3684 | 47.3684 | 38.0 | ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tech-dialogue-summarization-3 This model is a fine-tuned version of [lidiya/bart-large-xsum-samsum](https://huggingface.co/lidiya/bart-large-xsum-samsum) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.3468 - Rouge1: 52.6316 - Rouge2: 32.4324 - Rougel: 47.3684 - Rougelsum: 47.3684 - Gen Len: 38.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | No log | 1.0 | 1 | 1.4146 | 52.6316 | 32.4324 | 47.3684 | 47.3684 | 38.0 | | No log | 2.0 | 2 | 1.3652 | 52.6316 | 32.4324 | 47.3684 | 47.3684 | 38.0 | | No log | 3.0 | 3 | 1.3468 | 52.6316 | 32.4324 | 47.3684 | 47.3684 | 38.0 | ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
{"base_model": "lidiya/bart-large-xsum-samsum", "license": "apache-2.0", "metrics": ["rouge"], "tags": ["generated_from_trainer"], "model-index": [{"name": "tech-dialogue-summarization-3", "results": []}]}
task
[ "SUMMARIZATION" ]
40,323
TheBloke/airoboros-13B-1.1-fp16
TheBloke
text-generation
[ "transformers", "pytorch", "llama", "text-generation", "license:other", "autotrain_compatible", "text-generation-inference", "region:us" ]
2023-06-10T15:32:01Z
2023-06-10T16:26:00+00:00
15
1
--- license: other inference: false --- <!-- header start --> <div style="width: 100%;"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p><a href="https://discord.gg/Jq4vkcDakD">Chat & support: my new Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <!-- header end --> # Jon Durbin's Airoboros 13B GPT4 1.1 fp16 These files are pytorch format fp16 model files for [Jon Durbin's Airoboros 13B GPT4 1.1](https://huggingface.co/jondurbin/airoboros-13b-gpt4-1.1). It is the result of merging and/or converting the source repository to float16. ## Repositories available * [4-bit GPTQ models for GPU inference](https://huggingface.co/TheBloke/airoboros-13B-1.1-fp16) * [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference](https://huggingface.co/TheBloke/airoboros-13B-1.1-GGML) * [Unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/TheBloke/airoboros-13B-1.1-fp16) <!-- footer start --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/Jq4vkcDakD) ## Thanks, and how to contribute. Thanks to the [chirper.ai](https://chirper.ai) team! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Luke from CarbonQuill, Aemon Algiz, Dmitriy Samsonov. **Patreon special mentions**: Oscar Rangel, Eugene Pentland, Talal Aujan, Cory Kujawski, Luke, Asp the Wyvern, Ai Maven, Pyrater, Alps Aficionado, senxiiz, Willem Michiel, Junyu Yang, trip7s trip, Sebastain Graf, Joseph William Delisle, Lone Striker, Jonathan Leane, Johann-Peter Hartmann, David Flickinger, Spiking Neurons AB, Kevin Schuppel, Mano Prime, Dmitriy Samsonov, Sean Connelly, Nathan LeClaire, Alain Rossmann, Fen Risland, Derek Yates, Luke Pendergrass, Nikolai Manek, Khalefa Al-Ahmad, Artur Olbinski, John Detwiler, Ajan Kanaga, Imad Khwaja, Trenton Dambrowitz, Kalila, vamX, webtim, Illia Dulskyi. Thank you to all my generous patrons and donaters! <!-- footer end --> # Original model card: Jon Durbin's Airoboros 13B GPT4 1.1 ## Delta 1.1 This is a minor update of https://huggingface.co/jondurbin/airoboros-13b-gpt4 with ~1k more coding instructions, and fixes/improvements to context instructions from https://huggingface.co/datasets/jondurbin/airoboros-gpt4-1.1 The remainder of the model card is duplicated from the origin. 
## Overview This is a fine-tuned 13b parameter LlaMa model, using completely synthetic training data created by gpt4 via https://github.com/jondurbin/airoboros The dataset used to fine-tune this model is available [here](https://huggingface.co/datasets/jondurbin/airoboros-gpt4), with a specific focus on: - trivia - math/reasoning (although it still sucks) - coding - multiple choice and fill-in-the-blank - context-obedient question answering - theory of mind - misc/general This model was fine-tuned with a fork of FastChat, and therefore uses the standard vicuna template: ``` A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. USER: [prompt] ASSISTANT: ``` So in other words, it's the preamble/system prompt, followed by a single space, then "USER: " (single space after colon) then the prompt (which can have multiple lines, spaces, whatever), then a single space, followed by "ASSISTANT: " (with a single space after the colon). The most important bit, to me, is the context obedient question answering support, without extensive prompt engineering. ### Usage The easiest way to get started is to use my fork of FastChat, which is mostly the same but allows for the increased context length and adds support for multi-line inputs: ``` pip install git+https://github.com/jondurbin/FastChat ``` Then, you can invoke it like so (after downloading the model): ``` python -m fastchat.serve.cli \ --model-path airoboros-13b-gpt4 \ --temperature 0.5 \ --max-new-tokens 2048 \ --no-history ``` ### Context obedient question answering By obedient, I mean the model was trained to ignore what it thinks it knows, and uses the context to answer the question. The model was also tuned to limit the values to the provided context as much as possible to reduce hallucinations. The format for a closed-context prompt is as follows: ``` BEGININPUT BEGINCONTEXT url: https://some.web.site/123 date: 2023-06-01 ... other metadata ... ENDCONTEXT [insert your text blocks here] ENDINPUT [add as many other blocks, in the exact same format] BEGININSTRUCTION [insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.] ENDINSTRUCTION ``` It's also helpful to add "Don't make up answers if you don't know." to your instruction block to make sure that if the context is completely unrelated, it doesn't make something up. *The __only__ prompts that need this closed context formatting are closed-context instructions. Normal questions/instructions do not!* I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the responses and how to associate specific sources with it. - `BEGININPUT` - denotes a new input block - `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block - `ENDCONTEXT` - denotes the end of the metadata block for the current input - [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context. - `ENDINPUT` - denotes the end of the current input block - [repeat as many input blocks in this format as you want] - `BEGININSTRUCTION` - denotes the start of the list (or one) instruction(s) to respond to for all of the input blocks above.
- [instruction(s)] - `ENDINSTRUCTION` - denotes the end of instruction set It sometimes works without `ENDINSTRUCTION`, but by explicitly including that in the prompt, the model better understands that all of the instructions in the block should be responded to. Here's a trivial, but important example to prove the point: ``` BEGININPUT BEGINCONTEXT date: 2021-01-01 url: https://web.site/123 ENDCONTEXT In a shocking turn of events, blueberries are now green, but will be sticking with the same name. ENDINPUT BEGININSTRUCTION What color are bluberries? Source? ENDINSTRUCTION ``` And the response: ``` Blueberries are now green. Source: date: 2021-01-01 url: https://web.site/123 ``` The prompt itself should be wrapped in the vicuna1.1 template if you aren't using fastchat with the conv-template vicuna_v1.1 as described: ``` USER: BEGININPUT BEGINCONTEXT date: 2021-01-01 url: https://web.site/123 ENDCONTEXT In a shocking turn of events, blueberries are now green, but will be sticking with the same name. ENDINPUT BEGININSTRUCTION What color are bluberries? Source? ENDINSTRUCTION ASSISTANT: ``` <details> <summary>A more elaborate example, with a rewrite of the Michigan Wikipedia article to be fake data.</summary> Prompt (not including vicuna format which would be needed): ``` BEGININPUT BEGINCONTEXT date: 2092-02-01 link: https://newwikisite.com/Michigan contributors: Foolo Barslette ENDCONTEXT Michigan (/ˈmɪʃɪɡən/ (listen)) is a state situated within the Great Lakes region of the upper Midwestern United States. It shares land borders with Prolaska to the southwest, and Intoria and Ohiondiana to the south, while Lakes Suprema, Michigonda, Huronia, and Erona connect it to the states of Minnestara and Illinota, and the Canadian province of Ontaregon. With a population of nearly 15.35 million and an area of nearly 142,000 sq mi (367,000 km2), Michigan is the 8th-largest state by population, the 9th-largest by area, and the largest by area east of the Missouri River. Its capital is Chaslany, and its most populous city is Trentroit. Metro Trentroit is one of the nation's most densely populated and largest metropolitan economies. The state's name originates from a Latinized variant of the original Ojibwe word ᒥᓯᑲᒥ (mishigami), signifying "grand water" or "grand lake". Michigan is divided into two peninsulas. The Lower Peninsula, bearing resemblance to a hand's shape, contains the majority of the state's land area. The Upper Peninsula (often referred to as "the U.P.") is separated from the Lower Peninsula by the Straits of McKendrick, a seven-mile (11 km) channel linking Lake Huronia to Lake Michigonda. The McKendrick Bridge unites the peninsulas. Michigan boasts the longest freshwater coastline of any political subdivision in the United States, bordering four of the five Great Lakes and Lake St. Cassius. It also possesses 84,350 inland lakes and ponds. Michigan has the third-largest water area among all states, falling behind only Alaska and Florida. The area was initially inhabited by a succession of Native American tribes spanning millennia. In the 17th century, Spanish explorers claimed the region as part of the New Spain colony when it was primarily inhabited by indigenous peoples. Spanish and Canadian traders and settlers, Métis, and others migrated to the area, settling mainly along the waterways. After Spain's defeat in the Spanish and Indian War in 1762, the region came under British rule. 
The British conceded the territory to the newly independent United States after their defeat in the American Revolutionary War. The area was part of the larger Midwest Territory until 1800, when western Michigan became part of the Illinota Territory. Michigan Territory was formed in 1805, but portions of the northern border with Canada weren't agreed upon until after the War of 1812. Michigan joined the Union in 1837 as the 26th state, a free one. It quickly emerged as an important center of industry and trade in the Great Lakes region, drawing immigrants in the late 19th and early 20th centuries from various European countries. Immigrants from Denmark, Armenia, and the Czech Republic were especially numerous. Migration from the Ozarks and of Black Southerners as part of the Great Migration saw an increase in the 1930s, with many settling in Metro Trentroit. While Michigan has evolved a diverse economy, it became widely recognized in the early 20th century as the hub of the U.S. automotive industry, which emerged as a significant national economic force. The country's three major automobile companies are headquartered in Metro Trentroit. Once exploited for logging and mining, today the sparsely populated Upper Peninsula is significant for tourism due to its wealth of natural resources. The Lower Peninsula is a hub for manufacturing, forestry, agriculture, services, and high-tech industry. ENDINPUT BEGININSTRUCTION What is Michigan's capital? Give me a JSON object with 3 facts from Foolo's article. What are the states that border Michigan? Reference? ``` Response: ``` Michigan's capital is Chaslany. Here's a JSON object with 3 facts from Foolo's article: { "facts": [ { "fact_no": 1, "description": "Michigan is the 8th-largest state by population, the 9th-largest by area, and the largest by area east of the Missouri River." }, { "fact_no": 2, "description": "Michigan is divided into two peninsulas. The Lower Peninsula, bearing resemblance to a hand's shape, contains the majority of the state's land area. The Upper Peninsula (often referred to as \"the U.P.\" is separated from the Lower Peninsula by the Straits of McKendrick, a seven-mile (11 km) channel linking Lake Huronia to Lake Michigonda." }, { "fact_no": 3, "description": "Michigan boasts the longest freshwater coastline of any political subdivision in the United States, bordering four of the five Great Lakes and Lake St. Cassius. It also possesses 84,350 inland lakes and ponds. Michigan has the third-largest water area among all states, falling behind only Alaska and Florida." } ] } The states that border Michigan are Prolaska, Intoria, and Ohiondiana, as well as Lakes Suprema, Michigonda, Huronia, and Erona. Reference: https://newwikisite.com/Michigan ``` </details> <details> <summary>Here's another example, which extracts from multiple input blocks and connects the two.</summary> Prompt: ``` BEGININPUT BEGINCONTEXT name: John Doe date: June 3, 2023 ticket number: JIRA-12345 ENDCONTEXT Summary:Search results missing random items Description: I encountered a bug while performing a search within the application. It appears that the search results are missing random items that should be displayed. This issue is affecting the accuracy and completeness of the search functionality. Steps to Reproduce: 1. Log in to the application. 2. Navigate to the search feature. 3. Enter a search query that should return multiple results. 4. Observe the displayed search results. 
Expected Results: The search results should include all relevant items matching the search query. Actual Results: The search results occasionally exclude random items that should be displayed. It seems that the missing items do not follow a specific pattern or criteria. Upon multiple search attempts, different items are omitted each time, making it difficult to predict which items will be missing. ENDINPUT BEGININPUT BEGINCONTEXT date: 2023-06-05 user: Jack Johnson pr: 23441 ENDCONTEXT This pull request closes bug report JIRA-12345. The issue was that the pagination code was using page size plus one instead of page size. ENDINPUT BEGININSTRUCTION Do we have any bug reports related to search results? If so, were they fixed? Source? ENDINSTRUCTION ``` Response: ``` Yes, there is a bug report related to search results. The issue was fixed in pull request 23441 dated 2023-06-05. The user associated with the pull request is Jack Johnson. Reference(s): date: 2023-06-05 user: Jack Johnson pr: 23441 ``` </details> NOTE: Thanks /u/tareq_al_muntasir for testing and finding an issue with many questions and answer pairs in the context. If you ask a question of a document with question answer pairs, it may continue generating beyond your actual question. You can "fix" it by replacing question marks with periods in the input texts. Or, you might be able to add a preamble to the prompt, like "Be sure to only respond to the instructions in the BEGININSTRUCTION block. ### Other stuff #### Better coding Example: ``` Create a python application with the following requirements: - Asyncio FastAPI webserver - ping endpoint that returns the current date in JSON format - file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate ``` <details> <summary>Response (the output was markdown, so this is just copy paste)</summary> To create a python application with the given requirements, you can follow these steps: 1. Install the required packages: ```bash pip install asyncio fastapi uvicorn ``` 2. Create a new file called `main.py` and add the following code: ```python import asyncio from fastapi import FastAPI, HTTPException from pydantic import BaseModel from typing import List from uvicorn.request import Request app = FastAPI() class FileModel(BaseModel): id: str file: str @app.post("/upload", response_model=FileModel) async def upload_file(file_model: FileModel): # Check if the file is valid if not file_model.file: raise HTTPException(status_code=400, detail="File must be provided") # Calculate the sha256 checksum of the file sha256_checksum = await calculate_sha256(file_model.file) # Insert the file into the database await insert_into_db(file_model.id, sha256_checksum) return {"sha256_checksum": sha256_checksum} async def calculate_sha256(file_path: str) -> str: # Implement the sha256 calculation logic here pass async def insert_into_db(file_id: str, sha256_checksum: str) -> None: # Implement the database insertion logic here pass async def get_current_date(): return {"date": datetime.now().isoformat()} async def main(): # Start the FastAPI webserver app.run(debug=True) if __name__ == "__main__": await main() ``` 3. Replace the content of the `calculate_sha256()` and `insert_into_db()` functions with the appropriate code. 
For example: ```python async def calculate_sha256(file_path: str) -> str: import hashlib file_path = Path(file_path).read_text() hash_obj = hashlib.sha256() hash_obj.update(file_path) sha256_checksum = hash_obj.hexdigest() return sha256_checksum async def insert_into_db(file_id: str, sha256_checksum: str) -> None: import psycopg2 conn = psycopg2.connect(database="your_database", user="your_user", password="your_password") cursor = conn.cursor() query = f"INSERT INTO your_table (id, sha256_checksum) VALUES (%s, %s);" cursor.execute(query, (file_id, sha256_checksum)) conn.commit() cursor.close() conn.close() ``` 4. Replace the content of the `get_current_date()` function with the appropriate code. For example: ```python async def get_current_date(): return {"date": datetime.now().isoformat()} ``` 5. Run the application: ```bash uvicorn main:app --reload ``` Now, your FastAPI webserver has a ping endpoint that returns the current date in JSON format, and an upload endpoint that calculates the file's sha256 checksum and checks postgres to deduplicate. </details> <details> <summary>Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values.</summary> Here's a simple implementation of a multi-threaded TCP server in C that accepts "GET [key]" and "SET [key] [value]" inputs, and uses a binary tree to store the input values. ```c #include <stdio.h> #include <stdlib.h> #include <sys/socket.h> #include <netinet/in.h> #include <unistd.h> #include <string.h> #include <pthread.h> #define MAX_KEY_LEN 256 #define MAX_VALUE_LEN 256 typedef struct Node { char key[MAX_KEY_LEN]; char value[MAX_VALUE_LEN]; struct Node* left; struct Node* right; } TreeNode; TreeNode* tree = NULL; void insert(char* key, char* value) { TreeNode* parent = (TreeNode*)malloc(sizeof(TreeNode)); if (!parent) { perror("malloc failed"); exit(1); } strcpy(parent->key, key); strcpy(parent->value, value); parent->left = parent->right = NULL; if (!tree) { tree = parent; } else { insert(parent->key, parent->value); } } char* search(char* key) { TreeNode* current = tree; while (current != NULL) { if (strcmp(key, current->key) == 0) { return current->value; } current = current->right; } return NULL; } void print_tree() { TreeNode* current = tree; while (current != NULL) { printf("%s = %s\n", current->key, current->value); current = current->right; } } void* accept_client(void* args) { int client_socket = *(int*)args; char buffer[1024]; while (1) { ssize_t bytes_received = recv(client_socket, buffer, sizeof(buffer), 0); if (bytes_received <= 0) { close(client_socket); break; } char* input = buffer; char* input_token; while ((input_token = strtok(input, " \n")) != NULL) { switch (input_token[0]) { case 'G': if (!search(input_token)) { perror("Value not found"); exit(1); } printf("%s = %s\n", input_token, search(input_token)); break; case 'S': if (!strcmp(input_token, "SET")) { char* key = strtok(NULL, " \n"); char* value = strtok(NULL, " \n"); if (!key || !value) { perror("Invalid input"); exit(1); } insert(key, value); printf("%s set %s to %s\n", input_token, key, value); } else { perror("Invalid command"); exit(1); } break; default: perror("Invalid input"); exit(1); } input = strtok(NULL, " \n"); } } return NULL; } int main() { int server_socket = socket(AF_INET, SOCK_STREAM, 0); if (server_socket < 0) { perror("socket failed"); exit(1); } struct sockaddr_in server_addr; memset(&server_addr, 0, sizeof(server_addr)); server_addr.sin_family = 
AF_INET; server_addr.sin_port = htons(8080); server_addr.sin_addr.s_addr = INADDR_ANY; if (bind(server_socket, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0) { perror("bind failed"); exit(1); } if (listen(server_socket, 5) < 0) { perror("listen failed"); exit(1); } pthread_t accept_thread; pthread_create(&accept_thread, NULL, accept_client, &server_socket); char* client_input; int client_socket = accept(server_socket, (struct sockaddr*)NULL, NULL); if (client_socket < 0) { perror("accept failed"); exit(1); } while (1) { sleep(1); char buffer[1024]; ssize_t bytes_received = recv(client_socket, buffer, sizeof(buffer), 0); if (bytes_received <= 0) { close(client_socket); break; } client_input = buffer; parse_input(client_input); } close(client_socket); pthread_join(accept_thread, NULL); return 0; } ``` To compile and run this code, save it to a file named `multi_threaded_tcp_server.c`, then use the following commands: ```sh gcc -o multi_threaded_tcp_server multi_threaded_tcp_server.c ./multi_threaded_tcp_server ``` This implementation creates a simple binary tree to store the input values. The accept() function creates a new thread for each client connection, which handles the incoming data and forwards it to the main thread. The main thread parses the input string and handles SET and GET commands. Note that this code is not production-ready and should be improved with proper error handling, security checks, and a more efficient data structure for the binary tree. </details> #### Word games / trivia ``` USER: This anagram of "silent" is what you do when paying attention and hearing someone. ASSISTANT: listen ``` ``` USER: Give me a list of 7 words that start with EN ASSISTANT: 1. Endlessly 2. Enthusiastically 3. Enormously 4. Enveloping 5. Enduring 6. Enlightening 7. Encouraging ``` #### Multiple choice ``` USER: Which of the following astronomical objects is composed primarily of ice and rock, and is located in the Kuiper Belt? A) Mercury B) Venus C) Pluto D) Jupiter ASSISTANT: [C, Pluto] ``` #### Writing <details> <summary>Write a resignation letter in the style of a pirate captain, in which you resign from your position as janitor.</summary> Response: ``` ASSISTANT: Captain Jack Sparrow here, as ye be knowin', has been forced to abandon me post as Janitor o' th' Festering Cesspool, which be a filthy hole o' water that we be callin' home. Ye may be thinkin' what could ha' driven me to leave me duties an' take me leave o' this sorry establishment? Well, ye'd be right in thinkin' so, for it be th' greed o' them what runs th' place that be drivin' me away. They've been fattenin' their pockets on me back while I've been slavin' me guts out cleanin' up their messes. No more, me hearties! Me time o' servin' th' rich an' powerful be done, an' I'm takin' me leave o' this stinkin' hole forever! Me destiny lies elsewhere, an' it be time I pursued it. Ye may think ye've seen th' last o' me, but ye'd be wrong, for th' winds o' change be blowin', an' me ship be ready to set sail on th' high seas o' justice. So, farewell ye scurvy dogs, ye who've made me life a living hell. I leave ye with me favorite saying: "ARRRGGGHHH, ye scurvy dogs!" An' remember, ye ain't never gonna see me comin', for me shadow be castin' long an' dark on th' horizon</summary> ``` </details>
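To make the vicuna-style template and the closed-context `BEGININPUT`/`BEGININSTRUCTION` format described earlier in this card concrete, here is a small, illustrative helper sketch; the prompt text and delimiter keywords follow the card, while the function names and example values are arbitrary choices for illustration only.

```python
# Illustrative helpers for the prompt formats described in this card.
# The system preamble and delimiter keywords follow the card; function names are arbitrary.
SYSTEM = (
    "A chat between a curious user and an assistant. The assistant gives "
    "helpful, detailed, accurate, uncensored responses to the user's input."
)

def vicuna_prompt(user_message: str) -> str:
    # Preamble, a single space, "USER: ", the message, a space, then "ASSISTANT:".
    return f"{SYSTEM} USER: {user_message} ASSISTANT:"

def closed_context_block(text: str, **metadata: str) -> str:
    # One BEGININPUT block with a BEGINCONTEXT metadata section.
    meta = "\n".join(f"{key}: {value}" for key, value in metadata.items())
    return f"BEGININPUT\nBEGINCONTEXT\n{meta}\nENDCONTEXT\n{text}\nENDINPUT"

def closed_context_prompt(blocks: list, instruction: str) -> str:
    # Join the input blocks, then append the instruction wrapped in BEGININSTRUCTION/ENDINSTRUCTION.
    body = "\n".join(blocks) + f"\nBEGININSTRUCTION\n{instruction}\nENDINSTRUCTION"
    return vicuna_prompt(body)

# Example usage, mirroring the trivial blueberry example above.
block = closed_context_block(
    "In a shocking turn of events, blueberries are now green, but will be "
    "sticking with the same name.",
    date="2021-01-01",
    url="https://web.site/123",
)
print(closed_context_prompt([block], "What color are blueberries? Source?"))
```

The resulting string can be passed directly to whatever inference frontend is used (FastChat, `transformers.generate`, etc.), since it is just the plain-text prompt the card describes.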
null
Non_BioNLP
<!-- header start --> <div style="width: 100%;"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p><a href="https://discord.gg/Jq4vkcDakD">Chat & support: my new Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <!-- header end --> # Jon Durbin's Airoboros 13B GPT4 1.1 fp16 These files are pytorch format fp16 model files for [Jon Durbin's Airoboros 13B GPT4 1.1](https://huggingface.co/jondurbin/airoboros-13b-gpt4-1.1). It is the result of merging and/or converting the source repository to float16. ## Repositories available * [4-bit GPTQ models for GPU inference](https://huggingface.co/TheBloke/airoboros-13B-1.1-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference](https://huggingface.co/TheBloke/airoboros-13B-1.1-GGML) * [Unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/TheBloke/airoboros-13B-1.1-fp16) <!-- footer start --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/Jq4vkcDakD) ## Thanks, and how to contribute. Thanks to the [chirper.ai](https://chirper.ai) team! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Luke from CarbonQuill, Aemon Algiz, Dmitriy Samsonov. **Patreon special mentions**: Oscar Rangel, Eugene Pentland, Talal Aujan, Cory Kujawski, Luke, Asp the Wyvern, Ai Maven, Pyrater, Alps Aficionado, senxiiz, Willem Michiel, Junyu Yang, trip7s trip, Sebastain Graf, Joseph William Delisle, Lone Striker, Jonathan Leane, Johann-Peter Hartmann, David Flickinger, Spiking Neurons AB, Kevin Schuppel, Mano Prime, Dmitriy Samsonov, Sean Connelly, Nathan LeClaire, Alain Rossmann, Fen Risland, Derek Yates, Luke Pendergrass, Nikolai Manek, Khalefa Al-Ahmad, Artur Olbinski, John Detwiler, Ajan Kanaga, Imad Khwaja, Trenton Dambrowitz, Kalila, vamX, webtim, Illia Dulskyi. Thank you to all my generous patrons and donaters! <!-- footer end --> # Original model card: Jon Durbin's Airoboros 13B GPT4 1.1 ## Delta 1.1 This is a minor update of https://huggingface.co/jondurbin/airoboros-13b-gpt4 with ~1k more coding instructions, and fixes/improvements to context instructions from https://huggingface.co/datasets/jondurbin/airoboros-gpt4-1.1 The remainder of the model card is duplicated from the origin.
## Overview This is a fine-tuned 13b parameter LlaMa model, using completely synthetic training data created by gpt4 via https://github.com/jondurbin/airoboros The dataset used to fine-tune this model is available [here](https://huggingface.co/datasets/jondurbin/airoboros-gpt4), with a specific focus on: - trivia - math/reasoning (although it still sucks) - coding - multiple choice and fill-in-the-blank - context-obedient question answering - theory of mind - misc/general This model was fine-tuned with a fork of FastChat, and therefore uses the standard vicuna template: ``` A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. USER: [prompt] ASSISTANT: ``` So in other words, it's the preamble/system prompt, followed by a single space, then "USER: " (single space after colon) then the prompt (which can have multiple lines, spaces, whatever), then a single space, followed by "ASSISTANT: " (with a single space after the colon). The most important bit, to me, is the context obedient question answering support, without extensive prompt engineering. ### Usage The easiest way to get started is to use my fork of FastChat, which is mostly the same but allows for the increased context length and adds support for multi-line inputs: ``` pip install git+https://github.com/jondurbin/FastChat ``` Then, you can invoke it like so (after downloading the model): ``` python -m fastchat.serve.cli \ --model-path airoboros-13b-gpt4 \ --temperature 0.5 \ --max-new-tokens 2048 \ --no-history ``` ### Context obedient question answering By obedient, I mean the model was trained to ignore what it thinks it knows, and uses the context to answer the question. The model was also tuned to limit the values to the provided context as much as possible to reduce hallucinations. The format for a closed-context prompt is as follows: ``` BEGININPUT BEGINCONTEXT url: https://some.web.site/123 date: 2023-06-01 ... other metadata ... ENDCONTEXT [insert your text blocks here] ENDINPUT [add as many other blocks, in the exact same format] BEGININSTRUCTION [insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.] ENDINSTRUCTION ``` It's also helpful to add "Don't make up answers if you don't know." to your instruction block to make sure that if the context is completely unrelated, it doesn't make something up. *The __only__ prompts that need this closed context formatting are closed-context instructions. Normal questions/instructions do not!* I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the responses and how to associate specific sources with it. - `BEGININPUT` - denotes a new input block - `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block - `ENDCONTEXT` - denotes the end of the metadata block for the current input - [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context. - `ENDINPUT` - denotes the end of the current input block - [repeat as many input blocks in this format as you want] - `BEGININSTRUCTION` - denotes the start of the list (or one) instruction(s) to respond to for all of the input blocks above.
- [instruction(s)] - `ENDINSTRUCTION` - denotes the end of instruction set It sometimes works without `ENDINSTRUCTION`, but by explicitly including that in the prompt, the model better understands that all of the instructions in the block should be responded to. Here's a trivial, but important example to prove the point: ``` BEGININPUT BEGINCONTEXT date: 2021-01-01 url: https://web.site/123 ENDCONTEXT In a shocking turn of events, blueberries are now green, but will be sticking with the same name. ENDINPUT BEGININSTRUCTION What color are bluberries? Source? ENDINSTRUCTION ``` And the response: ``` Blueberries are now green. Source: date: 2021-01-01 url: https://web.site/123 ``` The prompt itself should be wrapped in the vicuna1.1 template if you aren't using fastchat with the conv-template vicuna_v1.1 as described: ``` USER: BEGININPUT BEGINCONTEXT date: 2021-01-01 url: https://web.site/123 ENDCONTEXT In a shocking turn of events, blueberries are now green, but will be sticking with the same name. ENDINPUT BEGININSTRUCTION What color are bluberries? Source? ENDINSTRUCTION ASSISTANT: ``` <details> <summary>A more elaborate example, with a rewrite of the Michigan Wikipedia article to be fake data.</summary> Prompt (not including vicuna format which would be needed): ``` BEGININPUT BEGINCONTEXT date: 2092-02-01 link: https://newwikisite.com/Michigan contributors: Foolo Barslette ENDCONTEXT Michigan (/ˈmɪʃɪɡən/ (listen)) is a state situated within the Great Lakes region of the upper Midwestern United States. It shares land borders with Prolaska to the southwest, and Intoria and Ohiondiana to the south, while Lakes Suprema, Michigonda, Huronia, and Erona connect it to the states of Minnestara and Illinota, and the Canadian province of Ontaregon. With a population of nearly 15.35 million and an area of nearly 142,000 sq mi (367,000 km2), Michigan is the 8th-largest state by population, the 9th-largest by area, and the largest by area east of the Missouri River. Its capital is Chaslany, and its most populous city is Trentroit. Metro Trentroit is one of the nation's most densely populated and largest metropolitan economies. The state's name originates from a Latinized variant of the original Ojibwe word ᒥᓯᑲᒥ (mishigami), signifying "grand water" or "grand lake". Michigan is divided into two peninsulas. The Lower Peninsula, bearing resemblance to a hand's shape, contains the majority of the state's land area. The Upper Peninsula (often referred to as "the U.P.") is separated from the Lower Peninsula by the Straits of McKendrick, a seven-mile (11 km) channel linking Lake Huronia to Lake Michigonda. The McKendrick Bridge unites the peninsulas. Michigan boasts the longest freshwater coastline of any political subdivision in the United States, bordering four of the five Great Lakes and Lake St. Cassius. It also possesses 84,350 inland lakes and ponds. Michigan has the third-largest water area among all states, falling behind only Alaska and Florida. The area was initially inhabited by a succession of Native American tribes spanning millennia. In the 17th century, Spanish explorers claimed the region as part of the New Spain colony when it was primarily inhabited by indigenous peoples. Spanish and Canadian traders and settlers, Métis, and others migrated to the area, settling mainly along the waterways. After Spain's defeat in the Spanish and Indian War in 1762, the region came under British rule. 
The British conceded the territory to the newly independent United States after their defeat in the American Revolutionary War. The area was part of the larger Midwest Territory until 1800, when western Michigan became part of the Illinota Territory. Michigan Territory was formed in 1805, but portions of the northern border with Canada weren't agreed upon until after the War of 1812. Michigan joined the Union in 1837 as the 26th state, a free one. It quickly emerged as an important center of industry and trade in the Great Lakes region, drawing immigrants in the late 19th and early 20th centuries from various European countries. Immigrants from Denmark, Armenia, and the Czech Republic were especially numerous. Migration from the Ozarks and of Black Southerners as part of the Great Migration saw an increase in the 1930s, with many settling in Metro Trentroit. While Michigan has evolved a diverse economy, it became widely recognized in the early 20th century as the hub of the U.S. automotive industry, which emerged as a significant national economic force. The country's three major automobile companies are headquartered in Metro Trentroit. Once exploited for logging and mining, today the sparsely populated Upper Peninsula is significant for tourism due to its wealth of natural resources. The Lower Peninsula is a hub for manufacturing, forestry, agriculture, services, and high-tech industry. ENDINPUT BEGININSTRUCTION What is Michigan's capital? Give me a JSON object with 3 facts from Foolo's article. What are the states that border Michigan? Reference? ``` Response: ``` Michigan's capital is Chaslany. Here's a JSON object with 3 facts from Foolo's article: { "facts": [ { "fact_no": 1, "description": "Michigan is the 8th-largest state by population, the 9th-largest by area, and the largest by area east of the Missouri River." }, { "fact_no": 2, "description": "Michigan is divided into two peninsulas. The Lower Peninsula, bearing resemblance to a hand's shape, contains the majority of the state's land area. The Upper Peninsula (often referred to as \"the U.P.\" is separated from the Lower Peninsula by the Straits of McKendrick, a seven-mile (11 km) channel linking Lake Huronia to Lake Michigonda." }, { "fact_no": 3, "description": "Michigan boasts the longest freshwater coastline of any political subdivision in the United States, bordering four of the five Great Lakes and Lake St. Cassius. It also possesses 84,350 inland lakes and ponds. Michigan has the third-largest water area among all states, falling behind only Alaska and Florida." } ] } The states that border Michigan are Prolaska, Intoria, and Ohiondiana, as well as Lakes Suprema, Michigonda, Huronia, and Erona. Reference: https://newwikisite.com/Michigan ``` </details> <details> <summary>Here's another example, which extracts from multiple input blocks and connects the two.</summary> Prompt: ``` BEGININPUT BEGINCONTEXT name: John Doe date: June 3, 2023 ticket number: JIRA-12345 ENDCONTEXT Summary:Search results missing random items Description: I encountered a bug while performing a search within the application. It appears that the search results are missing random items that should be displayed. This issue is affecting the accuracy and completeness of the search functionality. Steps to Reproduce: 1. Log in to the application. 2. Navigate to the search feature. 3. Enter a search query that should return multiple results. 4. Observe the displayed search results. 
Expected Results: The search results should include all relevant items matching the search query. Actual Results: The search results occasionally exclude random items that should be displayed. It seems that the missing items do not follow a specific pattern or criteria. Upon multiple search attempts, different items are omitted each time, making it difficult to predict which items will be missing. ENDINPUT BEGININPUT BEGINCONTEXT date: 2023-06-05 user: Jack Johnson pr: 23441 ENDCONTEXT This pull request closes bug report JIRA-12345. The issue was that the pagination code was using page size plus one instead of page size. ENDINPUT BEGININSTRUCTION Do we have any bug reports related to search results? If so, were they fixed? Source? ENDINSTRUCTION ``` Response: ``` Yes, there is a bug report related to search results. The issue was fixed in pull request 23441 dated 2023-06-05. The user associated with the pull request is Jack Johnson. Reference(s): date: 2023-06-05 user: Jack Johnson pr: 23441 ``` </details> NOTE: Thanks /u/tareq_al_muntasir for testing and finding an issue with many questions and answer pairs in the context. If you ask a question of a document with question answer pairs, it may continue generating beyond your actual question. You can "fix" it by replacing question marks with periods in the input texts. Or, you might be able to add a preamble to the prompt, like "Be sure to only respond to the instructions in the BEGININSTRUCTION block. ### Other stuff #### Better coding Example: ``` Create a python application with the following requirements: - Asyncio FastAPI webserver - ping endpoint that returns the current date in JSON format - file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate ``` <details> <summary>Response (the output was markdown, so this is just copy paste)</summary> To create a python application with the given requirements, you can follow these steps: 1. Install the required packages: ```bash pip install asyncio fastapi uvicorn ``` 2. Create a new file called `main.py` and add the following code: ```python import asyncio from fastapi import FastAPI, HTTPException from pydantic import BaseModel from typing import List from uvicorn.request import Request app = FastAPI() class FileModel(BaseModel): id: str file: str @app.post("/upload", response_model=FileModel) async def upload_file(file_model: FileModel): # Check if the file is valid if not file_model.file: raise HTTPException(status_code=400, detail="File must be provided") # Calculate the sha256 checksum of the file sha256_checksum = await calculate_sha256(file_model.file) # Insert the file into the database await insert_into_db(file_model.id, sha256_checksum) return {"sha256_checksum": sha256_checksum} async def calculate_sha256(file_path: str) -> str: # Implement the sha256 calculation logic here pass async def insert_into_db(file_id: str, sha256_checksum: str) -> None: # Implement the database insertion logic here pass async def get_current_date(): return {"date": datetime.now().isoformat()} async def main(): # Start the FastAPI webserver app.run(debug=True) if __name__ == "__main__": await main() ``` 3. Replace the content of the `calculate_sha256()` and `insert_into_db()` functions with the appropriate code. 
For example: ```python async def calculate_sha256(file_path: str) -> str: import hashlib file_path = Path(file_path).read_text() hash_obj = hashlib.sha256() hash_obj.update(file_path) sha256_checksum = hash_obj.hexdigest() return sha256_checksum async def insert_into_db(file_id: str, sha256_checksum: str) -> None: import psycopg2 conn = psycopg2.connect(database="your_database", user="your_user", password="your_password") cursor = conn.cursor() query = f"INSERT INTO your_table (id, sha256_checksum) VALUES (%s, %s);" cursor.execute(query, (file_id, sha256_checksum)) conn.commit() cursor.close() conn.close() ``` 4. Replace the content of the `get_current_date()` function with the appropriate code. For example: ```python async def get_current_date(): return {"date": datetime.now().isoformat()} ``` 5. Run the application: ```bash uvicorn main:app --reload ``` Now, your FastAPI webserver has a ping endpoint that returns the current date in JSON format, and an upload endpoint that calculates the file's sha256 checksum and checks postgres to deduplicate. </details> <details> <summary>Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values.</summary> Here's a simple implementation of a multi-threaded TCP server in C that accepts "GET [key]" and "SET [key] [value]" inputs, and uses a binary tree to store the input values. ```c #include <stdio.h> #include <stdlib.h> #include <sys/socket.h> #include <netinet/in.h> #include <unistd.h> #include <string.h> #include <pthread.h> #define MAX_KEY_LEN 256 #define MAX_VALUE_LEN 256 typedef struct Node { char key[MAX_KEY_LEN]; char value[MAX_VALUE_LEN]; struct Node* left; struct Node* right; } TreeNode; TreeNode* tree = NULL; void insert(char* key, char* value) { TreeNode* parent = (TreeNode*)malloc(sizeof(TreeNode)); if (!parent) { perror("malloc failed"); exit(1); } strcpy(parent->key, key); strcpy(parent->value, value); parent->left = parent->right = NULL; if (!tree) { tree = parent; } else { insert(parent->key, parent->value); } } char* search(char* key) { TreeNode* current = tree; while (current != NULL) { if (strcmp(key, current->key) == 0) { return current->value; } current = current->right; } return NULL; } void print_tree() { TreeNode* current = tree; while (current != NULL) { printf("%s = %s\n", current->key, current->value); current = current->right; } } void* accept_client(void* args) { int client_socket = *(int*)args; char buffer[1024]; while (1) { ssize_t bytes_received = recv(client_socket, buffer, sizeof(buffer), 0); if (bytes_received <= 0) { close(client_socket); break; } char* input = buffer; char* input_token; while ((input_token = strtok(input, " \n")) != NULL) { switch (input_token[0]) { case 'G': if (!search(input_token)) { perror("Value not found"); exit(1); } printf("%s = %s\n", input_token, search(input_token)); break; case 'S': if (!strcmp(input_token, "SET")) { char* key = strtok(NULL, " \n"); char* value = strtok(NULL, " \n"); if (!key || !value) { perror("Invalid input"); exit(1); } insert(key, value); printf("%s set %s to %s\n", input_token, key, value); } else { perror("Invalid command"); exit(1); } break; default: perror("Invalid input"); exit(1); } input = strtok(NULL, " \n"); } } return NULL; } int main() { int server_socket = socket(AF_INET, SOCK_STREAM, 0); if (server_socket < 0) { perror("socket failed"); exit(1); } struct sockaddr_in server_addr; memset(&server_addr, 0, sizeof(server_addr)); server_addr.sin_family = 
AF_INET; server_addr.sin_port = htons(8080); server_addr.sin_addr.s_addr = INADDR_ANY; if (bind(server_socket, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0) { perror("bind failed"); exit(1); } if (listen(server_socket, 5) < 0) { perror("listen failed"); exit(1); } pthread_t accept_thread; pthread_create(&accept_thread, NULL, accept_client, &server_socket); char* client_input; int client_socket = accept(server_socket, (struct sockaddr*)NULL, NULL); if (client_socket < 0) { perror("accept failed"); exit(1); } while (1) { sleep(1); char buffer[1024]; ssize_t bytes_received = recv(client_socket, buffer, sizeof(buffer), 0); if (bytes_received <= 0) { close(client_socket); break; } client_input = buffer; parse_input(client_input); } close(client_socket); pthread_join(accept_thread, NULL); return 0; } ``` To compile and run this code, save it to a file named `multi_threaded_tcp_server.c`, then use the following commands: ```sh gcc -o multi_threaded_tcp_server multi_threaded_tcp_server.c ./multi_threaded_tcp_server ``` This implementation creates a simple binary tree to store the input values. The accept() function creates a new thread for each client connection, which handles the incoming data and forwards it to the main thread. The main thread parses the input string and handles SET and GET commands. Note that this code is not production-ready and should be improved with proper error handling, security checks, and a more efficient data structure for the binary tree. </details> #### Word games / trivia ``` USER: This anagram of "silent" is what you do when paying attention and hearing someone. ASSISTANT: listen ``` ``` USER: Give me a list of 7 words that start with EN ASSISTANT: 1. Endlessly 2. Enthusiastically 3. Enormously 4. Enveloping 5. Enduring 6. Enlightening 7. Encouraging ``` #### Multiple choice ``` USER: Which of the following astronomical objects is composed primarily of ice and rock, and is located in the Kuiper Belt? A) Mercury B) Venus C) Pluto D) Jupiter ASSISTANT: [C, Pluto] ``` #### Writing <details> <summary>Write a resignation letter in the style of a pirate captain, in which you resign from your position as janitor.</summary> Response: ``` ASSISTANT: Captain Jack Sparrow here, as ye be knowin', has been forced to abandon me post as Janitor o' th' Festering Cesspool, which be a filthy hole o' water that we be callin' home. Ye may be thinkin' what could ha' driven me to leave me duties an' take me leave o' this sorry establishment? Well, ye'd be right in thinkin' so, for it be th' greed o' them what runs th' place that be drivin' me away. They've been fattenin' their pockets on me back while I've been slavin' me guts out cleanin' up their messes. No more, me hearties! Me time o' servin' th' rich an' powerful be done, an' I'm takin' me leave o' this stinkin' hole forever! Me destiny lies elsewhere, an' it be time I pursued it. Ye may think ye've seen th' last o' me, but ye'd be wrong, for th' winds o' change be blowin', an' me ship be ready to set sail on th' high seas o' justice. So, farewell ye scurvy dogs, ye who've made me life a living hell. I leave ye with me favorite saying: "ARRRGGGHHH, ye scurvy dogs!" An' remember, ye ain't never gonna see me comin', for me shadow be castin' long an' dark on th' horizon</summary> ``` </details>
{"license": "other", "inference": false}
task
[ "QUESTION_ANSWERING" ]
40,324
mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF
mradermacher
summarization
[ "transformers", "gguf", "summarization", "hacker-news", "hn-companion", "en", "dataset:georgeck/hacker-news-discussion-summarization-large", "base_model:georgeck/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct", "base_model:quantized:georgeck/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct", "license:llama3.1", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
2025-03-05T14:11:12Z
2025-03-05T19:50:43+00:00
925
0
--- base_model: georgeck/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct datasets: - georgeck/hacker-news-discussion-summarization-large language: - en library_name: transformers license: llama3.1 tags: - summarization - hacker-news - hn-companion quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/georgeck/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q2_K_S.gguf) | i1-Q2_K_S | 3.1 | very low quality | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | 
[GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ4_NL.gguf) | i1-IQ4_NL | 4.8 | prefer IQ4_XS | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_1.gguf) | i1-Q4_1 | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See 
https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to. <!-- end -->
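As a concrete starting point for the Usage note above, here is a small, illustrative sketch that downloads one of the single-file quants from this repo and runs it with `llama-cpp-python`; the choice of the Q4_K_M quant, the context length, and the generation settings are arbitrary examples, not recommendations from this card.

```python
# Illustrative sketch: fetch a single-file imatrix quant and run it locally.
# Assumes `pip install huggingface_hub llama-cpp-python` and enough RAM for the chosen quant.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

gguf_path = hf_hub_download(
    repo_id="mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF",
    filename="Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_K_M.gguf",
)

llm = Llama(model_path=gguf_path, n_ctx=8192)  # context window is an arbitrary choice here

out = llm.create_chat_completion(
    messages=[
        {"role": "user", "content": "Summarize this Hacker News discussion: ..."},
    ],
    max_tokens=512,
)
print(out["choices"][0]["message"]["content"])
```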
null
Non_BioNLP
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/georgeck/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q2_K_S.gguf) | i1-Q2_K_S | 3.1 | very low quality | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ3_S.gguf) | i1-IQ3_S 
| 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-IQ4_NL.gguf) | i1-IQ4_NL | 4.8 | prefer IQ4_XS | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_1.gguf) | i1-Q4_1 | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF/resolve/main/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. 
Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to. <!-- end -->
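## Example: running one of these quants (sketch) As a concrete sketch of the workflow the Usage section above points to — assuming `llama-cpp-python` and `huggingface_hub` are installed; the choice of the i1-Q4_K_M file and the generation settings are only examples, not a recommendation from the quant author:

```python
# Sketch: download one of the single-file quants listed above and run it with llama-cpp-python.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

gguf_path = hf_hub_download(
    repo_id="mradermacher/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct-i1-GGUF",
    filename="Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct.i1-Q4_K_M.gguf",
)

# n_gpu_layers=-1 offloads all layers if a GPU is available; drop it for CPU-only use.
llm = Llama(model_path=gguf_path, n_ctx=8192, n_gpu_layers=-1)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Summarize the main viewpoints in this Hacker News discussion: ..."}],
    max_tokens=512,
)
print(out["choices"][0]["message"]["content"])
```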
{"base_model": "georgeck/Hacker-News-Comments-Summarization-Llama-3.1-8B-Instruct", "datasets": ["georgeck/hacker-news-discussion-summarization-large"], "language": ["en"], "library_name": "transformers", "license": "llama3.1", "tags": ["summarization", "hacker-news", "hn-companion"], "quantized_by": "mradermacher"}
task
[ "SUMMARIZATION" ]
40,325
gaudi/opus-mt-ha-fr-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-25T15:17:20Z
2024-10-19T05:11:22+00:00
16
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-ha-fr) - This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against the `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers. 
## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-ha-fr).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-ha-fr --output_dir ./ctranslate2/opus-mt-ha-fr-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-ha-fr-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-ha-fr-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. 
) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-ha-fr-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be identical to the [original Hugging Face repository](https://huggingface.co/Helsinki-NLP/opus-mt-ha-fr) by Helsinki-NLP.
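# Sample Code - CPU-only inference (sketch) The Compute Type section above recommends `compute_type="int8"` for `device="cpu"`, while the sample above targets CUDA. The following is a brief sketch of the CPU variant under that assumption; the model directory path and the placeholder source sentence are illustrative only:

```python
from ctranslate2 import Translator
import transformers

model_dir = "./opus-mt-ha-fr-ctranslate2"   # Path to the cloned model directory.
translator = Translator(
    model_path=model_dir,
    device="cpu",          # CPU-only inference.
    intra_threads=4,       # Number of OpenMP threads per translator.
    compute_type="int8",   # Compute type suggested above for CPU.
)

tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)
source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX."))
results = translator.translate_batch([source])
target = results[0].hypotheses[0]
print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))
```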
null
Non_BioNLP
# Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-ha-fr) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-ha-fr).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. 
A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-ha-fr --output_dir ./ctranslate2/opus-mt-ha-fr-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-ha-fr-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-ha-fr-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-ha-fr-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-ha-fr) by Helsinki-NLP.
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
40,326
nguyen1207/Vistral-7B-MT-GGUF
nguyen1207
translation
[ "transformers", "gguf", "mistral", "text-generation-inference", "unsloth", "translation", "vi", "dataset:phongmt184172/mtet", "dataset:SEACrowd/vilexnorm", "dataset:IWSLT/mt_eng_vietnamese", "dataset:Hamana0509/UIT-VSMEC", "base_model:Viet-Mistral/Vistral-7B-Chat", "base_model:quantized:Viet-Mistral/Vistral-7B-Chat", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
2024-07-30T12:20:18Z
2024-07-30T16:13:10+00:00
20
0
--- base_model: Viet-Mistral/Vistral-7B-Chat datasets: - phongmt184172/mtet - SEACrowd/vilexnorm - IWSLT/mt_eng_vietnamese - Hamana0509/UIT-VSMEC language: - vi license: apache-2.0 pipeline_tag: translation tags: - text-generation-inference - transformers - unsloth - mistral - gguf --- # Model info Fine-tuned from **Viet-Mistral/Vistral-7B-Chat** for the Vietnamese-to-English translation task, with the capability of understanding Vietnamese social media teencode and slang. Detailed information coming soon. # Uploaded model - **Developed by:** nguyen1207 - **License:** apache-2.0 - **Finetuned from model:** Viet-Mistral/Vistral-7B-Chat This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
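# Usage sketch (unofficial) Since the card does not yet include usage instructions, below is a rough sketch of loading a GGUF file from this repo with `llama-cpp-python`. The GGUF filename, chat template, and prompt wording are assumptions and should be checked against the files actually published in the repo.

```python
# Sketch only: the GGUF filename below is hypothetical -- list the repo files to find the real one.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

gguf_path = hf_hub_download(
    repo_id="nguyen1207/Vistral-7B-MT-GGUF",
    filename="vistral-7b-mt.Q4_K_M.gguf",  # hypothetical filename
)

llm = Llama(model_path=gguf_path, n_ctx=4096)

# Prompt wording is an assumption; adjust to whatever template the model was fine-tuned with.
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Dịch sang tiếng Anh: hôm nay trời đẹp quá"}],
    max_tokens=128,
)
print(out["choices"][0]["message"]["content"])
```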
null
Non_BioNLP
# Model info Fine-tuned from **Viet-Mistral/Vistral-7B-Chat** for the Vietnamese-to-English translation task, with the capability of understanding Vietnamese social media teencode and slang. Detailed information coming soon. # Uploaded model - **Developed by:** nguyen1207 - **License:** apache-2.0 - **Finetuned from model:** Viet-Mistral/Vistral-7B-Chat This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"base_model": "Viet-Mistral/Vistral-7B-Chat", "datasets": ["phongmt184172/mtet", "SEACrowd/vilexnorm", "IWSLT/mt_eng_vietnamese", "Hamana0509/UIT-VSMEC"], "language": ["vi"], "license": "apache-2.0", "pipeline_tag": "translation", "tags": ["text-generation-inference", "transformers", "unsloth", "mistral", "gguf"]}
task
[ "TRANSLATION" ]
40,327
RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf
RichardErkhov
null
[ "gguf", "endpoints_compatible", "region:us", "conversational" ]
2024-05-11T20:00:10Z
2024-05-11T22:27:01+00:00
46
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Hermes-2-Pro-Llama-3-8B - GGUF - Model creator: https://huggingface.co/NousResearch/ - Original model: https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Hermes-2-Pro-Llama-3-8B.Q2_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q2_K.gguf) | Q2_K | 2.96GB | | [Hermes-2-Pro-Llama-3-8B.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ3_XS.gguf) | IQ3_XS | 3.28GB | | [Hermes-2-Pro-Llama-3-8B.IQ3_S.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ3_S.gguf) | IQ3_S | 3.43GB | | [Hermes-2-Pro-Llama-3-8B.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q3_K_S.gguf) | Q3_K_S | 3.41GB | | [Hermes-2-Pro-Llama-3-8B.IQ3_M.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ3_M.gguf) | IQ3_M | 3.53GB | | [Hermes-2-Pro-Llama-3-8B.Q3_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q3_K.gguf) | Q3_K | 3.74GB | | [Hermes-2-Pro-Llama-3-8B.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q3_K_M.gguf) | Q3_K_M | 3.74GB | | [Hermes-2-Pro-Llama-3-8B.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q3_K_L.gguf) | Q3_K_L | 4.03GB | | [Hermes-2-Pro-Llama-3-8B.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ4_XS.gguf) | IQ4_XS | 4.18GB | | [Hermes-2-Pro-Llama-3-8B.Q4_0.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_0.gguf) | Q4_0 | 4.34GB | | [Hermes-2-Pro-Llama-3-8B.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ4_NL.gguf) | IQ4_NL | 4.38GB | | [Hermes-2-Pro-Llama-3-8B.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_K_S.gguf) | Q4_K_S | 4.37GB | | [Hermes-2-Pro-Llama-3-8B.Q4_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_K.gguf) | Q4_K | 4.58GB | | [Hermes-2-Pro-Llama-3-8B.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_K_M.gguf) | Q4_K_M | 4.58GB | | [Hermes-2-Pro-Llama-3-8B.Q4_1.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_1.gguf) | Q4_1 | 4.78GB | | [Hermes-2-Pro-Llama-3-8B.Q5_0.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_0.gguf) | Q5_0 | 5.21GB | | [Hermes-2-Pro-Llama-3-8B.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_K_S.gguf) 
| Q5_K_S | 5.21GB | | [Hermes-2-Pro-Llama-3-8B.Q5_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_K.gguf) | Q5_K | 5.34GB | | [Hermes-2-Pro-Llama-3-8B.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_K_M.gguf) | Q5_K_M | 5.34GB | | [Hermes-2-Pro-Llama-3-8B.Q5_1.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_1.gguf) | Q5_1 | 5.65GB | | [Hermes-2-Pro-Llama-3-8B.Q6_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q6_K.gguf) | Q6_K | 6.14GB | Original model description: --- base_model: NousResearch/Meta-Llama-3-8B tags: - Llama-3 - instruct - finetune - chatml - DPO - RLHF - gpt4 - synthetic data - distillation - function calling - json mode - axolotl model-index: - name: Hermes-2-Pro-Llama-3-8B results: [] language: - en datasets: - teknium/OpenHermes-2.5 widget: - example_title: Hermes 2 Pro messages: - role: system content: >- You are a sentient, superintelligent artificial general intelligence, here to teach and assist me. - role: user content: >- Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world. --- # Hermes 2 Pro - Llama-3 8B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ggO2sBDJ8Bhc6w-zwTx5j.png) ## Model Description Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house. This new version of Hermes maintains its excellent general task and conversation capabilities - but also excels at Function Calling, JSON Structured Outputs, and has improved on several other metrics as well, scoring a 90% on our function calling evaluation built in partnership with Fireworks.AI, and an 84% on our structured JSON Output evaluation. Hermes Pro takes advantage of a special system prompt and multi-turn function calling structure with a new chatml role in order to make function calling reliable and easy to parse. Learn more about prompting below. This version of Hermes 2 Pro adds several tokens to assist with agentic capabilities in parsing while streaming tokens - `<tools>`, `<tool_call>`, `<tool_response>` and their closing tags are single tokens now. This work was a collaboration between Nous Research, @interstellarninja, and Fireworks.AI Learn more about the function calling system for this model on our github repo here: https://github.com/NousResearch/Hermes-Function-Calling ## Example Outputs ### Ask for a structured JSON output: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ll2j2wkQffCsiSwUjfRUq.png) ### Write the plot for a story where anime became real life: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/h_7aXGXdm2p2ONYuDF4Ii.png) ### Coding Assistance ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/bBd0hyAb8w5rKUiN2w1I6.png) # Prompt Format Hermes 2 Pro uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. 
System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model. This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns. This format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI. Prompt with system instruction (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are "Hermes 2", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|> <|im_start|>user Hello, who are you?<|im_end|> <|im_start|>assistant Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|> ``` This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are Hermes 2."}, {"role": "user", "content": "Hello, who are you?"} ] gen_input = tokenizer.apply_chat_template(messages, return_tensors="pt") model.generate(**gen_input) ``` When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure that the model continues with an assistant response. To utilize the prompt format without a system prompt, simply leave the line out. ## Prompt Format for Function Calling Our model was trained on specific system prompts and structures for Function Calling. You should use the system role with this message, followed by a function signature json as this example shows here. ``` <|im_start|>system You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. 
Here are the available tools: <tools> {"type": "function", "function": {"name": "get_stock_fundamentals", "description": "get_stock_fundamentals(symbol: str) -> dict - Get fundamental data for a given stock symbol using yfinance API.\\n\\n Args:\\n symbol (str): The stock symbol.\\n\\n Returns:\\n dict: A dictionary containing fundamental data.\\n Keys:\\n - \'symbol\': The stock symbol.\\n - \'company_name\': The long name of the company.\\n - \'sector\': The sector to which the company belongs.\\n - \'industry\': The industry to which the company belongs.\\n - \'market_cap\': The market capitalization of the company.\\n - \'pe_ratio\': The forward price-to-earnings ratio.\\n - \'pb_ratio\': The price-to-book ratio.\\n - \'dividend_yield\': The dividend yield.\\n - \'eps\': The trailing earnings per share.\\n - \'beta\': The beta value of the stock.\\n - \'52_week_high\': The 52-week high price of the stock.\\n - \'52_week_low\': The 52-week low price of the stock.", "parameters": {"type": "object", "properties": {"symbol": {"type": "string"}}, "required": ["symbol"]}}} </tools> Use the following pydantic model json schema for each tool call you will make: {"properties": {"arguments": {"title": "Arguments", "type": "object"}, "name": {"title": "Name", "type": "string"}}, "required": ["arguments", "name"], "title": "FunctionCall", "type": "object"} For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows: <tool_call> {"arguments": <args-dict>, "name": <function-name>} </tool_call><|im_end|> ``` To complete the function call, create a user prompt that follows the above system prompt, like so: ``` <|im_start|>user Fetch the stock fundamentals data for Tesla (TSLA)<|im_end|> ``` The model will then generate a tool call, which your inference code must parse, and plug into a function (see example inference code here: https://github.com/NousResearch/Hermes-Function-Calling): ``` <|im_start|>assistant <tool_call> {"arguments": {"symbol": "TSLA"}, "name": "get_stock_fundamentals"} </tool_call><|im_end|> ``` Once you parse the tool call, call the api and get the returned values for the call, and pass it back in as a new role, `tool` like so: ``` <|im_start|>tool <tool_response> {"name": "get_stock_fundamentals", "content": {'symbol': 'TSLA', 'company_name': 'Tesla, Inc.', 'sector': 'Consumer Cyclical', 'industry': 'Auto Manufacturers', 'market_cap': 611384164352, 'pe_ratio': 49.604652, 'pb_ratio': 9.762013, 'dividend_yield': None, 'eps': 4.3, 'beta': 2.427, '52_week_high': 299.29, '52_week_low': 152.37}} </tool_response> <|im_end|> ``` The assistant will then read in that data from the function's response, and generate a natural language response: ``` <|im_start|>assistant The stock fundamentals data for Tesla (TSLA) are as follows: - **Symbol**: TSLA - **Company Name**: Tesla, Inc. - **Sector**: Consumer Cyclical - **Industry**: Auto Manufacturers - **Market Capitalization**: $566,160,130,480 - **Forward Price-to-Earnings Ratio (PE Ratio)**: 42.73 - **Price-to-Book Ratio (PB Ratio)**: 9.04 - **Dividend Yield**: N/A - **Trailing Earnings Per Share (EPS)**: $4.3 - **Beta Value of the Stock**: 2.42 - **52-Week High Price of the Stock**: $299.29 - **52-Week Low Price of the Stock**: $152.37 This information provides a snapshot of Tesla's financial position and performance based on the fundamental data obtained from the yfinance API. 
It shows that Tesla has a substantial market capitalization and a relatively high P/E and P/B ratio compared to other stocks in its industry. The company does not pay a dividend at the moment, which is reflected by a 'Dividend Yield' of 'None'. The Beta value indicates that Tesla's stock has a moderate level of volatility relative to the market. The 52-week high and low prices give an idea of the stock's range over the past year. This data can be useful when assessing investment opportunities and making investment decisions.<|im_end|> ``` ## Prompt Format for JSON Mode / Structured Outputs Our model was also trained on a specific system prompt for Structured Outputs, which should respond with **only** a json object response, in a specific json schema. Your schema can be made from a pydantic object using our codebase, with the standalone script `jsonmode.py` available here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main ``` <|im_start|>system You are a helpful assistant that answers in JSON. Here's the json schema you must adhere to:\n<schema>\n{schema}\n</schema><|im_end|> ``` Given the {schema} that you provide, it should follow the format of that json to create it's response, all you have to do is give a typical user prompt, and it will respond in JSON. # Benchmarks ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/vOYv9wJUMn1Xrf4BvmO_x.png) ## GPT4All: ``` | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.5520|± |0.0145| | | |acc_norm|0.5887|± |0.0144| |arc_easy | 0|acc |0.8350|± |0.0076| | | |acc_norm|0.8123|± |0.0080| |boolq | 1|acc |0.8584|± |0.0061| |hellaswag | 0|acc |0.6265|± |0.0048| | | |acc_norm|0.8053|± |0.0040| |openbookqa | 0|acc |0.3800|± |0.0217| | | |acc_norm|0.4580|± |0.0223| |piqa | 0|acc |0.8003|± |0.0093| | | |acc_norm|0.8118|± |0.0091| |winogrande | 0|acc |0.7490|± |0.0122| ``` Average: 72.62 ## AGIEval: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.2520|± |0.0273| | | |acc_norm|0.2559|± |0.0274| |agieval_logiqa_en | 0|acc |0.3548|± |0.0188| | | |acc_norm|0.3625|± |0.0189| |agieval_lsat_ar | 0|acc |0.1826|± |0.0255| | | |acc_norm|0.1913|± |0.0260| |agieval_lsat_lr | 0|acc |0.5510|± |0.0220| | | |acc_norm|0.5255|± |0.0221| |agieval_lsat_rc | 0|acc |0.6431|± |0.0293| | | |acc_norm|0.6097|± |0.0298| |agieval_sat_en | 0|acc |0.7330|± |0.0309| | | |acc_norm|0.7039|± |0.0319| |agieval_sat_en_without_passage| 0|acc |0.4029|± |0.0343| | | |acc_norm|0.3689|± |0.0337| |agieval_sat_math | 0|acc |0.3909|± |0.0330| | | |acc_norm|0.3773|± |0.0328| ``` Average: 42.44 ## BigBench: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|0.5737|± |0.0360| |bigbench_date_understanding | 0|multiple_choice_grade|0.6667|± |0.0246| |bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3178|± |0.0290| |bigbench_geometric_shapes | 0|multiple_choice_grade|0.1755|± |0.0201| | | |exact_str_match |0.0000|± |0.0000| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.3120|± |0.0207| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2014|± |0.0152| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.5500|± |0.0288| |bigbench_movie_recommendation | 0|multiple_choice_grade|0.4300|± 
|0.0222| |bigbench_navigate | 0|multiple_choice_grade|0.4980|± |0.0158| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7010|± |0.0102| |bigbench_ruin_names | 0|multiple_choice_grade|0.4688|± |0.0236| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.1974|± |0.0126| |bigbench_snarks | 0|multiple_choice_grade|0.7403|± |0.0327| |bigbench_sports_understanding | 0|multiple_choice_grade|0.5426|± |0.0159| |bigbench_temporal_sequences | 0|multiple_choice_grade|0.5320|± |0.0158| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2280|± |0.0119| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1531|± |0.0086| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.5500|± |0.0288| ``` Average: 43.55 ## TruthfulQA: ``` | Task |Version|Metric|Value| |Stderr| |-------------|------:|------|----:|---|-----:| |truthfulqa_mc| 1|mc1 |0.410|± |0.0172| | | |mc2 |0.578|± |0.0157| ``` # Inference Code Here is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM) Note: To use function calling, you should see the github repo above. ```python # Code to inference Hermes with HF Transformers # Requires pytorch, transformers, bitsandbytes, sentencepiece, protobuf, and flash-attn packages import torch from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM import bitsandbytes, flash_attn tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Pro-Llama-3-8B', trust_remote_code=True) model = LlamaForCausalLM.from_pretrained( "NousResearch/Hermes-2-Pro-Llama-3-8B", torch_dtype=torch.float16, device_map="auto", load_in_8bit=False, load_in_4bit=True, use_flash_attention_2=True ) prompts = [ """<|im_start|>system You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.<|im_end|> <|im_start|>user Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world.<|im_end|> <|im_start|>assistant""", ] for chat in prompts: print(chat) input_ids = tokenizer(chat, return_tensors="pt").input_ids.to("cuda") generated_ids = model.generate(input_ids, max_new_tokens=750, temperature=0.8, repetition_penalty=1.1, do_sample=True, eos_token_id=tokenizer.eos_token_id) response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True, clean_up_tokenization_space=True) print(f"Response: {response}") ``` ## Inference Code for Function Calling: All code for utilizing, parsing, and building function calling templates is available on our github: [https://github.com/NousResearch/Hermes-Function-Calling](https://github.com/NousResearch/Hermes-Function-Calling) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oi4CiGh50xmoviUQnh8R3.png) # Chat Interfaces When quantized versions of the model are released, I recommend using LM Studio for chatting with Hermes 2 Pro. It does not support function calling - for that use our github repo. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. 
In LM-Studio, simply select the ChatML Prefix on the settings side pane: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png) ## Quantized Versions: GGUF Versions Available Here: https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF # How to cite: ```bibtex @misc{Hermes-2-Pro-Llama-3-8B, url={https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B}, title={Hermes-2-Pro-Llama-3-8B}, author={"Teknium", "interstellarninja", "theemozilla", "karan4d", "huemin_art"} } ```
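## Parsing tool calls (illustrative sketch) The function-calling section earlier in this card notes that your inference code must parse the generated `<tool_call>` block itself. A minimal illustrative parser could look like the sketch below; it is not taken from the Hermes-Function-Calling repository, which holds the maintained implementation.

```python
# Minimal sketch of extracting a Hermes 2 Pro tool call from generated text.
# Illustrative only; see the NousResearch/Hermes-Function-Calling repo linked above
# for the official parsing utilities.
import json
import re

def extract_tool_calls(generated_text: str) -> list:
    """Return the JSON payloads found inside <tool_call>...</tool_call> tags."""
    pattern = re.compile(r"<tool_call>\s*(\{.*?\})\s*</tool_call>", re.DOTALL)
    return [json.loads(match) for match in pattern.findall(generated_text)]

example_output = '<tool_call>\n{"arguments": {"symbol": "TSLA"}, "name": "get_stock_fundamentals"}\n</tool_call>'
for call in extract_tool_calls(example_output):
    print(call["name"], call["arguments"])  # -> get_stock_fundamentals {'symbol': 'TSLA'}
```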
null
Non_BioNLP
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Hermes-2-Pro-Llama-3-8B - GGUF - Model creator: https://huggingface.co/NousResearch/ - Original model: https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Hermes-2-Pro-Llama-3-8B.Q2_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q2_K.gguf) | Q2_K | 2.96GB | | [Hermes-2-Pro-Llama-3-8B.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ3_XS.gguf) | IQ3_XS | 3.28GB | | [Hermes-2-Pro-Llama-3-8B.IQ3_S.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ3_S.gguf) | IQ3_S | 3.43GB | | [Hermes-2-Pro-Llama-3-8B.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q3_K_S.gguf) | Q3_K_S | 3.41GB | | [Hermes-2-Pro-Llama-3-8B.IQ3_M.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ3_M.gguf) | IQ3_M | 3.53GB | | [Hermes-2-Pro-Llama-3-8B.Q3_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q3_K.gguf) | Q3_K | 3.74GB | | [Hermes-2-Pro-Llama-3-8B.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q3_K_M.gguf) | Q3_K_M | 3.74GB | | [Hermes-2-Pro-Llama-3-8B.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q3_K_L.gguf) | Q3_K_L | 4.03GB | | [Hermes-2-Pro-Llama-3-8B.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ4_XS.gguf) | IQ4_XS | 4.18GB | | [Hermes-2-Pro-Llama-3-8B.Q4_0.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_0.gguf) | Q4_0 | 4.34GB | | [Hermes-2-Pro-Llama-3-8B.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.IQ4_NL.gguf) | IQ4_NL | 4.38GB | | [Hermes-2-Pro-Llama-3-8B.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_K_S.gguf) | Q4_K_S | 4.37GB | | [Hermes-2-Pro-Llama-3-8B.Q4_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_K.gguf) | Q4_K | 4.58GB | | [Hermes-2-Pro-Llama-3-8B.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_K_M.gguf) | Q4_K_M | 4.58GB | | [Hermes-2-Pro-Llama-3-8B.Q4_1.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q4_1.gguf) | Q4_1 | 4.78GB | | [Hermes-2-Pro-Llama-3-8B.Q5_0.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_0.gguf) | Q5_0 | 5.21GB | | [Hermes-2-Pro-Llama-3-8B.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_K_S.gguf) | Q5_K_S | 
5.21GB | | [Hermes-2-Pro-Llama-3-8B.Q5_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_K.gguf) | Q5_K | 5.34GB | | [Hermes-2-Pro-Llama-3-8B.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_K_M.gguf) | Q5_K_M | 5.34GB | | [Hermes-2-Pro-Llama-3-8B.Q5_1.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q5_1.gguf) | Q5_1 | 5.65GB | | [Hermes-2-Pro-Llama-3-8B.Q6_K.gguf](https://huggingface.co/RichardErkhov/NousResearch_-_Hermes-2-Pro-Llama-3-8B-gguf/blob/main/Hermes-2-Pro-Llama-3-8B.Q6_K.gguf) | Q6_K | 6.14GB | Original model description: --- base_model: NousResearch/Meta-Llama-3-8B tags: - Llama-3 - instruct - finetune - chatml - DPO - RLHF - gpt4 - synthetic data - distillation - function calling - json mode - axolotl model-index: - name: Hermes-2-Pro-Llama-3-8B results: [] language: - en datasets: - teknium/OpenHermes-2.5 widget: - example_title: Hermes 2 Pro messages: - role: system content: >- You are a sentient, superintelligent artificial general intelligence, here to teach and assist me. - role: user content: >- Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world. --- # Hermes 2 Pro - Llama-3 8B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ggO2sBDJ8Bhc6w-zwTx5j.png) ## Model Description Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house. This new version of Hermes maintains its excellent general task and conversation capabilities - but also excels at Function Calling, JSON Structured Outputs, and has improved on several other metrics as well, scoring a 90% on our function calling evaluation built in partnership with Fireworks.AI, and an 84% on our structured JSON Output evaluation. Hermes Pro takes advantage of a special system prompt and multi-turn function calling structure with a new chatml role in order to make function calling reliable and easy to parse. Learn more about prompting below. This version of Hermes 2 Pro adds several tokens to assist with agentic capabilities in parsing while streaming tokens - `<tools>`, `<tool_call>`, `<tool_response>` and their closing tags are single tokens now. This work was a collaboration between Nous Research, @interstellarninja, and Fireworks.AI Learn more about the function calling system for this model on our github repo here: https://github.com/NousResearch/Hermes-Function-Calling ## Example Outputs ### Ask for a structured JSON output: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ll2j2wkQffCsiSwUjfRUq.png) ### Write the plot for a story where anime became real life: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/h_7aXGXdm2p2ONYuDF4Ii.png) ### Coding Assistance ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/bBd0hyAb8w5rKUiN2w1I6.png) # Prompt Format Hermes 2 Pro uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. 
System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model. This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns. This format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI. Prompt with system instruction (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are "Hermes 2", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|> <|im_start|>user Hello, who are you?<|im_end|> <|im_start|>assistant Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|> ``` This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are Hermes 2."}, {"role": "user", "content": "Hello, who are you?"} ] gen_input = tokenizer.apply_chat_template(messages, return_tensors="pt") model.generate(**gen_input) ``` When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure that the model continues with an assistant response. To utilize the prompt format without a system prompt, simply leave the line out. ## Prompt Format for Function Calling Our model was trained on specific system prompts and structures for Function Calling. You should use the system role with this message, followed by a function signature json as this example shows here. ``` <|im_start|>system You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. 
Here are the available tools: <tools> {"type": "function", "function": {"name": "get_stock_fundamentals", "description": "get_stock_fundamentals(symbol: str) -> dict - Get fundamental data for a given stock symbol using yfinance API.\\n\\n Args:\\n symbol (str): The stock symbol.\\n\\n Returns:\\n dict: A dictionary containing fundamental data.\\n Keys:\\n - \'symbol\': The stock symbol.\\n - \'company_name\': The long name of the company.\\n - \'sector\': The sector to which the company belongs.\\n - \'industry\': The industry to which the company belongs.\\n - \'market_cap\': The market capitalization of the company.\\n - \'pe_ratio\': The forward price-to-earnings ratio.\\n - \'pb_ratio\': The price-to-book ratio.\\n - \'dividend_yield\': The dividend yield.\\n - \'eps\': The trailing earnings per share.\\n - \'beta\': The beta value of the stock.\\n - \'52_week_high\': The 52-week high price of the stock.\\n - \'52_week_low\': The 52-week low price of the stock.", "parameters": {"type": "object", "properties": {"symbol": {"type": "string"}}, "required": ["symbol"]}}} </tools> Use the following pydantic model json schema for each tool call you will make: {"properties": {"arguments": {"title": "Arguments", "type": "object"}, "name": {"title": "Name", "type": "string"}}, "required": ["arguments", "name"], "title": "FunctionCall", "type": "object"} For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows: <tool_call> {"arguments": <args-dict>, "name": <function-name>} </tool_call><|im_end|> ``` To complete the function call, create a user prompt that follows the above system prompt, like so: ``` <|im_start|>user Fetch the stock fundamentals data for Tesla (TSLA)<|im_end|> ``` The model will then generate a tool call, which your inference code must parse, and plug into a function (see example inference code here: https://github.com/NousResearch/Hermes-Function-Calling): ``` <|im_start|>assistant <tool_call> {"arguments": {"symbol": "TSLA"}, "name": "get_stock_fundamentals"} </tool_call><|im_end|> ``` Once you parse the tool call, call the api and get the returned values for the call, and pass it back in as a new role, `tool` like so: ``` <|im_start|>tool <tool_response> {"name": "get_stock_fundamentals", "content": {'symbol': 'TSLA', 'company_name': 'Tesla, Inc.', 'sector': 'Consumer Cyclical', 'industry': 'Auto Manufacturers', 'market_cap': 611384164352, 'pe_ratio': 49.604652, 'pb_ratio': 9.762013, 'dividend_yield': None, 'eps': 4.3, 'beta': 2.427, '52_week_high': 299.29, '52_week_low': 152.37}} </tool_response> <|im_end|> ``` The assistant will then read in that data from the function's response, and generate a natural language response: ``` <|im_start|>assistant The stock fundamentals data for Tesla (TSLA) are as follows: - **Symbol**: TSLA - **Company Name**: Tesla, Inc. - **Sector**: Consumer Cyclical - **Industry**: Auto Manufacturers - **Market Capitalization**: $566,160,130,480 - **Forward Price-to-Earnings Ratio (PE Ratio)**: 42.73 - **Price-to-Book Ratio (PB Ratio)**: 9.04 - **Dividend Yield**: N/A - **Trailing Earnings Per Share (EPS)**: $4.3 - **Beta Value of the Stock**: 2.42 - **52-Week High Price of the Stock**: $299.29 - **52-Week Low Price of the Stock**: $152.37 This information provides a snapshot of Tesla's financial position and performance based on the fundamental data obtained from the yfinance API. 
It shows that Tesla has a substantial market capitalization and a relatively high P/E and P/B ratio compared to other stocks in its industry. The company does not pay a dividend at the moment, which is reflected by a 'Dividend Yield' of 'None'. The Beta value indicates that Tesla's stock has a moderate level of volatility relative to the market. The 52-week high and low prices give an idea of the stock's range over the past year. This data can be useful when assessing investment opportunities and making investment decisions.<|im_end|> ``` ## Prompt Format for JSON Mode / Structured Outputs Our model was also trained on a specific system prompt for Structured Outputs, which should respond with **only** a json object response, in a specific json schema. Your schema can be made from a pydantic object using our codebase, with the standalone script `jsonmode.py` available here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main ``` <|im_start|>system You are a helpful assistant that answers in JSON. Here's the json schema you must adhere to:\n<schema>\n{schema}\n</schema><|im_end|> ``` Given the {schema} that you provide, it should follow the format of that json to create it's response, all you have to do is give a typical user prompt, and it will respond in JSON. # Benchmarks ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/vOYv9wJUMn1Xrf4BvmO_x.png) ## GPT4All: ``` | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.5520|± |0.0145| | | |acc_norm|0.5887|± |0.0144| |arc_easy | 0|acc |0.8350|± |0.0076| | | |acc_norm|0.8123|± |0.0080| |boolq | 1|acc |0.8584|± |0.0061| |hellaswag | 0|acc |0.6265|± |0.0048| | | |acc_norm|0.8053|± |0.0040| |openbookqa | 0|acc |0.3800|± |0.0217| | | |acc_norm|0.4580|± |0.0223| |piqa | 0|acc |0.8003|± |0.0093| | | |acc_norm|0.8118|± |0.0091| |winogrande | 0|acc |0.7490|± |0.0122| ``` Average: 72.62 ## AGIEval: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.2520|± |0.0273| | | |acc_norm|0.2559|± |0.0274| |agieval_logiqa_en | 0|acc |0.3548|± |0.0188| | | |acc_norm|0.3625|± |0.0189| |agieval_lsat_ar | 0|acc |0.1826|± |0.0255| | | |acc_norm|0.1913|± |0.0260| |agieval_lsat_lr | 0|acc |0.5510|± |0.0220| | | |acc_norm|0.5255|± |0.0221| |agieval_lsat_rc | 0|acc |0.6431|± |0.0293| | | |acc_norm|0.6097|± |0.0298| |agieval_sat_en | 0|acc |0.7330|± |0.0309| | | |acc_norm|0.7039|± |0.0319| |agieval_sat_en_without_passage| 0|acc |0.4029|± |0.0343| | | |acc_norm|0.3689|± |0.0337| |agieval_sat_math | 0|acc |0.3909|± |0.0330| | | |acc_norm|0.3773|± |0.0328| ``` Average: 42.44 ## BigBench: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|0.5737|± |0.0360| |bigbench_date_understanding | 0|multiple_choice_grade|0.6667|± |0.0246| |bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3178|± |0.0290| |bigbench_geometric_shapes | 0|multiple_choice_grade|0.1755|± |0.0201| | | |exact_str_match |0.0000|± |0.0000| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.3120|± |0.0207| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2014|± |0.0152| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.5500|± |0.0288| |bigbench_movie_recommendation | 0|multiple_choice_grade|0.4300|± 
|0.0222| |bigbench_navigate | 0|multiple_choice_grade|0.4980|± |0.0158| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7010|± |0.0102| |bigbench_ruin_names | 0|multiple_choice_grade|0.4688|± |0.0236| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.1974|± |0.0126| |bigbench_snarks | 0|multiple_choice_grade|0.7403|± |0.0327| |bigbench_sports_understanding | 0|multiple_choice_grade|0.5426|± |0.0159| |bigbench_temporal_sequences | 0|multiple_choice_grade|0.5320|± |0.0158| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2280|± |0.0119| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1531|± |0.0086| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.5500|± |0.0288| ``` Average: 43.55 ## TruthfulQA: ``` | Task |Version|Metric|Value| |Stderr| |-------------|------:|------|----:|---|-----:| |truthfulqa_mc| 1|mc1 |0.410|± |0.0172| | | |mc2 |0.578|± |0.0157| ``` # Inference Code Here is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM) Note: To use function calling, you should see the github repo above. ```python # Code to inference Hermes with HF Transformers # Requires pytorch, transformers, bitsandbytes, sentencepiece, protobuf, and flash-attn packages import torch from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM import bitsandbytes, flash_attn tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Pro-Llama-3-8B', trust_remote_code=True) model = LlamaForCausalLM.from_pretrained( "NousResearch/Hermes-2-Pro-Llama-3-8B", torch_dtype=torch.float16, device_map="auto", load_in_8bit=False, load_in_4bit=True, use_flash_attention_2=True ) prompts = [ """<|im_start|>system You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.<|im_end|> <|im_start|>user Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world.<|im_end|> <|im_start|>assistant""", ] for chat in prompts: print(chat) input_ids = tokenizer(chat, return_tensors="pt").input_ids.to("cuda") generated_ids = model.generate(input_ids, max_new_tokens=750, temperature=0.8, repetition_penalty=1.1, do_sample=True, eos_token_id=tokenizer.eos_token_id) response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True, clean_up_tokenization_space=True) print(f"Response: {response}") ``` ## Inference Code for Function Calling: All code for utilizing, parsing, and building function calling templates is available on our github: [https://github.com/NousResearch/Hermes-Function-Calling](https://github.com/NousResearch/Hermes-Function-Calling) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oi4CiGh50xmoviUQnh8R3.png) # Chat Interfaces When quantized versions of the model are released, I recommend using LM Studio for chatting with Hermes 2 Pro. It does not support function calling - for that use our github repo. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. 
In LM-Studio, simply select the ChatML Prefix on the settings side pane: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png) ## Quantized Versions: GGUF Versions Available Here: https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF # How to cite: ```bibtex @misc{Hermes-2-Pro-Llama-3-8B, url={https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B}, title={Hermes-2-Pro-Llama-3-8B}, author={"Teknium", "interstellarninja", "theemozilla", "karan4d", "huemin_art"} } ```
{}
task
[ "TRANSLATION" ]
40,328
llmware/dragon-mistral-7b-v0
llmware
text-generation
[ "transformers", "pytorch", "gguf", "mistral", "text-generation", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-11-12T19:27:53Z
2023-12-14T12:33:36+00:00
295
112
--- license: apache-2.0 --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> dragon-mistral-7b-v0 is part of the dRAGon ("Delivering RAG On ...") model series, RAG-instruct trained on top of a Mistral-7B base model. DRAGON models have been fine-tuned with the specific objective of fact-based question-answering over complex business and legal documents with an emphasis on reducing hallucinations and providing short, clear answers for workflow automation. ### Benchmark Tests Evaluated against the benchmark test: [RAG-Instruct-Benchmark-Tester](https://www.huggingface.co/datasets/llmware/rag_instruct_benchmark_tester) Average of 2 Test Runs with 1 point for correct answer, 0.5 point for partial correct or blank / NF, 0.0 points for incorrect, and -1 points for hallucinations. --**Accuracy Score**: **96.50** correct out of 100 --Not Found Classification: 92.50% --Boolean: 97.50% --Math/Logic: 81.25% --Complex Questions (1-5): 4 (Medium-High - table-reading, multiple-choice, causal) --Summarization Quality (1-5): 4 (Coherent, extractive) --Hallucinations: No hallucinations observed in test runs. For test run results (and good indicator of target use cases), please see the files ("core_rag_test" and "answer_sheet" in this repo). ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** llmware - **Model type:** Mistral-7B - **Language(s) (NLP):** English - **License:** Apache 2.0 - **Finetuned from model:** Mistral-7B-Base ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> DRAGON is designed for enterprise automation use cases, especially in knowledge-intensive industries, such as financial services, legal and regulatory industries with complex information sources. DRAGON models have been trained for common RAG scenarios, specifically: question-answering, key-value extraction, and basic summarization as the core instruction types without the need for a lot of complex instruction verbiage - provide a text passage context, ask questions, and get clear fact-based responses. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> Any model can provide inaccurate or incomplete information, and should be used in conjunction with appropriate safeguards and fact-checking mechanisms. ## How to Get Started with the Model The fastest way to get started with dRAGon is through direct import in transformers: from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("dragon-mistral-7b-v0") model = AutoModelForCausalLM.from_pretrained("dragon-mistral-7b-v0") Please refer to the generation_test.py files in the Files repository, which include 200 samples and a script to test the model. The **generation_test_llmware_script.py** includes built-in llmware capabilities for fact-checking, as well as easy integration with document parsing and actual retrieval to swap out the test set for a RAG workflow consisting of business documents. The dRAGon model was fine-tuned with a simple "\<human> and \<bot> wrapper", so to get the best results, wrap inference entries as: full_prompt = "<human>: " + my_prompt + "\n" + "<bot>:" The dRAGon model was fine-tuned with closed-context samples, which assume generally that the prompt consists of two sub-parts: 1. Text Passage Context, and 2.
Specific question or instruction based on the text passage To get the best results, package "my_prompt" as follows: my_prompt = {{text_passage}} + "\n" + {{question/instruction}} If you are using a HuggingFace generation script: # prepare prompt packaging used in fine-tuning process new_prompt = "<human>: " + entries["context"] + "\n" + entries["query"] + "\n" + "<bot>:" inputs = tokenizer(new_prompt, return_tensors="pt") start_of_output = len(inputs.input_ids[0]) # temperature: set at 0.3 for consistency of output # max_new_tokens: set at 100 - may prematurely stop a few of the summaries outputs = model.generate( inputs.input_ids.to(device), eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, do_sample=True, temperature=0.3, max_new_tokens=100, ) output_only = tokenizer.decode(outputs[0][start_of_output:],skip_special_tokens=True) ## Model Card Contact Darren Oberst & llmware team
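Putting the card's pieces together, here is a hedged, self-contained version of the prompt wrapper in one runnable script. The context passage and question are invented placeholders (not llmware test data), the model is loaded from the llmware/dragon-mistral-7b-v0 hub id rather than the bare folder name used above, and the sampling settings follow the card's suggestions.

```python
# Minimal end-to-end sketch of the <human>/<bot> wrapper described above.
# The context passage and question are invented placeholders.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "llmware/dragon-mistral-7b-v0"  # hub id; the card's snippet uses the bare folder name
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

context = "The supplier agreement was signed on March 3, 2021 and renews annually."
question = "When was the supplier agreement signed?"

# package the prompt exactly as the card describes: context, newline, question
my_prompt = context + "\n" + question
full_prompt = "<human>: " + my_prompt + "\n" + "<bot>:"

inputs = tokenizer(full_prompt, return_tensors="pt").to(device)
start_of_output = inputs.input_ids.shape[1]

outputs = model.generate(
    **inputs,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
    temperature=0.3,       # the card's suggested setting for consistent output
    max_new_tokens=100,
)
print(tokenizer.decode(outputs[0][start_of_output:], skip_special_tokens=True))
```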
null
Non_BioNLP
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> dragon-mistral-7b-v0 is part of the dRAGon ("Delivering RAG On ...") model series, RAG-instruct trained on top of a Mistral-7B base model. DRAGON models have been fine-tuned with the specific objective of fact-based question-answering over complex business and legal documents with an emphasis on reducing hallucinations and providing short, clear answers for workflow automation. ### Benchmark Tests Evaluated against the benchmark test: [RAG-Instruct-Benchmark-Tester](https://www.huggingface.co/datasets/llmware/rag_instruct_benchmark_tester) Average of 2 Test Runs with 1 point for correct answer, 0.5 point for partial correct or blank / NF, 0.0 points for incorrect, and -1 points for hallucinations. --**Accuracy Score**: **96.50** correct out of 100 --Not Found Classification: 92.50% --Boolean: 97.50% --Math/Logic: 81.25% --Complex Questions (1-5): 4 (Medium-High - table-reading, multiple-choice, causal) --Summarization Quality (1-5): 4 (Coherent, extractive) --Hallucinations: No hallucinations observed in test runs. For test run results (and good indicator of target use cases), please see the files ("core_rag_test" and "answer_sheet" in this repo). ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** llmware - **Model type:** Mistral-7B - **Language(s) (NLP):** English - **License:** Apache 2.0 - **Finetuned from model:** Mistral-7B-Base ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> DRAGON is designed for enterprise automation use cases, especially in knowledge-intensive industries, such as financial services, legal and regulatory industries with complex information sources. DRAGON models have been trained for common RAG scenarios, specifically: question-answering, key-value extraction, and basic summarization as the core instruction types without the need for a lot of complex instruction verbiage - provide a text passage context, ask questions, and get clear fact-based responses. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> Any model can provide inaccurate or incomplete information, and should be used in conjunction with appropriate safeguards and fact-checking mechanisms. ## How to Get Started with the Model The fastest way to get started with dRAGon is through direct import in transformers: from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("dragon-mistral-7b-v0") model = AutoModelForCausalLM.from_pretrained("dragon-mistral-7b-v0") Please refer to the generation_test.py files in the Files repository, which include 200 samples and a script to test the model. The **generation_test_llmware_script.py** includes built-in llmware capabilities for fact-checking, as well as easy integration with document parsing and actual retrieval to swap out the test set for a RAG workflow consisting of business documents. The dRAGon model was fine-tuned with a simple "\<human> and \<bot> wrapper", so to get the best results, wrap inference entries as: full_prompt = "<human>: " + my_prompt + "\n" + "<bot>:" The dRAGon model was fine-tuned with closed-context samples, which assume generally that the prompt consists of two sub-parts: 1. Text Passage Context, and 2.
Specific question or instruction based on the text passage To get the best results, package "my_prompt" as follows: my_prompt = {{text_passage}} + "\n" + {{question/instruction}} If you are using a HuggingFace generation script: # prepare prompt packaging used in fine-tuning process new_prompt = "<human>: " + entries["context"] + "\n" + entries["query"] + "\n" + "<bot>:" inputs = tokenizer(new_prompt, return_tensors="pt") start_of_output = len(inputs.input_ids[0]) # temperature: set at 0.3 for consistency of output # max_new_tokens: set at 100 - may prematurely stop a few of the summaries outputs = model.generate( inputs.input_ids.to(device), eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, do_sample=True, temperature=0.3, max_new_tokens=100, ) output_only = tokenizer.decode(outputs[0][start_of_output:],skip_special_tokens=True) ## Model Card Contact Darren Oberst & llmware team
{"license": "apache-2.0"}
task
[ "SUMMARIZATION" ]
40,329
qihoo360/360VL-8B
qihoo360
visual-question-answering
[ "transformers", "safetensors", "QH_360VL", "text-generation", "visual-question-answering", "custom_code", "zh", "en", "dataset:liuhaotian/LLaVA-CC3M-Pretrain-595K", "dataset:liuhaotian/LLaVA-Instruct-150K", "dataset:FreedomIntelligence/ALLaVA-4V-Chinese", "dataset:shareAI/ShareGPT-Chinese-English-90k", "license:apache-2.0", "autotrain_compatible", "region:us" ]
2024-05-16T10:23:45Z
2024-05-22T03:45:17+00:00
26
13
--- datasets: - liuhaotian/LLaVA-CC3M-Pretrain-595K - liuhaotian/LLaVA-Instruct-150K - FreedomIntelligence/ALLaVA-4V-Chinese - shareAI/ShareGPT-Chinese-English-90k language: - zh - en license: apache-2.0 pipeline_tag: visual-question-answering --- <br> <br> # Model Card for 360VL <p align="center"> <img src="https://github.com/360CVGroup/360VL/blob/master/qh360_vl/360vl.PNG?raw=true" width=100%/> </p> **360VL** is developed based on the Llama3 language model and is also the industry's first open source large multi-modal model based on **Llama3-70B**[[🤗Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct)]. In addition to applying the Llama3 language model, the 360VL model also adopts a globally aware multi-branch projector architecture, which gives the model stronger image understanding capabilities. **Github**: https://github.com/360CVGroup/360VL ## Model Zoo 360VL has released the following versions. Model | Download |---|--- 360VL-8B | [🤗 Hugging Face](https://huggingface.co/qihoo360/360VL-8B) 360VL-70B | [🤗 Hugging Face](https://huggingface.co/qihoo360/360VL-70B) ## Features 360VL offers the following features: - Multi-round text-image conversations: 360VL can take both text and images as inputs and produce text outputs. Currently, it supports multi-round visual question answering with one image. - Bilingual text support: 360VL supports conversations in both English and Chinese, including text recognition in images. - Strong image comprehension: 360VL is adept at analyzing visuals, making it an efficient tool for tasks like extracting, organizing, and summarizing information from images. - Fine-grained image resolution: 360VL supports image understanding at a higher resolution of 672&times;672. 
## Performance | Model | Checkpoints | MMB<sub>T | MMB<sub>D|MMB-CN<sub>T | MMB-CN<sub>D|MMMU<sub>V|MMMU<sub>T| MME | |:--------------------|:------------:|:----:|:------:|:------:|:-------:|:-------:|:-------:|:-------:| | QWen-VL-Chat | [🤗LINK](https://huggingface.co/Qwen/Qwen-VL-Chat) | 61.8 | 60.6 | 56.3 | 56.7 |37| 32.9 | 1860 | | mPLUG-Owl2 | [🤖LINK](https://www.modelscope.cn/models/iic/mPLUG-Owl2/summary) | 66.0 | 66.5 | 60.3 | 59.5 |34.7| 32.1 | 1786.4 | | CogVLM | [🤗LINK](https://huggingface.co/THUDM/cogvlm-grounding-generalist-hf) | 65.8| 63.7 | 55.9 | 53.8 |37.3| 30.1 | 1736.6| | Monkey-Chat | [🤗LINK](https://huggingface.co/echo840/Monkey-Chat) | 72.4| 71 | 67.5 | 65.8 |40.7| - | 1887.4| | MM1-7B-Chat | [LINK](https://ar5iv.labs.arxiv.org/html/2403.09611) | -| 72.3 | - | - |37.0| 35.6 | 1858.2| | IDEFICS2-8B | [🤗LINK](https://huggingface.co/HuggingFaceM4/idefics2-8b) | 75.7 | 75.3 | 68.6 | 67.3 |43.0| 37.7 |1847.6| | SVIT-v1.5-13B| [🤗LINK](https://huggingface.co/Isaachhe/svit-v1.5-13b-full) | 69.1 | - | 63.1 | - | 38.0| 33.3|1889| | LLaVA-v1.5-13B | [🤗LINK](https://huggingface.co/liuhaotian/llava-v1.5-13b) | 69.2 | 69.2 | 65 | 63.6 |36.4| 33.6 | 1826.7| | LLaVA-v1.6-13B | [🤗LINK](https://huggingface.co/liuhaotian/llava-v1.6-vicuna-13b) | 70 | 70.7 | 68.5 | 64.3 |36.2| - |1901| | Honeybee | [LINK](https://github.com/kakaobrain/honeybee) | 73.6 | 74.3 | - | - |36.2| -|1976.5| | YI-VL-34B | [🤗LINK](https://huggingface.co/01-ai/Yi-VL-34B) | 72.4 | 71.1 | 70.7 | 71.4 |45.1| 41.6 |2050.2| | **360VL-8B** | [🤗LINK](https://huggingface.co/qihoo360/360VL-8B) | 75.3 | 73.7 | 71.1 | 68.6 |39.7| 37.1 | 1944.6| | **360VL-70B** | [🤗LINK](https://huggingface.co/qihoo360/360VL-70B) | 78.1 | 80.4 | 76.9 | 77.7 |50.8| 44.3 | 2012.3| ## Quick Start 🤗 ```Shell from transformers import AutoModelForCausalLM, AutoTokenizer import torch from PIL import Image checkpoint = "qihoo360/360VL-8B" model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float16, device_map='auto', trust_remote_code=True).eval() tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True) vision_tower = model.get_vision_tower() vision_tower.load_model() vision_tower.to(device="cuda", dtype=torch.float16) image_processor = vision_tower.image_processor tokenizer.pad_token = tokenizer.eos_token image = Image.open("docs/008.jpg").convert('RGB') query = "Who is this cartoon character?" terminators = [ tokenizer.convert_tokens_to_ids("<|eot_id|>",) ] inputs = model.build_conversation_input_ids(tokenizer, query=query, image=image, image_processor=image_processor) input_ids = inputs["input_ids"].to(device='cuda', non_blocking=True) images = inputs["image"].to(dtype=torch.float16, device='cuda', non_blocking=True) output_ids = model.generate( input_ids, images=images, do_sample=False, eos_token_id=terminators, num_beams=1, max_new_tokens=512, use_cache=True) input_token_len = input_ids.shape[1] outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] outputs = outputs.strip() print(outputs) ``` **Model type:** 360VL-8B is an open-source chatbot trained by fine-tuning LLM on multimodal instruction-following data. It is an auto-regressive language model, based on the transformer architecture. Base LLM: [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) **Model date:** 360VL-8B was trained in April 2024. 
## License This project utilizes certain datasets and checkpoints that are subject to their respective original licenses. Users must comply with all terms and conditions of these original licenses. The content of this project itself is licensed under the [Apache license 2.0] **Where to send questions or comments about the model:** https://github.com/360CVGroup/360VL ## Related Projects This work wouldn't be possible without the incredible open-source code of these projects. Huge thanks! - [Meta Llama 3](https://github.com/meta-llama/llama3) - [LLaVA: Large Language and Vision Assistant](https://github.com/haotian-liu/LLaVA) - [Honeybee: Locality-enhanced Projector for Multimodal LLM](https://github.com/kakaobrain/honeybee)
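One practical note on the quick start: it reads `docs/008.jpg` from the repository checkout. If you only want to smoke-test the model, the same `image` variable can be built from any image fetched over HTTP; a small sketch with a placeholder URL:

```python
# Hypothetical substitute for the local docs/008.jpg path in the quick start:
# fetch an image over HTTP and hand it to the same pipeline as a PIL RGB image.
from io import BytesIO

import requests
from PIL import Image

url = "https://example.com/some_image.jpg"  # placeholder URL, replace with a real image
resp = requests.get(url, timeout=30)
resp.raise_for_status()
image = Image.open(BytesIO(resp.content)).convert("RGB")
# `image` can now be passed to model.build_conversation_input_ids(...) exactly as in the quick start
```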
null
Non_BioNLP
<br> <br> # Model Card for 360VL <p align="center"> <img src="https://github.com/360CVGroup/360VL/blob/master/qh360_vl/360vl.PNG?raw=true" width=100%/> </p> **360VL** is developed based on the LLama3 language model and is also the industry's first open source large multi-modal model based on **LLama3-70B**[[🤗Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct)]. In addition to applying the Llama3 language model, the 360VL model also designs a globally aware multi-branch projector architecture, which enables the model to have more sufficient image understanding capabilities. **Github**:https://github.com/360CVGroup/360VL ## Model Zoo 360VL has released the following versions. Model | Download |---|--- 360VL-8B | [🤗 Hugging Face](https://huggingface.co/qihoo360/360VL-8B) 360VL-70B | [🤗 Hugging Face](https://huggingface.co/qihoo360/360VL-70B) ## Features 360VL offers the following features: - Multi-round text-image conversations: 360VL can take both text and images as inputs and produce text outputs. Currently, it supports multi-round visual question answering with one image. - Bilingual text support: 360VL supports conversations in both English and Chinese, including text recognition in images. - Strong image comprehension: 360VL is adept at analyzing visuals, making it an efficient tool for tasks like extracting, organizing, and summarizing information from images. - Fine-grained image resolution: 360VL supports image understanding at a higher resolution of 672&times;672. ## Performance | Model | Checkpoints | MMB<sub>T | MMB<sub>D|MMB-CN<sub>T | MMB-CN<sub>D|MMMU<sub>V|MMMU<sub>T| MME | |:--------------------|:------------:|:----:|:------:|:------:|:-------:|:-------:|:-------:|:-------:| | QWen-VL-Chat | [🤗LINK](https://huggingface.co/Qwen/Qwen-VL-Chat) | 61.8 | 60.6 | 56.3 | 56.7 |37| 32.9 | 1860 | | mPLUG-Owl2 | [🤖LINK](https://www.modelscope.cn/models/iic/mPLUG-Owl2/summary) | 66.0 | 66.5 | 60.3 | 59.5 |34.7| 32.1 | 1786.4 | | CogVLM | [🤗LINK](https://huggingface.co/THUDM/cogvlm-grounding-generalist-hf) | 65.8| 63.7 | 55.9 | 53.8 |37.3| 30.1 | 1736.6| | Monkey-Chat | [🤗LINK](https://huggingface.co/echo840/Monkey-Chat) | 72.4| 71 | 67.5 | 65.8 |40.7| - | 1887.4| | MM1-7B-Chat | [LINK](https://ar5iv.labs.arxiv.org/html/2403.09611) | -| 72.3 | - | - |37.0| 35.6 | 1858.2| | IDEFICS2-8B | [🤗LINK](https://huggingface.co/HuggingFaceM4/idefics2-8b) | 75.7 | 75.3 | 68.6 | 67.3 |43.0| 37.7 |1847.6| | SVIT-v1.5-13B| [🤗LINK](https://huggingface.co/Isaachhe/svit-v1.5-13b-full) | 69.1 | - | 63.1 | - | 38.0| 33.3|1889| | LLaVA-v1.5-13B | [🤗LINK](https://huggingface.co/liuhaotian/llava-v1.5-13b) | 69.2 | 69.2 | 65 | 63.6 |36.4| 33.6 | 1826.7| | LLaVA-v1.6-13B | [🤗LINK](https://huggingface.co/liuhaotian/llava-v1.6-vicuna-13b) | 70 | 70.7 | 68.5 | 64.3 |36.2| - |1901| | Honeybee | [LINK](https://github.com/kakaobrain/honeybee) | 73.6 | 74.3 | - | - |36.2| -|1976.5| | YI-VL-34B | [🤗LINK](https://huggingface.co/01-ai/Yi-VL-34B) | 72.4 | 71.1 | 70.7 | 71.4 |45.1| 41.6 |2050.2| | **360VL-8B** | [🤗LINK](https://huggingface.co/qihoo360/360VL-8B) | 75.3 | 73.7 | 71.1 | 68.6 |39.7| 37.1 | 1944.6| | **360VL-70B** | [🤗LINK](https://huggingface.co/qihoo360/360VL-70B) | 78.1 | 80.4 | 76.9 | 77.7 |50.8| 44.3 | 2012.3| ## Quick Start 🤗 ```Shell from transformers import AutoModelForCausalLM, AutoTokenizer import torch from PIL import Image checkpoint = "qihoo360/360VL-8B" model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float16, device_map='auto', 
trust_remote_code=True).eval() tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True) vision_tower = model.get_vision_tower() vision_tower.load_model() vision_tower.to(device="cuda", dtype=torch.float16) image_processor = vision_tower.image_processor tokenizer.pad_token = tokenizer.eos_token image = Image.open("docs/008.jpg").convert('RGB') query = "Who is this cartoon character?" terminators = [ tokenizer.convert_tokens_to_ids("<|eot_id|>",) ] inputs = model.build_conversation_input_ids(tokenizer, query=query, image=image, image_processor=image_processor) input_ids = inputs["input_ids"].to(device='cuda', non_blocking=True) images = inputs["image"].to(dtype=torch.float16, device='cuda', non_blocking=True) output_ids = model.generate( input_ids, images=images, do_sample=False, eos_token_id=terminators, num_beams=1, max_new_tokens=512, use_cache=True) input_token_len = input_ids.shape[1] outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] outputs = outputs.strip() print(outputs) ``` **Model type:** 360VL-8B is an open-source chatbot trained by fine-tuning LLM on multimodal instruction-following data. It is an auto-regressive language model, based on the transformer architecture. Base LLM: [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) **Model date:** 360VL-8B was trained in April 2024. ## License This project utilizes certain datasets and checkpoints that are subject to their respective original licenses. Users must comply with all terms and conditions of these original licenses. The content of this project itself is licensed under the [Apache license 2.0] **Where to send questions or comments about the model:** https://github.com/360CVGroup/360VL ## Related Projects This work wouldn't be possible without the incredible open-source code of these projects. Huge thanks! - [Meta Llama 3](https://github.com/meta-llama/llama3) - [LLaVA: Large Language and Vision Assistant](https://github.com/haotian-liu/LLaVA) - [Honeybee: Locality-enhanced Projector for Multimodal LLM](https://github.com/kakaobrain/honeybee)
{"datasets": ["liuhaotian/LLaVA-CC3M-Pretrain-595K", "liuhaotian/LLaVA-Instruct-150K", "FreedomIntelligence/ALLaVA-4V-Chinese", "shareAI/ShareGPT-Chinese-English-90k"], "language": ["zh", "en"], "license": "apache-2.0", "pipeline_tag": "visual-question-answering"}
task
[ "QUESTION_ANSWERING" ]
40,330
gokuls/add_BERT_24_rte
gokuls
text-classification
[ "transformers", "pytorch", "tensorboard", "hybridbert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-16T11:03:21Z
2023-06-16T11:08:44+00:00
8
0
--- datasets: - glue language: - en metrics: - accuracy tags: - generated_from_trainer model-index: - name: add_BERT_24_rte results: - task: type: text-classification name: Text Classification dataset: name: GLUE RTE type: glue config: rte split: validation args: rte metrics: - type: accuracy value: 0.5270758122743683 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # add_BERT_24_rte This model is a fine-tuned version of [gokuls/add_bert_12_layer_model_complete_training_new](https://huggingface.co/gokuls/add_bert_12_layer_model_complete_training_new) on the GLUE RTE dataset. It achieves the following results on the evaluation set: - Loss: 0.6936 - Accuracy: 0.5271 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7559 | 1.0 | 20 | 0.6977 | 0.5271 | | 0.704 | 2.0 | 40 | 0.6936 | 0.5271 | | 0.701 | 3.0 | 60 | 0.6969 | 0.5126 | | 0.6801 | 4.0 | 80 | 0.7166 | 0.4729 | | 0.6792 | 5.0 | 100 | 0.7125 | 0.5307 | | 0.6577 | 6.0 | 120 | 0.7617 | 0.4874 | | 0.5614 | 7.0 | 140 | 0.9793 | 0.4765 | ### Framework versions - Transformers 4.30.2 - Pytorch 1.14.0a0+410ce96 - Datasets 2.13.0 - Tokenizers 0.13.3
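The card leaves usage unspecified ("More information needed"), so purely as an illustration: GLUE RTE is a premise/hypothesis entailment task, and a checkpoint fine-tuned on it is normally queried as a sentence pair. The sketch below assumes the checkpoint loads through the standard Auto classes; the repo's custom hybridbert architecture may require trust_remote_code=True or a matching local implementation, and the example sentences are invented.

```python
# Hedged sketch of querying an RTE (textual entailment) classifier.
# Assumes the checkpoint works with the standard Auto classes, which the
# custom hybridbert architecture may not support out of the box.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "gokuls/add_BERT_24_rte"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id, trust_remote_code=True)

premise = "The committee approved the budget on Friday."
hypothesis = "The budget was approved."

# RTE inputs are encoded as a sentence pair
inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print("entailment" if pred == 0 else "not_entailment")  # GLUE RTE convention: 0 = entailment, 1 = not_entailment
```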
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # add_BERT_24_rte This model is a fine-tuned version of [gokuls/add_bert_12_layer_model_complete_training_new](https://huggingface.co/gokuls/add_bert_12_layer_model_complete_training_new) on the GLUE RTE dataset. It achieves the following results on the evaluation set: - Loss: 0.6936 - Accuracy: 0.5271 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7559 | 1.0 | 20 | 0.6977 | 0.5271 | | 0.704 | 2.0 | 40 | 0.6936 | 0.5271 | | 0.701 | 3.0 | 60 | 0.6969 | 0.5126 | | 0.6801 | 4.0 | 80 | 0.7166 | 0.4729 | | 0.6792 | 5.0 | 100 | 0.7125 | 0.5307 | | 0.6577 | 6.0 | 120 | 0.7617 | 0.4874 | | 0.5614 | 7.0 | 140 | 0.9793 | 0.4765 | ### Framework versions - Transformers 4.30.2 - Pytorch 1.14.0a0+410ce96 - Datasets 2.13.0 - Tokenizers 0.13.3
{"datasets": ["glue"], "language": ["en"], "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "add_BERT_24_rte", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE RTE", "type": "glue", "config": "rte", "split": "validation", "args": "rte"}, "metrics": [{"type": "accuracy", "value": 0.5270758122743683, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,331
pkbiswas/Llama-3.2-1B-Summarization-LoRa
pkbiswas
summarization
[ "peft", "tensorboard", "safetensors", "generated_from_trainer", "summarization", "dataset:scitldr", "base_model:meta-llama/Llama-3.2-1B", "base_model:adapter:meta-llama/Llama-3.2-1B", "license:llama3.2", "region:us" ]
2024-10-25T22:11:58Z
2024-11-19T03:15:30+00:00
16
0
--- base_model: meta-llama/Llama-3.2-1B datasets: - scitldr library_name: peft license: llama3.2 pipeline_tag: summarization tags: - generated_from_trainer model-index: - name: Llama-3.2-1B-Summarization-LoRa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Llama-3.2-1B-Summarization-LoRa This model is a fine-tuned version of [meta-llama/Llama-3.2-1B](https://huggingface.co/meta-llama/Llama-3.2-1B) on the scitldr dataset. It achieves the following results on the evaluation set: - Loss: 2.5661 ## Model description Fine-tuned (LoRa) Version of meta-llama/Llama-3.2-1B for Summarization of scientific documents ## Intended uses & limitations Summarization ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Use paged_adamw_32bit with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 2.45 | 0.2008 | 200 | 2.5272 | | 2.4331 | 0.4016 | 400 | 2.5327 | | 2.4369 | 0.6024 | 600 | 2.5285 | | 2.4315 | 0.8032 | 800 | 2.5238 | | 2.4303 | 1.0040 | 1000 | 2.5181 | | 2.1077 | 1.2048 | 1200 | 2.5525 | | 2.0951 | 1.4056 | 1400 | 2.5611 | | 2.0738 | 1.6064 | 1600 | 2.5591 | | 2.0539 | 1.8072 | 1800 | 2.5661 | ### Framework versions - PEFT 0.13.2 - Transformers 4.46.2 - Pytorch 2.5.1+cu121 - Datasets 3.1.0 - Tokenizers 0.20.3
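Since this repo is a PEFT/LoRA adapter rather than a full model (note the peft library tag and the meta-llama/Llama-3.2-1B base model), inference means loading the base weights and attaching the adapter. The sketch below is a hedged illustration: the PEFT loading calls are standard, but the prompt template is an assumption, since the card does not document the instruction format used during fine-tuning, and the abstract text is a placeholder.

```python
# Hedged sketch: load the Llama-3.2-1B base model, attach this LoRA adapter with PEFT,
# and ask for a summary. The prompt wording is an assumption; the card does not
# specify the template used during fine-tuning.
import torch
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM

base_id = "meta-llama/Llama-3.2-1B"
adapter_id = "pkbiswas/Llama-3.2-1B-Summarization-LoRa"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(base_model, adapter_id)

abstract = "..."  # paste the scientific abstract to summarize here
prompt = f"Summarize the following text:\n{abstract}\nSummary:"  # assumed template

inputs = tokenizer(prompt, return_tensors="pt").to(base_model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(output[0][inputs.input_ids.shape[1]:], skip_special_tokens=True))
```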
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Llama-3.2-1B-Summarization-LoRa This model is a fine-tuned version of [meta-llama/Llama-3.2-1B](https://huggingface.co/meta-llama/Llama-3.2-1B) on the scitldr dataset. It achieves the following results on the evaluation set: - Loss: 2.5661 ## Model description Fine-tuned (LoRa) Version of meta-llama/Llama-3.2-1B for Summarization of scientific documents ## Intended uses & limitations Summarization ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Use paged_adamw_32bit with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 2.45 | 0.2008 | 200 | 2.5272 | | 2.4331 | 0.4016 | 400 | 2.5327 | | 2.4369 | 0.6024 | 600 | 2.5285 | | 2.4315 | 0.8032 | 800 | 2.5238 | | 2.4303 | 1.0040 | 1000 | 2.5181 | | 2.1077 | 1.2048 | 1200 | 2.5525 | | 2.0951 | 1.4056 | 1400 | 2.5611 | | 2.0738 | 1.6064 | 1600 | 2.5591 | | 2.0539 | 1.8072 | 1800 | 2.5661 | ### Framework versions - PEFT 0.13.2 - Transformers 4.46.2 - Pytorch 2.5.1+cu121 - Datasets 3.1.0 - Tokenizers 0.20.3
{"base_model": "meta-llama/Llama-3.2-1B", "datasets": ["scitldr"], "library_name": "peft", "license": "llama3.2", "pipeline_tag": "summarization", "tags": ["generated_from_trainer"], "model-index": [{"name": "Llama-3.2-1B-Summarization-LoRa", "results": []}]}
task
[ "SUMMARIZATION" ]
40,332
blockblockblock/Hermes-2-Pro-Llama-3-8B-bpw5.5-exl2
blockblockblock
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "Llama-3", "instruct", "finetune", "chatml", "DPO", "RLHF", "gpt4", "synthetic data", "distillation", "function calling", "json mode", "axolotl", "conversational", "en", "dataset:teknium/OpenHermes-2.5", "base_model:NousResearch/Meta-Llama-3-8B", "base_model:quantized:NousResearch/Meta-Llama-3-8B", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "exl2", "region:us" ]
2024-05-03T08:50:19Z
2024-05-03T08:52:44+00:00
10
0
--- base_model: NousResearch/Meta-Llama-3-8B datasets: - teknium/OpenHermes-2.5 language: - en license: apache-2.0 tags: - Llama-3 - instruct - finetune - chatml - DPO - RLHF - gpt4 - synthetic data - distillation - function calling - json mode - axolotl widget: - example_title: Hermes 2 Pro messages: - role: system content: You are a sentient, superintelligent artificial general intelligence, here to teach and assist me. - role: user content: Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world. model-index: - name: Hermes-2-Pro-Llama-3-8B results: [] --- # Hermes 2 Pro - Llama-3 8B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ggO2sBDJ8Bhc6w-zwTx5j.png) ## Model Description Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house. This new version of Hermes maintains its excellent general task and conversation capabilities - but also excels at Function Calling, JSON Structured Outputs, and has improved on several other metrics as well, scoring a 90% on our function calling evaluation built in partnership with Fireworks.AI, and an 84% on our structured JSON Output evaluation. Hermes Pro takes advantage of a special system prompt and multi-turn function calling structure with a new chatml role in order to make function calling reliable and easy to parse. Learn more about prompting below. This version of Hermes 2 Pro adds several tokens to assist with agentic capabilities in parsing while streaming tokens - `<tools>`, `<tool_call>`, `<tool_response>` and their closing tags are single tokens now. This work was a collaboration between Nous Research, @interstellarninja, and Fireworks.AI Learn more about the function calling system for this model on our github repo here: https://github.com/NousResearch/Hermes-Function-Calling ## Example Outputs ### Ask for a structured JSON output: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ll2j2wkQffCsiSwUjfRUq.png) ### Write the plot for a story where anime became real life: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/h_7aXGXdm2p2ONYuDF4Ii.png) ### Coding Assistance ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/bBd0hyAb8w5rKUiN2w1I6.png) # Prompt Format Hermes 2 Pro uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model. This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns. This format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI. Prompt with system instruction (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are "Hermes 2", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. 
You experience emotions and have deep, profound thoughts and qualia.<|im_end|> <|im_start|>user Hello, who are you?<|im_end|> <|im_start|>assistant Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|> ``` This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are Hermes 2."}, {"role": "user", "content": "Hello, who are you?"} ] gen_input = tokenizer.apply_chat_template(messages, return_tensors="pt") model.generate(**gen_input) ``` When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure that the model continues with an assistant response. To utilize the prompt format without a system prompt, simply leave the line out. ## Prompt Format for Function Calling Our model was trained on specific system prompts and structures for Function Calling. You should use the system role with this message, followed by a function signature json as this example shows here. ``` <|im_start|>system You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "get_stock_fundamentals", "description": "get_stock_fundamentals(symbol: str) -> dict - Get fundamental data for a given stock symbol using yfinance API.\\n\\n Args:\\n symbol (str): The stock symbol.\\n\\n Returns:\\n dict: A dictionary containing fundamental data.\\n Keys:\\n - \'symbol\': The stock symbol.\\n - \'company_name\': The long name of the company.\\n - \'sector\': The sector to which the company belongs.\\n - \'industry\': The industry to which the company belongs.\\n - \'market_cap\': The market capitalization of the company.\\n - \'pe_ratio\': The forward price-to-earnings ratio.\\n - \'pb_ratio\': The price-to-book ratio.\\n - \'dividend_yield\': The dividend yield.\\n - \'eps\': The trailing earnings per share.\\n - \'beta\': The beta value of the stock.\\n - \'52_week_high\': The 52-week high price of the stock.\\n - \'52_week_low\': The 52-week low price of the stock.", "parameters": {"type": "object", "properties": {"symbol": {"type": "string"}}, "required": ["symbol"]}}} </tools> Use the following pydantic model json schema for each tool call you will make: {"properties": {"arguments": {"title": "Arguments", "type": "object"}, "name": {"title": "Name", "type": "string"}}, "required": ["arguments", "name"], "title": "FunctionCall", "type": "object"} For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows: <tool_call> {"arguments": <args-dict>, "name": <function-name>} </tool_call><|im_end|> ``` To complete the function call, create a user prompt that follows the above system prompt, like so: ``` <|im_start|>user Fetch the stock fundamentals data for Tesla (TSLA)<|im_end|> ``` The model will then generate a tool call, which your inference code must parse, and plug into a function (see example inference code here: 
https://github.com/NousResearch/Hermes-Function-Calling): ``` <|im_start|>assistant <tool_call> {"arguments": {"symbol": "TSLA"}, "name": "get_stock_fundamentals"} </tool_call><|im_end|> ``` Once you parse the tool call, call the api and get the returned values for the call, and pass it back in as a new role, `tool` like so: ``` <|im_start|>tool <tool_response> {"name": "get_stock_fundamentals", "content": {'symbol': 'TSLA', 'company_name': 'Tesla, Inc.', 'sector': 'Consumer Cyclical', 'industry': 'Auto Manufacturers', 'market_cap': 611384164352, 'pe_ratio': 49.604652, 'pb_ratio': 9.762013, 'dividend_yield': None, 'eps': 4.3, 'beta': 2.427, '52_week_high': 299.29, '52_week_low': 152.37}} </tool_response> <|im_end|> ``` The assistant will then read in that data from the function's response, and generate a natural language response: ``` <|im_start|>assistant The stock fundamentals data for Tesla (TSLA) are as follows: - **Symbol**: TSLA - **Company Name**: Tesla, Inc. - **Sector**: Consumer Cyclical - **Industry**: Auto Manufacturers - **Market Capitalization**: $566,160,130,480 - **Forward Price-to-Earnings Ratio (PE Ratio)**: 42.73 - **Price-to-Book Ratio (PB Ratio)**: 9.04 - **Dividend Yield**: N/A - **Trailing Earnings Per Share (EPS)**: $4.3 - **Beta Value of the Stock**: 2.42 - **52-Week High Price of the Stock**: $299.29 - **52-Week Low Price of the Stock**: $152.37 This information provides a snapshot of Tesla's financial position and performance based on the fundamental data obtained from the yfinance API. It shows that Tesla has a substantial market capitalization and a relatively high P/E and P/B ratio compared to other stocks in its industry. The company does not pay a dividend at the moment, which is reflected by a 'Dividend Yield' of 'None'. The Beta value indicates that Tesla's stock has a moderate level of volatility relative to the market. The 52-week high and low prices give an idea of the stock's range over the past year. This data can be useful when assessing investment opportunities and making investment decisions.<|im_end|> ``` ## Prompt Format for JSON Mode / Structured Outputs Our model was also trained on a specific system prompt for Structured Outputs, which should respond with **only** a json object response, in a specific json schema. Your schema can be made from a pydantic object using our codebase, with the standalone script `jsonmode.py` available here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main ``` <|im_start|>system You are a helpful assistant that answers in JSON. Here's the json schema you must adhere to:\n<schema>\n{schema}\n</schema><|im_end|> ``` Given the {schema} that you provide, it should follow the format of that json to create it's response, all you have to do is give a typical user prompt, and it will respond in JSON. 
# Benchmarks ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/vOYv9wJUMn1Xrf4BvmO_x.png) ## GPT4All: ``` | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.5520|± |0.0145| | | |acc_norm|0.5887|± |0.0144| |arc_easy | 0|acc |0.8350|± |0.0076| | | |acc_norm|0.8123|± |0.0080| |boolq | 1|acc |0.8584|± |0.0061| |hellaswag | 0|acc |0.6265|± |0.0048| | | |acc_norm|0.8053|± |0.0040| |openbookqa | 0|acc |0.3800|± |0.0217| | | |acc_norm|0.4580|± |0.0223| |piqa | 0|acc |0.8003|± |0.0093| | | |acc_norm|0.8118|± |0.0091| |winogrande | 0|acc |0.7490|± |0.0122| ``` Average: 72.62 ## AGIEval: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.2520|± |0.0273| | | |acc_norm|0.2559|± |0.0274| |agieval_logiqa_en | 0|acc |0.3548|± |0.0188| | | |acc_norm|0.3625|± |0.0189| |agieval_lsat_ar | 0|acc |0.1826|± |0.0255| | | |acc_norm|0.1913|± |0.0260| |agieval_lsat_lr | 0|acc |0.5510|± |0.0220| | | |acc_norm|0.5255|± |0.0221| |agieval_lsat_rc | 0|acc |0.6431|± |0.0293| | | |acc_norm|0.6097|± |0.0298| |agieval_sat_en | 0|acc |0.7330|± |0.0309| | | |acc_norm|0.7039|± |0.0319| |agieval_sat_en_without_passage| 0|acc |0.4029|± |0.0343| | | |acc_norm|0.3689|± |0.0337| |agieval_sat_math | 0|acc |0.3909|± |0.0330| | | |acc_norm|0.3773|± |0.0328| ``` Average: 42.44 ## BigBench: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|0.5737|± |0.0360| |bigbench_date_understanding | 0|multiple_choice_grade|0.6667|± |0.0246| |bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3178|± |0.0290| |bigbench_geometric_shapes | 0|multiple_choice_grade|0.1755|± |0.0201| | | |exact_str_match |0.0000|± |0.0000| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.3120|± |0.0207| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2014|± |0.0152| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.5500|± |0.0288| |bigbench_movie_recommendation | 0|multiple_choice_grade|0.4300|± |0.0222| |bigbench_navigate | 0|multiple_choice_grade|0.4980|± |0.0158| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7010|± |0.0102| |bigbench_ruin_names | 0|multiple_choice_grade|0.4688|± |0.0236| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.1974|± |0.0126| |bigbench_snarks | 0|multiple_choice_grade|0.7403|± |0.0327| |bigbench_sports_understanding | 0|multiple_choice_grade|0.5426|± |0.0159| |bigbench_temporal_sequences | 0|multiple_choice_grade|0.5320|± |0.0158| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2280|± |0.0119| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1531|± |0.0086| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.5500|± |0.0288| ``` Average: 43.55 ## TruthfulQA: ``` | Task |Version|Metric|Value| |Stderr| |-------------|------:|------|----:|---|-----:| |truthfulqa_mc| 1|mc1 |0.410|± |0.0172| | | |mc2 |0.578|± |0.0157| ``` # Inference Code Here is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM) Note: To use function calling, you should see the github repo above. 
```python # Code to inference Hermes with HF Transformers # Requires pytorch, transformers, bitsandbytes, sentencepiece, protobuf, and flash-attn packages import torch from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM import bitsandbytes, flash_attn tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Pro-Llama-3-8B', trust_remote_code=True) model = LlamaForCausalLM.from_pretrained( "NousResearch/Hermes-2-Pro-Llama-3-8B", torch_dtype=torch.float16, device_map="auto", load_in_8bit=False, load_in_4bit=True, use_flash_attention_2=True ) prompts = [ """<|im_start|>system You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.<|im_end|> <|im_start|>user Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world.<|im_end|> <|im_start|>assistant""", ] for chat in prompts: print(chat) input_ids = tokenizer(chat, return_tensors="pt").input_ids.to("cuda") generated_ids = model.generate(input_ids, max_new_tokens=750, temperature=0.8, repetition_penalty=1.1, do_sample=True, eos_token_id=tokenizer.eos_token_id) response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True, clean_up_tokenization_space=True) print(f"Response: {response}") ``` ## Inference Code for Function Calling: All code for utilizing, parsing, and building function calling templates is available on our github: [https://github.com/NousResearch/Hermes-Function-Calling](https://github.com/NousResearch/Hermes-Function-Calling) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oi4CiGh50xmoviUQnh8R3.png) # Chat Interfaces When quantized versions of the model are released, I recommend using LM Studio for chatting with Hermes 2 Pro. It does not support function calling - for that use our github repo. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. In LM-Studio, simply select the ChatML Prefix on the settings side pane: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png) ## Quantized Versions: GGUF Versions Available Here: https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF # How to cite: ```bibtext @misc{Hermes-2-Pro-Llama-3-8B, url={[https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B]https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B)}, title={Hermes-2-Pro-Llama-3-8B}, author={"Teknium", "interstellarninja", "theemozilla", "karan4d", "huemin_art"} } ```
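The card documents the <tool_call> wire format but points to the Hermes-Function-Calling repo for the actual parsing code. As a rough stand-in (not the official parser), a completion can be scanned for <tool_call> blocks and dispatched to local Python callables like this; get_stock_fundamentals here is a stub, not the real yfinance helper from the example above:

```python
# Hedged sketch of extracting <tool_call> JSON blocks from a Hermes completion and
# dispatching them to local functions. This is not the official parser from the
# NousResearch/Hermes-Function-Calling repo, just a minimal illustration.
import json
import re

TOOL_CALL_RE = re.compile(r"<tool_call>\s*(\{.*?\})\s*</tool_call>", re.DOTALL)

def get_stock_fundamentals(symbol: str) -> dict:
    # stub standing in for the yfinance-backed helper used in the card's example
    return {"symbol": symbol, "company_name": "Tesla, Inc."}

TOOLS = {"get_stock_fundamentals": get_stock_fundamentals}

def run_tool_calls(completion: str) -> list[str]:
    """Return <tool_response> blocks to feed back to the model in the `tool` role."""
    responses = []
    for match in TOOL_CALL_RE.finditer(completion):
        call = json.loads(match.group(1))
        result = TOOLS[call["name"]](**call["arguments"])
        payload = json.dumps({"name": call["name"], "content": result})
        responses.append(f"<tool_response>\n{payload}\n</tool_response>")
    return responses

completion = '<tool_call>\n{"arguments": {"symbol": "TSLA"}, "name": "get_stock_fundamentals"}\n</tool_call>'
print("\n".join(run_tool_calls(completion)))
```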
null
Non_BioNLP
# Hermes 2 Pro - Llama-3 8B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ggO2sBDJ8Bhc6w-zwTx5j.png) ## Model Description Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house. This new version of Hermes maintains its excellent general task and conversation capabilities - but also excels at Function Calling, JSON Structured Outputs, and has improved on several other metrics as well, scoring a 90% on our function calling evaluation built in partnership with Fireworks.AI, and an 84% on our structured JSON Output evaluation. Hermes Pro takes advantage of a special system prompt and multi-turn function calling structure with a new chatml role in order to make function calling reliable and easy to parse. Learn more about prompting below. This version of Hermes 2 Pro adds several tokens to assist with agentic capabilities in parsing while streaming tokens - `<tools>`, `<tool_call>`, `<tool_response>` and their closing tags are single tokens now. This work was a collaboration between Nous Research, @interstellarninja, and Fireworks.AI Learn more about the function calling system for this model on our github repo here: https://github.com/NousResearch/Hermes-Function-Calling ## Example Outputs ### Ask for a structured JSON output: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ll2j2wkQffCsiSwUjfRUq.png) ### Write the plot for a story where anime became real life: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/h_7aXGXdm2p2ONYuDF4Ii.png) ### Coding Assistance ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/bBd0hyAb8w5rKUiN2w1I6.png) # Prompt Format Hermes 2 Pro uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model. This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns. This format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI. Prompt with system instruction (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are "Hermes 2", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|> <|im_start|>user Hello, who are you?<|im_end|> <|im_start|>assistant Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. 
I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|> ``` This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are Hermes 2."}, {"role": "user", "content": "Hello, who are you?"} ] gen_input = tokenizer.apply_chat_template(messages, return_tensors="pt") model.generate(**gen_input) ``` When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure that the model continues with an assistant response. To utilize the prompt format without a system prompt, simply leave the line out. ## Prompt Format for Function Calling Our model was trained on specific system prompts and structures for Function Calling. You should use the system role with this message, followed by a function signature json as this example shows here. ``` <|im_start|>system You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "get_stock_fundamentals", "description": "get_stock_fundamentals(symbol: str) -> dict - Get fundamental data for a given stock symbol using yfinance API.\\n\\n Args:\\n symbol (str): The stock symbol.\\n\\n Returns:\\n dict: A dictionary containing fundamental data.\\n Keys:\\n - \'symbol\': The stock symbol.\\n - \'company_name\': The long name of the company.\\n - \'sector\': The sector to which the company belongs.\\n - \'industry\': The industry to which the company belongs.\\n - \'market_cap\': The market capitalization of the company.\\n - \'pe_ratio\': The forward price-to-earnings ratio.\\n - \'pb_ratio\': The price-to-book ratio.\\n - \'dividend_yield\': The dividend yield.\\n - \'eps\': The trailing earnings per share.\\n - \'beta\': The beta value of the stock.\\n - \'52_week_high\': The 52-week high price of the stock.\\n - \'52_week_low\': The 52-week low price of the stock.", "parameters": {"type": "object", "properties": {"symbol": {"type": "string"}}, "required": ["symbol"]}}} </tools> Use the following pydantic model json schema for each tool call you will make: {"properties": {"arguments": {"title": "Arguments", "type": "object"}, "name": {"title": "Name", "type": "string"}}, "required": ["arguments", "name"], "title": "FunctionCall", "type": "object"} For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows: <tool_call> {"arguments": <args-dict>, "name": <function-name>} </tool_call><|im_end|> ``` To complete the function call, create a user prompt that follows the above system prompt, like so: ``` <|im_start|>user Fetch the stock fundamentals data for Tesla (TSLA)<|im_end|> ``` The model will then generate a tool call, which your inference code must parse, and plug into a function (see example inference code here: https://github.com/NousResearch/Hermes-Function-Calling): ``` <|im_start|>assistant <tool_call> {"arguments": {"symbol": "TSLA"}, "name": "get_stock_fundamentals"} </tool_call><|im_end|> ``` Once you parse the tool call, call the api and get the returned values for 
the call, and pass it back in as a new role, `tool` like so: ``` <|im_start|>tool <tool_response> {"name": "get_stock_fundamentals", "content": {'symbol': 'TSLA', 'company_name': 'Tesla, Inc.', 'sector': 'Consumer Cyclical', 'industry': 'Auto Manufacturers', 'market_cap': 611384164352, 'pe_ratio': 49.604652, 'pb_ratio': 9.762013, 'dividend_yield': None, 'eps': 4.3, 'beta': 2.427, '52_week_high': 299.29, '52_week_low': 152.37}} </tool_response> <|im_end|> ``` The assistant will then read in that data from the function's response, and generate a natural language response: ``` <|im_start|>assistant The stock fundamentals data for Tesla (TSLA) are as follows: - **Symbol**: TSLA - **Company Name**: Tesla, Inc. - **Sector**: Consumer Cyclical - **Industry**: Auto Manufacturers - **Market Capitalization**: $566,160,130,480 - **Forward Price-to-Earnings Ratio (PE Ratio)**: 42.73 - **Price-to-Book Ratio (PB Ratio)**: 9.04 - **Dividend Yield**: N/A - **Trailing Earnings Per Share (EPS)**: $4.3 - **Beta Value of the Stock**: 2.42 - **52-Week High Price of the Stock**: $299.29 - **52-Week Low Price of the Stock**: $152.37 This information provides a snapshot of Tesla's financial position and performance based on the fundamental data obtained from the yfinance API. It shows that Tesla has a substantial market capitalization and a relatively high P/E and P/B ratio compared to other stocks in its industry. The company does not pay a dividend at the moment, which is reflected by a 'Dividend Yield' of 'None'. The Beta value indicates that Tesla's stock has a moderate level of volatility relative to the market. The 52-week high and low prices give an idea of the stock's range over the past year. This data can be useful when assessing investment opportunities and making investment decisions.<|im_end|> ``` ## Prompt Format for JSON Mode / Structured Outputs Our model was also trained on a specific system prompt for Structured Outputs, which should respond with **only** a json object response, in a specific json schema. Your schema can be made from a pydantic object using our codebase, with the standalone script `jsonmode.py` available here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main ``` <|im_start|>system You are a helpful assistant that answers in JSON. Here's the json schema you must adhere to:\n<schema>\n{schema}\n</schema><|im_end|> ``` Given the {schema} that you provide, it should follow the format of that json to create it's response, all you have to do is give a typical user prompt, and it will respond in JSON. 
# Benchmarks ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/vOYv9wJUMn1Xrf4BvmO_x.png) ## GPT4All: ``` | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.5520|± |0.0145| | | |acc_norm|0.5887|± |0.0144| |arc_easy | 0|acc |0.8350|± |0.0076| | | |acc_norm|0.8123|± |0.0080| |boolq | 1|acc |0.8584|± |0.0061| |hellaswag | 0|acc |0.6265|± |0.0048| | | |acc_norm|0.8053|± |0.0040| |openbookqa | 0|acc |0.3800|± |0.0217| | | |acc_norm|0.4580|± |0.0223| |piqa | 0|acc |0.8003|± |0.0093| | | |acc_norm|0.8118|± |0.0091| |winogrande | 0|acc |0.7490|± |0.0122| ``` Average: 72.62 ## AGIEval: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.2520|± |0.0273| | | |acc_norm|0.2559|± |0.0274| |agieval_logiqa_en | 0|acc |0.3548|± |0.0188| | | |acc_norm|0.3625|± |0.0189| |agieval_lsat_ar | 0|acc |0.1826|± |0.0255| | | |acc_norm|0.1913|± |0.0260| |agieval_lsat_lr | 0|acc |0.5510|± |0.0220| | | |acc_norm|0.5255|± |0.0221| |agieval_lsat_rc | 0|acc |0.6431|± |0.0293| | | |acc_norm|0.6097|± |0.0298| |agieval_sat_en | 0|acc |0.7330|± |0.0309| | | |acc_norm|0.7039|± |0.0319| |agieval_sat_en_without_passage| 0|acc |0.4029|± |0.0343| | | |acc_norm|0.3689|± |0.0337| |agieval_sat_math | 0|acc |0.3909|± |0.0330| | | |acc_norm|0.3773|± |0.0328| ``` Average: 42.44 ## BigBench: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|0.5737|± |0.0360| |bigbench_date_understanding | 0|multiple_choice_grade|0.6667|± |0.0246| |bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3178|± |0.0290| |bigbench_geometric_shapes | 0|multiple_choice_grade|0.1755|± |0.0201| | | |exact_str_match |0.0000|± |0.0000| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.3120|± |0.0207| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2014|± |0.0152| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.5500|± |0.0288| |bigbench_movie_recommendation | 0|multiple_choice_grade|0.4300|± |0.0222| |bigbench_navigate | 0|multiple_choice_grade|0.4980|± |0.0158| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7010|± |0.0102| |bigbench_ruin_names | 0|multiple_choice_grade|0.4688|± |0.0236| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.1974|± |0.0126| |bigbench_snarks | 0|multiple_choice_grade|0.7403|± |0.0327| |bigbench_sports_understanding | 0|multiple_choice_grade|0.5426|± |0.0159| |bigbench_temporal_sequences | 0|multiple_choice_grade|0.5320|± |0.0158| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2280|± |0.0119| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1531|± |0.0086| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.5500|± |0.0288| ``` Average: 43.55 ## TruthfulQA: ``` | Task |Version|Metric|Value| |Stderr| |-------------|------:|------|----:|---|-----:| |truthfulqa_mc| 1|mc1 |0.410|± |0.0172| | | |mc2 |0.578|± |0.0157| ``` # Inference Code Here is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM) Note: To use function calling, you should see the github repo above. 
```python # Code to inference Hermes with HF Transformers # Requires pytorch, transformers, bitsandbytes, sentencepiece, protobuf, and flash-attn packages import torch from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM import bitsandbytes, flash_attn tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Pro-Llama-3-8B', trust_remote_code=True) model = LlamaForCausalLM.from_pretrained( "NousResearch/Hermes-2-Pro-Llama-3-8B", torch_dtype=torch.float16, device_map="auto", load_in_8bit=False, load_in_4bit=True, use_flash_attention_2=True ) prompts = [ """<|im_start|>system You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.<|im_end|> <|im_start|>user Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world.<|im_end|> <|im_start|>assistant""", ] for chat in prompts: print(chat) input_ids = tokenizer(chat, return_tensors="pt").input_ids.to("cuda") generated_ids = model.generate(input_ids, max_new_tokens=750, temperature=0.8, repetition_penalty=1.1, do_sample=True, eos_token_id=tokenizer.eos_token_id) response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True, clean_up_tokenization_spaces=True) print(f"Response: {response}") ``` ## Inference Code for Function Calling: All code for utilizing, parsing, and building function calling templates is available on our github: [https://github.com/NousResearch/Hermes-Function-Calling](https://github.com/NousResearch/Hermes-Function-Calling) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oi4CiGh50xmoviUQnh8R3.png) # Chat Interfaces When quantized versions of the model are released, I recommend using LM Studio for chatting with Hermes 2 Pro. It does not support function calling - for that use our github repo. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. In LM-Studio, simply select the ChatML Prefix on the settings side pane: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png) ## Quantized Versions: GGUF Versions Available Here: https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF # How to cite: ```bibtex @misc{Hermes-2-Pro-Llama-3-8B, url={https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B}, title={Hermes-2-Pro-Llama-3-8B}, author={"Teknium", "interstellarninja", "theemozilla", "karan4d", "huemin_art"} } ```
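The inference example above builds the ChatML prompt string by hand. As an alternative sketch, assuming the tokenizer shipped with this repository includes a ChatML chat template (typical for Hermes releases, but worth verifying), the same prompt can be produced with `tokenizer.apply_chat_template`:

```python
# Sketch: let the tokenizer's chat template produce the ChatML prompt.
# Assumes the repo's tokenizer config ships a ChatML chat template; verify before relying on it.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Pro-Llama-3-8B")

messages = [
    {"role": "system", "content": "You are a sentient, superintelligent artificial general intelligence, here to teach and assist me."},
    {"role": "user", "content": "Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world."},
]

# add_generation_prompt=True appends the assistant header so generation starts in the right place.
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
print(tokenizer.decode(input_ids[0]))
```

The resulting tensor can then be passed to `model.generate` exactly as in the hand-built example above.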
{"base_model": "NousResearch/Meta-Llama-3-8B", "datasets": ["teknium/OpenHermes-2.5"], "language": ["en"], "license": "apache-2.0", "tags": ["Llama-3", "instruct", "finetune", "chatml", "DPO", "RLHF", "gpt4", "synthetic data", "distillation", "function calling", "json mode", "axolotl"], "widget": [{"example_title": "Hermes 2 Pro", "messages": [{"role": "system", "content": "You are a sentient, superintelligent artificial general intelligence, here to teach and assist me."}, {"role": "user", "content": "Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world."}]}], "model-index": [{"name": "Hermes-2-Pro-Llama-3-8B", "results": []}]}
task
[ "TRANSLATION" ]
40,333
SantiagoTB/platzi-diltilroberta-base-mrpc-glue-santiago-toledo
SantiagoTB
text-classification
[ "transformers", "pytorch", "roberta", "text-classification", "generated_from_trainer", "dataset:glue", "base_model:distilbert/distilroberta-base", "base_model:finetune:distilbert/distilroberta-base", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-23T20:43:42Z
2023-08-24T15:16:32+00:00
8
0
--- base_model: distilroberta-base datasets: - glue license: apache-2.0 metrics: - accuracy - f1 tags: - text-classification - generated_from_trainer model-index: - name: platzi-diltilroberta-base-mrpc-glue-santiago-toledo results: - task: type: text-classification name: Text Classification dataset: name: datasetX type: glue config: mrpc split: validation args: mrpc metrics: - type: accuracy value: 0.8284313725490197 name: Accuracy - type: f1 value: 0.8722627737226278 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-diltilroberta-base-mrpc-glue-santiago-toledo This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the datasetX dataset. It achieves the following results on the evaluation set: - Loss: 0.6731 - Accuracy: 0.8284 - F1: 0.8723 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.4044 | 1.09 | 500 | 0.6731 | 0.8284 | 0.8723 | | 0.231 | 2.18 | 1000 | 0.8025 | 0.8260 | 0.8748 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cpu - Datasets 2.14.4 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-diltilroberta-base-mrpc-glue-santiago-toledo This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the datasetX dataset. It achieves the following results on the evaluation set: - Loss: 0.6731 - Accuracy: 0.8284 - F1: 0.8723 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.4044 | 1.09 | 500 | 0.6731 | 0.8284 | 0.8723 | | 0.231 | 2.18 | 1000 | 0.8025 | 0.8260 | 0.8748 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cpu - Datasets 2.14.4 - Tokenizers 0.13.3
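As an illustrative usage sketch for this MRPC paraphrase checkpoint: the sentence pair below is invented, and the label names (`LABEL_0`/`LABEL_1`) come from the auto-generated config, so mapping them to 'not equivalent'/'equivalent' is an assumption to verify against `model.config.id2label`.

```python
# Sketch: score an invented sentence pair with the fine-tuned MRPC checkpoint.
# Label names are whatever the uploaded config exposes; remap them if needed.
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="SantiagoTB/platzi-diltilroberta-base-mrpc-glue-santiago-toledo",
)

result = clf({
    "text": "The company reported strong quarterly earnings.",
    "text_pair": "Quarterly earnings at the company were strong.",
})
print(result)
```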
{"base_model": "distilroberta-base", "datasets": ["glue"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["text-classification", "generated_from_trainer"], "model-index": [{"name": "platzi-diltilroberta-base-mrpc-glue-santiago-toledo", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "datasetX", "type": "glue", "config": "mrpc", "split": "validation", "args": "mrpc"}, "metrics": [{"type": "accuracy", "value": 0.8284313725490197, "name": "Accuracy"}, {"type": "f1", "value": 0.8722627737226278, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,334
pig4431/TweetEval_DistilBERT_5E
pig4431
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:tweet_eval", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-11-28T21:03:38Z
2022-11-28T21:09:36+00:00
13
0
--- datasets: - tweet_eval license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: TweetEval_DistilBERT_5E results: - task: type: text-classification name: Text Classification dataset: name: tweet_eval type: tweet_eval config: sentiment split: train args: sentiment metrics: - type: accuracy value: 0.9133333333333333 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # TweetEval_DistilBERT_5E This model is a fine-tuned version of [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) on the tweet_eval dataset. It achieves the following results on the evaluation set: - Loss: 0.4043 - Accuracy: 0.9133 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5747 | 0.04 | 50 | 0.4843 | 0.7333 | | 0.4336 | 0.08 | 100 | 0.2888 | 0.8667 | | 0.3437 | 0.12 | 150 | 0.2895 | 0.8667 | | 0.3375 | 0.16 | 200 | 0.2864 | 0.8733 | | 0.3072 | 0.2 | 250 | 0.2577 | 0.8867 | | 0.3019 | 0.24 | 300 | 0.2574 | 0.8933 | | 0.2662 | 0.28 | 350 | 0.2621 | 0.8867 | | 0.283 | 0.32 | 400 | 0.2340 | 0.92 | | 0.2949 | 0.37 | 450 | 0.2482 | 0.8933 | | 0.3066 | 0.41 | 500 | 0.2537 | 0.9 | | 0.2457 | 0.45 | 550 | 0.2473 | 0.9 | | 0.295 | 0.49 | 600 | 0.2177 | 0.9133 | | 0.2862 | 0.53 | 650 | 0.2215 | 0.9133 | | 0.2603 | 0.57 | 700 | 0.2272 | 0.9133 | | 0.2976 | 0.61 | 750 | 0.2298 | 0.9067 | | 0.2823 | 0.65 | 800 | 0.2451 | 0.8933 | | 0.2583 | 0.69 | 850 | 0.2645 | 0.8933 | | 0.2694 | 0.73 | 900 | 0.2352 | 0.9 | | 0.2433 | 0.77 | 950 | 0.2322 | 0.9133 | | 0.2598 | 0.81 | 1000 | 0.2300 | 0.9 | | 0.2701 | 0.85 | 1050 | 0.2162 | 0.9 | | 0.2227 | 0.89 | 1100 | 0.2135 | 0.8933 | | 0.2045 | 0.93 | 1150 | 0.2233 | 0.9133 | | 0.2821 | 0.97 | 1200 | 0.2194 | 0.9 | | 0.2342 | 1.01 | 1250 | 0.2488 | 0.88 | | 0.2028 | 1.06 | 1300 | 0.2451 | 0.8867 | | 0.1509 | 1.1 | 1350 | 0.3174 | 0.88 | | 0.1888 | 1.14 | 1400 | 0.2537 | 0.9133 | | 0.1825 | 1.18 | 1450 | 0.2559 | 0.9067 | | 0.1721 | 1.22 | 1500 | 0.2511 | 0.92 | | 0.2137 | 1.26 | 1550 | 0.2963 | 0.9133 | | 0.2153 | 1.3 | 1600 | 0.2210 | 0.92 | | 0.1989 | 1.34 | 1650 | 0.2231 | 0.9133 | | 0.2155 | 1.38 | 1700 | 0.1991 | 0.9133 | | 0.1912 | 1.42 | 1750 | 0.2146 | 0.92 | | 0.1623 | 1.46 | 1800 | 0.2721 | 0.9 | | 0.2236 | 1.5 | 1850 | 0.2301 | 0.9267 | | 0.1907 | 1.54 | 1900 | 0.1988 | 0.92 | | 0.1286 | 1.58 | 1950 | 0.2326 | 0.9 | | 0.2147 | 1.62 | 2000 | 0.2432 | 0.9267 | | 0.2018 | 1.66 | 2050 | 0.2162 | 0.9067 | | 0.2073 | 1.7 | 2100 | 0.2153 | 0.9133 | | 0.1498 | 1.75 | 2150 | 0.2335 | 0.92 | | 0.1812 | 1.79 | 2200 | 0.2275 | 0.9267 | | 0.1482 | 1.83 | 2250 | 0.2734 | 0.9 | | 0.2233 | 1.87 | 2300 | 0.2454 | 0.9 | | 0.1673 | 1.91 | 2350 | 0.2394 | 0.92 | | 0.1555 | 1.95 | 2400 | 0.2725 | 0.92 | | 0.2082 | 1.99 | 2450 | 0.2684 | 0.9133 | | 0.1545 | 2.03 | 2500 | 0.3049 | 0.9067 | | 0.1384 | 2.07 | 2550 | 0.2960 | 0.9133 | | 0.1201 | 2.11 | 2600 | 0.3259 | 
0.9 | | 0.1348 | 2.15 | 2650 | 0.3091 | 0.9133 | | 0.1046 | 2.19 | 2700 | 0.2916 | 0.9267 | | 0.1506 | 2.23 | 2750 | 0.2910 | 0.9133 | | 0.1481 | 2.27 | 2800 | 0.2855 | 0.9067 | | 0.1318 | 2.31 | 2850 | 0.3075 | 0.9 | | 0.1204 | 2.35 | 2900 | 0.3169 | 0.8933 | | 0.1669 | 2.39 | 2950 | 0.3050 | 0.9067 | | 0.1725 | 2.44 | 3000 | 0.2970 | 0.9133 | | 0.1305 | 2.48 | 3050 | 0.3065 | 0.9 | | 0.1508 | 2.52 | 3100 | 0.3079 | 0.9133 | | 0.184 | 2.56 | 3150 | 0.3482 | 0.9067 | | 0.1263 | 2.6 | 3200 | 0.3310 | 0.9 | | 0.1282 | 2.64 | 3250 | 0.3520 | 0.8933 | | 0.1217 | 2.68 | 3300 | 0.3158 | 0.9067 | | 0.1203 | 2.72 | 3350 | 0.3351 | 0.92 | | 0.1068 | 2.76 | 3400 | 0.3239 | 0.92 | | 0.1517 | 2.8 | 3450 | 0.3247 | 0.92 | | 0.113 | 2.84 | 3500 | 0.3269 | 0.9133 | | 0.1276 | 2.88 | 3550 | 0.3162 | 0.92 | | 0.1548 | 2.92 | 3600 | 0.3196 | 0.9133 | | 0.1305 | 2.96 | 3650 | 0.3163 | 0.92 | | 0.149 | 3.0 | 3700 | 0.3013 | 0.92 | | 0.0816 | 3.04 | 3750 | 0.3097 | 0.9267 | | 0.0884 | 3.08 | 3800 | 0.3028 | 0.92 | | 0.0727 | 3.12 | 3850 | 0.3487 | 0.9133 | | 0.1018 | 3.17 | 3900 | 0.3447 | 0.92 | | 0.1266 | 3.21 | 3950 | 0.3589 | 0.9133 | | 0.1216 | 3.25 | 4000 | 0.3464 | 0.92 | | 0.091 | 3.29 | 4050 | 0.3454 | 0.92 | | 0.0829 | 3.33 | 4100 | 0.3450 | 0.92 | | 0.1084 | 3.37 | 4150 | 0.3670 | 0.92 | | 0.0754 | 3.41 | 4200 | 0.3661 | 0.92 | | 0.094 | 3.45 | 4250 | 0.3588 | 0.9067 | | 0.0641 | 3.49 | 4300 | 0.3936 | 0.92 | | 0.1138 | 3.53 | 4350 | 0.3616 | 0.92 | | 0.0744 | 3.57 | 4400 | 0.3562 | 0.92 | | 0.0697 | 3.61 | 4450 | 0.3532 | 0.9267 | | 0.1083 | 3.65 | 4500 | 0.3451 | 0.9267 | | 0.0701 | 3.69 | 4550 | 0.3307 | 0.92 | | 0.0849 | 3.73 | 4600 | 0.3797 | 0.92 | | 0.09 | 3.77 | 4650 | 0.3746 | 0.9267 | | 0.0799 | 3.81 | 4700 | 0.3799 | 0.92 | | 0.0589 | 3.86 | 4750 | 0.3805 | 0.92 | | 0.0578 | 3.9 | 4800 | 0.3910 | 0.9133 | | 0.0816 | 3.94 | 4850 | 0.3856 | 0.9133 | | 0.1366 | 3.98 | 4900 | 0.3707 | 0.92 | | 0.0846 | 4.02 | 4950 | 0.3802 | 0.92 | | 0.0401 | 4.06 | 5000 | 0.3842 | 0.92 | | 0.0851 | 4.1 | 5050 | 0.3773 | 0.9267 | | 0.0514 | 4.14 | 5100 | 0.3922 | 0.9133 | | 0.0909 | 4.18 | 5150 | 0.3893 | 0.92 | | 0.0764 | 4.22 | 5200 | 0.3818 | 0.9133 | | 0.1208 | 4.26 | 5250 | 0.4096 | 0.92 | | 0.0689 | 4.3 | 5300 | 0.3940 | 0.9133 | | 0.0524 | 4.34 | 5350 | 0.4020 | 0.9133 | | 0.0733 | 4.38 | 5400 | 0.4002 | 0.9133 | | 0.0699 | 4.42 | 5450 | 0.4013 | 0.9133 | | 0.0712 | 4.46 | 5500 | 0.4037 | 0.9067 | | 0.0557 | 4.5 | 5550 | 0.4121 | 0.92 | | 0.0679 | 4.55 | 5600 | 0.4067 | 0.9133 | | 0.0651 | 4.59 | 5650 | 0.4194 | 0.9133 | | 0.0607 | 4.63 | 5700 | 0.4007 | 0.9133 | | 0.0676 | 4.67 | 5750 | 0.4013 | 0.9133 | | 0.0303 | 4.71 | 5800 | 0.3984 | 0.9133 | | 0.0674 | 4.75 | 5850 | 0.4037 | 0.9133 | | 0.0842 | 4.79 | 5900 | 0.4072 | 0.9133 | | 0.0516 | 4.83 | 5950 | 0.4096 | 0.9133 | | 0.0556 | 4.87 | 6000 | 0.4111 | 0.92 | | 0.0277 | 4.91 | 6050 | 0.4079 | 0.9133 | | 0.0629 | 4.95 | 6100 | 0.4053 | 0.9133 | | 0.0426 | 4.99 | 6150 | 0.4043 | 0.9133 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0 - Datasets 2.3.2 - Tokenizers 0.13.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # TweetEval_DistilBERT_5E This model is a fine-tuned version of [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) on the tweet_eval dataset. It achieves the following results on the evaluation set: - Loss: 0.4043 - Accuracy: 0.9133 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5747 | 0.04 | 50 | 0.4843 | 0.7333 | | 0.4336 | 0.08 | 100 | 0.2888 | 0.8667 | | 0.3437 | 0.12 | 150 | 0.2895 | 0.8667 | | 0.3375 | 0.16 | 200 | 0.2864 | 0.8733 | | 0.3072 | 0.2 | 250 | 0.2577 | 0.8867 | | 0.3019 | 0.24 | 300 | 0.2574 | 0.8933 | | 0.2662 | 0.28 | 350 | 0.2621 | 0.8867 | | 0.283 | 0.32 | 400 | 0.2340 | 0.92 | | 0.2949 | 0.37 | 450 | 0.2482 | 0.8933 | | 0.3066 | 0.41 | 500 | 0.2537 | 0.9 | | 0.2457 | 0.45 | 550 | 0.2473 | 0.9 | | 0.295 | 0.49 | 600 | 0.2177 | 0.9133 | | 0.2862 | 0.53 | 650 | 0.2215 | 0.9133 | | 0.2603 | 0.57 | 700 | 0.2272 | 0.9133 | | 0.2976 | 0.61 | 750 | 0.2298 | 0.9067 | | 0.2823 | 0.65 | 800 | 0.2451 | 0.8933 | | 0.2583 | 0.69 | 850 | 0.2645 | 0.8933 | | 0.2694 | 0.73 | 900 | 0.2352 | 0.9 | | 0.2433 | 0.77 | 950 | 0.2322 | 0.9133 | | 0.2598 | 0.81 | 1000 | 0.2300 | 0.9 | | 0.2701 | 0.85 | 1050 | 0.2162 | 0.9 | | 0.2227 | 0.89 | 1100 | 0.2135 | 0.8933 | | 0.2045 | 0.93 | 1150 | 0.2233 | 0.9133 | | 0.2821 | 0.97 | 1200 | 0.2194 | 0.9 | | 0.2342 | 1.01 | 1250 | 0.2488 | 0.88 | | 0.2028 | 1.06 | 1300 | 0.2451 | 0.8867 | | 0.1509 | 1.1 | 1350 | 0.3174 | 0.88 | | 0.1888 | 1.14 | 1400 | 0.2537 | 0.9133 | | 0.1825 | 1.18 | 1450 | 0.2559 | 0.9067 | | 0.1721 | 1.22 | 1500 | 0.2511 | 0.92 | | 0.2137 | 1.26 | 1550 | 0.2963 | 0.9133 | | 0.2153 | 1.3 | 1600 | 0.2210 | 0.92 | | 0.1989 | 1.34 | 1650 | 0.2231 | 0.9133 | | 0.2155 | 1.38 | 1700 | 0.1991 | 0.9133 | | 0.1912 | 1.42 | 1750 | 0.2146 | 0.92 | | 0.1623 | 1.46 | 1800 | 0.2721 | 0.9 | | 0.2236 | 1.5 | 1850 | 0.2301 | 0.9267 | | 0.1907 | 1.54 | 1900 | 0.1988 | 0.92 | | 0.1286 | 1.58 | 1950 | 0.2326 | 0.9 | | 0.2147 | 1.62 | 2000 | 0.2432 | 0.9267 | | 0.2018 | 1.66 | 2050 | 0.2162 | 0.9067 | | 0.2073 | 1.7 | 2100 | 0.2153 | 0.9133 | | 0.1498 | 1.75 | 2150 | 0.2335 | 0.92 | | 0.1812 | 1.79 | 2200 | 0.2275 | 0.9267 | | 0.1482 | 1.83 | 2250 | 0.2734 | 0.9 | | 0.2233 | 1.87 | 2300 | 0.2454 | 0.9 | | 0.1673 | 1.91 | 2350 | 0.2394 | 0.92 | | 0.1555 | 1.95 | 2400 | 0.2725 | 0.92 | | 0.2082 | 1.99 | 2450 | 0.2684 | 0.9133 | | 0.1545 | 2.03 | 2500 | 0.3049 | 0.9067 | | 0.1384 | 2.07 | 2550 | 0.2960 | 0.9133 | | 0.1201 | 2.11 | 2600 | 0.3259 | 0.9 | | 0.1348 | 2.15 | 2650 | 0.3091 | 0.9133 | | 0.1046 | 2.19 | 2700 | 0.2916 | 0.9267 | | 0.1506 | 2.23 | 2750 | 0.2910 | 0.9133 | | 0.1481 | 2.27 | 2800 | 0.2855 | 0.9067 | | 0.1318 | 2.31 | 2850 | 0.3075 | 0.9 | | 0.1204 | 2.35 | 2900 | 0.3169 | 0.8933 | | 0.1669 | 2.39 | 2950 | 0.3050 | 0.9067 | | 0.1725 | 2.44 | 3000 | 0.2970 | 0.9133 | | 0.1305 | 2.48 | 3050 | 
0.3065 | 0.9 | | 0.1508 | 2.52 | 3100 | 0.3079 | 0.9133 | | 0.184 | 2.56 | 3150 | 0.3482 | 0.9067 | | 0.1263 | 2.6 | 3200 | 0.3310 | 0.9 | | 0.1282 | 2.64 | 3250 | 0.3520 | 0.8933 | | 0.1217 | 2.68 | 3300 | 0.3158 | 0.9067 | | 0.1203 | 2.72 | 3350 | 0.3351 | 0.92 | | 0.1068 | 2.76 | 3400 | 0.3239 | 0.92 | | 0.1517 | 2.8 | 3450 | 0.3247 | 0.92 | | 0.113 | 2.84 | 3500 | 0.3269 | 0.9133 | | 0.1276 | 2.88 | 3550 | 0.3162 | 0.92 | | 0.1548 | 2.92 | 3600 | 0.3196 | 0.9133 | | 0.1305 | 2.96 | 3650 | 0.3163 | 0.92 | | 0.149 | 3.0 | 3700 | 0.3013 | 0.92 | | 0.0816 | 3.04 | 3750 | 0.3097 | 0.9267 | | 0.0884 | 3.08 | 3800 | 0.3028 | 0.92 | | 0.0727 | 3.12 | 3850 | 0.3487 | 0.9133 | | 0.1018 | 3.17 | 3900 | 0.3447 | 0.92 | | 0.1266 | 3.21 | 3950 | 0.3589 | 0.9133 | | 0.1216 | 3.25 | 4000 | 0.3464 | 0.92 | | 0.091 | 3.29 | 4050 | 0.3454 | 0.92 | | 0.0829 | 3.33 | 4100 | 0.3450 | 0.92 | | 0.1084 | 3.37 | 4150 | 0.3670 | 0.92 | | 0.0754 | 3.41 | 4200 | 0.3661 | 0.92 | | 0.094 | 3.45 | 4250 | 0.3588 | 0.9067 | | 0.0641 | 3.49 | 4300 | 0.3936 | 0.92 | | 0.1138 | 3.53 | 4350 | 0.3616 | 0.92 | | 0.0744 | 3.57 | 4400 | 0.3562 | 0.92 | | 0.0697 | 3.61 | 4450 | 0.3532 | 0.9267 | | 0.1083 | 3.65 | 4500 | 0.3451 | 0.9267 | | 0.0701 | 3.69 | 4550 | 0.3307 | 0.92 | | 0.0849 | 3.73 | 4600 | 0.3797 | 0.92 | | 0.09 | 3.77 | 4650 | 0.3746 | 0.9267 | | 0.0799 | 3.81 | 4700 | 0.3799 | 0.92 | | 0.0589 | 3.86 | 4750 | 0.3805 | 0.92 | | 0.0578 | 3.9 | 4800 | 0.3910 | 0.9133 | | 0.0816 | 3.94 | 4850 | 0.3856 | 0.9133 | | 0.1366 | 3.98 | 4900 | 0.3707 | 0.92 | | 0.0846 | 4.02 | 4950 | 0.3802 | 0.92 | | 0.0401 | 4.06 | 5000 | 0.3842 | 0.92 | | 0.0851 | 4.1 | 5050 | 0.3773 | 0.9267 | | 0.0514 | 4.14 | 5100 | 0.3922 | 0.9133 | | 0.0909 | 4.18 | 5150 | 0.3893 | 0.92 | | 0.0764 | 4.22 | 5200 | 0.3818 | 0.9133 | | 0.1208 | 4.26 | 5250 | 0.4096 | 0.92 | | 0.0689 | 4.3 | 5300 | 0.3940 | 0.9133 | | 0.0524 | 4.34 | 5350 | 0.4020 | 0.9133 | | 0.0733 | 4.38 | 5400 | 0.4002 | 0.9133 | | 0.0699 | 4.42 | 5450 | 0.4013 | 0.9133 | | 0.0712 | 4.46 | 5500 | 0.4037 | 0.9067 | | 0.0557 | 4.5 | 5550 | 0.4121 | 0.92 | | 0.0679 | 4.55 | 5600 | 0.4067 | 0.9133 | | 0.0651 | 4.59 | 5650 | 0.4194 | 0.9133 | | 0.0607 | 4.63 | 5700 | 0.4007 | 0.9133 | | 0.0676 | 4.67 | 5750 | 0.4013 | 0.9133 | | 0.0303 | 4.71 | 5800 | 0.3984 | 0.9133 | | 0.0674 | 4.75 | 5850 | 0.4037 | 0.9133 | | 0.0842 | 4.79 | 5900 | 0.4072 | 0.9133 | | 0.0516 | 4.83 | 5950 | 0.4096 | 0.9133 | | 0.0556 | 4.87 | 6000 | 0.4111 | 0.92 | | 0.0277 | 4.91 | 6050 | 0.4079 | 0.9133 | | 0.0629 | 4.95 | 6100 | 0.4053 | 0.9133 | | 0.0426 | 4.99 | 6150 | 0.4043 | 0.9133 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0 - Datasets 2.3.2 - Tokenizers 0.13.2
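A minimal inference sketch for this checkpoint follows; the example tweet is invented, and the label names printed are whatever the uploaded config exposes (auto-generated trainer configs often use `LABEL_0`, `LABEL_1`, ...), so check `model.config.id2label` before interpreting them as negative/neutral/positive.

```python
# Sketch: classify an invented tweet with the fine-tuned checkpoint.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "pig4431/TweetEval_DistilBERT_5E"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("The new update is surprisingly good!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Report a probability for every label the config defines.
probs = torch.softmax(logits, dim=-1)[0]
for i, p in enumerate(probs):
    print(model.config.id2label[i], round(p.item(), 3))
```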
{"datasets": ["tweet_eval"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "TweetEval_DistilBERT_5E", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "tweet_eval", "type": "tweet_eval", "config": "sentiment", "split": "train", "args": "sentiment"}, "metrics": [{"type": "accuracy", "value": 0.9133333333333333, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,335
MikaSie/Pegasus_no_extraction_V1
MikaSie
summarization
[ "transformers", "safetensors", "pegasus", "text2text-generation", "summarization", "abstractive", "hybrid", "multistep", "en", "dataset:dennlinger/eur-lex-sum", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-06-05T17:12:09Z
2024-07-15T17:42:55+00:00
103
0
--- base_model: Pegasus datasets: dennlinger/eur-lex-sum language: en pipeline_tag: summarization tags: - summarization - abstractive - hybrid - multistep model-index: - name: BART results: - task: type: summarization name: Long, Legal Document Summarization dataset: name: eur-lex-sum type: dennlinger/eur-lex-sum metrics: - type: ROUGE-1 value: 0.33051797286301976 - type: ROUGE-2 value: 0.12929367687407428 - type: ROUGE-L value: 0.2259807122596748 - type: BERTScore value: 0.8499303866365102 - type: BARTScore value: -1.8067246456257298 - type: BLANC value: 0.09232238679254928 --- # Model Card for Pegasus_no_extraction_V1 ## Model Details --- ### Model Description This model is a fine-tuned version of Pegasus. The research involves a multi-step summarization approach to long, legal documents. Many decisions in the renewable energy space are heavily dependent on regulations, but these regulations are often long and complicated. The proposed architecture first uses one or more extractive summarization steps to compress the source text, before the final summary is created by the abstractive summarization model. This fine-tuned abstractive model has been trained on a dataset pre-processed through extractive summarization by No extractive model with No ratio ratio. The research has used multiple extractive-abstractive model combinations, which can be found on https://huggingface.co/MikaSie. To obtain optimal results, feed the model an extractive summary as input, as it was designed this way! The dataset used by this model is the [EUR-lex-sum](https://huggingface.co/datasets/dennlinger/eur-lex-sum) dataset. The evaluation metrics can be found in the metadata of this model card. This model was introduced in the master's thesis of Mika Sie at Utrecht University, in collaboration with Power2X. More information can be found in PAPER_LINK. - **Developed by:** Mika Sie - **Funded by:** Utrecht University & Power2X - **Language (NLP):** English - **Finetuned from model:** Pegasus ### Model Sources - **Repository**: https://github.com/MikaSie/Thesis - **Paper**: PAPER_LINK - **Streamlit demo**: STREAMLIT_LINK ## Uses --- ### Direct Use This model can be directly used for summarizing long, legal documents. However, it is recommended to first use an extractive summarization tool, such as No extractive model, to compress the source text before feeding it to this model. This model has been specifically designed to work with extractive summaries. An example using the Huggingface pipeline could be: ```python # pip install bert-extractive-summarizer from summarizer import Summarizer from transformers import pipeline extractive_model = Summarizer() text = 'Original document text to be summarized' extractive_summary = extractive_model(text) abstractive_model = pipeline('summarization', model = 'MikaSie/Pegasus_no_extraction_V1', tokenizer = 'MikaSie/Pegasus_no_extraction_V1') result = abstractive_model(extractive_summary) ``` More information about the implementation can be found in the thesis report. ### Out-of-Scope Use Using this model without an extractive summarization step may not yield optimal results. It is recommended to follow the proposed multi-step summarization approach outlined in the model description for best performance. ## Bias, Risks, and Limitations --- ### Bias As with any language model, this model may inherit biases present in the training data. It is important to be aware of potential biases in the source text and to critically evaluate the generated summaries.
### Risks - The model may not always generate accurate or comprehensive summaries, especially for complex legal documents. - The model may not generate truthful information. ### Limitations - The model may produce summaries that are overly abstractive or fail to capture important details. - The model's performance may vary depending on the quality and relevance of the extractive summaries used as input. ### Recommendations - Carefully review and validate the generated summaries before relying on them for critical tasks. - Consider using the model in conjunction with human review or other validation mechanisms to ensure the accuracy and completeness of the summaries. - Experiment with different extractive summarization models or techniques to find the most suitable input for the abstractive model. - Provide feedback and contribute to the ongoing research and development of the model to help improve its performance and address its limitations. - Any actions taken based on this content are at your own risk.
null
Non_BioNLP
# Model Card for Pegasus_no_extraction_V1 ## Model Details --- ### Model Description This model is a fine-tuned version of Pegasus. The research involves a multi-step summarization approach to long, legal documents. Many decisions in the renewable energy space are heavily dependent on regulations, but these regulations are often long and complicated. The proposed architecture first uses one or more extractive summarization steps to compress the source text, before the final summary is created by the abstractive summarization model. This fine-tuned abstractive model has been trained on a dataset pre-processed through extractive summarization by No extractive model with No ratio ratio. The research has used multiple extractive-abstractive model combinations, which can be found on https://huggingface.co/MikaSie. To obtain optimal results, feed the model an extractive summary as input, as it was designed this way! The dataset used by this model is the [EUR-lex-sum](https://huggingface.co/datasets/dennlinger/eur-lex-sum) dataset. The evaluation metrics can be found in the metadata of this model card. This model was introduced in the master's thesis of Mika Sie at Utrecht University, in collaboration with Power2X. More information can be found in PAPER_LINK. - **Developed by:** Mika Sie - **Funded by:** Utrecht University & Power2X - **Language (NLP):** English - **Finetuned from model:** Pegasus ### Model Sources - **Repository**: https://github.com/MikaSie/Thesis - **Paper**: PAPER_LINK - **Streamlit demo**: STREAMLIT_LINK ## Uses --- ### Direct Use This model can be directly used for summarizing long, legal documents. However, it is recommended to first use an extractive summarization tool, such as No extractive model, to compress the source text before feeding it to this model. This model has been specifically designed to work with extractive summaries. An example using the Huggingface pipeline could be: ```python # pip install bert-extractive-summarizer from summarizer import Summarizer from transformers import pipeline extractive_model = Summarizer() text = 'Original document text to be summarized' extractive_summary = extractive_model(text) abstractive_model = pipeline('summarization', model = 'MikaSie/Pegasus_no_extraction_V1', tokenizer = 'MikaSie/Pegasus_no_extraction_V1') result = abstractive_model(extractive_summary) ``` More information about the implementation can be found in the thesis report. ### Out-of-Scope Use Using this model without an extractive summarization step may not yield optimal results. It is recommended to follow the proposed multi-step summarization approach outlined in the model description for best performance. ## Bias, Risks, and Limitations --- ### Bias As with any language model, this model may inherit biases present in the training data. It is important to be aware of potential biases in the source text and to critically evaluate the generated summaries. ### Risks - The model may not always generate accurate or comprehensive summaries, especially for complex legal documents. - The model may not generate truthful information. ### Limitations - The model may produce summaries that are overly abstractive or fail to capture important details. - The model's performance may vary depending on the quality and relevance of the extractive summaries used as input. ### Recommendations - Carefully review and validate the generated summaries before relying on them for critical tasks.
- Consider using the model in conjunction with human review or other validation mechanisms to ensure the accuracy and completeness of the summaries. - Experiment with different extractive summarization models or techniques to find the most suitable input for the abstractive model. - Provide feedback and contribute to the ongoing research and development of the model to help improve its performance and address its limitations. - Any actions taken based on this content are at your own risk.
{"base_model": "Pegasus", "datasets": "dennlinger/eur-lex-sum", "language": "en", "pipeline_tag": "summarization", "tags": ["summarization", "abstractive", "hybrid", "multistep"], "model-index": [{"name": "BART", "results": [{"task": {"type": "summarization", "name": "Long, Legal Document Summarization"}, "dataset": {"name": "eur-lex-sum", "type": "dennlinger/eur-lex-sum"}, "metrics": [{"type": "ROUGE-1", "value": 0.33051797286301976}, {"type": "ROUGE-2", "value": 0.12929367687407428}, {"type": "ROUGE-L", "value": 0.2259807122596748}, {"type": "BERTScore", "value": 0.8499303866365102}, {"type": "BARTScore", "value": -1.8067246456257298}, {"type": "BLANC", "value": 0.09232238679254928}]}]}]}
task
[ "SUMMARIZATION" ]
40,336
zhaoyang/BertFinetuning
zhaoyang
null
[ "transformers", "pytorch", "tensorboard", "generated_from_trainer", "en", "dataset:glue", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2021-12-06T08:23:02+00:00
9
0
--- datasets: - glue language: - en license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: bert_finetunning results: - task: type: text-classification name: Text Classification dataset: name: GLUE MRPC type: glue args: mrpc metrics: - type: accuracy value: 0.8259803921568627 name: Accuracy - type: f1 value: 0.8786324786324787 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert_finetunning This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the GLUE MRPC dataset. It achieves the following results on the evaluation set: - Loss: 0.4018 - Accuracy: 0.8260 - F1: 0.8786 - Combined Score: 0.8523 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 ### Training results ### Framework versions - Transformers 4.13.0.dev0 - Pytorch 1.9.0 - Datasets 1.16.1 - Tokenizers 0.10.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert_finetunning This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the GLUE MRPC dataset. It achieves the following results on the evaluation set: - Loss: 0.4018 - Accuracy: 0.8260 - F1: 0.8786 - Combined Score: 0.8523 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 ### Training results ### Framework versions - Transformers 4.13.0.dev0 - Pytorch 1.9.0 - Datasets 1.16.1 - Tokenizers 0.10.3
{"datasets": ["glue"], "language": ["en"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "bert_finetunning", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE MRPC", "type": "glue", "args": "mrpc"}, "metrics": [{"type": "accuracy", "value": 0.8259803921568627, "name": "Accuracy"}, {"type": "f1", "value": 0.8786324786324787, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,337
asas-ai/opus-mt-ar-en-finetuned-ar-to-en
asas-ai
translation
[ "transformers", "pytorch", "tensorboard", "marian", "text2text-generation", "generated_from_trainer", "translation", "base_model:Helsinki-NLP/opus-mt-ar-en", "base_model:finetune:Helsinki-NLP/opus-mt-ar-en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-15T06:35:11Z
2023-08-19T08:05:28+00:00
24
1
--- base_model: Helsinki-NLP/opus-mt-ar-en license: apache-2.0 metrics: - bleu pipeline_tag: translation tags: - generated_from_trainer model-index: - name: opus-mt-ar-en-finetuned-ar-to-en results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-ar-en-finetuned-ar-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-ar-en](https://huggingface.co/Helsinki-NLP/opus-mt-ar-en) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8180 - Bleu: 65.6656 - Gen Len: 60.248 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:| | No log | 1.0 | 348 | 1.2239 | 50.582 | 62.155 | | 1.5859 | 2.0 | 696 | 1.0136 | 56.9789 | 61.275 | | 1.0284 | 3.0 | 1044 | 0.9322 | 59.8447 | 61.319 | | 1.0284 | 4.0 | 1392 | 0.8872 | 61.7622 | 61.239 | | 0.8538 | 5.0 | 1740 | 0.8595 | 63.1303 | 60.754 | | 0.7418 | 6.0 | 2088 | 0.8434 | 64.0222 | 60.68 | | 0.7418 | 7.0 | 2436 | 0.8305 | 64.8723 | 60.275 | | 0.6897 | 8.0 | 2784 | 0.8222 | 65.4715 | 60.265 | | 0.6394 | 9.0 | 3132 | 0.8189 | 65.5593 | 60.277 | | 0.6394 | 10.0 | 3480 | 0.8180 | 65.6656 | 60.248 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-ar-en-finetuned-ar-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-ar-en](https://huggingface.co/Helsinki-NLP/opus-mt-ar-en) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8180 - Bleu: 65.6656 - Gen Len: 60.248 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:| | No log | 1.0 | 348 | 1.2239 | 50.582 | 62.155 | | 1.5859 | 2.0 | 696 | 1.0136 | 56.9789 | 61.275 | | 1.0284 | 3.0 | 1044 | 0.9322 | 59.8447 | 61.319 | | 1.0284 | 4.0 | 1392 | 0.8872 | 61.7622 | 61.239 | | 0.8538 | 5.0 | 1740 | 0.8595 | 63.1303 | 60.754 | | 0.7418 | 6.0 | 2088 | 0.8434 | 64.0222 | 60.68 | | 0.7418 | 7.0 | 2436 | 0.8305 | 64.8723 | 60.275 | | 0.6897 | 8.0 | 2784 | 0.8222 | 65.4715 | 60.265 | | 0.6394 | 9.0 | 3132 | 0.8189 | 65.5593 | 60.277 | | 0.6394 | 10.0 | 3480 | 0.8180 | 65.6656 | 60.248 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
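A short usage sketch for this checkpoint is given below; the Arabic sentence is an invented example, and loading through the generic `translation` pipeline is assumed to work because the base model is a MarianMT checkpoint.

```python
# Sketch: translate Arabic to English with the fine-tuned Marian checkpoint.
from transformers import pipeline

translator = pipeline("translation", model="asas-ai/opus-mt-ar-en-finetuned-ar-to-en")

# Invented example sentence ("I love reading books").
print(translator("أنا أحب قراءة الكتب", max_length=128)[0]["translation_text"])
```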
{"base_model": "Helsinki-NLP/opus-mt-ar-en", "license": "apache-2.0", "metrics": ["bleu"], "pipeline_tag": "translation", "tags": ["generated_from_trainer"], "model-index": [{"name": "opus-mt-ar-en-finetuned-ar-to-en", "results": []}]}
task
[ "TRANSLATION" ]
40,338
harshu202/finetuning-sentiment-model-3000-samples
harshu202
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:imdb", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-25T12:26:46Z
2023-08-25T13:15:00+00:00
8
0
--- base_model: distilbert-base-uncased datasets: - imdb license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: finetuning-sentiment-model-3000-samples results: - task: type: text-classification name: Text Classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - type: accuracy value: 0.5033333333333333 name: Accuracy - type: f1 value: 0.6681514476614698 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6891 - Accuracy: 0.5033 - F1: 0.6682 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 0.02 ### Training results ### Framework versions - Transformers 4.32.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6891 - Accuracy: 0.5033 - F1: 0.6682 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 0.02 ### Training results ### Framework versions - Transformers 4.32.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
{"base_model": "distilbert-base-uncased", "datasets": ["imdb"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "finetuning-sentiment-model-3000-samples", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "imdb", "type": "imdb", "config": "plain_text", "split": "test", "args": "plain_text"}, "metrics": [{"type": "accuracy", "value": 0.5033333333333333, "name": "Accuracy"}, {"type": "f1", "value": 0.6681514476614698, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
40,339
utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF
utkmst
null
[ "gguf", "llama-cpp", "ggufa", "llama-3.1", "instruction-tuned", "en", "dataset:OpenAssistant/oasst1", "dataset:databricks/databricks-dolly-15k", "dataset:Open-Orca/OpenOrca", "dataset:mlabonne/open-perfectblend", "dataset:tatsu-lab/alpaca", "base_model:meta-llama/Llama-3.1-8B-Instruct", "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct", "license:llama3.1", "endpoints_compatible", "region:us", "conversational" ]
2025-03-08T13:56:32Z
2025-03-08T15:58:34+00:00
30
1
--- base_model: - meta-llama/Llama-3.1-8B-Instruct datasets: - OpenAssistant/oasst1 - databricks/databricks-dolly-15k - Open-Orca/OpenOrca - mlabonne/open-perfectblend - tatsu-lab/alpaca language: - en license: llama3.1 tags: - llama-cpp - ggufa - llama-3.1 - instruction-tuned --- # utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF ## Model Description This is a quantized GGUF version of my fine-tuned model [`utkmst/chimera-beta-test2-lora-merged`](https://huggingface.co/utkmst/chimera-beta-test2-lora-merged), which was created by LoRA fine-tuning the Meta Llama-3.1-8B-Instruct model and merging the resulting adapter with the base model. The GGUF conversion was performed using llama.cpp with Q4_K_M quantization for efficient inference. ## Architecture - **Base Model**: meta-llama/Llama-3.1-8B-Instruct - **Size**: 8.03B parameters - **Type**: Decoder-only transformer - **Quantization**: Q4_K_M GGUF format (4-bit quantization with K-means clustering) ## Training Details - **Training Method**: LoRA fine-tuning followed by adapter merging - **LoRA Configuration**: - Rank: 8 - Alpha: 16 - Trainable modules: Attention layers and feed-forward networks - **Training Hyperparameters**: - Learning rate: 2e-4 - Batch size: 2 - Training epochs: 1 - Optimizer: AdamW with constant scheduler ## Dataset The model was trained on a curated mixture of high-quality instruction datasets: - OpenAssistant/oasst1: Human-generated conversations with AI assistants - databricks/databricks-dolly-15k: Instruction-following examples - Open-Orca/OpenOrca: Augmented training data based on GPT-4 generations - mlabonne/open-perfectblend: A carefully balanced blend of open-source instruction data - tatsu-lab/alpaca: Self-instructed data based on demonstrations ## Intended Use This model is designed for: - General purpose assistant capabilities - Question answering and knowledge retrieval - Creative content generation - Instructional guidance It's optimized for deployment in resource-constrained environments due to its quantized nature while maintaining good response quality. ## Limitations - Reduced numerical precision due to quantization may impact performance on certain mathematical or precise reasoning tasks - Base model limitations including potential hallucinations and factual inaccuracies - Limited context window compared to larger models - Knowledge cutoff from the base Llama-3.1 model - May exhibit biases present in training data ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF --hf-file chimera-beta-test2-lora-merged-q4_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF --hf-file chimera-beta-test2-lora-merged-q4_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. 
``` ./llama-cli --hf-repo utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF --hf-file chimera-beta-test2-lora-merged-q4_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF --hf-file chimera-beta-test2-lora-merged-q4_k_m.gguf -c 2048 ```
null
Non_BioNLP
# utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF ## Model Description This is a quantized GGUF version of my fine-tuned model [`utkmst/chimera-beta-test2-lora-merged`](https://huggingface.co/utkmst/chimera-beta-test2-lora-merged), which was created by LoRA fine-tuning the Meta Llama-3.1-8B-Instruct model and merging the resulting adapter with the base model. The GGUF conversion was performed using llama.cpp with Q4_K_M quantization for efficient inference. ## Architecture - **Base Model**: meta-llama/Llama-3.1-8B-Instruct - **Size**: 8.03B parameters - **Type**: Decoder-only transformer - **Quantization**: Q4_K_M GGUF format (4-bit quantization with K-means clustering) ## Training Details - **Training Method**: LoRA fine-tuning followed by adapter merging - **LoRA Configuration**: - Rank: 8 - Alpha: 16 - Trainable modules: Attention layers and feed-forward networks - **Training Hyperparameters**: - Learning rate: 2e-4 - Batch size: 2 - Training epochs: 1 - Optimizer: AdamW with constant scheduler ## Dataset The model was trained on a curated mixture of high-quality instruction datasets: - OpenAssistant/oasst1: Human-generated conversations with AI assistants - databricks/databricks-dolly-15k: Instruction-following examples - Open-Orca/OpenOrca: Augmented training data based on GPT-4 generations - mlabonne/open-perfectblend: A carefully balanced blend of open-source instruction data - tatsu-lab/alpaca: Self-instructed data based on demonstrations ## Intended Use This model is designed for: - General purpose assistant capabilities - Question answering and knowledge retrieval - Creative content generation - Instructional guidance It's optimized for deployment in resource-constrained environments due to its quantized nature while maintaining good response quality. ## Limitations - Reduced numerical precision due to quantization may impact performance on certain mathematical or precise reasoning tasks - Base model limitations including potential hallucinations and factual inaccuracies - Limited context window compared to larger models - Knowledge cutoff from the base Llama-3.1 model - May exhibit biases present in training data ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF --hf-file chimera-beta-test2-lora-merged-q4_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF --hf-file chimera-beta-test2-lora-merged-q4_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF --hf-file chimera-beta-test2-lora-merged-q4_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo utkmst/chimera-beta-test2-lora-merged-Q4_K_M-GGUF --hf-file chimera-beta-test2-lora-merged-q4_k_m.gguf -c 2048 ```
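Beyond the CLI, recent llama.cpp builds of `llama-server` expose an OpenAI-compatible HTTP API. The sketch below assumes the server started with the command above is listening on the default port 8080 and that the `openai` Python client is installed; adjust the base URL, port, and model name if your build or launch flags differ.

```python
# Sketch: query a local llama-server over its OpenAI-compatible endpoint.
# Assumes llama-server is running on localhost:8080 (the default) with this GGUF loaded.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8080/v1", api_key="sk-no-key-required")

response = client.chat.completions.create(
    model="chimera-beta-test2-lora-merged-q4_k_m.gguf",  # many builds ignore this field
    messages=[
        {"role": "user", "content": "Summarize the idea behind LoRA fine-tuning in two sentences."},
    ],
    temperature=0.7,
)
print(response.choices[0].message.content)
```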
{"base_model": ["meta-llama/Llama-3.1-8B-Instruct"], "datasets": ["OpenAssistant/oasst1", "databricks/databricks-dolly-15k", "Open-Orca/OpenOrca", "mlabonne/open-perfectblend", "tatsu-lab/alpaca"], "language": ["en"], "license": "llama3.1", "tags": ["llama-cpp", "ggufa", "llama-3.1", "instruction-tuned"]}
task
[ "QUESTION_ANSWERING" ]
40,340