id
stringlengths
6
113
author
stringlengths
2
36
task_category
stringclasses
42 values
tags
listlengths
1
4.05k
created_time
timestamp[ns, tz=UTC]date
2022-03-02 23:29:04
2025-04-10 08:38:38
last_modified
stringdate
2020-05-14 13:13:12
2025-04-19 04:15:39
downloads
int64
0
118M
likes
int64
0
4.86k
README
stringlengths
30
1.01M
matched_bigbio_names
listlengths
1
8
is_bionlp
stringclasses
3 values
model_cards
stringlengths
0
1M
metadata
stringlengths
2
698k
source
stringclasses
2 values
matched_task
listlengths
1
10
__index_level_0__
int64
0
46.9k
syjflora/distilbert-base-uncased-finetuned-emotion
syjflora
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-09-01T03:09:56Z
2023-09-01T03:20:16+00:00
8
0
--- base_model: distilbert-base-uncased datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.925 name: Accuracy - type: f1 value: 0.9250456898384913 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2195 - Accuracy: 0.925 - F1: 0.9250 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8418 | 1.0 | 250 | 0.3325 | 0.901 | 0.8992 | | 0.2532 | 2.0 | 500 | 0.2195 | 0.925 | 0.9250 | ### Framework versions - Transformers 4.32.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2195 - Accuracy: 0.925 - F1: 0.9250 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8418 | 1.0 | 250 | 0.3325 | 0.901 | 0.8992 | | 0.2532 | 2.0 | 500 | 0.2195 | 0.925 | 0.9250 | ### Framework versions - Transformers 4.32.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
{"base_model": "distilbert-base-uncased", "datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.925, "name": "Accuracy"}, {"type": "f1", "value": 0.9250456898384913, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,380
Tritkoman/EnglishtoSaterlandFrisian
Tritkoman
translation
[ "transformers", "pytorch", "autotrain", "translation", "en", "nl", "dataset:Tritkoman/autotrain-data-wdxsxsxswddwwd", "co2_eq_emissions", "endpoints_compatible", "region:us" ]
2022-10-11T12:59:48Z
2022-10-11T13:01:51+00:00
5
0
--- datasets: - Tritkoman/autotrain-data-wdxsxsxswddwwd language: - en - nl tags: - autotrain - translation co2_eq_emissions: emissions: 2.82976037007073 --- # Model Trained Using AutoTrain - Problem type: Translation - Model ID: 1722960201 - CO2 Emissions (in grams): 2.8298 ## Validation Metrics - Loss: 0.021 - SacreBLEU: 92.565 - Gen len: 10.877
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Translation - Model ID: 1722960201 - CO2 Emissions (in grams): 2.8298 ## Validation Metrics - Loss: 0.021 - SacreBLEU: 92.565 - Gen len: 10.877
{"datasets": ["Tritkoman/autotrain-data-wdxsxsxswddwwd"], "language": ["en", "nl"], "tags": ["autotrain", "translation"], "co2_eq_emissions": {"emissions": 2.82976037007073}}
task
[ "TRANSLATION" ]
41,381
mbrewerdavis/phrasebank-sentiment-analysis
mbrewerdavis
text-classification
[ "transformers", "pytorch", "bert", "text-classification", "generated_from_trainer", "dataset:financial_phrasebank", "base_model:google-bert/bert-base-uncased", "base_model:finetune:google-bert/bert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-29T19:34:37Z
2023-10-29T19:57:03+00:00
106
0
--- base_model: bert-base-uncased datasets: - financial_phrasebank license: apache-2.0 metrics: - f1 - accuracy tags: - generated_from_trainer model-index: - name: phrasebank-sentiment-analysis results: - task: type: text-classification name: Text Classification dataset: name: financial_phrasebank type: financial_phrasebank config: sentences_50agree split: train args: sentences_50agree metrics: - type: f1 value: 0.8427553411350498 name: F1 - type: accuracy value: 0.8548830811554333 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # phrasebank-sentiment-analysis This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the financial_phrasebank dataset. It achieves the following results on the evaluation set: - Loss: 0.9618 - F1: 0.8428 - Accuracy: 0.8549 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------:|:--------:| | 0.0872 | 0.94 | 100 | 0.6726 | 0.8262 | 0.8549 | | 0.0621 | 1.89 | 200 | 0.7939 | 0.8362 | 0.8556 | | 0.0567 | 2.83 | 300 | 0.7211 | 0.8462 | 0.8645 | | 0.0395 | 3.77 | 400 | 0.7824 | 0.8350 | 0.8556 | | 0.0326 | 4.72 | 500 | 0.8522 | 0.8448 | 0.8618 | | 0.0228 | 5.66 | 600 | 0.9315 | 0.8342 | 0.8466 | | 0.0192 | 6.6 | 700 | 0.9355 | 0.8302 | 0.8487 | | 0.0107 | 7.55 | 800 | 0.9090 | 0.8475 | 0.8611 | | 0.0058 | 8.49 | 900 | 0.9500 | 0.8455 | 0.8583 | | 0.0031 | 9.43 | 1000 | 0.9618 | 0.8428 | 0.8549 | ### Framework versions - Transformers 4.34.1 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # phrasebank-sentiment-analysis This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the financial_phrasebank dataset. It achieves the following results on the evaluation set: - Loss: 0.9618 - F1: 0.8428 - Accuracy: 0.8549 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------:|:--------:| | 0.0872 | 0.94 | 100 | 0.6726 | 0.8262 | 0.8549 | | 0.0621 | 1.89 | 200 | 0.7939 | 0.8362 | 0.8556 | | 0.0567 | 2.83 | 300 | 0.7211 | 0.8462 | 0.8645 | | 0.0395 | 3.77 | 400 | 0.7824 | 0.8350 | 0.8556 | | 0.0326 | 4.72 | 500 | 0.8522 | 0.8448 | 0.8618 | | 0.0228 | 5.66 | 600 | 0.9315 | 0.8342 | 0.8466 | | 0.0192 | 6.6 | 700 | 0.9355 | 0.8302 | 0.8487 | | 0.0107 | 7.55 | 800 | 0.9090 | 0.8475 | 0.8611 | | 0.0058 | 8.49 | 900 | 0.9500 | 0.8455 | 0.8583 | | 0.0031 | 9.43 | 1000 | 0.9618 | 0.8428 | 0.8549 | ### Framework versions - Transformers 4.34.1 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
{"base_model": "bert-base-uncased", "datasets": ["financial_phrasebank"], "license": "apache-2.0", "metrics": ["f1", "accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "phrasebank-sentiment-analysis", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "financial_phrasebank", "type": "financial_phrasebank", "config": "sentences_50agree", "split": "train", "args": "sentences_50agree"}, "metrics": [{"type": "f1", "value": 0.8427553411350498, "name": "F1"}, {"type": "accuracy", "value": 0.8548830811554333, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,382
datgtr/distilbert-base-uncased-finetuned-emotion
datgtr
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-13T10:18:14Z
2023-08-13T10:53:12+00:00
12
0
--- base_model: distilbert-base-uncased datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.9255 name: Accuracy - type: f1 value: 0.9257123738860233 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2205 - Accuracy: 0.9255 - F1: 0.9257 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8406 | 1.0 | 250 | 0.3237 | 0.907 | 0.9058 | | 0.2582 | 2.0 | 500 | 0.2205 | 0.9255 | 0.9257 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1 - Datasets 2.14.4 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2205 - Accuracy: 0.9255 - F1: 0.9257 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8406 | 1.0 | 250 | 0.3237 | 0.907 | 0.9058 | | 0.2582 | 2.0 | 500 | 0.2205 | 0.9255 | 0.9257 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1 - Datasets 2.14.4 - Tokenizers 0.13.3
{"base_model": "distilbert-base-uncased", "datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.9255, "name": "Accuracy"}, {"type": "f1", "value": 0.9257123738860233, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,385
fxmarty/whisper-tiny-working
fxmarty
automatic-speech-recognition
[ "transformers", "pytorch", "whisper", "automatic-speech-recognition", "audio", "hf-asr-leaderboard", "en", "arxiv:2212.04356", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
2023-02-28T08:24:27Z
2023-02-28T08:26:15+00:00
10
0
--- language: - en license: apache-2.0 pipeline_tag: automatic-speech-recognition tags: - audio - automatic-speech-recognition - hf-asr-leaderboard widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac model-index: - name: whisper-tiny.en results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: LibriSpeech (clean) type: librispeech_asr config: clean split: test args: language: en metrics: - type: wer value: 8.4372112320138 name: Test WER - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: LibriSpeech (other) type: librispeech_asr config: other split: test args: language: en metrics: - type: wer value: 14.857607503498356 name: Test WER --- # Whisper Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalise to many datasets and domains **without** the need for fine-tuning. Whisper was proposed in the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://arxiv.org/abs/2212.04356) by Alec Radford et al. from OpenAI. The original code repository can be found [here](https://github.com/openai/whisper). **Disclaimer**: Content for this model card has partly been written by the Hugging Face team, and parts of it were copied and pasted from the original model card. ## Model details Whisper is a Transformer based encoder-decoder model, also referred to as a _sequence-to-sequence_ model. It was trained on 680k hours of labelled speech data annotated using large-scale weak supervision. The models were trained on either English-only data or multilingual data. The English-only models were trained on the task of speech recognition. The multilingual models were trained on both speech recognition and speech translation. For speech recognition, the model predicts transcriptions in the *same* language as the audio. For speech translation, the model predicts transcriptions to a *different* language to the audio. Whisper checkpoints come in five configurations of varying model sizes. The smallest four are trained on either English-only or multilingual data. The largest checkpoints are multilingual only. All ten of the pre-trained checkpoints are available on the [Hugging Face Hub](https://huggingface.co/models?search=openai/whisper). The checkpoints are summarised in the following table with links to the models on the Hub: | Size | Parameters | English-only | Multilingual | |----------|------------|------------------------------------------------------|-----------------------------------------------------| | tiny | 39 M | [✓](https://huggingface.co/openai/whisper-tiny.en) | [✓](https://huggingface.co/openai/whisper-tiny) | | base | 74 M | [✓](https://huggingface.co/openai/whisper-base.en) | [✓](https://huggingface.co/openai/whisper-base) | | small | 244 M | [✓](https://huggingface.co/openai/whisper-small.en) | [✓](https://huggingface.co/openai/whisper-small) | | medium | 769 M | [✓](https://huggingface.co/openai/whisper-medium.en) | [✓](https://huggingface.co/openai/whisper-medium) | | large | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large) | | large-v2 | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large-v2) | # Usage This checkpoint is an *English-only* model, meaning it can be used for English speech recognition. Multilingual speech recognition or speech translation is possible through use of a multilingual checkpoint. To transcribe audio samples, the model has to be used alongside a [`WhisperProcessor`](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperProcessor). The `WhisperProcessor` is used to: 1. Pre-process the audio inputs (converting them to log-Mel spectrograms for the model) 2. Post-process the model outputs (converting them from tokens to text) ## Transcription ```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> # load dummy dataset and read audio files >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> input_features = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=False) ['<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.<|endoftext|>'] >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'] ``` The context tokens can be removed from the start of the transcription by setting `skip_special_tokens=True`. ## Evaluation This code snippet shows how to evaluate Whisper tiny.en on [LibriSpeech test-clean](https://huggingface.co/datasets/librispeech_asr): ```python >>> from datasets import load_dataset >>> from transformers import WhisperForConditionalGeneration, WhisperProcessor >>> import torch >>> from evaluate import load >>> librispeech_test_clean = load_dataset("librispeech_asr", "clean", split="test") >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en").to("cuda") >>> def map_to_pred(batch): >>> audio = batch["audio"] >>> input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features >>> batch["reference"] = processor.tokenizer._normalize(batch['text']) >>> >>> with torch.no_grad(): >>> predicted_ids = model.generate(input_features.to("cuda"))[0] >>> transcription = processor.decode(predicted_ids) >>> batch["prediction"] = processor.tokenizer._normalize(transcription) >>> return batch >>> result = librispeech_test_clean.map(map_to_pred) >>> wer = load("wer") >>> print(100 * wer.compute(references=result["reference"], predictions=result["prediction"])) 5.655609406528749 ``` ## Long-Form Transcription The Whisper model is intrinsically designed to work on audio samples of up to 30s in duration. However, by using a chunking algorithm, it can be used to transcribe audio samples of up to arbitrary length. This is possible through Transformers [`pipeline`](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) method. Chunking is enabled by setting `chunk_length_s=30` when instantiating the pipeline. It can also be extended to predict utterance level timestamps by passing `return_timestamps=True`: ```python >>> import torch >>> from transformers import pipeline >>> from datasets import load_dataset >>> device = "cuda:0" if torch.cuda.is_available() else "cpu" >>> pipe = pipeline( >>> "automatic-speech-recognition", >>> model="openai/whisper-tiny.en", >>> chunk_length_s=30, >>> device=device, >>> ) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> prediction = pipe(sample.copy())["text"] " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." >>> # we can also return timestamps for the predictions >>> prediction = pipe(sample, return_timestamps=True)["chunks"] [{'text': ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.', 'timestamp': (0.0, 5.44)}] ``` ## Fine-Tuning The pre-trained Whisper model demonstrates a strong ability to generalise to different datasets and domains. However, its predictive capabilities can be improved further for certain languages and tasks through *fine-tuning*. The blog post [Fine-Tune Whisper with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper) provides a step-by-step guide to fine-tuning the Whisper model with as little as 5 hours of labelled data. ### Evaluated Use The primary intended users of these models are AI researchers studying robustness, generalization, capabilities, biases, and constraints of the current model. However, Whisper is also potentially quite useful as an ASR solution for developers, especially for English speech recognition. We recognize that once models are released, it is impossible to restrict access to only “intended” uses or to draw reasonable guidelines around what is or is not research. The models are primarily trained and evaluated on ASR and speech translation to English tasks. They show strong ASR results in ~10 languages. They may exhibit additional capabilities, particularly if fine-tuned on certain tasks like voice activity detection, speaker classification, or speaker diarization but have not been robustly evaluated in these areas. We strongly recommend that users perform robust evaluations of the models in a particular context and domain before deploying them. In particular, we caution against using Whisper models to transcribe recordings of individuals taken without their consent or purporting to use these models for any kind of subjective classification. We recommend against use in high-risk domains like decision-making contexts, where flaws in accuracy can lead to pronounced flaws in outcomes. The models are intended to transcribe and translate speech, use of the model for classification is not only not evaluated but also not appropriate, particularly to infer human attributes. ## Training Data The models are trained on 680,000 hours of audio and the corresponding transcripts collected from the internet. 65% of this data (or 438,000 hours) represents English-language audio and matched English transcripts, roughly 18% (or 126,000 hours) represents non-English audio and English transcripts, while the final 17% (or 117,000 hours) represents non-English audio and the corresponding transcript. This non-English data represents 98 different languages. As discussed in [the accompanying paper](https://cdn.openai.com/papers/whisper.pdf), we see that performance on transcription in a given language is directly correlated with the amount of training data we employ in that language. ## Performance and Limitations Our studies show that, over many existing ASR systems, the models exhibit improved robustness to accents, background noise, technical language, as well as zero shot translation from multiple languages into English; and that accuracy on speech recognition and translation is near the state-of-the-art level. However, because the models are trained in a weakly supervised manner using large-scale noisy data, the predictions may include texts that are not actually spoken in the audio input (i.e. hallucination). We hypothesize that this happens because, given their general knowledge of language, the models combine trying to predict the next word in audio with trying to transcribe the audio itself. Our models perform unevenly across languages, and we observe lower accuracy on low-resource and/or low-discoverability languages or languages where we have less training data. The models also exhibit disparate performance on different accents and dialects of particular languages, which may include higher word error rate across speakers of different genders, races, ages, or other demographic criteria. Our full evaluation results are presented in [the paper accompanying this release](https://cdn.openai.com/papers/whisper.pdf). In addition, the sequence-to-sequence architecture of the model makes it prone to generating repetitive texts, which can be mitigated to some degree by beam search and temperature scheduling but not perfectly. Further analysis on these limitations are provided in [the paper](https://cdn.openai.com/papers/whisper.pdf). It is likely that this behavior and hallucinations may be worse on lower-resource and/or lower-discoverability languages. ## Broader Implications We anticipate that Whisper models’ transcription capabilities may be used for improving accessibility tools. While Whisper models cannot be used for real-time transcription out of the box – their speed and size suggest that others may be able to build applications on top of them that allow for near-real-time speech recognition and translation. The real value of beneficial applications built on top of Whisper models suggests that the disparate performance of these models may have real economic implications. There are also potential dual use concerns that come with releasing Whisper. While we hope the technology will be used primarily for beneficial purposes, making ASR technology more accessible could enable more actors to build capable surveillance technologies or scale up existing surveillance efforts, as the speed and accuracy allow for affordable automatic transcription and translation of large volumes of audio communication. Moreover, these models may have some capabilities to recognize specific individuals out of the box, which in turn presents safety concerns related both to dual use and disparate performance. In practice, we expect that the cost of transcription is not the limiting factor of scaling up surveillance projects. ### BibTeX entry and citation info ```bibtex @misc{radford2022whisper, doi = {10.48550/ARXIV.2212.04356}, url = {https://arxiv.org/abs/2212.04356}, author = {Radford, Alec and Kim, Jong Wook and Xu, Tao and Brockman, Greg and McLeavey, Christine and Sutskever, Ilya}, title = {Robust Speech Recognition via Large-Scale Weak Supervision}, publisher = {arXiv}, year = {2022}, copyright = {arXiv.org perpetual, non-exclusive license} } ```
null
Non_BioNLP
# Whisper Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalise to many datasets and domains **without** the need for fine-tuning. Whisper was proposed in the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://arxiv.org/abs/2212.04356) by Alec Radford et al. from OpenAI. The original code repository can be found [here](https://github.com/openai/whisper). **Disclaimer**: Content for this model card has partly been written by the Hugging Face team, and parts of it were copied and pasted from the original model card. ## Model details Whisper is a Transformer based encoder-decoder model, also referred to as a _sequence-to-sequence_ model. It was trained on 680k hours of labelled speech data annotated using large-scale weak supervision. The models were trained on either English-only data or multilingual data. The English-only models were trained on the task of speech recognition. The multilingual models were trained on both speech recognition and speech translation. For speech recognition, the model predicts transcriptions in the *same* language as the audio. For speech translation, the model predicts transcriptions to a *different* language to the audio. Whisper checkpoints come in five configurations of varying model sizes. The smallest four are trained on either English-only or multilingual data. The largest checkpoints are multilingual only. All ten of the pre-trained checkpoints are available on the [Hugging Face Hub](https://huggingface.co/models?search=openai/whisper). The checkpoints are summarised in the following table with links to the models on the Hub: | Size | Parameters | English-only | Multilingual | |----------|------------|------------------------------------------------------|-----------------------------------------------------| | tiny | 39 M | [✓](https://huggingface.co/openai/whisper-tiny.en) | [✓](https://huggingface.co/openai/whisper-tiny) | | base | 74 M | [✓](https://huggingface.co/openai/whisper-base.en) | [✓](https://huggingface.co/openai/whisper-base) | | small | 244 M | [✓](https://huggingface.co/openai/whisper-small.en) | [✓](https://huggingface.co/openai/whisper-small) | | medium | 769 M | [✓](https://huggingface.co/openai/whisper-medium.en) | [✓](https://huggingface.co/openai/whisper-medium) | | large | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large) | | large-v2 | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large-v2) | # Usage This checkpoint is an *English-only* model, meaning it can be used for English speech recognition. Multilingual speech recognition or speech translation is possible through use of a multilingual checkpoint. To transcribe audio samples, the model has to be used alongside a [`WhisperProcessor`](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperProcessor). The `WhisperProcessor` is used to: 1. Pre-process the audio inputs (converting them to log-Mel spectrograms for the model) 2. Post-process the model outputs (converting them from tokens to text) ## Transcription ```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> # load dummy dataset and read audio files >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> input_features = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=False) ['<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.<|endoftext|>'] >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'] ``` The context tokens can be removed from the start of the transcription by setting `skip_special_tokens=True`. ## Evaluation This code snippet shows how to evaluate Whisper tiny.en on [LibriSpeech test-clean](https://huggingface.co/datasets/librispeech_asr): ```python >>> from datasets import load_dataset >>> from transformers import WhisperForConditionalGeneration, WhisperProcessor >>> import torch >>> from evaluate import load >>> librispeech_test_clean = load_dataset("librispeech_asr", "clean", split="test") >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en").to("cuda") >>> def map_to_pred(batch): >>> audio = batch["audio"] >>> input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features >>> batch["reference"] = processor.tokenizer._normalize(batch['text']) >>> >>> with torch.no_grad(): >>> predicted_ids = model.generate(input_features.to("cuda"))[0] >>> transcription = processor.decode(predicted_ids) >>> batch["prediction"] = processor.tokenizer._normalize(transcription) >>> return batch >>> result = librispeech_test_clean.map(map_to_pred) >>> wer = load("wer") >>> print(100 * wer.compute(references=result["reference"], predictions=result["prediction"])) 5.655609406528749 ``` ## Long-Form Transcription The Whisper model is intrinsically designed to work on audio samples of up to 30s in duration. However, by using a chunking algorithm, it can be used to transcribe audio samples of up to arbitrary length. This is possible through Transformers [`pipeline`](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) method. Chunking is enabled by setting `chunk_length_s=30` when instantiating the pipeline. It can also be extended to predict utterance level timestamps by passing `return_timestamps=True`: ```python >>> import torch >>> from transformers import pipeline >>> from datasets import load_dataset >>> device = "cuda:0" if torch.cuda.is_available() else "cpu" >>> pipe = pipeline( >>> "automatic-speech-recognition", >>> model="openai/whisper-tiny.en", >>> chunk_length_s=30, >>> device=device, >>> ) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> prediction = pipe(sample.copy())["text"] " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." >>> # we can also return timestamps for the predictions >>> prediction = pipe(sample, return_timestamps=True)["chunks"] [{'text': ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.', 'timestamp': (0.0, 5.44)}] ``` ## Fine-Tuning The pre-trained Whisper model demonstrates a strong ability to generalise to different datasets and domains. However, its predictive capabilities can be improved further for certain languages and tasks through *fine-tuning*. The blog post [Fine-Tune Whisper with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper) provides a step-by-step guide to fine-tuning the Whisper model with as little as 5 hours of labelled data. ### Evaluated Use The primary intended users of these models are AI researchers studying robustness, generalization, capabilities, biases, and constraints of the current model. However, Whisper is also potentially quite useful as an ASR solution for developers, especially for English speech recognition. We recognize that once models are released, it is impossible to restrict access to only “intended” uses or to draw reasonable guidelines around what is or is not research. The models are primarily trained and evaluated on ASR and speech translation to English tasks. They show strong ASR results in ~10 languages. They may exhibit additional capabilities, particularly if fine-tuned on certain tasks like voice activity detection, speaker classification, or speaker diarization but have not been robustly evaluated in these areas. We strongly recommend that users perform robust evaluations of the models in a particular context and domain before deploying them. In particular, we caution against using Whisper models to transcribe recordings of individuals taken without their consent or purporting to use these models for any kind of subjective classification. We recommend against use in high-risk domains like decision-making contexts, where flaws in accuracy can lead to pronounced flaws in outcomes. The models are intended to transcribe and translate speech, use of the model for classification is not only not evaluated but also not appropriate, particularly to infer human attributes. ## Training Data The models are trained on 680,000 hours of audio and the corresponding transcripts collected from the internet. 65% of this data (or 438,000 hours) represents English-language audio and matched English transcripts, roughly 18% (or 126,000 hours) represents non-English audio and English transcripts, while the final 17% (or 117,000 hours) represents non-English audio and the corresponding transcript. This non-English data represents 98 different languages. As discussed in [the accompanying paper](https://cdn.openai.com/papers/whisper.pdf), we see that performance on transcription in a given language is directly correlated with the amount of training data we employ in that language. ## Performance and Limitations Our studies show that, over many existing ASR systems, the models exhibit improved robustness to accents, background noise, technical language, as well as zero shot translation from multiple languages into English; and that accuracy on speech recognition and translation is near the state-of-the-art level. However, because the models are trained in a weakly supervised manner using large-scale noisy data, the predictions may include texts that are not actually spoken in the audio input (i.e. hallucination). We hypothesize that this happens because, given their general knowledge of language, the models combine trying to predict the next word in audio with trying to transcribe the audio itself. Our models perform unevenly across languages, and we observe lower accuracy on low-resource and/or low-discoverability languages or languages where we have less training data. The models also exhibit disparate performance on different accents and dialects of particular languages, which may include higher word error rate across speakers of different genders, races, ages, or other demographic criteria. Our full evaluation results are presented in [the paper accompanying this release](https://cdn.openai.com/papers/whisper.pdf). In addition, the sequence-to-sequence architecture of the model makes it prone to generating repetitive texts, which can be mitigated to some degree by beam search and temperature scheduling but not perfectly. Further analysis on these limitations are provided in [the paper](https://cdn.openai.com/papers/whisper.pdf). It is likely that this behavior and hallucinations may be worse on lower-resource and/or lower-discoverability languages. ## Broader Implications We anticipate that Whisper models’ transcription capabilities may be used for improving accessibility tools. While Whisper models cannot be used for real-time transcription out of the box – their speed and size suggest that others may be able to build applications on top of them that allow for near-real-time speech recognition and translation. The real value of beneficial applications built on top of Whisper models suggests that the disparate performance of these models may have real economic implications. There are also potential dual use concerns that come with releasing Whisper. While we hope the technology will be used primarily for beneficial purposes, making ASR technology more accessible could enable more actors to build capable surveillance technologies or scale up existing surveillance efforts, as the speed and accuracy allow for affordable automatic transcription and translation of large volumes of audio communication. Moreover, these models may have some capabilities to recognize specific individuals out of the box, which in turn presents safety concerns related both to dual use and disparate performance. In practice, we expect that the cost of transcription is not the limiting factor of scaling up surveillance projects. ### BibTeX entry and citation info ```bibtex @misc{radford2022whisper, doi = {10.48550/ARXIV.2212.04356}, url = {https://arxiv.org/abs/2212.04356}, author = {Radford, Alec and Kim, Jong Wook and Xu, Tao and Brockman, Greg and McLeavey, Christine and Sutskever, Ilya}, title = {Robust Speech Recognition via Large-Scale Weak Supervision}, publisher = {arXiv}, year = {2022}, copyright = {arXiv.org perpetual, non-exclusive license} } ```
{"language": ["en"], "license": "apache-2.0", "pipeline_tag": "automatic-speech-recognition", "tags": ["audio", "automatic-speech-recognition", "hf-asr-leaderboard"], "widget": [{"example_title": "Librispeech sample 1", "src": "https://cdn-media.huggingface.co/speech_samples/sample1.flac"}, {"example_title": "Librispeech sample 2", "src": "https://cdn-media.huggingface.co/speech_samples/sample2.flac"}], "model-index": [{"name": "whisper-tiny.en", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Automatic Speech Recognition"}, "dataset": {"name": "LibriSpeech (clean)", "type": "librispeech_asr", "config": "clean", "split": "test", "args": {"language": "en"}}, "metrics": [{"type": "wer", "value": 8.4372112320138, "name": "Test WER"}]}, {"task": {"type": "automatic-speech-recognition", "name": "Automatic Speech Recognition"}, "dataset": {"name": "LibriSpeech (other)", "type": "librispeech_asr", "config": "other", "split": "test", "args": {"language": "en"}}, "metrics": [{"type": "wer", "value": 14.857607503498356, "name": "Test WER"}]}]}]}
task
[ "TRANSLATION" ]
41,386
com3dian/Bart-large-paper2slides-expander
com3dian
text2text-generation
[ "transformers", "pytorch", "safetensors", "bart", "text2text-generation", "en", "dataset:cnn_dailymail", "arxiv:1711.00043", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-07-13T15:35:11Z
2024-03-06T13:52:03+00:00
33
2
--- datasets: - cnn_dailymail language: - en license: - mit pipeline_tag: text2text-generation widget: - text: 'Bag-of-feature representations can be described by analogy to bag-of-words representations. ' - text: 'Self-attention is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence. ' --- # Bart-Large Expansion Model ![Bart Logo](https://huggingface.co/front/assets/huggingface_logo.svg) This repository contains the **Bart-Large-paper2slides-expander Model**, which has been pre-trained on cnn-daily-mail dataset and fine-tuned on the [Automatic Slide Generation from Scientific Papers dataset](https://www.kaggle.com/datasets/andrewmvd/automatic-slide-generation-from-scientific-papers) using unsupervised learning techniques using an algorithm from the paper entitled '[Unsupervised Machine Translation Using Monolingual Corpora Only](https://arxiv.org/abs/1711.00043)'. Its primary focus is to expand the **scientific text** by providing alternative and expanded versions with improved clarity and accuracy. The model is parallelly trained with the [**Bart-Large-paper2slides-summarizer Model**](https://huggingface.co/com3dian/Bart-large-paper2slides-summarizer) from the same contributor. ## Model Details - **Model Architecture**: Bart-Large - **Fine-tuning Dataset**: [Automatic Slide Generation from Scientific Papers](https://www.kaggle.com/datasets/andrewmvd/automatic-slide-generation-from-scientific-papers) - **Fine-tuning Method**: Unsupervised Learning [Bart](https://huggingface.co/transformers/model_doc/bart.html) (Bidirectional and Auto-Regressive Transformers) is a sequence-to-sequence (seq2seq) model developed by Facebook AI Research. It has shown exceptional performance in various natural language processing (NLP) tasks such as text summarization, text generation, and machine translation. This particular model, Bart-Large, is the larger version of the Bart model. It consists of 12 encoder and decoder layers and has a total of 400 million parameters. ## Usage To use this model, you can leverage the Hugging Face [Transformers](https://huggingface.co/transformers/) library. Here's an example of how to use it in Python: ```python from transformers import BartTokenizer, BartForConditionalGeneration, pipeline # Load the model and tokenizer model_name = "com3dian/Bart-large-paper2slides-expander" tokenizer = BartTokenizer.from_pretrained(model_name) model = BartForConditionalGeneration.from_pretrained(model_name) # Generate summary from input text input_text = "Your input text here..." input_ids = tokenizer.encode(input_text, return_tensors="pt") output = model.generate(input_ids) # Decode generated summaries expanded_text = tokenizer.decode(output[0], skip_special_tokens=True) print(expanded_text) # Or using the pipeline API expander = pipeline("text2text-generation", model=model_name) expanded_text = expander(input_text, max_length=50, min_length=30, do_sample=False) print(expanded_text) ``` Ensure you have the `transformers` library installed before running the code. You can install it using `pip`: ``` pip install transformers ``` ## Model Fine-tuning Details The fine-tuning process for this model involved training on the slide generation dataset using unsupervised learning techniques. Unsupervised learning refers to training a model without explicit human-labeled targets. Instead, the model learns to back-expand the input provided by the summarization model, into the original texts. The specific hyperparameters and training details used for fine-tuning this model are as follows: - Batch Size: 4 - Learning Rate: 2e-6 - Training Steps: 3*7 - Optimizer: AdamW ## Acknowledgments We would like to acknowledge the authors of the Bart model and the creators of the slide generation dataset for their valuable contributions, which have enabled the development of this fine-tuned model. If you use this model or find it helpful in your work, please consider citing the original Bart model, the slide generation dataset, and [this paper](https://studenttheses.uu.nl/handle/20.500.12932/45939) to provide proper credit to the respective authors. ## License This model and the associated code are released under the [MIT license](https://opensource.org/license/mit/).
null
Non_BioNLP
# Bart-Large Expansion Model ![Bart Logo](https://huggingface.co/front/assets/huggingface_logo.svg) This repository contains the **Bart-Large-paper2slides-expander Model**, which has been pre-trained on cnn-daily-mail dataset and fine-tuned on the [Automatic Slide Generation from Scientific Papers dataset](https://www.kaggle.com/datasets/andrewmvd/automatic-slide-generation-from-scientific-papers) using unsupervised learning techniques using an algorithm from the paper entitled '[Unsupervised Machine Translation Using Monolingual Corpora Only](https://arxiv.org/abs/1711.00043)'. Its primary focus is to expand the **scientific text** by providing alternative and expanded versions with improved clarity and accuracy. The model is parallelly trained with the [**Bart-Large-paper2slides-summarizer Model**](https://huggingface.co/com3dian/Bart-large-paper2slides-summarizer) from the same contributor. ## Model Details - **Model Architecture**: Bart-Large - **Fine-tuning Dataset**: [Automatic Slide Generation from Scientific Papers](https://www.kaggle.com/datasets/andrewmvd/automatic-slide-generation-from-scientific-papers) - **Fine-tuning Method**: Unsupervised Learning [Bart](https://huggingface.co/transformers/model_doc/bart.html) (Bidirectional and Auto-Regressive Transformers) is a sequence-to-sequence (seq2seq) model developed by Facebook AI Research. It has shown exceptional performance in various natural language processing (NLP) tasks such as text summarization, text generation, and machine translation. This particular model, Bart-Large, is the larger version of the Bart model. It consists of 12 encoder and decoder layers and has a total of 400 million parameters. ## Usage To use this model, you can leverage the Hugging Face [Transformers](https://huggingface.co/transformers/) library. Here's an example of how to use it in Python: ```python from transformers import BartTokenizer, BartForConditionalGeneration, pipeline # Load the model and tokenizer model_name = "com3dian/Bart-large-paper2slides-expander" tokenizer = BartTokenizer.from_pretrained(model_name) model = BartForConditionalGeneration.from_pretrained(model_name) # Generate summary from input text input_text = "Your input text here..." input_ids = tokenizer.encode(input_text, return_tensors="pt") output = model.generate(input_ids) # Decode generated summaries expanded_text = tokenizer.decode(output[0], skip_special_tokens=True) print(expanded_text) # Or using the pipeline API expander = pipeline("text2text-generation", model=model_name) expanded_text = expander(input_text, max_length=50, min_length=30, do_sample=False) print(expanded_text) ``` Ensure you have the `transformers` library installed before running the code. You can install it using `pip`: ``` pip install transformers ``` ## Model Fine-tuning Details The fine-tuning process for this model involved training on the slide generation dataset using unsupervised learning techniques. Unsupervised learning refers to training a model without explicit human-labeled targets. Instead, the model learns to back-expand the input provided by the summarization model, into the original texts. The specific hyperparameters and training details used for fine-tuning this model are as follows: - Batch Size: 4 - Learning Rate: 2e-6 - Training Steps: 3*7 - Optimizer: AdamW ## Acknowledgments We would like to acknowledge the authors of the Bart model and the creators of the slide generation dataset for their valuable contributions, which have enabled the development of this fine-tuned model. If you use this model or find it helpful in your work, please consider citing the original Bart model, the slide generation dataset, and [this paper](https://studenttheses.uu.nl/handle/20.500.12932/45939) to provide proper credit to the respective authors. ## License This model and the associated code are released under the [MIT license](https://opensource.org/license/mit/).
{"datasets": ["cnn_dailymail"], "language": ["en"], "license": ["mit"], "pipeline_tag": "text2text-generation", "widget": [{"text": "Bag-of-feature representations can be described by analogy to bag-of-words representations.\n"}, {"text": "Self-attention is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence.\n"}]}
task
[ "TRANSLATION", "SUMMARIZATION" ]
41,387
samman44/v2_all-MiniLM-L6-v2_12_epoch_finetune
samman44
text-classification
[ "sentence-transformers", "pytorch", "bert", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-11-07T13:34:57Z
2023-11-07T13:35:45+00:00
11
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # v2_all-MiniLM-L6-v2_12_epoch_finetune This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("v2_all-MiniLM-L6-v2_12_epoch_finetune") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# v2_all-MiniLM-L6-v2_12_epoch_finetune This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("v2_all-MiniLM-L6-v2_12_epoch_finetune") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
41,388
gokulsrinivasagan/bert_tiny_lda_5_v1_book_mnli
gokulsrinivasagan
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "base_model:gokulsrinivasagan/bert_tiny_lda_5_v1_book", "base_model:finetune:gokulsrinivasagan/bert_tiny_lda_5_v1_book", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-12-10T13:50:10Z
2024-12-10T14:19:58+00:00
6
0
--- base_model: gokulsrinivasagan/bert_tiny_lda_5_v1_book datasets: - glue language: - en library_name: transformers metrics: - accuracy tags: - generated_from_trainer model-index: - name: bert_tiny_lda_5_v1_book_mnli results: - task: type: text-classification name: Text Classification dataset: name: GLUE MNLI type: glue args: mnli metrics: - type: accuracy value: 0.7597640358014646 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert_tiny_lda_5_v1_book_mnli This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_5_v1_book](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_5_v1_book) on the GLUE MNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.6047 - Accuracy: 0.7598 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.7793 | 1.0 | 1534 | 0.6779 | 0.7105 | | 0.6313 | 2.0 | 3068 | 0.6316 | 0.7361 | | 0.5474 | 3.0 | 4602 | 0.6253 | 0.7505 | | 0.4766 | 4.0 | 6136 | 0.6401 | 0.7538 | | 0.414 | 5.0 | 7670 | 0.6470 | 0.7560 | | 0.3555 | 6.0 | 9204 | 0.7034 | 0.7555 | | 0.304 | 7.0 | 10738 | 0.7440 | 0.7554 | | 0.2609 | 8.0 | 12272 | 0.8077 | 0.7532 | ### Framework versions - Transformers 4.46.3 - Pytorch 2.2.1+cu118 - Datasets 2.17.0 - Tokenizers 0.20.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert_tiny_lda_5_v1_book_mnli This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_5_v1_book](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_5_v1_book) on the GLUE MNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.6047 - Accuracy: 0.7598 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.7793 | 1.0 | 1534 | 0.6779 | 0.7105 | | 0.6313 | 2.0 | 3068 | 0.6316 | 0.7361 | | 0.5474 | 3.0 | 4602 | 0.6253 | 0.7505 | | 0.4766 | 4.0 | 6136 | 0.6401 | 0.7538 | | 0.414 | 5.0 | 7670 | 0.6470 | 0.7560 | | 0.3555 | 6.0 | 9204 | 0.7034 | 0.7555 | | 0.304 | 7.0 | 10738 | 0.7440 | 0.7554 | | 0.2609 | 8.0 | 12272 | 0.8077 | 0.7532 | ### Framework versions - Transformers 4.46.3 - Pytorch 2.2.1+cu118 - Datasets 2.17.0 - Tokenizers 0.20.3
{"base_model": "gokulsrinivasagan/bert_tiny_lda_5_v1_book", "datasets": ["glue"], "language": ["en"], "library_name": "transformers", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "bert_tiny_lda_5_v1_book_mnli", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE MNLI", "type": "glue", "args": "mnli"}, "metrics": [{"type": "accuracy", "value": 0.7597640358014646, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,389
RMWeerasinghe/text_summarization-finetuned
RMWeerasinghe
summarization
[ "transformers", "safetensors", "t5", "text2text-generation", "summarization", "generated_from_trainer", "dataset:cnn_dailymail", "base_model:Falconsai/text_summarization", "base_model:finetune:Falconsai/text_summarization", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-02-11T09:49:21Z
2024-02-12T11:25:07+00:00
18
0
--- base_model: Falconsai/text_summarization datasets: - cnn_dailymail license: apache-2.0 metrics: - rouge tags: - summarization - generated_from_trainer model-index: - name: text_summarization-finetuned results: - task: type: text2text-generation name: Sequence-to-sequence Language Modeling dataset: name: cnn_dailymail type: cnn_dailymail config: 1.0.0 split: validation args: 1.0.0 metrics: - type: rouge value: 0.2389 name: Rouge1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # text_summarization-finetuned This model is a fine-tuned version of [Falconsai/text_summarization](https://huggingface.co/Falconsai/text_summarization) on the cnn_dailymail dataset. It achieves the following results on the evaluation set: - Loss: 1.8119 - Rouge1: 0.2389 - Rouge2: 0.1112 - Rougel: 0.1946 - Rougelsum: 0.2237 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 40 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | 10.7536 | 1.0 | 78 | 6.6776 | 0.203 | 0.0868 | 0.1627 | 0.1909 | | 5.0057 | 1.99 | 156 | 3.2391 | 0.2128 | 0.0909 | 0.1707 | 0.2003 | | 3.3921 | 2.99 | 234 | 2.9233 | 0.2263 | 0.102 | 0.1849 | 0.213 | | 3.1013 | 4.0 | 313 | 2.7724 | 0.2265 | 0.1043 | 0.1864 | 0.2128 | | 2.9643 | 5.0 | 391 | 2.5935 | 0.2305 | 0.1075 | 0.1893 | 0.2166 | | 2.7594 | 5.99 | 469 | 2.4411 | 0.2311 | 0.1075 | 0.1888 | 0.2171 | | 2.6579 | 6.99 | 547 | 2.3273 | 0.2327 | 0.1084 | 0.1908 | 0.2185 | | 2.5729 | 8.0 | 626 | 2.2452 | 0.2326 | 0.1083 | 0.1905 | 0.2185 | | 2.4879 | 9.0 | 704 | 2.1828 | 0.2313 | 0.1063 | 0.1893 | 0.2176 | | 2.401 | 9.99 | 782 | 2.1365 | 0.2336 | 0.1071 | 0.1907 | 0.2193 | | 2.346 | 10.99 | 860 | 2.0937 | 0.2332 | 0.1065 | 0.1905 | 0.2192 | | 2.3086 | 12.0 | 939 | 2.0606 | 0.2334 | 0.107 | 0.1905 | 0.2191 | | 2.2648 | 13.0 | 1017 | 2.0315 | 0.2351 | 0.1085 | 0.1925 | 0.2211 | | 2.2452 | 13.99 | 1095 | 2.0058 | 0.2354 | 0.1079 | 0.1922 | 0.221 | | 2.204 | 14.99 | 1173 | 1.9853 | 0.2364 | 0.1093 | 0.1932 | 0.2222 | | 2.1723 | 16.0 | 1252 | 1.9665 | 0.236 | 0.109 | 0.1931 | 0.2218 | | 2.1601 | 17.0 | 1330 | 1.9479 | 0.2356 | 0.109 | 0.1923 | 0.2212 | | 2.143 | 17.99 | 1408 | 1.9337 | 0.2356 | 0.1093 | 0.1926 | 0.2215 | | 2.093 | 18.99 | 1486 | 1.9201 | 0.2366 | 0.1101 | 0.193 | 0.2223 | | 2.0987 | 20.0 | 1565 | 1.9077 | 0.2371 | 0.111 | 0.1938 | 0.2228 | | 2.0663 | 21.0 | 1643 | 1.8956 | 0.2368 | 0.1104 | 0.1937 | 0.2219 | | 2.0629 | 21.99 | 1721 | 1.8858 | 0.2375 | 0.1109 | 0.1935 | 0.2221 | | 2.0449 | 22.99 | 1799 | 1.8765 | 0.2395 | 0.1128 | 0.1959 | 0.2244 | | 2.0342 | 24.0 | 1878 | 1.8684 | 0.2384 | 0.1115 | 0.1943 | 0.2233 | | 2.0021 | 25.0 | 1956 | 1.8620 | 0.2373 | 0.1101 | 0.1932 | 0.222 | | 2.0152 | 25.99 | 2034 | 1.8537 | 0.2387 | 0.1116 | 0.1949 | 0.2236 | | 2.0058 | 26.99 | 2112 | 1.8477 | 0.239 | 0.1118 | 0.195 | 0.224 | | 1.981 | 28.0 | 2191 | 1.8418 | 0.2377 | 0.1108 | 0.194 | 0.2227 | | 1.9493 | 29.0 | 2269 | 1.8358 | 0.2388 | 0.111 | 0.1947 | 0.2234 | | 1.9626 | 29.99 | 2347 | 1.8314 | 0.2385 | 0.1109 | 0.1945 | 0.223 | | 1.9735 | 30.99 | 2425 | 1.8279 | 0.239 | 0.1109 | 0.1944 | 0.2232 | | 1.9421 | 32.0 | 2504 | 1.8240 | 0.2393 | 0.1109 | 0.1946 | 0.2234 | | 1.9371 | 33.0 | 2582 | 1.8212 | 0.2396 | 0.1114 | 0.1951 | 0.2239 | | 1.9252 | 33.99 | 2660 | 1.8184 | 0.2392 | 0.1111 | 0.1947 | 0.2238 | | 1.9556 | 34.99 | 2738 | 1.8163 | 0.2392 | 0.1111 | 0.1946 | 0.2238 | | 1.9436 | 36.0 | 2817 | 1.8147 | 0.2394 | 0.111 | 0.1945 | 0.224 | | 1.9444 | 37.0 | 2895 | 1.8132 | 0.239 | 0.1113 | 0.1946 | 0.2239 | | 1.9368 | 37.99 | 2973 | 1.8125 | 0.239 | 0.1112 | 0.1947 | 0.2239 | | 1.9467 | 38.99 | 3051 | 1.8120 | 0.2389 | 0.1112 | 0.1946 | 0.2237 | | 1.9335 | 39.87 | 3120 | 1.8119 | 0.2389 | 0.1112 | 0.1946 | 0.2237 | ### Framework versions - Transformers 4.38.0.dev0 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # text_summarization-finetuned This model is a fine-tuned version of [Falconsai/text_summarization](https://huggingface.co/Falconsai/text_summarization) on the cnn_dailymail dataset. It achieves the following results on the evaluation set: - Loss: 1.8119 - Rouge1: 0.2389 - Rouge2: 0.1112 - Rougel: 0.1946 - Rougelsum: 0.2237 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 40 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | 10.7536 | 1.0 | 78 | 6.6776 | 0.203 | 0.0868 | 0.1627 | 0.1909 | | 5.0057 | 1.99 | 156 | 3.2391 | 0.2128 | 0.0909 | 0.1707 | 0.2003 | | 3.3921 | 2.99 | 234 | 2.9233 | 0.2263 | 0.102 | 0.1849 | 0.213 | | 3.1013 | 4.0 | 313 | 2.7724 | 0.2265 | 0.1043 | 0.1864 | 0.2128 | | 2.9643 | 5.0 | 391 | 2.5935 | 0.2305 | 0.1075 | 0.1893 | 0.2166 | | 2.7594 | 5.99 | 469 | 2.4411 | 0.2311 | 0.1075 | 0.1888 | 0.2171 | | 2.6579 | 6.99 | 547 | 2.3273 | 0.2327 | 0.1084 | 0.1908 | 0.2185 | | 2.5729 | 8.0 | 626 | 2.2452 | 0.2326 | 0.1083 | 0.1905 | 0.2185 | | 2.4879 | 9.0 | 704 | 2.1828 | 0.2313 | 0.1063 | 0.1893 | 0.2176 | | 2.401 | 9.99 | 782 | 2.1365 | 0.2336 | 0.1071 | 0.1907 | 0.2193 | | 2.346 | 10.99 | 860 | 2.0937 | 0.2332 | 0.1065 | 0.1905 | 0.2192 | | 2.3086 | 12.0 | 939 | 2.0606 | 0.2334 | 0.107 | 0.1905 | 0.2191 | | 2.2648 | 13.0 | 1017 | 2.0315 | 0.2351 | 0.1085 | 0.1925 | 0.2211 | | 2.2452 | 13.99 | 1095 | 2.0058 | 0.2354 | 0.1079 | 0.1922 | 0.221 | | 2.204 | 14.99 | 1173 | 1.9853 | 0.2364 | 0.1093 | 0.1932 | 0.2222 | | 2.1723 | 16.0 | 1252 | 1.9665 | 0.236 | 0.109 | 0.1931 | 0.2218 | | 2.1601 | 17.0 | 1330 | 1.9479 | 0.2356 | 0.109 | 0.1923 | 0.2212 | | 2.143 | 17.99 | 1408 | 1.9337 | 0.2356 | 0.1093 | 0.1926 | 0.2215 | | 2.093 | 18.99 | 1486 | 1.9201 | 0.2366 | 0.1101 | 0.193 | 0.2223 | | 2.0987 | 20.0 | 1565 | 1.9077 | 0.2371 | 0.111 | 0.1938 | 0.2228 | | 2.0663 | 21.0 | 1643 | 1.8956 | 0.2368 | 0.1104 | 0.1937 | 0.2219 | | 2.0629 | 21.99 | 1721 | 1.8858 | 0.2375 | 0.1109 | 0.1935 | 0.2221 | | 2.0449 | 22.99 | 1799 | 1.8765 | 0.2395 | 0.1128 | 0.1959 | 0.2244 | | 2.0342 | 24.0 | 1878 | 1.8684 | 0.2384 | 0.1115 | 0.1943 | 0.2233 | | 2.0021 | 25.0 | 1956 | 1.8620 | 0.2373 | 0.1101 | 0.1932 | 0.222 | | 2.0152 | 25.99 | 2034 | 1.8537 | 0.2387 | 0.1116 | 0.1949 | 0.2236 | | 2.0058 | 26.99 | 2112 | 1.8477 | 0.239 | 0.1118 | 0.195 | 0.224 | | 1.981 | 28.0 | 2191 | 1.8418 | 0.2377 | 0.1108 | 0.194 | 0.2227 | | 1.9493 | 29.0 | 2269 | 1.8358 | 0.2388 | 0.111 | 0.1947 | 0.2234 | | 1.9626 | 29.99 | 2347 | 1.8314 | 0.2385 | 0.1109 | 0.1945 | 0.223 | | 1.9735 | 30.99 | 2425 | 1.8279 | 0.239 | 0.1109 | 0.1944 | 0.2232 | | 1.9421 | 32.0 | 2504 | 1.8240 | 0.2393 | 0.1109 | 0.1946 | 0.2234 | | 1.9371 | 33.0 | 2582 | 1.8212 | 0.2396 | 0.1114 | 0.1951 | 0.2239 | | 1.9252 | 33.99 | 2660 | 1.8184 | 0.2392 | 0.1111 | 0.1947 | 0.2238 | | 1.9556 | 34.99 | 2738 | 1.8163 | 0.2392 | 0.1111 | 0.1946 | 0.2238 | | 1.9436 | 36.0 | 2817 | 1.8147 | 0.2394 | 0.111 | 0.1945 | 0.224 | | 1.9444 | 37.0 | 2895 | 1.8132 | 0.239 | 0.1113 | 0.1946 | 0.2239 | | 1.9368 | 37.99 | 2973 | 1.8125 | 0.239 | 0.1112 | 0.1947 | 0.2239 | | 1.9467 | 38.99 | 3051 | 1.8120 | 0.2389 | 0.1112 | 0.1946 | 0.2237 | | 1.9335 | 39.87 | 3120 | 1.8119 | 0.2389 | 0.1112 | 0.1946 | 0.2237 | ### Framework versions - Transformers 4.38.0.dev0 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.1
{"base_model": "Falconsai/text_summarization", "datasets": ["cnn_dailymail"], "license": "apache-2.0", "metrics": ["rouge"], "tags": ["summarization", "generated_from_trainer"], "model-index": [{"name": "text_summarization-finetuned", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "cnn_dailymail", "type": "cnn_dailymail", "config": "1.0.0", "split": "validation", "args": "1.0.0"}, "metrics": [{"type": "rouge", "value": 0.2389, "name": "Rouge1"}]}]}]}
task
[ "SUMMARIZATION" ]
41,390
gaudi/opus-mt-es-hr-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-22T15:45:28Z
2024-10-19T02:44:28+00:00
6
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-es-hr) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-es-hr).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-es-hr --output_dir ./ctranslate2/opus-mt-es-hr-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-es-hr-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-es-hr-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-es-hr-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-es-hr) by Helsinki-NLP.
null
Non_BioNLP
# Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-es-hr) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-es-hr).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-es-hr --output_dir ./ctranslate2/opus-mt-es-hr-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-es-hr-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-es-hr-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-es-hr-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-es-hr) by Helsinki-NLP.
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
41,391
wangyunyy/distilbert-base-uncased-finetuned-emotion
wangyunyy
text-classification
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-08T07:58:10Z
2023-06-08T08:39:57+00:00
8
0
--- datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion args: split metrics: - type: accuracy value: 0.9245 name: Accuracy - type: f1 value: 0.9242944126734222 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2242 - Accuracy: 0.9245 - F1: 0.9243 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8465 | 1.0 | 250 | 0.3315 | 0.9025 | 0.8991 | | 0.2528 | 2.0 | 500 | 0.2242 | 0.9245 | 0.9243 | ### Framework versions - Transformers 4.16.2 - Pytorch 2.0.0 - Datasets 1.16.1 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2242 - Accuracy: 0.9245 - F1: 0.9243 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8465 | 1.0 | 250 | 0.3315 | 0.9025 | 0.8991 | | 0.2528 | 2.0 | 500 | 0.2242 | 0.9245 | 0.9243 | ### Framework versions - Transformers 4.16.2 - Pytorch 2.0.0 - Datasets 1.16.1 - Tokenizers 0.13.3
{"datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.9245, "name": "Accuracy"}, {"type": "f1", "value": 0.9242944126734222, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,392
SOUMYADEEPSAR/Setfit_random_sample_svm_head
SOUMYADEEPSAR
text-classification
[ "setfit", "safetensors", "mpnet", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "region:us" ]
2024-07-07T17:20:06Z
2024-07-07T17:20:20+00:00
4
0
--- library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: Now that the baffling, elongated, hyperreal coronation has occurred—no, not that one—and Liz Truss has become Prime Minister, a degree of intervention and action on energy bills has emerged, ahead of the looming socioeconomic catastrophe facing the country this winter. - text: But it needs to go much further. - text: What could possibly go wrong? - text: If you are White you might feel bad about hurting others or you might feel afraid to lose this privilege….Overcoming White privilege is a job that must start with the White community…. - text: '[JF: Obviously, immigration wasn’t stopped: the current population of the United States is 329.5 million—it passed 300 million in 2006.' inference: true --- # SetFit This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. A SVC instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit <!-- - **Sentence Transformer:** [Unknown](https://huggingface.co/unknown) --> - **Classification head:** a SVC instance - **Maximum Sequence Length:** 384 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | 0 | <ul><li>'ESG funds often charge many times more for investment funds that are nearly indistinguishable from those without the ESG title.'</li><li>'They are California, Florida, Illinois, Nebraska, New York, and Wyoming.'</li><li>'And so it goes.'</li></ul> | | 1 | <ul><li>'Republicans attempted to pass a resolution that would have enabled Congress to force workers to accept a deal, which was fortunately blocked by (who else) Senator Bernie Sanders.'</li><li>'No government ever surrenders power, even its emergency powers—not really.'</li><li>'No citizen in a democratic society should want executives from $10trn financial institutions to play a larger role than they already do in defining and implementing social values.'</li></ul> | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("SOUMYADEEPSAR/Setfit_random_sample_svm_head") # Run inference preds = model("What could possibly go wrong?") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 3 | 23.4159 | 68 | | Label | Training Sample Count | |:------|:----------------------| | 0 | 136 | | 1 | 78 | ### Training Hyperparameters - batch_size: (8, 8) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - body_learning_rate: (2e-05, 2e-05) - head_learning_rate: 2e-05 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0003 | 1 | 0.3597 | - | | 0.0161 | 50 | 0.2693 | - | | 0.0323 | 100 | 0.2501 | - | | 0.0484 | 150 | 0.2691 | - | | 0.0645 | 200 | 0.063 | - | | 0.0806 | 250 | 0.0179 | - | | 0.0968 | 300 | 0.0044 | - | | 0.1129 | 350 | 0.0003 | - | | 0.1290 | 400 | 0.0005 | - | | 0.1452 | 450 | 0.0002 | - | | 0.1613 | 500 | 0.0003 | - | | 0.1774 | 550 | 0.0001 | - | | 0.1935 | 600 | 0.0001 | - | | 0.2097 | 650 | 0.0001 | - | | 0.2258 | 700 | 0.0001 | - | | 0.2419 | 750 | 0.0001 | - | | 0.2581 | 800 | 0.0 | - | | 0.2742 | 850 | 0.0001 | - | | 0.2903 | 900 | 0.0002 | - | | 0.3065 | 950 | 0.0 | - | | 0.3226 | 1000 | 0.0 | - | | 0.3387 | 1050 | 0.0002 | - | | 0.3548 | 1100 | 0.0 | - | | 0.3710 | 1150 | 0.0001 | - | | 0.3871 | 1200 | 0.0001 | - | | 0.4032 | 1250 | 0.0 | - | | 0.4194 | 1300 | 0.0 | - | | 0.4355 | 1350 | 0.0 | - | | 0.4516 | 1400 | 0.0001 | - | | 0.4677 | 1450 | 0.0 | - | | 0.4839 | 1500 | 0.0 | - | | 0.5 | 1550 | 0.0001 | - | | 0.5161 | 1600 | 0.0001 | - | | 0.5323 | 1650 | 0.0 | - | | 0.5484 | 1700 | 0.0 | - | | 0.5645 | 1750 | 0.0 | - | | 0.5806 | 1800 | 0.0 | - | | 0.5968 | 1850 | 0.0 | - | | 0.6129 | 1900 | 0.0 | - | | 0.6290 | 1950 | 0.0001 | - | | 0.6452 | 2000 | 0.0 | - | | 0.6613 | 2050 | 0.0 | - | | 0.6774 | 2100 | 0.0 | - | | 0.6935 | 2150 | 0.0001 | - | | 0.7097 | 2200 | 0.0 | - | | 0.7258 | 2250 | 0.0 | - | | 0.7419 | 2300 | 0.0001 | - | | 0.7581 | 2350 | 0.0001 | - | | 0.7742 | 2400 | 0.0001 | - | | 0.7903 | 2450 | 0.0 | - | | 0.8065 | 2500 | 0.0 | - | | 0.8226 | 2550 | 0.0 | - | | 0.8387 | 2600 | 0.0 | - | | 0.8548 | 2650 | 0.0001 | - | | 0.8710 | 2700 | 0.0001 | - | | 0.8871 | 2750 | 0.0 | - | | 0.9032 | 2800 | 0.0 | - | | 0.9194 | 2850 | 0.0 | - | | 0.9355 | 2900 | 0.0001 | - | | 0.9516 | 2950 | 0.0 | - | | 0.9677 | 3000 | 0.0001 | - | | 0.9839 | 3050 | 0.0 | - | | 1.0 | 3100 | 0.0 | - | ### Framework Versions - Python: 3.10.12 - SetFit: 1.0.3 - Sentence Transformers: 3.0.1 - Transformers: 4.39.0 - PyTorch: 2.3.0+cu121 - Datasets: 2.20.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. A SVC instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit <!-- - **Sentence Transformer:** [Unknown](https://huggingface.co/unknown) --> - **Classification head:** a SVC instance - **Maximum Sequence Length:** 384 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | 0 | <ul><li>'ESG funds often charge many times more for investment funds that are nearly indistinguishable from those without the ESG title.'</li><li>'They are California, Florida, Illinois, Nebraska, New York, and Wyoming.'</li><li>'And so it goes.'</li></ul> | | 1 | <ul><li>'Republicans attempted to pass a resolution that would have enabled Congress to force workers to accept a deal, which was fortunately blocked by (who else) Senator Bernie Sanders.'</li><li>'No government ever surrenders power, even its emergency powers—not really.'</li><li>'No citizen in a democratic society should want executives from $10trn financial institutions to play a larger role than they already do in defining and implementing social values.'</li></ul> | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("SOUMYADEEPSAR/Setfit_random_sample_svm_head") # Run inference preds = model("What could possibly go wrong?") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 3 | 23.4159 | 68 | | Label | Training Sample Count | |:------|:----------------------| | 0 | 136 | | 1 | 78 | ### Training Hyperparameters - batch_size: (8, 8) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - body_learning_rate: (2e-05, 2e-05) - head_learning_rate: 2e-05 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0003 | 1 | 0.3597 | - | | 0.0161 | 50 | 0.2693 | - | | 0.0323 | 100 | 0.2501 | - | | 0.0484 | 150 | 0.2691 | - | | 0.0645 | 200 | 0.063 | - | | 0.0806 | 250 | 0.0179 | - | | 0.0968 | 300 | 0.0044 | - | | 0.1129 | 350 | 0.0003 | - | | 0.1290 | 400 | 0.0005 | - | | 0.1452 | 450 | 0.0002 | - | | 0.1613 | 500 | 0.0003 | - | | 0.1774 | 550 | 0.0001 | - | | 0.1935 | 600 | 0.0001 | - | | 0.2097 | 650 | 0.0001 | - | | 0.2258 | 700 | 0.0001 | - | | 0.2419 | 750 | 0.0001 | - | | 0.2581 | 800 | 0.0 | - | | 0.2742 | 850 | 0.0001 | - | | 0.2903 | 900 | 0.0002 | - | | 0.3065 | 950 | 0.0 | - | | 0.3226 | 1000 | 0.0 | - | | 0.3387 | 1050 | 0.0002 | - | | 0.3548 | 1100 | 0.0 | - | | 0.3710 | 1150 | 0.0001 | - | | 0.3871 | 1200 | 0.0001 | - | | 0.4032 | 1250 | 0.0 | - | | 0.4194 | 1300 | 0.0 | - | | 0.4355 | 1350 | 0.0 | - | | 0.4516 | 1400 | 0.0001 | - | | 0.4677 | 1450 | 0.0 | - | | 0.4839 | 1500 | 0.0 | - | | 0.5 | 1550 | 0.0001 | - | | 0.5161 | 1600 | 0.0001 | - | | 0.5323 | 1650 | 0.0 | - | | 0.5484 | 1700 | 0.0 | - | | 0.5645 | 1750 | 0.0 | - | | 0.5806 | 1800 | 0.0 | - | | 0.5968 | 1850 | 0.0 | - | | 0.6129 | 1900 | 0.0 | - | | 0.6290 | 1950 | 0.0001 | - | | 0.6452 | 2000 | 0.0 | - | | 0.6613 | 2050 | 0.0 | - | | 0.6774 | 2100 | 0.0 | - | | 0.6935 | 2150 | 0.0001 | - | | 0.7097 | 2200 | 0.0 | - | | 0.7258 | 2250 | 0.0 | - | | 0.7419 | 2300 | 0.0001 | - | | 0.7581 | 2350 | 0.0001 | - | | 0.7742 | 2400 | 0.0001 | - | | 0.7903 | 2450 | 0.0 | - | | 0.8065 | 2500 | 0.0 | - | | 0.8226 | 2550 | 0.0 | - | | 0.8387 | 2600 | 0.0 | - | | 0.8548 | 2650 | 0.0001 | - | | 0.8710 | 2700 | 0.0001 | - | | 0.8871 | 2750 | 0.0 | - | | 0.9032 | 2800 | 0.0 | - | | 0.9194 | 2850 | 0.0 | - | | 0.9355 | 2900 | 0.0001 | - | | 0.9516 | 2950 | 0.0 | - | | 0.9677 | 3000 | 0.0001 | - | | 0.9839 | 3050 | 0.0 | - | | 1.0 | 3100 | 0.0 | - | ### Framework Versions - Python: 3.10.12 - SetFit: 1.0.3 - Sentence Transformers: 3.0.1 - Transformers: 4.39.0 - PyTorch: 2.3.0+cu121 - Datasets: 2.20.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "Now that the baffling, elongated, hyperreal coronation has occurred—no, not that one—and Liz Truss has become Prime Minister, a degree of intervention and action on energy bills has emerged, ahead of the looming socioeconomic catastrophe facing the country this winter."}, {"text": "But it needs to go much further."}, {"text": "What could possibly go wrong?"}, {"text": "If you are White you might feel bad about hurting others or you might feel afraid to lose this privilege….Overcoming White privilege is a job that must start with the White community…."}, {"text": "[JF: Obviously, immigration wasn’t stopped: the current population of the United States is 329.5 million—it passed 300 million in 2006."}], "inference": true}
task
[ "TEXT_CLASSIFICATION" ]
41,393
Helsinki-NLP/opus-mt-en-tw
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "en", "tw", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2023-08-16T11:31:33+00:00
1,124
1
--- license: apache-2.0 tags: - translation --- ### opus-mt-en-tw * source languages: en * target languages: tw * OPUS readme: [en-tw](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-tw/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-tw/opus-2020-01-08.zip) * test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-tw/opus-2020-01-08.test.txt) * test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-tw/opus-2020-01-08.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.en.tw | 38.2 | 0.577 |
null
Non_BioNLP
### opus-mt-en-tw * source languages: en * target languages: tw * OPUS readme: [en-tw](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/en-tw/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-08.zip](https://object.pouta.csc.fi/OPUS-MT-models/en-tw/opus-2020-01-08.zip) * test set translations: [opus-2020-01-08.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-tw/opus-2020-01-08.test.txt) * test set scores: [opus-2020-01-08.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/en-tw/opus-2020-01-08.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.en.tw | 38.2 | 0.577 |
{"license": "apache-2.0", "tags": ["translation"]}
task
[ "TRANSLATION" ]
41,394
TwentyNine/nllb-jpn-ain-v1
TwentyNine
translation
[ "transformers", "pytorch", "m2m_100", "text2text-generation", "translation", "ja", "ain", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-03-06T11:18:21Z
2024-03-07T02:07:33+00:00
29
1
--- language: - ja - ain license: cc-by-nc-4.0 pipeline_tag: translation --- # Disclaimer This model is only a preliminary experimental result and is not suitable for any sort of serious use. This model's capability is at best extremely limited and unreliable. That said, look forward to good things to come. This is my debut to the field of Ainu NLP. # Acknowledgements I am indebted to [Michal Ptaszynski](https://huggingface.co/ptaszynski) for his guidance and encouragement, [Karol Nowakowski](https://huggingface.co/karolnowakowski) for his work to compile an expansive parallel corpus, [David Dale](https://huggingface.co/cointegrated) for his [Medium article](https://cointegrated.medium.com/how-to-fine-tune-a-nllb-200-model-for-translating-a-new-language-a37fc706b865) that helped me to quickly and smoothly take this first step. # How to use this model The following is adapted from [slone/nllb-rus-tyv-v1](https://huggingface.co/slone/nllb-rus-tyv-v1). ```Python # the version of transformers is important! !pip install sentencepiece transformers==4.33 import torch from transformers import NllbTokenizer, AutoModelForSeq2SeqLM def fix_tokenizer(tokenizer, new_lang='ain_Latn'): """ Add a new language token to the tokenizer vocabulary (this should be done each time after its initialization) """ old_len = len(tokenizer) - int(new_lang in tokenizer.added_tokens_encoder) tokenizer.lang_code_to_id[new_lang] = old_len-1 tokenizer.id_to_lang_code[old_len-1] = new_lang # always move "mask" to the last position tokenizer.fairseq_tokens_to_ids["<mask>"] = len(tokenizer.sp_model) + len(tokenizer.lang_code_to_id) + tokenizer.fairseq_offset tokenizer.fairseq_tokens_to_ids.update(tokenizer.lang_code_to_id) tokenizer.fairseq_ids_to_tokens = {v: k for k, v in tokenizer.fairseq_tokens_to_ids.items()} if new_lang not in tokenizer._additional_special_tokens: tokenizer._additional_special_tokens.append(new_lang) # clear the added token encoder; otherwise a new token may end up there by mistake tokenizer.added_tokens_encoder = {} tokenizer.added_tokens_decoder = {} MODEL_URL = "TwentyNine/nllb-jpn-ain-v1" model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_URL) tokenizer = NllbTokenizer.from_pretrained(MODEL_URL) fix_tokenizer(tokenizer) def translate( text, model, tokenizer, src_lang='jpn_Jpan', tgt_lang='ain_Latn', max_length='auto', num_beams=4, n_out=None, **kwargs ): tokenizer.src_lang = src_lang encoded = tokenizer(text, return_tensors="pt", truncation=True, max_length=512) if max_length == 'auto': max_length = int(32 + 2.0 * encoded.input_ids.shape[1]) model.eval() generated_tokens = model.generate( **encoded.to(model.device), forced_bos_token_id=tokenizer.lang_code_to_id[tgt_lang], max_length=max_length, num_beams=num_beams, num_return_sequences=n_out or 1, **kwargs ) out = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) if isinstance(text, str) and n_out is None: return out[0] return translate("肉が食べたいな。", model=model, tokenizer=tokenizer) # 'kam c=e rusuy na.' ```
null
Non_BioNLP
# Disclaimer This model is only a preliminary experimental result and is not suitable for any sort of serious use. This model's capability is at best extremely limited and unreliable. That said, look forward to good things to come. This is my debut to the field of Ainu NLP. # Acknowledgements I am indebted to [Michal Ptaszynski](https://huggingface.co/ptaszynski) for his guidance and encouragement, [Karol Nowakowski](https://huggingface.co/karolnowakowski) for his work to compile an expansive parallel corpus, [David Dale](https://huggingface.co/cointegrated) for his [Medium article](https://cointegrated.medium.com/how-to-fine-tune-a-nllb-200-model-for-translating-a-new-language-a37fc706b865) that helped me to quickly and smoothly take this first step. # How to use this model The following is adapted from [slone/nllb-rus-tyv-v1](https://huggingface.co/slone/nllb-rus-tyv-v1). ```Python # the version of transformers is important! !pip install sentencepiece transformers==4.33 import torch from transformers import NllbTokenizer, AutoModelForSeq2SeqLM def fix_tokenizer(tokenizer, new_lang='ain_Latn'): """ Add a new language token to the tokenizer vocabulary (this should be done each time after its initialization) """ old_len = len(tokenizer) - int(new_lang in tokenizer.added_tokens_encoder) tokenizer.lang_code_to_id[new_lang] = old_len-1 tokenizer.id_to_lang_code[old_len-1] = new_lang # always move "mask" to the last position tokenizer.fairseq_tokens_to_ids["<mask>"] = len(tokenizer.sp_model) + len(tokenizer.lang_code_to_id) + tokenizer.fairseq_offset tokenizer.fairseq_tokens_to_ids.update(tokenizer.lang_code_to_id) tokenizer.fairseq_ids_to_tokens = {v: k for k, v in tokenizer.fairseq_tokens_to_ids.items()} if new_lang not in tokenizer._additional_special_tokens: tokenizer._additional_special_tokens.append(new_lang) # clear the added token encoder; otherwise a new token may end up there by mistake tokenizer.added_tokens_encoder = {} tokenizer.added_tokens_decoder = {} MODEL_URL = "TwentyNine/nllb-jpn-ain-v1" model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_URL) tokenizer = NllbTokenizer.from_pretrained(MODEL_URL) fix_tokenizer(tokenizer) def translate( text, model, tokenizer, src_lang='jpn_Jpan', tgt_lang='ain_Latn', max_length='auto', num_beams=4, n_out=None, **kwargs ): tokenizer.src_lang = src_lang encoded = tokenizer(text, return_tensors="pt", truncation=True, max_length=512) if max_length == 'auto': max_length = int(32 + 2.0 * encoded.input_ids.shape[1]) model.eval() generated_tokens = model.generate( **encoded.to(model.device), forced_bos_token_id=tokenizer.lang_code_to_id[tgt_lang], max_length=max_length, num_beams=num_beams, num_return_sequences=n_out or 1, **kwargs ) out = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) if isinstance(text, str) and n_out is None: return out[0] return translate("肉が食べたいな。", model=model, tokenizer=tokenizer) # 'kam c=e rusuy na.' ```
{"language": ["ja", "ain"], "license": "cc-by-nc-4.0", "pipeline_tag": "translation"}
task
[ "TRANSLATION" ]
41,395
Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V6-70B
Saxo
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "ko", "en", "jp", "cn", "dataset:Saxo/ko_cn_translation_tech_social_science_linkbricks_single_dataset", "dataset:Saxo/ko_jp_translation_tech_social_science_linkbricks_single_dataset", "dataset:Saxo/en_ko_translation_tech_science_linkbricks_single_dataset_with_prompt_text_huggingface", "dataset:Saxo/en_ko_translation_social_science_linkbricks_single_dataset_with_prompt_text_huggingface", "dataset:Saxo/ko_aspect_sentiment_sns_mall_sentiment_linkbricks_single_dataset_with_prompt_text_huggingface", "dataset:Saxo/ko_summarization_linkbricks_single_dataset_with_prompt_text_huggingface", "dataset:Saxo/OpenOrca_cleaned_kor_linkbricks_single_dataset_with_prompt_text_huggingface", "dataset:Saxo/ko_government_qa_total_linkbricks_single_dataset_with_prompt_text_huggingface_sampled", "dataset:Saxo/ko-news-corpus-1", "dataset:Saxo/ko-news-corpus-2", "dataset:Saxo/ko-news-corpus-3", "dataset:Saxo/ko-news-corpus-4", "dataset:Saxo/ko-news-corpus-5", "dataset:Saxo/ko-news-corpus-6", "dataset:Saxo/ko-news-corpus-7", "dataset:Saxo/ko-news-corpus-8", "dataset:Saxo/ko-news-corpus-9", "dataset:maywell/ko_Ultrafeedback_binarized", "dataset:youjunhyeok/ko-orca-pair-and-ultrafeedback-dpo", "dataset:lilacai/glaive-function-calling-v2-sharegpt", "dataset:kuotient/gsm8k-ko", "base_model:Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70B", "base_model:finetune:Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70B", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-12-26T07:06:30Z
2024-12-26T11:53:54+00:00
25
0
--- base_model: Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70B datasets: - Saxo/ko_cn_translation_tech_social_science_linkbricks_single_dataset - Saxo/ko_jp_translation_tech_social_science_linkbricks_single_dataset - Saxo/en_ko_translation_tech_science_linkbricks_single_dataset_with_prompt_text_huggingface - Saxo/en_ko_translation_social_science_linkbricks_single_dataset_with_prompt_text_huggingface - Saxo/ko_aspect_sentiment_sns_mall_sentiment_linkbricks_single_dataset_with_prompt_text_huggingface - Saxo/ko_summarization_linkbricks_single_dataset_with_prompt_text_huggingface - Saxo/OpenOrca_cleaned_kor_linkbricks_single_dataset_with_prompt_text_huggingface - Saxo/ko_government_qa_total_linkbricks_single_dataset_with_prompt_text_huggingface_sampled - Saxo/ko-news-corpus-1 - Saxo/ko-news-corpus-2 - Saxo/ko-news-corpus-3 - Saxo/ko-news-corpus-4 - Saxo/ko-news-corpus-5 - Saxo/ko-news-corpus-6 - Saxo/ko-news-corpus-7 - Saxo/ko-news-corpus-8 - Saxo/ko-news-corpus-9 - maywell/ko_Ultrafeedback_binarized - youjunhyeok/ko-orca-pair-and-ultrafeedback-dpo - lilacai/glaive-function-calling-v2-sharegpt - kuotient/gsm8k-ko language: - ko - en - jp - cn library_name: transformers license: apache-2.0 pipeline_tag: text-generation --- # Model Card for Model ID <div align="center"> <img src="http://www.linkbricks.com/wp-content/uploads/2024/11/fulllogo.png" /> </div> AIとビッグデータ分析の専門企業であるLinkbricksのデータサイエンティストであるジ・ユンソン(Saxo)ディレクターが <br> Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70Bベースモデルを使用し、H100-80G 8個を通じて約 日本語 SFT->DPO した日本語強化言語モデル。<br> 3千万件の日本ニュース及びウィキコーパスを基準に、様々なタスク別の日本語・韓国語・中国語・英語クロス学習データと数学及び論理判断データを通じて、日中韓英言語クロス補強処理と複雑な論理問題にも対応できるように訓練したモデルです。 -トークナイザーは、単語拡張なしでベースモデルのまま使用します。<br> -カスタマーレビューやソーシャル投稿の高次元分析及びコーディングとライティング、数学、論理判断などが強化されたモデル。<br> -128k-Context Window<br> -Function Call<br> -128k-Context Window<br> -Deepspeed Stage=3、rslora及びBAdam Layer Modeを使用 <br> -「transformers_version」: 「4.46.3」<br> <br><br> AI 와 빅데이터 분석 전문 기업인 Linkbricks의 데이터사이언티스트인 지윤성(Saxo) 이사가 <br> Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70B 베이스모델을 사용해서 H100-80G 8개를 통해 일본어 SFT->DPO 한 일본어 강화 언어 모델<br> 3천만건의 일본 뉴스 및 위키 코퍼스를 기준으로 다양한 테스크별 일본어-한국어-중국어-영어 교차 학습 데이터와 수학 및 논리판단 데이터를 통하여 한중일영 언어 교차 증강 처리와 복잡한 논리 문제 역시 대응 가능하도록 훈련한 모델이다.<br> -토크나이저는 단어 확장 없이 베이스 모델 그대로 사용<br> -고객 리뷰나 소셜 포스팅 고차원 분석 및 코딩과 작문, 수학, 논리판단 등이 강화된 모델<br> -128k-Context Window<br> -Function Call 및 Tool Calling 지원<br> -128k-Context Window<br> -Deepspeed Stage=3, rslora 및 BAdam Layer Mode 사용 <br> -"transformers_version": "4.46.3"<br> <br><br> Finetuned by Mr. Yunsung Ji (Saxo), a data scientist at Linkbricks, a company specializing in AI and big data analytics <br> Japanese SFT->DPO training model based on Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70B through 8 H100-80Gs as a Japanese boosting language model <br> It is a model that has been trained to handle Japanese-Korean-Chinese-English cross-training data and 30M Japanese news corpus and logic judgment data for various tasks to enable cross-fertilization processing and complex Korean logic & math problems. <br> -Tokenizer uses the base model without word expansion<br> -Models enhanced with high-dimensional analysis of customer reviews and social posts, as well as coding, writing, math and decision making<br> -Function Calling<br> -128k-Context Window<br> -Deepspeed Stage=3, use rslora and BAdam Layer Mode<br> <br><br> <a href="www.linkbricks.com">www.linkbricks.com</a>, <a href="www.linkbricks.vc">www.linkbricks.vc</a>
null
Non_BioNLP
# Model Card for Model ID <div align="center"> <img src="http://www.linkbricks.com/wp-content/uploads/2024/11/fulllogo.png" /> </div> AIとビッグデータ分析の専門企業であるLinkbricksのデータサイエンティストであるジ・ユンソン(Saxo)ディレクターが <br> Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70Bベースモデルを使用し、H100-80G 8個を通じて約 日本語 SFT->DPO した日本語強化言語モデル。<br> 3千万件の日本ニュース及びウィキコーパスを基準に、様々なタスク別の日本語・韓国語・中国語・英語クロス学習データと数学及び論理判断データを通じて、日中韓英言語クロス補強処理と複雑な論理問題にも対応できるように訓練したモデルです。 -トークナイザーは、単語拡張なしでベースモデルのまま使用します。<br> -カスタマーレビューやソーシャル投稿の高次元分析及びコーディングとライティング、数学、論理判断などが強化されたモデル。<br> -128k-Context Window<br> -Function Call<br> -128k-Context Window<br> -Deepspeed Stage=3、rslora及びBAdam Layer Modeを使用 <br> -「transformers_version」: 「4.46.3」<br> <br><br> AI 와 빅데이터 분석 전문 기업인 Linkbricks의 데이터사이언티스트인 지윤성(Saxo) 이사가 <br> Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70B 베이스모델을 사용해서 H100-80G 8개를 통해 일본어 SFT->DPO 한 일본어 강화 언어 모델<br> 3천만건의 일본 뉴스 및 위키 코퍼스를 기준으로 다양한 테스크별 일본어-한국어-중국어-영어 교차 학습 데이터와 수학 및 논리판단 데이터를 통하여 한중일영 언어 교차 증강 처리와 복잡한 논리 문제 역시 대응 가능하도록 훈련한 모델이다.<br> -토크나이저는 단어 확장 없이 베이스 모델 그대로 사용<br> -고객 리뷰나 소셜 포스팅 고차원 분석 및 코딩과 작문, 수학, 논리판단 등이 강화된 모델<br> -128k-Context Window<br> -Function Call 및 Tool Calling 지원<br> -128k-Context Window<br> -Deepspeed Stage=3, rslora 및 BAdam Layer Mode 사용 <br> -"transformers_version": "4.46.3"<br> <br><br> Finetuned by Mr. Yunsung Ji (Saxo), a data scientist at Linkbricks, a company specializing in AI and big data analytics <br> Japanese SFT->DPO training model based on Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70B through 8 H100-80Gs as a Japanese boosting language model <br> It is a model that has been trained to handle Japanese-Korean-Chinese-English cross-training data and 30M Japanese news corpus and logic judgment data for various tasks to enable cross-fertilization processing and complex Korean logic & math problems. <br> -Tokenizer uses the base model without word expansion<br> -Models enhanced with high-dimensional analysis of customer reviews and social posts, as well as coding, writing, math and decision making<br> -Function Calling<br> -128k-Context Window<br> -Deepspeed Stage=3, use rslora and BAdam Layer Mode<br> <br><br> <a href="www.linkbricks.com">www.linkbricks.com</a>, <a href="www.linkbricks.vc">www.linkbricks.vc</a>
{"base_model": "Saxo/Linkbricks-Horizon-AI-Japanese-Superb-V5-70B", "datasets": ["Saxo/ko_cn_translation_tech_social_science_linkbricks_single_dataset", "Saxo/ko_jp_translation_tech_social_science_linkbricks_single_dataset", "Saxo/en_ko_translation_tech_science_linkbricks_single_dataset_with_prompt_text_huggingface", "Saxo/en_ko_translation_social_science_linkbricks_single_dataset_with_prompt_text_huggingface", "Saxo/ko_aspect_sentiment_sns_mall_sentiment_linkbricks_single_dataset_with_prompt_text_huggingface", "Saxo/ko_summarization_linkbricks_single_dataset_with_prompt_text_huggingface", "Saxo/OpenOrca_cleaned_kor_linkbricks_single_dataset_with_prompt_text_huggingface", "Saxo/ko_government_qa_total_linkbricks_single_dataset_with_prompt_text_huggingface_sampled", "Saxo/ko-news-corpus-1", "Saxo/ko-news-corpus-2", "Saxo/ko-news-corpus-3", "Saxo/ko-news-corpus-4", "Saxo/ko-news-corpus-5", "Saxo/ko-news-corpus-6", "Saxo/ko-news-corpus-7", "Saxo/ko-news-corpus-8", "Saxo/ko-news-corpus-9", "maywell/ko_Ultrafeedback_binarized", "youjunhyeok/ko-orca-pair-and-ultrafeedback-dpo", "lilacai/glaive-function-calling-v2-sharegpt", "kuotient/gsm8k-ko"], "language": ["ko", "en", "jp", "cn"], "library_name": "transformers", "license": "apache-2.0", "pipeline_tag": "text-generation"}
task
[ "TRANSLATION", "SUMMARIZATION" ]
41,396
avans06/madlad400-7b-mt-bt-ct2-int8_float16
avans06
null
[ "transformers", "safetensors", "ctranslate2", "quantization", "int8", "float16", "madlad400", "multilingual", "en", "ru", "es", "fr", "de", "it", "pt", "pl", "nl", "vi", "tr", "sv", "id", "ro", "cs", "zh", "hu", "ja", "th", "fi", "fa", "uk", "da", "el", "no", "bg", "sk", "ko", "ar", "lt", "ca", "sl", "he", "et", "lv", "hi", "sq", "ms", "az", "sr", "ta", "hr", "kk", "is", "ml", "mr", "te", "af", "gl", "fil", "be", "mk", "eu", "bn", "ka", "mn", "bs", "uz", "ur", "sw", "yue", "ne", "kn", "kaa", "gu", "si", "cy", "eo", "la", "hy", "ky", "tg", "ga", "mt", "my", "km", "tt", "so", "ku", "ps", "pa", "rw", "lo", "ha", "dv", "fy", "lb", "ckb", "mg", "gd", "am", "ug", "ht", "grc", "hmn", "sd", "jv", "mi", "tk", "ceb", "yi", "ba", "fo", "or", "xh", "su", "kl", "ny", "sm", "sn", "co", "zu", "ig", "yo", "pap", "st", "haw", "as", "oc", "cv", "lus", "tet", "gsw", "sah", "br", "rm", "sa", "bo", "om", "se", "ce", "cnh", "ilo", "hil", "udm", "os", "lg", "ti", "vec", "ts", "tyv", "kbd", "ee", "iba", "av", "kha", "to", "tn", "nso", "fj", "zza", "ak", "ada", "otq", "dz", "bua", "cfm", "ln", "chm", "gn", "krc", "wa", "hif", "yua", "srn", "war", "rom", "bik", "pam", "sg", "lu", "ady", "kbp", "syr", "ltg", "myv", "iso", "kac", "bho", "ay", "kum", "qu", "za", "pag", "ngu", "ve", "pck", "zap", "tyz", "hui", "bbc", "tzo", "tiv", "ksd", "gom", "min", "ang", "nhe", "bgp", "nzi", "nnb", "nv", "zxx", "bci", "kv", "new", "mps", "alt", "meu", "bew", "fon", "iu", "abt", "mgh", "mnw", "tvl", "dov", "tlh", "ho", "kw", "mrj", "meo", "crh", "mbt", "emp", "ace", "ium", "mam", "gym", "mai", "crs", "pon", "ubu", "fip", "quc", "gv", "kj", "btx", "ape", "chk", "rcf", "shn", "tzh", "mdf", "ppk", "ss", "gag", "cab", "kri", "seh", "ibb", "tbz", "bru", "enq", "ach", "cuk", "kmb", "wo", "kek", "qub", "tab", "bts", "kos", "rwo", "cak", "tuc", "bum", "cjk", "gil", "stq", "tsg", "quh", "mak", "arn", "ban", "jiv", "sja", "yap", "tcy", "toj", "twu", "xal", "amu", "rmc", "hus", "nia", "kjh", "bm", "guh", "mas", "acf", "dtp", "ksw", "bzj", "din", "zne", "mad", "msi", "mag", "mkn", "kg", "lhu", "ch", "qvi", "mh", "djk", "sus", "mfe", "srm", "dyu", "ctu", "gui", "pau", "inb", "bi", "mni", "guc", "jam", "wal", "jac", "bas", "gor", "skr", "nyu", "noa", "sda", "gub", "nog", "cni", "teo", "tdx", "sxn", "rki", "nr", "frp", "alz", "taj", "lrc", "cce", "rn", "jvn", "hvn", "nij", "dwr", "izz", "msm", "bus", "ktu", "chr", "maz", "tzj", "suz", "knj", "bim", "gvl", "bqc", "tca", "pis", "prk", "laj", "mel", "qxr", "niq", "ahk", "shp", "hne", "spp", "koi", "krj", "quf", "luz", "agr", "tsc", "mqy", "gof", "gbm", "miq", "dje", "awa", "bjj", "qvz", "sjp", "tll", "raj", "kjg", "bgz", "quy", "cbk", "akb", "oj", "ify", "mey", "ks", "cac", "brx", "qup", "syl", "jax", "ff", "ber", "tks", "trp", "mrw", "adh", "smt", "srr", "ffm", "qvc", "mtr", "ann", "aa", "noe", "nut", "gyn", "kwi", "xmm", "msb", "arxiv:2309.04662", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2023-12-24T13:11:00Z
2023-12-24T14:52:20+00:00
11
2
--- language: - multilingual - en - ru - es - fr - de - it - pt - pl - nl - vi - tr - sv - id - ro - cs - zh - hu - ja - th - fi - fa - uk - da - el - 'no' - bg - sk - ko - ar - lt - ca - sl - he - et - lv - hi - sq - ms - az - sr - ta - hr - kk - is - ml - mr - te - af - gl - fil - be - mk - eu - bn - ka - mn - bs - uz - ur - sw - yue - ne - kn - kaa - gu - si - cy - eo - la - hy - ky - tg - ga - mt - my - km - tt - so - ku - ps - pa - rw - lo - ha - dv - fy - lb - ckb - mg - gd - am - ug - ht - grc - hmn - sd - jv - mi - tk - ceb - yi - ba - fo - or - xh - su - kl - ny - sm - sn - co - zu - ig - yo - pap - st - haw - as - oc - cv - lus - tet - gsw - sah - br - rm - sa - bo - om - se - ce - cnh - ilo - hil - udm - os - lg - ti - vec - ts - tyv - kbd - ee - iba - av - kha - to - tn - nso - fj - zza - ak - ada - otq - dz - bua - cfm - ln - chm - gn - krc - wa - hif - yua - srn - war - rom - bik - pam - sg - lu - ady - kbp - syr - ltg - myv - iso - kac - bho - ay - kum - qu - za - pag - ngu - ve - pck - zap - tyz - hui - bbc - tzo - tiv - ksd - gom - min - ang - nhe - bgp - nzi - nnb - nv - zxx - bci - kv - new - mps - alt - meu - bew - fon - iu - abt - mgh - mnw - tvl - dov - tlh - ho - kw - mrj - meo - crh - mbt - emp - ace - ium - mam - gym - mai - crs - pon - ubu - fip - quc - gv - kj - btx - ape - chk - rcf - shn - tzh - mdf - ppk - ss - gag - cab - kri - seh - ibb - tbz - bru - enq - ach - cuk - kmb - wo - kek - qub - tab - bts - kos - rwo - cak - tuc - bum - cjk - gil - stq - tsg - quh - mak - arn - ban - jiv - sja - yap - tcy - toj - twu - xal - amu - rmc - hus - nia - kjh - bm - guh - mas - acf - dtp - ksw - bzj - din - zne - mad - msi - mag - mkn - kg - lhu - ch - qvi - mh - djk - sus - mfe - srm - dyu - ctu - gui - pau - inb - bi - mni - guc - jam - wal - jac - bas - gor - skr - nyu - noa - sda - gub - nog - cni - teo - tdx - sxn - rki - nr - frp - alz - taj - lrc - cce - rn - jvn - hvn - nij - dwr - izz - msm - bus - ktu - chr - maz - tzj - suz - knj - bim - gvl - bqc - tca - pis - prk - laj - mel - qxr - niq - ahk - shp - hne - spp - koi - krj - quf - luz - agr - tsc - mqy - gof - gbm - miq - dje - awa - bjj - qvz - sjp - tll - raj - kjg - bgz - quy - cbk - akb - oj - ify - mey - ks - cac - brx - qup - syl - jax - ff - ber - tks - trp - mrw - adh - smt - srr - ffm - qvc - mtr - ann - kaa - aa - noe - nut - gyn - kwi - xmm - msb license: apache-2.0 tags: - ctranslate2 - quantization - int8 - float16 - madlad400 --- # madlad400-7b-mt-bt model for CTranslate2 **The model is quantized version of the [jbochi/madlad400-7b-mt-bt](https://huggingface.co/jbochi/madlad400-7b-mt-bt) with int8_float16 quantization and can be used in [CTranslate2](https://github.com/OpenNMT/CTranslate2).** **madlad400 is a multilingual machine translation model based on the T5 architecture introduced by Google DeepMind, Google Research in Sep 2023. It was trained on 250 billion tokens covering over 450 languages using publicly available data. The paper is titled "MADLAD-400: A Multilingual And Document-Level Large Audited Dataset" ([arXiv:2309.04662](https://arxiv.org/abs/2309.04662)).** **madlad400-7b-mt-bt is finetuned version of the 7.2B parameter model on backtranslated data. Authors say in the [paper](https://arxiv.org/pdf/2309.04662.pdf) that:** > While this setup is very likely sub-optimal, we see that back-translation > greatly improves en2xx translation (by 3.0 chrf, in the case of Flores-200) in most cases. ## Conversion details The original model was converted on 2023-12 with the following command: ``` ct2-transformers-converter --model jbochi/madlad400-7b-mt-bt --quantization int8_float16 --output_dir madlad400-7b-mt-bt-ct2-int8_float16 \ --copy_files added_tokens.json generation_config.json model.safetensors.index.json shared_vocabulary.json special_tokens_map.json spiece.model tokenizer.json tokenizer_config.json ``` ## Example This example code is obtained from [CTranslate2_transformers](https://opennmt.net/CTranslate2/guides/transformers.html#t5). More detailed information about the `translate_batch` methon can be found at [CTranslate2_Translator.translate_batch](https://opennmt.net/CTranslate2/python/ctranslate2.Translator.html#ctranslate2.Translator.translate_batch). ```python import ctranslate2 import transformers translator = ctranslate2.Translator("avans06/madlad400-7b-mt-bt-ct2-int8_float16", compute_type="auto") tokenizer = transformers.AutoTokenizer.from_pretrained("jbochi/madlad400-7b-mt-bt") prefix = "<2zh> " input_text = "Who is Alan Turing?" input_tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(prefix + input_text)) results = translator.translate_batch([input_tokens]) output_tokens = results[0].hypotheses[0] output_text = tokenizer.decode(tokenizer.convert_tokens_to_ids(output_tokens)) ```
null
Non_BioNLP
# madlad400-7b-mt-bt model for CTranslate2 **The model is quantized version of the [jbochi/madlad400-7b-mt-bt](https://huggingface.co/jbochi/madlad400-7b-mt-bt) with int8_float16 quantization and can be used in [CTranslate2](https://github.com/OpenNMT/CTranslate2).** **madlad400 is a multilingual machine translation model based on the T5 architecture introduced by Google DeepMind, Google Research in Sep 2023. It was trained on 250 billion tokens covering over 450 languages using publicly available data. The paper is titled "MADLAD-400: A Multilingual And Document-Level Large Audited Dataset" ([arXiv:2309.04662](https://arxiv.org/abs/2309.04662)).** **madlad400-7b-mt-bt is finetuned version of the 7.2B parameter model on backtranslated data. Authors say in the [paper](https://arxiv.org/pdf/2309.04662.pdf) that:** > While this setup is very likely sub-optimal, we see that back-translation > greatly improves en2xx translation (by 3.0 chrf, in the case of Flores-200) in most cases. ## Conversion details The original model was converted on 2023-12 with the following command: ``` ct2-transformers-converter --model jbochi/madlad400-7b-mt-bt --quantization int8_float16 --output_dir madlad400-7b-mt-bt-ct2-int8_float16 \ --copy_files added_tokens.json generation_config.json model.safetensors.index.json shared_vocabulary.json special_tokens_map.json spiece.model tokenizer.json tokenizer_config.json ``` ## Example This example code is obtained from [CTranslate2_transformers](https://opennmt.net/CTranslate2/guides/transformers.html#t5). More detailed information about the `translate_batch` methon can be found at [CTranslate2_Translator.translate_batch](https://opennmt.net/CTranslate2/python/ctranslate2.Translator.html#ctranslate2.Translator.translate_batch). ```python import ctranslate2 import transformers translator = ctranslate2.Translator("avans06/madlad400-7b-mt-bt-ct2-int8_float16", compute_type="auto") tokenizer = transformers.AutoTokenizer.from_pretrained("jbochi/madlad400-7b-mt-bt") prefix = "<2zh> " input_text = "Who is Alan Turing?" input_tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(prefix + input_text)) results = translator.translate_batch([input_tokens]) output_tokens = results[0].hypotheses[0] output_text = tokenizer.decode(tokenizer.convert_tokens_to_ids(output_tokens)) ```
{"language": ["multilingual", "en", "ru", "es", "fr", "de", "it", "pt", "pl", "nl", "vi", "tr", "sv", "id", "ro", "cs", "zh", "hu", "ja", "th", "fi", "fa", "uk", "da", "el", "no", "bg", "sk", "ko", "ar", "lt", "ca", "sl", "he", "et", "lv", "hi", "sq", "ms", "az", "sr", "ta", "hr", "kk", "is", "ml", "mr", "te", "af", "gl", "fil", "be", "mk", "eu", "bn", "ka", "mn", "bs", "uz", "ur", "sw", "yue", "ne", "kn", "kaa", "gu", "si", "cy", "eo", "la", "hy", "ky", "tg", "ga", "mt", "my", "km", "tt", "so", "ku", "ps", "pa", "rw", "lo", "ha", "dv", "fy", "lb", "ckb", "mg", "gd", "am", "ug", "ht", "grc", "hmn", "sd", "jv", "mi", "tk", "ceb", "yi", "ba", "fo", "or", "xh", "su", "kl", "ny", "sm", "sn", "co", "zu", "ig", "yo", "pap", "st", "haw", "as", "oc", "cv", "lus", "tet", "gsw", "sah", "br", "rm", "sa", "bo", "om", "se", "ce", "cnh", "ilo", "hil", "udm", "os", "lg", "ti", "vec", "ts", "tyv", "kbd", "ee", "iba", "av", "kha", "to", "tn", "nso", "fj", "zza", "ak", "ada", "otq", "dz", "bua", "cfm", "ln", "chm", "gn", "krc", "wa", "hif", "yua", "srn", "war", "rom", "bik", "pam", "sg", "lu", "ady", "kbp", "syr", "ltg", "myv", "iso", "kac", "bho", "ay", "kum", "qu", "za", "pag", "ngu", "ve", "pck", "zap", "tyz", "hui", "bbc", "tzo", "tiv", "ksd", "gom", "min", "ang", "nhe", "bgp", "nzi", "nnb", "nv", "zxx", "bci", "kv", "new", "mps", "alt", "meu", "bew", "fon", "iu", "abt", "mgh", "mnw", "tvl", "dov", "tlh", "ho", "kw", "mrj", "meo", "crh", "mbt", "emp", "ace", "ium", "mam", "gym", "mai", "crs", "pon", "ubu", "fip", "quc", "gv", "kj", "btx", "ape", "chk", "rcf", "shn", "tzh", "mdf", "ppk", "ss", "gag", "cab", "kri", "seh", "ibb", "tbz", "bru", "enq", "ach", "cuk", "kmb", "wo", "kek", "qub", "tab", "bts", "kos", "rwo", "cak", "tuc", "bum", "cjk", "gil", "stq", "tsg", "quh", "mak", "arn", "ban", "jiv", "sja", "yap", "tcy", "toj", "twu", "xal", "amu", "rmc", "hus", "nia", "kjh", "bm", "guh", "mas", "acf", "dtp", "ksw", "bzj", "din", "zne", "mad", "msi", "mag", "mkn", "kg", "lhu", "ch", "qvi", "mh", "djk", "sus", "mfe", "srm", "dyu", "ctu", "gui", "pau", "inb", "bi", "mni", "guc", "jam", "wal", "jac", "bas", "gor", "skr", "nyu", "noa", "sda", "gub", "nog", "cni", "teo", "tdx", "sxn", "rki", "nr", "frp", "alz", "taj", "lrc", "cce", "rn", "jvn", "hvn", "nij", "dwr", "izz", "msm", "bus", "ktu", "chr", "maz", "tzj", "suz", "knj", "bim", "gvl", "bqc", "tca", "pis", "prk", "laj", "mel", "qxr", "niq", "ahk", "shp", "hne", "spp", "koi", "krj", "quf", "luz", "agr", "tsc", "mqy", "gof", "gbm", "miq", "dje", "awa", "bjj", "qvz", "sjp", "tll", "raj", "kjg", "bgz", "quy", "cbk", "akb", "oj", "ify", "mey", "ks", "cac", "brx", "qup", "syl", "jax", "ff", "ber", "tks", "trp", "mrw", "adh", "smt", "srr", "ffm", "qvc", "mtr", "ann", "kaa", "aa", "noe", "nut", "gyn", "kwi", "xmm", "msb"], "license": "apache-2.0", "tags": ["ctranslate2", "quantization", "int8", "float16", "madlad400"]}
task
[ "TRANSLATION" ]
41,397
mradermacher/airoboros-110b-3.3-GGUF
mradermacher
null
[ "transformers", "gguf", "en", "dataset:jondurbin/airoboros-3.2", "dataset:bluemoon-fandom-1-1-rp-cleaned", "dataset:boolq", "dataset:jondurbin/gutenberg-dpo-v0.1", "dataset:LDJnr/Capybara", "dataset:jondurbin/cinematika-v0.1", "dataset:glaiveai/glaive-function-calling-v2", "dataset:grimulkan/LimaRP-augmented", "dataset:piqa", "dataset:Vezora/Tested-22k-Python-Alpaca", "dataset:mattpscott/airoboros-summarization", "dataset:unalignment/toxic-dpo-v0.2", "base_model:jondurbin/airoboros-110b-3.3", "base_model:quantized:jondurbin/airoboros-110b-3.3", "license:other", "endpoints_compatible", "region:us", "conversational" ]
2024-12-03T06:00:05Z
2025-03-13T18:13:37+00:00
47
0
--- base_model: jondurbin/airoboros-110b-3.3 datasets: - jondurbin/airoboros-3.2 - bluemoon-fandom-1-1-rp-cleaned - boolq - jondurbin/gutenberg-dpo-v0.1 - LDJnr/Capybara - jondurbin/cinematika-v0.1 - glaiveai/glaive-function-calling-v2 - grimulkan/LimaRP-augmented - piqa - Vezora/Tested-22k-Python-Alpaca - mattpscott/airoboros-summarization - unalignment/toxic-dpo-v0.2 language: - en library_name: transformers license: other license_name: tongyi-qianwen license_link: https://huggingface.co/Qwen/Qwen1.5-110B/blob/main/LICENSE quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/jondurbin/airoboros-110b-3.3 <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/airoboros-110b-3.3-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q2_K.gguf) | Q2_K | 41.3 | | | [GGUF](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_S.gguf) | Q3_K_S | 48.6 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_M.gguf.part2of2) | Q3_K_M | 53.8 | lower quality | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_L.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_L.gguf.part2of2) | Q3_K_L | 58.2 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.IQ4_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.IQ4_XS.gguf.part2of2) | IQ4_XS | 60.3 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q4_K_S.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q4_K_S.gguf.part2of2) | Q4_K_S | 63.6 | fast, recommended | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q4_K_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q4_K_M.gguf.part2of2) | Q4_K_M | 67.3 | fast, recommended | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q5_K_S.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q5_K_S.gguf.part2of2) | Q5_K_S | 76.7 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q5_K_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q5_K_M.gguf.part2of2) | Q5_K_M | 78.9 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q6_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q6_K.gguf.part2of2) | Q6_K | 91.3 | very good quality | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q8_0.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q8_0.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q8_0.gguf.part3of3) | Q8_0 | 118.3 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to. <!-- end -->
null
Non_BioNLP
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/jondurbin/airoboros-110b-3.3 <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/airoboros-110b-3.3-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q2_K.gguf) | Q2_K | 41.3 | | | [GGUF](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_S.gguf) | Q3_K_S | 48.6 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_M.gguf.part2of2) | Q3_K_M | 53.8 | lower quality | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_L.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q3_K_L.gguf.part2of2) | Q3_K_L | 58.2 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.IQ4_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.IQ4_XS.gguf.part2of2) | IQ4_XS | 60.3 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q4_K_S.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q4_K_S.gguf.part2of2) | Q4_K_S | 63.6 | fast, recommended | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q4_K_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q4_K_M.gguf.part2of2) | Q4_K_M | 67.3 | fast, recommended | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q5_K_S.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q5_K_S.gguf.part2of2) | Q5_K_S | 76.7 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q5_K_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q5_K_M.gguf.part2of2) | Q5_K_M | 78.9 | | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q6_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q6_K.gguf.part2of2) | Q6_K | 91.3 | very good quality | | [PART 1](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q8_0.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q8_0.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/airoboros-110b-3.3-GGUF/resolve/main/airoboros-110b-3.3.Q8_0.gguf.part3of3) | Q8_0 | 118.3 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to. <!-- end -->
{"base_model": "jondurbin/airoboros-110b-3.3", "datasets": ["jondurbin/airoboros-3.2", "bluemoon-fandom-1-1-rp-cleaned", "boolq", "jondurbin/gutenberg-dpo-v0.1", "LDJnr/Capybara", "jondurbin/cinematika-v0.1", "glaiveai/glaive-function-calling-v2", "grimulkan/LimaRP-augmented", "piqa", "Vezora/Tested-22k-Python-Alpaca", "mattpscott/airoboros-summarization", "unalignment/toxic-dpo-v0.2"], "language": ["en"], "library_name": "transformers", "license": "other", "license_name": "tongyi-qianwen", "license_link": "https://huggingface.co/Qwen/Qwen1.5-110B/blob/main/LICENSE", "quantized_by": "mradermacher"}
task
[ "SUMMARIZATION" ]
41,398
Casual-Autopsy/vntl-7b-v0.3.1-gguf
Casual-Autopsy
translation
[ "gguf", "translation", "en", "ja", "dataset:lmg-anon/VNTL-v2.5-1k", "license:llama2", "endpoints_compatible", "region:us" ]
2024-04-18T12:47:58Z
2024-05-24T14:36:58+00:00
22
0
--- datasets: - lmg-anon/VNTL-v2.5-1k language: - en - ja license: llama2 pipeline_tag: translation --- Q6_K quant of [lmg-anon/vntl-7b-v0.3.1-hf](https://huggingface.co/lmg-anon/vntl-7b-v0.3.1-hf) SillyTavern presets are provided. This repository contains some GGUF quantizations of the merge of the [experimental VNTL v0.3.1 lora](https://huggingface.co/lmg-anon/vntl-7b-v0.3.1-lora). This is a prompt example: ``` <<START>> Name: Uryuu Shingo (瓜生 新吾) | Gender: Male | Aliases: Onii-chan (お兄ちゃん) Name: Uryuu Sakuno (瓜生 桜乃) | Gender: Female <<JAPANESE>> [桜乃]: 『……ごめん』 <<ENGLISH>> (fidelity = absolute) [Sakuno]: 『... Sorry.』</s> <<JAPANESE>> [新吾]: 「ううん、こう言っちゃなんだけど、迷子でよかったよ。桜乃は可愛いから、いろいろ心配しちゃってたんだぞ俺」 <<ENGLISH>> (fidelity = high) ``` The generated translation for that prompt, with temperature 0, is: ``` [Shingo]: 「No, don't apologize. I'm just glad you're safe. You're so cute, Sakuno, I was worried sick.」 ```
null
Non_BioNLP
Q6_K quant of [lmg-anon/vntl-7b-v0.3.1-hf](https://huggingface.co/lmg-anon/vntl-7b-v0.3.1-hf) SillyTavern presets are provided. This repository contains some GGUF quantizations of the merge of the [experimental VNTL v0.3.1 lora](https://huggingface.co/lmg-anon/vntl-7b-v0.3.1-lora). This is a prompt example: ``` <<START>> Name: Uryuu Shingo (瓜生 新吾) | Gender: Male | Aliases: Onii-chan (お兄ちゃん) Name: Uryuu Sakuno (瓜生 桜乃) | Gender: Female <<JAPANESE>> [桜乃]: 『……ごめん』 <<ENGLISH>> (fidelity = absolute) [Sakuno]: 『... Sorry.』</s> <<JAPANESE>> [新吾]: 「ううん、こう言っちゃなんだけど、迷子でよかったよ。桜乃は可愛いから、いろいろ心配しちゃってたんだぞ俺」 <<ENGLISH>> (fidelity = high) ``` The generated translation for that prompt, with temperature 0, is: ``` [Shingo]: 「No, don't apologize. I'm just glad you're safe. You're so cute, Sakuno, I was worried sick.」 ```
{"datasets": ["lmg-anon/VNTL-v2.5-1k"], "language": ["en", "ja"], "license": "llama2", "pipeline_tag": "translation"}
task
[ "TRANSLATION" ]
41,400
Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-6_0bpw_exl2
Zoyd
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "Llama-3", "instruct", "finetune", "chatml", "DPO", "RLHF", "gpt4", "synthetic data", "distillation", "function calling", "json mode", "axolotl", "merges", "conversational", "en", "dataset:teknium/OpenHermes-2.5", "base_model:NousResearch/Hermes-2-Pro-Llama-3-8B", "base_model:quantized:NousResearch/Hermes-2-Pro-Llama-3-8B", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "6-bit", "exl2", "region:us" ]
2024-05-26T09:50:54Z
2024-05-26T10:11:31+00:00
9
0
--- base_model: NousResearch/Hermes-2-Pro-Llama-3-8B datasets: - teknium/OpenHermes-2.5 language: - en license: apache-2.0 tags: - Llama-3 - instruct - finetune - chatml - DPO - RLHF - gpt4 - synthetic data - distillation - function calling - json mode - axolotl - merges widget: - example_title: Hermes 2 Pro Llama-3 Instruct Merge messages: - role: system content: You are a sentient, superintelligent artificial general intelligence, here to teach and assist me. - role: user content: Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world. model-index: - name: Hermes-2-Pro-Llama-3-Instruct-8B-Merge results: [] --- **Exllamav2** quant (**exl2** / **6.0 bpw**) made with ExLlamaV2 v0.0.21 Other EXL2 quants: | **Quant** | **Model Size** | **lm_head** | | ----- | ---------- | ------- | |<center>**[2.2](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-2_2bpw_exl2)**</center> | <center>3250 MB</center> | <center>6</center> | |<center>**[2.5](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-2_5bpw_exl2)**</center> | <center>3479 MB</center> | <center>6</center> | |<center>**[3.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-3_0bpw_exl2)**</center> | <center>3895 MB</center> | <center>6</center> | |<center>**[3.5](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-3_5bpw_exl2)**</center> | <center>4311 MB</center> | <center>6</center> | |<center>**[3.75](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-3_75bpw_exl2)**</center> | <center>4519 MB</center> | <center>6</center> | |<center>**[4.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-4_0bpw_exl2)**</center> | <center>4726 MB</center> | <center>6</center> | |<center>**[4.25](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-4_25bpw_exl2)**</center> | <center>4933 MB</center> | <center>6</center> | |<center>**[5.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-5_0bpw_exl2)**</center> | <center>5558 MB</center> | <center>6</center> | |<center>**[6.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-6_0bpw_exl2)**</center> | <center>6497 MB</center> | <center>8</center> | |<center>**[6.5](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-6_5bpw_exl2)**</center> | <center>6910 MB</center> | <center>8</center> | |<center>**[8.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-8_0bpw_exl2)**</center> | <center>8049 MB</center> | <center>8</center> | # - Hermes-2 Θ Llama-3 8B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/HQnQmNM1L3KXGhp0wUzHH.png) ## Model Description Hermes-2 Θ (Theta) is the first experimental merged model released by [Nous Research](https://nousresearch.com/), in collaboration with Charles Goddard at [Arcee](https://www.arcee.ai/), the team behind MergeKit. Hermes-2 Θ is a merged and then further RLHF'ed version our excellent Hermes 2 Pro model and Meta's Llama-3 Instruct model to form a new model, Hermes-2 Θ, combining the best of both worlds of each model. ## Example Outputs ### Create New Mythos: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/dvKhnSvHdx4nTQIqB9Lpv.png) ### Chat with a Meta-Cognitive Entity ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/GwdCqowE6GQylineqehhx.png) ### Ask for a structured JSON output: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/us72aL9gwUXdqSHetRVRV.png) # Prompt Format Hermes 2 Θ uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model. This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns. This format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI. Prompt with system instruction (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are "Hermes 2", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|> <|im_start|>user Hello, who are you?<|im_end|> <|im_start|>assistant Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|> ``` This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are Hermes 2."}, {"role": "user", "content": "Hello, who are you?"} ] gen_input = tokenizer.apply_chat_template(messages, return_tensors="pt") model.generate(**gen_input) ``` When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure that the model continues with an assistant response. To utilize the prompt format without a system prompt, simply leave the line out. ## Prompt Format for Function Calling Our model was trained on specific system prompts and structures for Function Calling. While the system prompt looks complicated, we have created a GitHub repo containing code to easily build these based on real python functions. You should use the system role with this message, followed by a function signature json as this example shows here. ``` <|im_start|>system You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "get_stock_fundamentals", "description": "get_stock_fundamentals(symbol: str) -> dict - Get fundamental data for a given stock symbol using yfinance API.\\n\\n Args:\\n symbol (str): The stock symbol.\\n\\n Returns:\\n dict: A dictionary containing fundamental data.\\n Keys:\\n - \'symbol\': The stock symbol.\\n - \'company_name\': The long name of the company.\\n - \'sector\': The sector to which the company belongs.\\n - \'industry\': The industry to which the company belongs.\\n - \'market_cap\': The market capitalization of the company.\\n - \'pe_ratio\': The forward price-to-earnings ratio.\\n - \'pb_ratio\': The price-to-book ratio.\\n - \'dividend_yield\': The dividend yield.\\n - \'eps\': The trailing earnings per share.\\n - \'beta\': The beta value of the stock.\\n - \'52_week_high\': The 52-week high price of the stock.\\n - \'52_week_low\': The 52-week low price of the stock.", "parameters": {"type": "object", "properties": {"symbol": {"type": "string"}}, "required": ["symbol"]}}} </tools> Use the following pydantic model json schema for each tool call you will make: {"properties": {"arguments": {"title": "Arguments", "type": "object"}, "name": {"title": "Name", "type": "string"}}, "required": ["arguments", "name"], "title": "FunctionCall", "type": "object"} For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows: <tool_call> {"arguments": <args-dict>, "name": <function-name>} </tool_call><|im_end|> ``` To complete the function call, create a user prompt that follows the above system prompt, like so: ``` <|im_start|>user Fetch the stock fundamentals data for Tesla (TSLA)<|im_end|> ``` The model will then generate a tool call, which your inference code must parse, and plug into a function (see example inference code here: https://github.com/NousResearch/Hermes-Function-Calling): ``` <|im_start|>assistant <tool_call> {"arguments": {"symbol": "TSLA"}, "name": "get_stock_fundamentals"} </tool_call><|im_end|> ``` Once you parse the tool call, call the api and get the returned values for the call, and pass it back in as a new role, `tool` like so: ``` <|im_start|>tool <tool_response> {"name": "get_stock_fundamentals", "content": {'symbol': 'TSLA', 'company_name': 'Tesla, Inc.', 'sector': 'Consumer Cyclical', 'industry': 'Auto Manufacturers', 'market_cap': 611384164352, 'pe_ratio': 49.604652, 'pb_ratio': 9.762013, 'dividend_yield': None, 'eps': 4.3, 'beta': 2.427, '52_week_high': 299.29, '52_week_low': 152.37}} </tool_response> <|im_end|> ``` The assistant will then read in that data from the function's response, and generate a natural language response: ``` <|im_start|>assistant The stock fundamentals data for Tesla (TSLA) are as follows: - **Symbol**: TSLA - **Company Name**: Tesla, Inc. - **Sector**: Consumer Cyclical - **Industry**: Auto Manufacturers - **Market Capitalization**: $566,160,130,480 - **Forward Price-to-Earnings Ratio (PE Ratio)**: 42.73 - **Price-to-Book Ratio (PB Ratio)**: 9.04 - **Dividend Yield**: N/A - **Trailing Earnings Per Share (EPS)**: $4.3 - **Beta Value of the Stock**: 2.42 - **52-Week High Price of the Stock**: $299.29 - **52-Week Low Price of the Stock**: $152.37 This information provides a snapshot of Tesla's financial position and performance based on the fundamental data obtained from the yfinance API. It shows that Tesla has a substantial market capitalization and a relatively high P/E and P/B ratio compared to other stocks in its industry. The company does not pay a dividend at the moment, which is reflected by a 'Dividend Yield' of 'None'. The Beta value indicates that Tesla's stock has a moderate level of volatility relative to the market. The 52-week high and low prices give an idea of the stock's range over the past year. This data can be useful when assessing investment opportunities and making investment decisions.<|im_end|> ``` ## Prompt Format for JSON Mode / Structured Outputs Our model was also trained on a specific system prompt for Structured Outputs, which should respond with **only** a json object response, in a specific json schema. Your schema can be made from a pydantic object using our codebase, with the standalone script `jsonmode.py` available here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main ``` <|im_start|>system You are a helpful assistant that answers in JSON. Here's the json schema you must adhere to:\n<schema>\n{schema}\n</schema><|im_end|> ``` Given the {schema} that you provide, it should follow the format of that json to create it's response, all you have to do is give a typical user prompt, and it will respond in JSON. # Benchmarks ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/suBbCUIxpcRvhCv6-DBDQ.png) ## GPT4All: ``` |    Task     |Version| Metric |Value |   |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge|      0|acc     |0.5529|±  |0.0145| |             |       |acc_norm|0.5870|±  |0.0144| |arc_easy     |      0|acc     |0.8371|±  |0.0076| |             |       |acc_norm|0.8144|±  |0.0080| |boolq        |      1|acc     |0.8599|±  |0.0061| |hellaswag    |      0|acc     |0.6133|±  |0.0049| |             |       |acc_norm|0.7989|±  |0.0040| |openbookqa   |      0|acc     |0.3940|±  |0.0219| |             |       |acc_norm|0.4680|±  |0.0223| |piqa         |      0|acc     |0.8063|±  |0.0092| |             |       |acc_norm|0.8156|±  |0.0090| |winogrande   |      0|acc     |0.7372|±  |0.0124| ``` Average: 72.59 ## AGIEval: ``` |             Task             |Version| Metric |Value |   |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat              |      0|acc     |0.2441|±  |0.0270| |                              |       |acc_norm|0.2441|±  |0.0270| |agieval_logiqa_en             |      0|acc     |0.3687|±  |0.0189| |                              |       |acc_norm|0.3840|±  |0.0191| |agieval_lsat_ar               |      0|acc     |0.2304|±  |0.0278| |                              |       |acc_norm|0.2174|±  |0.0273| |agieval_lsat_lr               |      0|acc     |0.5471|±  |0.0221| |                              |       |acc_norm|0.5373|±  |0.0221| |agieval_lsat_rc               |      0|acc     |0.6617|±  |0.0289| |                              |       |acc_norm|0.6357|±  |0.0294| |agieval_sat_en                |      0|acc     |0.7670|±  |0.0295| |                              |       |acc_norm|0.7379|±  |0.0307| |agieval_sat_en_without_passage|      0|acc     |0.4417|±  |0.0347| |                              |       |acc_norm|0.4223|±  |0.0345| |agieval_sat_math              |      0|acc     |0.4000|±  |0.0331| |                              |       |acc_norm|0.3455|±  |0.0321| ``` Average: 44.05 ## BigBench: ``` |                      Task                      |Version|       Metric        |Value |   |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |bigbench_causal_judgement                       |      0|multiple_choice_grade|0.6000|±  |0.0356| |bigbench_date_understanding                     |      0|multiple_choice_grade|0.6585|±  |0.0247| |bigbench_disambiguation_qa                      |      0|multiple_choice_grade|0.3178|±  |0.0290| |bigbench_geometric_shapes                       |      0|multiple_choice_grade|0.2340|±  |0.0224| |                                                |       |exact_str_match      |0.0000|±  |0.0000| |bigbench_logical_deduction_five_objects         |      0|multiple_choice_grade|0.2980|±  |0.0205| |bigbench_logical_deduction_seven_objects        |      0|multiple_choice_grade|0.2057|±  |0.0153| |bigbench_logical_deduction_three_objects        |      0|multiple_choice_grade|0.5367|±  |0.0288| |bigbench_movie_recommendation                   |      0|multiple_choice_grade|0.4040|±  |0.0220| |bigbench_navigate                               |      0|multiple_choice_grade|0.4970|±  |0.0158| |bigbench_reasoning_about_colored_objects        |      0|multiple_choice_grade|0.7075|±  |0.0102| |bigbench_ruin_names                             |      0|multiple_choice_grade|0.4821|±  |0.0236| |bigbench_salient_translation_error_detection    |      0|multiple_choice_grade|0.2295|±  |0.0133| |bigbench_snarks                                 |      0|multiple_choice_grade|0.6906|±  |0.0345| |bigbench_sports_understanding                   |      0|multiple_choice_grade|0.5375|±  |0.0159| |bigbench_temporal_sequences                     |      0|multiple_choice_grade|0.6270|±  |0.0153| |bigbench_tracking_shuffled_objects_five_objects |      0|multiple_choice_grade|0.2216|±  |0.0118| |bigbench_tracking_shuffled_objects_seven_objects|      0|multiple_choice_grade|0.1594|±  |0.0088| |bigbench_tracking_shuffled_objects_three_objects|      0|multiple_choice_grade|0.5367|±  |0.0288| ``` Average: 44.13 **IFEval**: 72.64 **MT_Bench**: Turn 1 - 8.3875, Turn 2 - 8.00625, Average - 8.196875 # Inference Code Here is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM) Note: To use function calling, you should see the github repo above. ```python # Code to inference Hermes with HF Transformers # Requires pytorch, transformers, bitsandbytes, sentencepiece, protobuf, and flash-attn packages import torch from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM import bitsandbytes, flash_attn tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Theta-Llama-3-8B', trust_remote_code=True) model = LlamaForCausalLM.from_pretrained( "NousResearch/Hermes-2-Theta-Llama-3-8B", torch_dtype=torch.float16, device_map="auto", load_in_8bit=False, load_in_4bit=True, use_flash_attention_2=True ) prompts = [ """<|im_start|>system You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.<|im_end|> <|im_start|>user Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world.<|im_end|> <|im_start|>assistant""", ] for chat in prompts: print(chat) input_ids = tokenizer(chat, return_tensors="pt").input_ids.to("cuda") generated_ids = model.generate(input_ids, max_new_tokens=750, temperature=0.8, repetition_penalty=1.1, do_sample=True, eos_token_id=tokenizer.eos_token_id) response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True, clean_up_tokenization_space=True) print(f"Response: {response}") ``` ## Inference Code for Function Calling: All code for utilizing, parsing, and building function calling templates is available on our github: [https://github.com/NousResearch/Hermes-Function-Calling](https://github.com/NousResearch/Hermes-Function-Calling) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oi4CiGh50xmoviUQnh8R3.png) # Chat Interfaces When quantized versions of the model are released, I recommend using LM Studio for chatting with Hermes 2 Pro. It does not support function calling - for that use our github repo. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. In LM-Studio, simply select the ChatML Prefix on the settings side pane: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png) ## Quantized Versions: GGUF Versions Available Here: https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B-GGUF # How to cite: ```bibtext @misc{Hermes-2-Theta-Llama-3-8B, url={[https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B][NousResearch/Hermes-2-Theta-Llama-3-8B](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B))}, title={Hermes-2-Theta-Llama-3-8B}, author={"Teknium", Charles Goddard, "interstellarninja", "theemozilla", "karan4d", "huemin_art"} } ```
null
Non_BioNLP
**Exllamav2** quant (**exl2** / **6.0 bpw**) made with ExLlamaV2 v0.0.21 Other EXL2 quants: | **Quant** | **Model Size** | **lm_head** | | ----- | ---------- | ------- | |<center>**[2.2](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-2_2bpw_exl2)**</center> | <center>3250 MB</center> | <center>6</center> | |<center>**[2.5](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-2_5bpw_exl2)**</center> | <center>3479 MB</center> | <center>6</center> | |<center>**[3.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-3_0bpw_exl2)**</center> | <center>3895 MB</center> | <center>6</center> | |<center>**[3.5](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-3_5bpw_exl2)**</center> | <center>4311 MB</center> | <center>6</center> | |<center>**[3.75](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-3_75bpw_exl2)**</center> | <center>4519 MB</center> | <center>6</center> | |<center>**[4.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-4_0bpw_exl2)**</center> | <center>4726 MB</center> | <center>6</center> | |<center>**[4.25](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-4_25bpw_exl2)**</center> | <center>4933 MB</center> | <center>6</center> | |<center>**[5.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-5_0bpw_exl2)**</center> | <center>5558 MB</center> | <center>6</center> | |<center>**[6.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-6_0bpw_exl2)**</center> | <center>6497 MB</center> | <center>8</center> | |<center>**[6.5](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-6_5bpw_exl2)**</center> | <center>6910 MB</center> | <center>8</center> | |<center>**[8.0](https://huggingface.co/Zoyd/NousResearch_Hermes-2-Theta-Llama-3-8B-8_0bpw_exl2)**</center> | <center>8049 MB</center> | <center>8</center> | # - Hermes-2 Θ Llama-3 8B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/HQnQmNM1L3KXGhp0wUzHH.png) ## Model Description Hermes-2 Θ (Theta) is the first experimental merged model released by [Nous Research](https://nousresearch.com/), in collaboration with Charles Goddard at [Arcee](https://www.arcee.ai/), the team behind MergeKit. Hermes-2 Θ is a merged and then further RLHF'ed version our excellent Hermes 2 Pro model and Meta's Llama-3 Instruct model to form a new model, Hermes-2 Θ, combining the best of both worlds of each model. ## Example Outputs ### Create New Mythos: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/dvKhnSvHdx4nTQIqB9Lpv.png) ### Chat with a Meta-Cognitive Entity ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/GwdCqowE6GQylineqehhx.png) ### Ask for a structured JSON output: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/us72aL9gwUXdqSHetRVRV.png) # Prompt Format Hermes 2 Θ uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model. This is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns. This format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI. Prompt with system instruction (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are "Hermes 2", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|> <|im_start|>user Hello, who are you?<|im_end|> <|im_start|>assistant Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|> ``` This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are Hermes 2."}, {"role": "user", "content": "Hello, who are you?"} ] gen_input = tokenizer.apply_chat_template(messages, return_tensors="pt") model.generate(**gen_input) ``` When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure that the model continues with an assistant response. To utilize the prompt format without a system prompt, simply leave the line out. ## Prompt Format for Function Calling Our model was trained on specific system prompts and structures for Function Calling. While the system prompt looks complicated, we have created a GitHub repo containing code to easily build these based on real python functions. You should use the system role with this message, followed by a function signature json as this example shows here. ``` <|im_start|>system You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> {"type": "function", "function": {"name": "get_stock_fundamentals", "description": "get_stock_fundamentals(symbol: str) -> dict - Get fundamental data for a given stock symbol using yfinance API.\\n\\n Args:\\n symbol (str): The stock symbol.\\n\\n Returns:\\n dict: A dictionary containing fundamental data.\\n Keys:\\n - \'symbol\': The stock symbol.\\n - \'company_name\': The long name of the company.\\n - \'sector\': The sector to which the company belongs.\\n - \'industry\': The industry to which the company belongs.\\n - \'market_cap\': The market capitalization of the company.\\n - \'pe_ratio\': The forward price-to-earnings ratio.\\n - \'pb_ratio\': The price-to-book ratio.\\n - \'dividend_yield\': The dividend yield.\\n - \'eps\': The trailing earnings per share.\\n - \'beta\': The beta value of the stock.\\n - \'52_week_high\': The 52-week high price of the stock.\\n - \'52_week_low\': The 52-week low price of the stock.", "parameters": {"type": "object", "properties": {"symbol": {"type": "string"}}, "required": ["symbol"]}}} </tools> Use the following pydantic model json schema for each tool call you will make: {"properties": {"arguments": {"title": "Arguments", "type": "object"}, "name": {"title": "Name", "type": "string"}}, "required": ["arguments", "name"], "title": "FunctionCall", "type": "object"} For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows: <tool_call> {"arguments": <args-dict>, "name": <function-name>} </tool_call><|im_end|> ``` To complete the function call, create a user prompt that follows the above system prompt, like so: ``` <|im_start|>user Fetch the stock fundamentals data for Tesla (TSLA)<|im_end|> ``` The model will then generate a tool call, which your inference code must parse, and plug into a function (see example inference code here: https://github.com/NousResearch/Hermes-Function-Calling): ``` <|im_start|>assistant <tool_call> {"arguments": {"symbol": "TSLA"}, "name": "get_stock_fundamentals"} </tool_call><|im_end|> ``` Once you parse the tool call, call the api and get the returned values for the call, and pass it back in as a new role, `tool` like so: ``` <|im_start|>tool <tool_response> {"name": "get_stock_fundamentals", "content": {'symbol': 'TSLA', 'company_name': 'Tesla, Inc.', 'sector': 'Consumer Cyclical', 'industry': 'Auto Manufacturers', 'market_cap': 611384164352, 'pe_ratio': 49.604652, 'pb_ratio': 9.762013, 'dividend_yield': None, 'eps': 4.3, 'beta': 2.427, '52_week_high': 299.29, '52_week_low': 152.37}} </tool_response> <|im_end|> ``` The assistant will then read in that data from the function's response, and generate a natural language response: ``` <|im_start|>assistant The stock fundamentals data for Tesla (TSLA) are as follows: - **Symbol**: TSLA - **Company Name**: Tesla, Inc. - **Sector**: Consumer Cyclical - **Industry**: Auto Manufacturers - **Market Capitalization**: $566,160,130,480 - **Forward Price-to-Earnings Ratio (PE Ratio)**: 42.73 - **Price-to-Book Ratio (PB Ratio)**: 9.04 - **Dividend Yield**: N/A - **Trailing Earnings Per Share (EPS)**: $4.3 - **Beta Value of the Stock**: 2.42 - **52-Week High Price of the Stock**: $299.29 - **52-Week Low Price of the Stock**: $152.37 This information provides a snapshot of Tesla's financial position and performance based on the fundamental data obtained from the yfinance API. It shows that Tesla has a substantial market capitalization and a relatively high P/E and P/B ratio compared to other stocks in its industry. The company does not pay a dividend at the moment, which is reflected by a 'Dividend Yield' of 'None'. The Beta value indicates that Tesla's stock has a moderate level of volatility relative to the market. The 52-week high and low prices give an idea of the stock's range over the past year. This data can be useful when assessing investment opportunities and making investment decisions.<|im_end|> ``` ## Prompt Format for JSON Mode / Structured Outputs Our model was also trained on a specific system prompt for Structured Outputs, which should respond with **only** a json object response, in a specific json schema. Your schema can be made from a pydantic object using our codebase, with the standalone script `jsonmode.py` available here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main ``` <|im_start|>system You are a helpful assistant that answers in JSON. Here's the json schema you must adhere to:\n<schema>\n{schema}\n</schema><|im_end|> ``` Given the {schema} that you provide, it should follow the format of that json to create it's response, all you have to do is give a typical user prompt, and it will respond in JSON. # Benchmarks ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/suBbCUIxpcRvhCv6-DBDQ.png) ## GPT4All: ``` |    Task     |Version| Metric |Value |   |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge|      0|acc     |0.5529|±  |0.0145| |             |       |acc_norm|0.5870|±  |0.0144| |arc_easy     |      0|acc     |0.8371|±  |0.0076| |             |       |acc_norm|0.8144|±  |0.0080| |boolq        |      1|acc     |0.8599|±  |0.0061| |hellaswag    |      0|acc     |0.6133|±  |0.0049| |             |       |acc_norm|0.7989|±  |0.0040| |openbookqa   |      0|acc     |0.3940|±  |0.0219| |             |       |acc_norm|0.4680|±  |0.0223| |piqa         |      0|acc     |0.8063|±  |0.0092| |             |       |acc_norm|0.8156|±  |0.0090| |winogrande   |      0|acc     |0.7372|±  |0.0124| ``` Average: 72.59 ## AGIEval: ``` |             Task             |Version| Metric |Value |   |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat              |      0|acc     |0.2441|±  |0.0270| |                              |       |acc_norm|0.2441|±  |0.0270| |agieval_logiqa_en             |      0|acc     |0.3687|±  |0.0189| |                              |       |acc_norm|0.3840|±  |0.0191| |agieval_lsat_ar               |      0|acc     |0.2304|±  |0.0278| |                              |       |acc_norm|0.2174|±  |0.0273| |agieval_lsat_lr               |      0|acc     |0.5471|±  |0.0221| |                              |       |acc_norm|0.5373|±  |0.0221| |agieval_lsat_rc               |      0|acc     |0.6617|±  |0.0289| |                              |       |acc_norm|0.6357|±  |0.0294| |agieval_sat_en                |      0|acc     |0.7670|±  |0.0295| |                              |       |acc_norm|0.7379|±  |0.0307| |agieval_sat_en_without_passage|      0|acc     |0.4417|±  |0.0347| |                              |       |acc_norm|0.4223|±  |0.0345| |agieval_sat_math              |      0|acc     |0.4000|±  |0.0331| |                              |       |acc_norm|0.3455|±  |0.0321| ``` Average: 44.05 ## BigBench: ``` |                      Task                      |Version|       Metric        |Value |   |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |bigbench_causal_judgement                       |      0|multiple_choice_grade|0.6000|±  |0.0356| |bigbench_date_understanding                     |      0|multiple_choice_grade|0.6585|±  |0.0247| |bigbench_disambiguation_qa                      |      0|multiple_choice_grade|0.3178|±  |0.0290| |bigbench_geometric_shapes                       |      0|multiple_choice_grade|0.2340|±  |0.0224| |                                                |       |exact_str_match      |0.0000|±  |0.0000| |bigbench_logical_deduction_five_objects         |      0|multiple_choice_grade|0.2980|±  |0.0205| |bigbench_logical_deduction_seven_objects        |      0|multiple_choice_grade|0.2057|±  |0.0153| |bigbench_logical_deduction_three_objects        |      0|multiple_choice_grade|0.5367|±  |0.0288| |bigbench_movie_recommendation                   |      0|multiple_choice_grade|0.4040|±  |0.0220| |bigbench_navigate                               |      0|multiple_choice_grade|0.4970|±  |0.0158| |bigbench_reasoning_about_colored_objects        |      0|multiple_choice_grade|0.7075|±  |0.0102| |bigbench_ruin_names                             |      0|multiple_choice_grade|0.4821|±  |0.0236| |bigbench_salient_translation_error_detection    |      0|multiple_choice_grade|0.2295|±  |0.0133| |bigbench_snarks                                 |      0|multiple_choice_grade|0.6906|±  |0.0345| |bigbench_sports_understanding                   |      0|multiple_choice_grade|0.5375|±  |0.0159| |bigbench_temporal_sequences                     |      0|multiple_choice_grade|0.6270|±  |0.0153| |bigbench_tracking_shuffled_objects_five_objects |      0|multiple_choice_grade|0.2216|±  |0.0118| |bigbench_tracking_shuffled_objects_seven_objects|      0|multiple_choice_grade|0.1594|±  |0.0088| |bigbench_tracking_shuffled_objects_three_objects|      0|multiple_choice_grade|0.5367|±  |0.0288| ``` Average: 44.13 **IFEval**: 72.64 **MT_Bench**: Turn 1 - 8.3875, Turn 2 - 8.00625, Average - 8.196875 # Inference Code Here is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM) Note: To use function calling, you should see the github repo above. ```python # Code to inference Hermes with HF Transformers # Requires pytorch, transformers, bitsandbytes, sentencepiece, protobuf, and flash-attn packages import torch from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM import bitsandbytes, flash_attn tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Theta-Llama-3-8B', trust_remote_code=True) model = LlamaForCausalLM.from_pretrained( "NousResearch/Hermes-2-Theta-Llama-3-8B", torch_dtype=torch.float16, device_map="auto", load_in_8bit=False, load_in_4bit=True, use_flash_attention_2=True ) prompts = [ """<|im_start|>system You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.<|im_end|> <|im_start|>user Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world.<|im_end|> <|im_start|>assistant""", ] for chat in prompts: print(chat) input_ids = tokenizer(chat, return_tensors="pt").input_ids.to("cuda") generated_ids = model.generate(input_ids, max_new_tokens=750, temperature=0.8, repetition_penalty=1.1, do_sample=True, eos_token_id=tokenizer.eos_token_id) response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True, clean_up_tokenization_space=True) print(f"Response: {response}") ``` ## Inference Code for Function Calling: All code for utilizing, parsing, and building function calling templates is available on our github: [https://github.com/NousResearch/Hermes-Function-Calling](https://github.com/NousResearch/Hermes-Function-Calling) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oi4CiGh50xmoviUQnh8R3.png) # Chat Interfaces When quantized versions of the model are released, I recommend using LM Studio for chatting with Hermes 2 Pro. It does not support function calling - for that use our github repo. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. In LM-Studio, simply select the ChatML Prefix on the settings side pane: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png) ## Quantized Versions: GGUF Versions Available Here: https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B-GGUF # How to cite: ```bibtext @misc{Hermes-2-Theta-Llama-3-8B, url={[https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B][NousResearch/Hermes-2-Theta-Llama-3-8B](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B))}, title={Hermes-2-Theta-Llama-3-8B}, author={"Teknium", Charles Goddard, "interstellarninja", "theemozilla", "karan4d", "huemin_art"} } ```
{"base_model": "NousResearch/Hermes-2-Pro-Llama-3-8B", "datasets": ["teknium/OpenHermes-2.5"], "language": ["en"], "license": "apache-2.0", "tags": ["Llama-3", "instruct", "finetune", "chatml", "DPO", "RLHF", "gpt4", "synthetic data", "distillation", "function calling", "json mode", "axolotl", "merges"], "widget": [{"example_title": "Hermes 2 Pro Llama-3 Instruct Merge", "messages": [{"role": "system", "content": "You are a sentient, superintelligent artificial general intelligence, here to teach and assist me."}, {"role": "user", "content": "Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world."}]}], "model-index": [{"name": "Hermes-2-Pro-Llama-3-Instruct-8B-Merge", "results": []}]}
task
[ "TRANSLATION" ]
41,401
facebook/fasttext-pms-vectors
facebook
feature-extraction
[ "fasttext", "feature-extraction", "pms", "arxiv:1607.04606", "arxiv:1802.06893", "arxiv:1607.01759", "arxiv:1612.03651", "license:cc-by-sa-3.0", "region:us" ]
2023-03-21T00:03:32Z
2023-06-03T22:15:16+00:00
0
0
--- language: pms library_name: fasttext license: cc-by-sa-3.0 tags: - feature-extraction widget: - text: apple example_title: apple --- # fastText (Piedmontese) fastText is an open-source, free, lightweight library that allows users to learn text representations and text classifiers. It works on standard, generic hardware. Models can later be reduced in size to even fit on mobile devices. It was introduced in [this paper](https://arxiv.org/abs/1607.04606). The official website can be found [here](https://fasttext.cc/). ## Model description fastText is a library for efficient learning of word representations and sentence classification. fastText is designed to be simple to use for developers, domain experts, and students. It's dedicated to text classification and learning word representations, and was designed to allow for quick model iteration and refinement without specialized hardware. fastText models can be trained on more than a billion words on any multicore CPU in less than a few minutes. It includes pre-trained models learned on Wikipedia and in over 157 different languages. fastText can be used as a command line, linked to a C++ application, or used as a library for use cases from experimentation and prototyping to production. ## Intended uses & limitations You can use pre-trained word vectors for text classification or language identification. See the [tutorials](https://fasttext.cc/docs/en/supervised-tutorial.html) and [resources](https://fasttext.cc/docs/en/english-vectors.html) on its official website to look for tasks that interest you. ### How to use Here is how to load and use a pre-trained vectors ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-pms-vectors", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.words ['the', 'of', 'and', 'to', 'in', 'a', 'that', 'is', ...] >>> len(model.words) 145940 >>> model['bread'] array([ 4.89417791e-01, 1.60882145e-01, -2.25947708e-01, -2.94273376e-01, -1.04577184e-01, 1.17962055e-01, 1.34821936e-01, -2.41778508e-01, ...]) ``` Here is how to use this model to query nearest neighbors of an English word vector: ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-en-nearest-neighbors", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.get_nearest_neighbors("bread", k=5) [(0.5641006231307983, 'butter'), (0.48875734210014343, 'loaf'), (0.4491206705570221, 'eat'), (0.42444291710853577, 'food'), (0.4229326844215393, 'cheese')] ``` Here is how to use this model to detect the language of a given text: ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-language-identification", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.predict("Hello, world!") (('__label__eng_Latn',), array([0.81148803])) >>> model.predict("Hello, world!", k=5) (('__label__eng_Latn', '__label__vie_Latn', '__label__nld_Latn', '__label__pol_Latn', '__label__deu_Latn'), array([0.61224753, 0.21323682, 0.09696738, 0.01359863, 0.01319415])) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions. Cosine similarity can be used to measure the similarity between two different word vectors. If two two vectors are identical, the cosine similarity will be 1. For two completely unrelated vectors, the value will be 0. If two vectors have an opposite relationship, the value will be -1. ```python >>> import numpy as np >>> def cosine_similarity(word1, word2): >>> return np.dot(model[word1], model[word2]) / (np.linalg.norm(model[word1]) * np.linalg.norm(model[word2])) >>> cosine_similarity("man", "boy") 0.061653383 >>> cosine_similarity("man", "ceo") 0.11989131 >>> cosine_similarity("woman", "ceo") -0.08834904 ``` ## Training data Pre-trained word vectors for 157 languages were trained on [Common Crawl](http://commoncrawl.org/) and [Wikipedia](https://www.wikipedia.org/) using fastText. These models were trained using CBOW with position-weights, in dimension 300, with character n-grams of length 5, a window of size 5 and 10 negatives. We also distribute three new word analogy datasets, for French, Hindi and Polish. ## Training procedure ### Tokenization We used the [Stanford word segmenter](https://nlp.stanford.edu/software/segmenter.html) for Chinese, [Mecab](http://taku910.github.io/mecab/) for Japanese and [UETsegmenter](https://github.com/phongnt570/UETsegmenter) for Vietnamese. For languages using the Latin, Cyrillic, Hebrew or Greek scripts, we used the tokenizer from the [Europarl](https://www.statmt.org/europarl/) preprocessing tools. For the remaining languages, we used the ICU tokenizer. More information about the training of these models can be found in the article [Learning Word Vectors for 157 Languages](https://arxiv.org/abs/1802.06893). ### License The word vectors are distributed under the [*Creative Commons Attribution-Share-Alike License 3.0*](https://creativecommons.org/licenses/by-sa/3.0/). ### Evaluation datasets The analogy evaluation datasets described in the paper are available here: [French](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-fr.txt), [Hindi](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-hi.txt), [Polish](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-pl.txt). ### BibTeX entry and citation info Please cite [1] if using this code for learning word representations or [2] if using for text classification. [1] P. Bojanowski\*, E. Grave\*, A. Joulin, T. Mikolov, [*Enriching Word Vectors with Subword Information*](https://arxiv.org/abs/1607.04606) ```markup @article{bojanowski2016enriching, title={Enriching Word Vectors with Subword Information}, author={Bojanowski, Piotr and Grave, Edouard and Joulin, Armand and Mikolov, Tomas}, journal={arXiv preprint arXiv:1607.04606}, year={2016} } ``` [2] A. Joulin, E. Grave, P. Bojanowski, T. Mikolov, [*Bag of Tricks for Efficient Text Classification*](https://arxiv.org/abs/1607.01759) ```markup @article{joulin2016bag, title={Bag of Tricks for Efficient Text Classification}, author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Mikolov, Tomas}, journal={arXiv preprint arXiv:1607.01759}, year={2016} } ``` [3] A. Joulin, E. Grave, P. Bojanowski, M. Douze, H. Jégou, T. Mikolov, [*FastText.zip: Compressing text classification models*](https://arxiv.org/abs/1612.03651) ```markup @article{joulin2016fasttext, title={FastText.zip: Compressing text classification models}, author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Douze, Matthijs and J{'e}gou, H{'e}rve and Mikolov, Tomas}, journal={arXiv preprint arXiv:1612.03651}, year={2016} } ``` If you use these word vectors, please cite the following paper: [4] E. Grave\*, P. Bojanowski\*, P. Gupta, A. Joulin, T. Mikolov, [*Learning Word Vectors for 157 Languages*](https://arxiv.org/abs/1802.06893) ```markup @inproceedings{grave2018learning, title={Learning Word Vectors for 157 Languages}, author={Grave, Edouard and Bojanowski, Piotr and Gupta, Prakhar and Joulin, Armand and Mikolov, Tomas}, booktitle={Proceedings of the International Conference on Language Resources and Evaluation (LREC 2018)}, year={2018} } ``` (\* These authors contributed equally.)
null
Non_BioNLP
# fastText (Piedmontese) fastText is an open-source, free, lightweight library that allows users to learn text representations and text classifiers. It works on standard, generic hardware. Models can later be reduced in size to even fit on mobile devices. It was introduced in [this paper](https://arxiv.org/abs/1607.04606). The official website can be found [here](https://fasttext.cc/). ## Model description fastText is a library for efficient learning of word representations and sentence classification. fastText is designed to be simple to use for developers, domain experts, and students. It's dedicated to text classification and learning word representations, and was designed to allow for quick model iteration and refinement without specialized hardware. fastText models can be trained on more than a billion words on any multicore CPU in less than a few minutes. It includes pre-trained models learned on Wikipedia and in over 157 different languages. fastText can be used as a command line, linked to a C++ application, or used as a library for use cases from experimentation and prototyping to production. ## Intended uses & limitations You can use pre-trained word vectors for text classification or language identification. See the [tutorials](https://fasttext.cc/docs/en/supervised-tutorial.html) and [resources](https://fasttext.cc/docs/en/english-vectors.html) on its official website to look for tasks that interest you. ### How to use Here is how to load and use a pre-trained vectors ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-pms-vectors", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.words ['the', 'of', 'and', 'to', 'in', 'a', 'that', 'is', ...] >>> len(model.words) 145940 >>> model['bread'] array([ 4.89417791e-01, 1.60882145e-01, -2.25947708e-01, -2.94273376e-01, -1.04577184e-01, 1.17962055e-01, 1.34821936e-01, -2.41778508e-01, ...]) ``` Here is how to use this model to query nearest neighbors of an English word vector: ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-en-nearest-neighbors", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.get_nearest_neighbors("bread", k=5) [(0.5641006231307983, 'butter'), (0.48875734210014343, 'loaf'), (0.4491206705570221, 'eat'), (0.42444291710853577, 'food'), (0.4229326844215393, 'cheese')] ``` Here is how to use this model to detect the language of a given text: ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-language-identification", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.predict("Hello, world!") (('__label__eng_Latn',), array([0.81148803])) >>> model.predict("Hello, world!", k=5) (('__label__eng_Latn', '__label__vie_Latn', '__label__nld_Latn', '__label__pol_Latn', '__label__deu_Latn'), array([0.61224753, 0.21323682, 0.09696738, 0.01359863, 0.01319415])) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions. Cosine similarity can be used to measure the similarity between two different word vectors. If two two vectors are identical, the cosine similarity will be 1. For two completely unrelated vectors, the value will be 0. If two vectors have an opposite relationship, the value will be -1. ```python >>> import numpy as np >>> def cosine_similarity(word1, word2): >>> return np.dot(model[word1], model[word2]) / (np.linalg.norm(model[word1]) * np.linalg.norm(model[word2])) >>> cosine_similarity("man", "boy") 0.061653383 >>> cosine_similarity("man", "ceo") 0.11989131 >>> cosine_similarity("woman", "ceo") -0.08834904 ``` ## Training data Pre-trained word vectors for 157 languages were trained on [Common Crawl](http://commoncrawl.org/) and [Wikipedia](https://www.wikipedia.org/) using fastText. These models were trained using CBOW with position-weights, in dimension 300, with character n-grams of length 5, a window of size 5 and 10 negatives. We also distribute three new word analogy datasets, for French, Hindi and Polish. ## Training procedure ### Tokenization We used the [Stanford word segmenter](https://nlp.stanford.edu/software/segmenter.html) for Chinese, [Mecab](http://taku910.github.io/mecab/) for Japanese and [UETsegmenter](https://github.com/phongnt570/UETsegmenter) for Vietnamese. For languages using the Latin, Cyrillic, Hebrew or Greek scripts, we used the tokenizer from the [Europarl](https://www.statmt.org/europarl/) preprocessing tools. For the remaining languages, we used the ICU tokenizer. More information about the training of these models can be found in the article [Learning Word Vectors for 157 Languages](https://arxiv.org/abs/1802.06893). ### License The word vectors are distributed under the [*Creative Commons Attribution-Share-Alike License 3.0*](https://creativecommons.org/licenses/by-sa/3.0/). ### Evaluation datasets The analogy evaluation datasets described in the paper are available here: [French](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-fr.txt), [Hindi](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-hi.txt), [Polish](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-pl.txt). ### BibTeX entry and citation info Please cite [1] if using this code for learning word representations or [2] if using for text classification. [1] P. Bojanowski\*, E. Grave\*, A. Joulin, T. Mikolov, [*Enriching Word Vectors with Subword Information*](https://arxiv.org/abs/1607.04606) ```markup @article{bojanowski2016enriching, title={Enriching Word Vectors with Subword Information}, author={Bojanowski, Piotr and Grave, Edouard and Joulin, Armand and Mikolov, Tomas}, journal={arXiv preprint arXiv:1607.04606}, year={2016} } ``` [2] A. Joulin, E. Grave, P. Bojanowski, T. Mikolov, [*Bag of Tricks for Efficient Text Classification*](https://arxiv.org/abs/1607.01759) ```markup @article{joulin2016bag, title={Bag of Tricks for Efficient Text Classification}, author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Mikolov, Tomas}, journal={arXiv preprint arXiv:1607.01759}, year={2016} } ``` [3] A. Joulin, E. Grave, P. Bojanowski, M. Douze, H. Jégou, T. Mikolov, [*FastText.zip: Compressing text classification models*](https://arxiv.org/abs/1612.03651) ```markup @article{joulin2016fasttext, title={FastText.zip: Compressing text classification models}, author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Douze, Matthijs and J{'e}gou, H{'e}rve and Mikolov, Tomas}, journal={arXiv preprint arXiv:1612.03651}, year={2016} } ``` If you use these word vectors, please cite the following paper: [4] E. Grave\*, P. Bojanowski\*, P. Gupta, A. Joulin, T. Mikolov, [*Learning Word Vectors for 157 Languages*](https://arxiv.org/abs/1802.06893) ```markup @inproceedings{grave2018learning, title={Learning Word Vectors for 157 Languages}, author={Grave, Edouard and Bojanowski, Piotr and Gupta, Prakhar and Joulin, Armand and Mikolov, Tomas}, booktitle={Proceedings of the International Conference on Language Resources and Evaluation (LREC 2018)}, year={2018} } ``` (\* These authors contributed equally.)
{"language": "pms", "library_name": "fasttext", "license": "cc-by-sa-3.0", "tags": ["feature-extraction"], "widget": [{"text": "apple", "example_title": "apple"}]}
task
[ "TEXT_CLASSIFICATION" ]
41,402
pinzhenchen/sft-lora-ru-pythia-12b
pinzhenchen
null
[ "generation", "question answering", "instruction tuning", "ru", "arxiv:2309.08958", "license:cc-by-nc-4.0", "region:us" ]
2024-03-05T23:54:10Z
2024-03-05T23:54:14+00:00
0
0
--- language: - ru license: cc-by-nc-4.0 tags: - generation - question answering - instruction tuning --- ### Model Description This HF repository contains base LLMs instruction tuned (SFT) with LoRA and then used to study whether monolingual or multilingual instruction tuning is more favourable. * [GitHub](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main) * [Paper](https://arxiv.org/abs/2309.08958) #### Instruction tuning details * Base model: [EleutherAI/pythia-12b-deduped](https://huggingface.co/EleutherAI/pythia-12b-deduped) * Instruction tuning language: Russian * Training method: LoRA. * LoRA details: rank=8, alpha=16, target modules={key, query, value}. * Best checkpoint: best cross-entropy on a validation set, trained for 5 epochs. * Dataset: machine-translated from [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). You can download our data [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/training-data). #### Usage The model checkpoint should be loaded with the base model together using `transformers` and `peft` libraries. Please refer to our Github repository [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/loraft) for inference and training instructions. #### Citation ``` @inproceedings{chen-etal-2024-monolingual, title="Monolingual or multilingual instruction tuning: Which makes a better {Alpaca}", author="Pinzhen Chen and Shaoxiong Ji and Nikolay Bogoychev and Andrey Kutuzov and Barry Haddow and Kenneth Heafield", year="2024", booktitle = "Findings of the Association for Computational Linguistics: EACL 2024", } ```
null
Non_BioNLP
### Model Description This HF repository contains base LLMs instruction tuned (SFT) with LoRA and then used to study whether monolingual or multilingual instruction tuning is more favourable. * [GitHub](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main) * [Paper](https://arxiv.org/abs/2309.08958) #### Instruction tuning details * Base model: [EleutherAI/pythia-12b-deduped](https://huggingface.co/EleutherAI/pythia-12b-deduped) * Instruction tuning language: Russian * Training method: LoRA. * LoRA details: rank=8, alpha=16, target modules={key, query, value}. * Best checkpoint: best cross-entropy on a validation set, trained for 5 epochs. * Dataset: machine-translated from [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). You can download our data [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/training-data). #### Usage The model checkpoint should be loaded with the base model together using `transformers` and `peft` libraries. Please refer to our Github repository [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/loraft) for inference and training instructions. #### Citation ``` @inproceedings{chen-etal-2024-monolingual, title="Monolingual or multilingual instruction tuning: Which makes a better {Alpaca}", author="Pinzhen Chen and Shaoxiong Ji and Nikolay Bogoychev and Andrey Kutuzov and Barry Haddow and Kenneth Heafield", year="2024", booktitle = "Findings of the Association for Computational Linguistics: EACL 2024", } ```
{"language": ["ru"], "license": "cc-by-nc-4.0", "tags": ["generation", "question answering", "instruction tuning"]}
task
[ "QUESTION_ANSWERING" ]
41,403
gokuls/bert_uncased_L-10_H-768_A-12_massive
gokuls
text-classification
[ "transformers", "pytorch", "bert", "text-classification", "generated_from_trainer", "dataset:massive", "base_model:google/bert_uncased_L-10_H-768_A-12", "base_model:finetune:google/bert_uncased_L-10_H-768_A-12", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-06T18:35:58Z
2023-10-06T18:42:35+00:00
5
0
--- base_model: google/bert_uncased_L-10_H-768_A-12 datasets: - massive license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: bert_uncased_L-10_H-768_A-12_massive results: - task: type: text-classification name: Text Classification dataset: name: massive type: massive config: en-US split: validation args: en-US metrics: - type: accuracy value: 0.8908017707820954 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert_uncased_L-10_H-768_A-12_massive This model is a fine-tuned version of [google/bert_uncased_L-10_H-768_A-12](https://huggingface.co/google/bert_uncased_L-10_H-768_A-12) on the massive dataset. It achieves the following results on the evaluation set: - Loss: 0.6424 - Accuracy: 0.8908 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 33 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.8931 | 1.0 | 180 | 0.7740 | 0.8342 | | 0.5861 | 2.0 | 360 | 0.5441 | 0.8667 | | 0.312 | 3.0 | 540 | 0.4858 | 0.8805 | | 0.1828 | 4.0 | 720 | 0.4977 | 0.8849 | | 0.1183 | 5.0 | 900 | 0.5322 | 0.8824 | | 0.077 | 6.0 | 1080 | 0.5617 | 0.8780 | | 0.0491 | 7.0 | 1260 | 0.5901 | 0.8810 | | 0.0328 | 8.0 | 1440 | 0.6181 | 0.8775 | | 0.0216 | 9.0 | 1620 | 0.6174 | 0.8869 | | 0.0159 | 10.0 | 1800 | 0.6278 | 0.8869 | | 0.0116 | 11.0 | 1980 | 0.6228 | 0.8888 | | 0.0083 | 12.0 | 2160 | 0.6352 | 0.8893 | | 0.0058 | 13.0 | 2340 | 0.6443 | 0.8893 | | 0.0056 | 14.0 | 2520 | 0.6424 | 0.8908 | | 0.0047 | 15.0 | 2700 | 0.6462 | 0.8893 | ### Framework versions - Transformers 4.34.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.14.5 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert_uncased_L-10_H-768_A-12_massive This model is a fine-tuned version of [google/bert_uncased_L-10_H-768_A-12](https://huggingface.co/google/bert_uncased_L-10_H-768_A-12) on the massive dataset. It achieves the following results on the evaluation set: - Loss: 0.6424 - Accuracy: 0.8908 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 33 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.8931 | 1.0 | 180 | 0.7740 | 0.8342 | | 0.5861 | 2.0 | 360 | 0.5441 | 0.8667 | | 0.312 | 3.0 | 540 | 0.4858 | 0.8805 | | 0.1828 | 4.0 | 720 | 0.4977 | 0.8849 | | 0.1183 | 5.0 | 900 | 0.5322 | 0.8824 | | 0.077 | 6.0 | 1080 | 0.5617 | 0.8780 | | 0.0491 | 7.0 | 1260 | 0.5901 | 0.8810 | | 0.0328 | 8.0 | 1440 | 0.6181 | 0.8775 | | 0.0216 | 9.0 | 1620 | 0.6174 | 0.8869 | | 0.0159 | 10.0 | 1800 | 0.6278 | 0.8869 | | 0.0116 | 11.0 | 1980 | 0.6228 | 0.8888 | | 0.0083 | 12.0 | 2160 | 0.6352 | 0.8893 | | 0.0058 | 13.0 | 2340 | 0.6443 | 0.8893 | | 0.0056 | 14.0 | 2520 | 0.6424 | 0.8908 | | 0.0047 | 15.0 | 2700 | 0.6462 | 0.8893 | ### Framework versions - Transformers 4.34.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.14.5 - Tokenizers 0.14.1
{"base_model": "google/bert_uncased_L-10_H-768_A-12", "datasets": ["massive"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "bert_uncased_L-10_H-768_A-12_massive", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "massive", "type": "massive", "config": "en-US", "split": "validation", "args": "en-US"}, "metrics": [{"type": "accuracy", "value": 0.8908017707820954, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,404
Ekvargas/autotrain-hnrv5-j030r
Ekvargas
text-classification
[ "transformers", "safetensors", "bert", "text-classification", "autotrain", "dataset:autotrain-hnrv5-j030r/autotrain-data", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-02-21T18:07:44Z
2024-02-21T18:07:56+00:00
7
0
--- datasets: - autotrain-hnrv5-j030r/autotrain-data tags: - autotrain - text-classification widget: - text: I love AutoTrain --- # Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 0.5564461350440979 f1_macro: 0.39519542840233773 f1_micro: 0.8588559345612375 f1_weighted: 0.843983260016685 precision_macro: 0.445424724772177 precision_micro: 0.8588559345612375 precision_weighted: 0.8484718167022819 recall_macro: 0.3846237924133768 recall_micro: 0.8588559345612375 recall_weighted: 0.8588559345612375 accuracy: 0.8588559345612375
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 0.5564461350440979 f1_macro: 0.39519542840233773 f1_micro: 0.8588559345612375 f1_weighted: 0.843983260016685 precision_macro: 0.445424724772177 precision_micro: 0.8588559345612375 precision_weighted: 0.8484718167022819 recall_macro: 0.3846237924133768 recall_micro: 0.8588559345612375 recall_weighted: 0.8588559345612375 accuracy: 0.8588559345612375
{"datasets": ["autotrain-hnrv5-j030r/autotrain-data"], "tags": ["autotrain", "text-classification"], "widget": [{"text": "I love AutoTrain"}]}
task
[ "TEXT_CLASSIFICATION" ]
41,406
facebook/fasttext-sh-vectors
facebook
feature-extraction
[ "fasttext", "feature-extraction", "sh", "arxiv:1607.04606", "arxiv:1802.06893", "arxiv:1607.01759", "arxiv:1612.03651", "license:cc-by-sa-3.0", "region:us" ]
2023-03-21T03:29:15Z
2023-06-03T22:16:08+00:00
1
0
--- language: sh library_name: fasttext license: cc-by-sa-3.0 tags: - feature-extraction widget: - text: apple example_title: apple --- # fastText (Serbo-Croatian) fastText is an open-source, free, lightweight library that allows users to learn text representations and text classifiers. It works on standard, generic hardware. Models can later be reduced in size to even fit on mobile devices. It was introduced in [this paper](https://arxiv.org/abs/1607.04606). The official website can be found [here](https://fasttext.cc/). ## Model description fastText is a library for efficient learning of word representations and sentence classification. fastText is designed to be simple to use for developers, domain experts, and students. It's dedicated to text classification and learning word representations, and was designed to allow for quick model iteration and refinement without specialized hardware. fastText models can be trained on more than a billion words on any multicore CPU in less than a few minutes. It includes pre-trained models learned on Wikipedia and in over 157 different languages. fastText can be used as a command line, linked to a C++ application, or used as a library for use cases from experimentation and prototyping to production. ## Intended uses & limitations You can use pre-trained word vectors for text classification or language identification. See the [tutorials](https://fasttext.cc/docs/en/supervised-tutorial.html) and [resources](https://fasttext.cc/docs/en/english-vectors.html) on its official website to look for tasks that interest you. ### How to use Here is how to load and use a pre-trained vectors ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-sh-vectors", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.words ['the', 'of', 'and', 'to', 'in', 'a', 'that', 'is', ...] >>> len(model.words) 145940 >>> model['bread'] array([ 4.89417791e-01, 1.60882145e-01, -2.25947708e-01, -2.94273376e-01, -1.04577184e-01, 1.17962055e-01, 1.34821936e-01, -2.41778508e-01, ...]) ``` Here is how to use this model to query nearest neighbors of an English word vector: ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-en-nearest-neighbors", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.get_nearest_neighbors("bread", k=5) [(0.5641006231307983, 'butter'), (0.48875734210014343, 'loaf'), (0.4491206705570221, 'eat'), (0.42444291710853577, 'food'), (0.4229326844215393, 'cheese')] ``` Here is how to use this model to detect the language of a given text: ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-language-identification", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.predict("Hello, world!") (('__label__eng_Latn',), array([0.81148803])) >>> model.predict("Hello, world!", k=5) (('__label__eng_Latn', '__label__vie_Latn', '__label__nld_Latn', '__label__pol_Latn', '__label__deu_Latn'), array([0.61224753, 0.21323682, 0.09696738, 0.01359863, 0.01319415])) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions. Cosine similarity can be used to measure the similarity between two different word vectors. If two two vectors are identical, the cosine similarity will be 1. For two completely unrelated vectors, the value will be 0. If two vectors have an opposite relationship, the value will be -1. ```python >>> import numpy as np >>> def cosine_similarity(word1, word2): >>> return np.dot(model[word1], model[word2]) / (np.linalg.norm(model[word1]) * np.linalg.norm(model[word2])) >>> cosine_similarity("man", "boy") 0.061653383 >>> cosine_similarity("man", "ceo") 0.11989131 >>> cosine_similarity("woman", "ceo") -0.08834904 ``` ## Training data Pre-trained word vectors for 157 languages were trained on [Common Crawl](http://commoncrawl.org/) and [Wikipedia](https://www.wikipedia.org/) using fastText. These models were trained using CBOW with position-weights, in dimension 300, with character n-grams of length 5, a window of size 5 and 10 negatives. We also distribute three new word analogy datasets, for French, Hindi and Polish. ## Training procedure ### Tokenization We used the [Stanford word segmenter](https://nlp.stanford.edu/software/segmenter.html) for Chinese, [Mecab](http://taku910.github.io/mecab/) for Japanese and [UETsegmenter](https://github.com/phongnt570/UETsegmenter) for Vietnamese. For languages using the Latin, Cyrillic, Hebrew or Greek scripts, we used the tokenizer from the [Europarl](https://www.statmt.org/europarl/) preprocessing tools. For the remaining languages, we used the ICU tokenizer. More information about the training of these models can be found in the article [Learning Word Vectors for 157 Languages](https://arxiv.org/abs/1802.06893). ### License The word vectors are distributed under the [*Creative Commons Attribution-Share-Alike License 3.0*](https://creativecommons.org/licenses/by-sa/3.0/). ### Evaluation datasets The analogy evaluation datasets described in the paper are available here: [French](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-fr.txt), [Hindi](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-hi.txt), [Polish](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-pl.txt). ### BibTeX entry and citation info Please cite [1] if using this code for learning word representations or [2] if using for text classification. [1] P. Bojanowski\*, E. Grave\*, A. Joulin, T. Mikolov, [*Enriching Word Vectors with Subword Information*](https://arxiv.org/abs/1607.04606) ```markup @article{bojanowski2016enriching, title={Enriching Word Vectors with Subword Information}, author={Bojanowski, Piotr and Grave, Edouard and Joulin, Armand and Mikolov, Tomas}, journal={arXiv preprint arXiv:1607.04606}, year={2016} } ``` [2] A. Joulin, E. Grave, P. Bojanowski, T. Mikolov, [*Bag of Tricks for Efficient Text Classification*](https://arxiv.org/abs/1607.01759) ```markup @article{joulin2016bag, title={Bag of Tricks for Efficient Text Classification}, author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Mikolov, Tomas}, journal={arXiv preprint arXiv:1607.01759}, year={2016} } ``` [3] A. Joulin, E. Grave, P. Bojanowski, M. Douze, H. Jégou, T. Mikolov, [*FastText.zip: Compressing text classification models*](https://arxiv.org/abs/1612.03651) ```markup @article{joulin2016fasttext, title={FastText.zip: Compressing text classification models}, author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Douze, Matthijs and J{'e}gou, H{'e}rve and Mikolov, Tomas}, journal={arXiv preprint arXiv:1612.03651}, year={2016} } ``` If you use these word vectors, please cite the following paper: [4] E. Grave\*, P. Bojanowski\*, P. Gupta, A. Joulin, T. Mikolov, [*Learning Word Vectors for 157 Languages*](https://arxiv.org/abs/1802.06893) ```markup @inproceedings{grave2018learning, title={Learning Word Vectors for 157 Languages}, author={Grave, Edouard and Bojanowski, Piotr and Gupta, Prakhar and Joulin, Armand and Mikolov, Tomas}, booktitle={Proceedings of the International Conference on Language Resources and Evaluation (LREC 2018)}, year={2018} } ``` (\* These authors contributed equally.)
null
Non_BioNLP
# fastText (Serbo-Croatian) fastText is an open-source, free, lightweight library that allows users to learn text representations and text classifiers. It works on standard, generic hardware. Models can later be reduced in size to even fit on mobile devices. It was introduced in [this paper](https://arxiv.org/abs/1607.04606). The official website can be found [here](https://fasttext.cc/). ## Model description fastText is a library for efficient learning of word representations and sentence classification. fastText is designed to be simple to use for developers, domain experts, and students. It's dedicated to text classification and learning word representations, and was designed to allow for quick model iteration and refinement without specialized hardware. fastText models can be trained on more than a billion words on any multicore CPU in less than a few minutes. It includes pre-trained models learned on Wikipedia and in over 157 different languages. fastText can be used as a command line, linked to a C++ application, or used as a library for use cases from experimentation and prototyping to production. ## Intended uses & limitations You can use pre-trained word vectors for text classification or language identification. See the [tutorials](https://fasttext.cc/docs/en/supervised-tutorial.html) and [resources](https://fasttext.cc/docs/en/english-vectors.html) on its official website to look for tasks that interest you. ### How to use Here is how to load and use a pre-trained vectors ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-sh-vectors", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.words ['the', 'of', 'and', 'to', 'in', 'a', 'that', 'is', ...] >>> len(model.words) 145940 >>> model['bread'] array([ 4.89417791e-01, 1.60882145e-01, -2.25947708e-01, -2.94273376e-01, -1.04577184e-01, 1.17962055e-01, 1.34821936e-01, -2.41778508e-01, ...]) ``` Here is how to use this model to query nearest neighbors of an English word vector: ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-en-nearest-neighbors", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.get_nearest_neighbors("bread", k=5) [(0.5641006231307983, 'butter'), (0.48875734210014343, 'loaf'), (0.4491206705570221, 'eat'), (0.42444291710853577, 'food'), (0.4229326844215393, 'cheese')] ``` Here is how to use this model to detect the language of a given text: ```python >>> import fasttext >>> from huggingface_hub import hf_hub_download >>> model_path = hf_hub_download(repo_id="facebook/fasttext-language-identification", filename="model.bin") >>> model = fasttext.load_model(model_path) >>> model.predict("Hello, world!") (('__label__eng_Latn',), array([0.81148803])) >>> model.predict("Hello, world!", k=5) (('__label__eng_Latn', '__label__vie_Latn', '__label__nld_Latn', '__label__pol_Latn', '__label__deu_Latn'), array([0.61224753, 0.21323682, 0.09696738, 0.01359863, 0.01319415])) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions. Cosine similarity can be used to measure the similarity between two different word vectors. If two two vectors are identical, the cosine similarity will be 1. For two completely unrelated vectors, the value will be 0. If two vectors have an opposite relationship, the value will be -1. ```python >>> import numpy as np >>> def cosine_similarity(word1, word2): >>> return np.dot(model[word1], model[word2]) / (np.linalg.norm(model[word1]) * np.linalg.norm(model[word2])) >>> cosine_similarity("man", "boy") 0.061653383 >>> cosine_similarity("man", "ceo") 0.11989131 >>> cosine_similarity("woman", "ceo") -0.08834904 ``` ## Training data Pre-trained word vectors for 157 languages were trained on [Common Crawl](http://commoncrawl.org/) and [Wikipedia](https://www.wikipedia.org/) using fastText. These models were trained using CBOW with position-weights, in dimension 300, with character n-grams of length 5, a window of size 5 and 10 negatives. We also distribute three new word analogy datasets, for French, Hindi and Polish. ## Training procedure ### Tokenization We used the [Stanford word segmenter](https://nlp.stanford.edu/software/segmenter.html) for Chinese, [Mecab](http://taku910.github.io/mecab/) for Japanese and [UETsegmenter](https://github.com/phongnt570/UETsegmenter) for Vietnamese. For languages using the Latin, Cyrillic, Hebrew or Greek scripts, we used the tokenizer from the [Europarl](https://www.statmt.org/europarl/) preprocessing tools. For the remaining languages, we used the ICU tokenizer. More information about the training of these models can be found in the article [Learning Word Vectors for 157 Languages](https://arxiv.org/abs/1802.06893). ### License The word vectors are distributed under the [*Creative Commons Attribution-Share-Alike License 3.0*](https://creativecommons.org/licenses/by-sa/3.0/). ### Evaluation datasets The analogy evaluation datasets described in the paper are available here: [French](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-fr.txt), [Hindi](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-hi.txt), [Polish](https://dl.fbaipublicfiles.com/fasttext/word-analogies/questions-words-pl.txt). ### BibTeX entry and citation info Please cite [1] if using this code for learning word representations or [2] if using for text classification. [1] P. Bojanowski\*, E. Grave\*, A. Joulin, T. Mikolov, [*Enriching Word Vectors with Subword Information*](https://arxiv.org/abs/1607.04606) ```markup @article{bojanowski2016enriching, title={Enriching Word Vectors with Subword Information}, author={Bojanowski, Piotr and Grave, Edouard and Joulin, Armand and Mikolov, Tomas}, journal={arXiv preprint arXiv:1607.04606}, year={2016} } ``` [2] A. Joulin, E. Grave, P. Bojanowski, T. Mikolov, [*Bag of Tricks for Efficient Text Classification*](https://arxiv.org/abs/1607.01759) ```markup @article{joulin2016bag, title={Bag of Tricks for Efficient Text Classification}, author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Mikolov, Tomas}, journal={arXiv preprint arXiv:1607.01759}, year={2016} } ``` [3] A. Joulin, E. Grave, P. Bojanowski, M. Douze, H. Jégou, T. Mikolov, [*FastText.zip: Compressing text classification models*](https://arxiv.org/abs/1612.03651) ```markup @article{joulin2016fasttext, title={FastText.zip: Compressing text classification models}, author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Douze, Matthijs and J{'e}gou, H{'e}rve and Mikolov, Tomas}, journal={arXiv preprint arXiv:1612.03651}, year={2016} } ``` If you use these word vectors, please cite the following paper: [4] E. Grave\*, P. Bojanowski\*, P. Gupta, A. Joulin, T. Mikolov, [*Learning Word Vectors for 157 Languages*](https://arxiv.org/abs/1802.06893) ```markup @inproceedings{grave2018learning, title={Learning Word Vectors for 157 Languages}, author={Grave, Edouard and Bojanowski, Piotr and Gupta, Prakhar and Joulin, Armand and Mikolov, Tomas}, booktitle={Proceedings of the International Conference on Language Resources and Evaluation (LREC 2018)}, year={2018} } ``` (\* These authors contributed equally.)
{"language": "sh", "library_name": "fasttext", "license": "cc-by-sa-3.0", "tags": ["feature-extraction"], "widget": [{"text": "apple", "example_title": "apple"}]}
task
[ "TEXT_CLASSIFICATION" ]
41,407
IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1
IDEA-CCNL
visual-question-answering
[ "transformers", "pytorch", "blip-2", "feature-extraction", "image-to-text", "visual-question-answering", "ziya", "fengshenbang", "custom_code", "en", "zh", "arxiv:2210.08590", "license:gpl-3.0", "region:us" ]
2023-05-30T08:13:18Z
2023-06-07T07:41:49+00:00
79
57
--- language: - en - zh license: gpl-3.0 pipeline_tag: visual-question-answering tags: - image-to-text - visual-question-answering - ziya - fengshenbang inference: false --- # Ziya-BLIP2-14B-Visual-v1 - Main Page:[Fengshenbang](https://fengshenbang-lm.com/) - Github: [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM) # 姜子牙系列模型 - [Ziya-BLIP2-14B-Visual-v1](https://huggingface.co/IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1) - [Ziya-LLaMA-13B-v1.1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1.1) - [Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1) - [Ziya-LLaMA-7B-Reward](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-7B-Reward) - [Ziya-LLaMA-13B-Pretrain-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1) ## 简介 Brief Introduction Ziya-Visual多模态大模型基于[姜子牙通用大模型V1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1)训练,具有视觉问答和对话能力。今年3月份OpenAI发布具有识图能力的多模态大模型GPT-4,遗憾的是,时至今日绝大部分用户也都还没有拿到GPT-4输入图片的权限,Ziya-Visual参考了Mini-GPT4、LLaVA等优秀的开源实现,补齐了Ziya的识图能力,使中文用户群体可以体验到结合视觉和语言两大模态的大模型的卓越能力。 The Ziya-Visual multimodal Big Model is based on the [Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1) training and has visual question and answer and dialogue capabilities. In March this year, OpenAI released GPT-4, a multimodal big model with image recognition capabilities. Unfortunately, to date, the vast majority of users have not yet been given access to GPT-4 for image input, so Ziya-Visual refers to Mini-GPT4, LLaVA and other excellent open source implementations to complement Ziya's image recognition capabilities, so that the Chinese user community can experience the superior capabilities of a large model combining two modalities: visual and language. ## 软件依赖 ``` pip install torch==1.12.1 tokenizers==0.13.3 git+https://github.com/huggingface/transformers ``` ## 模型分类 Model Taxonomy | 需求 Demand | 任务 Task | 系列 Series | 模型 Model | 参数 Parameter | 额外 Extra | | :----: | :----: | :----: | :----: | :----: | :----: | | 多模态 Multi-Modal | 通用 General | 姜子牙-多模态 Ziya-Visual | BLIP2 LLaMA | 14B | English&Chinese | ## 模型信息 Model Information ### 效果展示 Showcase 这个例子展示了模型的识图能力、知识能力和创作能力。首先第一个问题中,模型识别出了图片中是电影《泰坦尼克号》的场景,并给出电影导演、发布时间、奖项成就等信息;第二个问题,模型根据用户的需求创作了一首现代爱情诗。 This example demonstrates the model's ability to read pictures, its knowledge and its ability to compose. Firstly in the first problem, the model identifies the picture as a scene from the movie Titanic and gives information about the movie director, release date and award achievements; in the second problem, the model creates a modern love poem based on the user's needs. ![](assets/taitanic.png) 这个例子展示了Ziya-Visual传统中国文化的识别和理解能力,模型识别出了中国画中的信息,在得到提示《清明上河图》之后给出了也给出画家张择端和北宋的历史背景。 This example demonstrates Ziya-Visual's ability to recognise and understand traditional Chinese culture. The model identifies information in Chinese paintings, and after getting the hint 'Qingming Shanghe Tu' gives also gives the historical context of the painter Zhang Zeduan and the Northern Song Dynasty. ![](assets/song_dynasty.png) 如果输入多张图片进行问答呢?Ziya-Visual也是胜任的,在这个例子中,Ziya-Visual展现了强大的多图和多轮交互能力,根据用户给的三张图片,叙述了一个女士在城市夜景中邂逅一对母子猫咪,并与之交谈、分别的小故事。 What if multiple images are entered for a quiz? Ziya-Visual is also up to the task. In this example, Ziya-Visual demonstrates the power of multiple images and multiple rounds of interaction, narrating a short story of a lady who encounters a mother and son cat in a city night scene, talks to them and separates them, based on three images given by the user. ![](assets/long_story.png) ### 训练 Training #### 数据 Train data 在中文视觉问答模型训练上,最大的问题就是数据量少,数据质量差。首先,封神榜团队在开源数据的基础上清洗、积累了一部分高质量数据;其次,我们通过翻译api得到了一部分英-中双语数据集,我们发现虽然翻译数据集会有“翻译腔”等问题,但是借助Ziya-v1的双语能力,最终的语言输出是能够缓解这一问题的;最后,团队结合BLIP,Grounded SAM等先进视觉技术,抽取图像描述的粗粒度信息和图像中物体、方位等细粒度信息,转化为语言描述形式,构造了一部分高质量数据。最终,Ziya-Visual构造了约2千万的优质数据进行训练。和Mini-GPT4、LLaVA一样,Ziya-Visual-v1主要是一个以数据为中心的工作,因此数据的数量和质量非常重要。 In the training of Chinese visual quiz model, the biggest problem is the small amount of data and poor data quality. Firstly, the team cleaned and accumulated some high-quality data based on open source data; secondly, we obtained a part of the English-Chinese bilingual dataset through translation api, and we found that although the translated dataset would have problems such as "translation accent", the final language output was able to alleviate this problem with Ziya-v1's bilingual capability. Finally, the team combined BLIP, Grounded SAM and other advanced vision technologies to extract coarse-grained information from image descriptions and fine-grained information such as objects and orientation in images, and transform them into linguistic descriptions to construct a portion of high-quality data. Ultimately, Ziya-Visual constructed approximately 20 million pieces of high quality data for training. Like Mini-GPT4 and LLaVA, Ziya-Visual-v1 is primarily a data-centric exercise, so the quantity and quality of data is very important. #### 模型结构 Model Architecture 为了更好的结合视觉预训练模型和LLM的能力,和Mini-GPT4和LLaVA工作一样,Ziya-Visual-v1的训练遵循了BLIP2提出的经典网络结构和两阶段训练的范式。而且我们在实验过程中发现,是否训练Vision Encoder的参数对于最终的生成效果影响很小。因此,在整体模型上,视觉处理部分我们继承了BLIP2的ViT + QFormer参数,LLM部分继承了Ziya-v1的权重,这两个部分权重都是冻结不参与训练的。我们主要训练的部分是视觉映射层(Projection Layer)。第一阶段,我们使用图像Caption数据训练映射层,使Vision Encder抽取出来的图像特征能够和LLM中的文本特征空间进行对齐;第二阶段,我们使用图像问答数据集,进一步微调Ziya-Visual的视觉-语言能力。 In order to better combine the capabilities of the vision pre-training model and the LLM, as in the Mini-GPT4 and LLaVA work, the training of Ziya-Visual-v1 followed the classical network structure and the two-stage training paradigm proposed by BLIP2. Moreover, we found during our experiments that whether or not the parameters of the Vision Encoder are trained has very little impact on the final generation results. Therefore, for the overall model, we inherited the ViT + QFormer parameters from BLIP2 for the vision processing part and the Ziya-v1 weights for the LLM part, both of which were frozen from training. Our main training component is the visual mapping layer (Projection Layer). In the first stage, we use the image Caption data to train the mapping layer so that the image features extracted by Vision Encder can be aligned with the text feature space in LLM; in the second stage, we use the image Q & A dataset to further fine-tune the visual-verbal capabilities of Ziya-Visual. ![](assets/Ziya-Visual.drawio.svg) ### 效果评估 Performance 首先是VQA效果上的评价,可以看到Ziya-Visual模型在GQA的中文和英文测试集上大部分指标均高于VisualGLM,而在BLUE-4上分数较低,这表明Ziya-Visual在大多数开放域的多模态问答上生成的答案更为泛化和准确,但在一些发散性的问题上生成答案具有自主性。对于mPLUG-Owl模型,英文采用了 mPLUG-Owl 7B Instruction tuning (LoRA) 版本,中文则采用了多语言的mPLUG-Owl 7B (Multilingual) Instruction tuning (LoRA) 版本。因此在英文测评分数上高于双语版本的Ziya-Visual,另一方面,由于Ziya-Visual采用的LLaMA具备更优秀的多语言理解和生成能力,并且在Ziya-Visual二阶段训练时也通过翻译工具引入了多语言多模态训练语料,因此在中文数据的测评结果上更有优势。 Firstly, the evaluation on the VQA effectiveness shows that the Ziya-Visual model outperforms VisualGLM on most of the metrics on both the Chinese and English test sets of GQA, while scoring lower on BLUE-4, indicating that Ziya-Visual generates more generalized and accurate answers on most open domain multimodal questions and answers, but generates some discrete questions on answers have autonomy. For the mPLUG-Owl model, the mPLUG-Owl 7B Instruction tuning (LoRA) version was used for English and the multilingual mPLUG-Owl 7B (Multilingual) Instruction tuning (LoRA) version was used for Chinese. On the other hand, Ziya-Visual's LLaMA has better multilingual comprehension and generation capabilities, and the multilingual multimodal training corpus was introduced in the second phase of Ziya-Visual training through a translation tool, so it has an advantage in the Chinese data. ![](assets/gqa.png) 其次我们使用LLaVA的做法利用GPT-4打分评价,该方法利用coco数据集中的caption和物体检测框信息输入给GPT-4;然后将Ziya-Visual和VisualGLM的图像问答的回答再输入到GPT-4,要求GPT-4从回答的有用性、相关性、准确性、细节程度进行评分(1-10分);LLaVA中将对话任务划分为conv(简单对话),detail(细节对话)和complex(复杂推理),all是三种对话任务的综合平均分。最终评价结果如下,可以看到在简单对话和细节对话中,Ziya-Viusual优于VisualGLM,在复杂推理中略输于VisualGLM,最终总体平均结果优于VisualGLM。在对比mPLUG-Owl中我们得到的结论是类似的,Ziya-Visual总体平均结果优于mPLUG-Owl。 Secondly, we used the LLaVA approach to score the evaluation using the GPT-4, which uses the caption and object detection box information from the coco dataset to input to the GPT-4; the responses to the image quiz from Ziya-Visual and VisualGLM are then input to the GPT-4, which is asked to score the responses in terms of usefulness, relevance, accuracy, and The responses were then fed back into GPT-4, which was asked to rate the responses in terms of usefulness, relevance, accuracy, and level of detail (on a scale of 1-10); LLaVA divided the dialogue tasks into conv (simple dialogue), detail (detailed dialogue) and complex (complex reasoning), and all was the combined average score of the three dialogue tasks. The final evaluation results are as follows, and it can be seen that Ziya-Viusual outperforms VisualGLM in simple and detail dialogues, slightly loses out to VisualGLM in complex reasoning, and finally outperforms VisualGLM in overall average results. In comparing mPLUG-Owl we reach a similar conclusion, with Ziya-Viusual outperforming mPLUG-Owl on average overall. ![](assets/visualglm.png) ![](assets/mplug.png) ## 使用 Usage 首先加载Ziya-Visual模型:需要注意的是Visual-Ziya的模型仓库只包含视觉模型部分的参数,Ziya LLM部分的参数通过[Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1)获得。得到这两部分的模型参数后,我们加载模型: First load the Ziya-Visual model: it should be noted that the model repository of Visual-Ziya contains only the parameters of the visual model part, the parameters of the Ziya LLM part are obtained through [Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1). Once we have the parameters for both parts of the model, we load the model: ```python from transformers import LlamaForCausalLM, LlamaTokenizer, BlipImageProcessor from modeling_ziya_blip2 import ZiyaBlip2ForCausalLM from PIL import Image # model path of IDEA-CCNL/Ziya-LLaMA-13B-v1 LM_MODEL_PATH="local path of model Ziya-LLaMA-13B-v1" lm_model = LlamaForCausalLM.from_pretrained(LM_MODEL_PATH) tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH) # visual model OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073] OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711] # demo.py is in the project path, so we can use local path ".". Otherwise you should use "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1" model = ZiyaBlip2ForCausalLM.from_pretrained(".", language_model=lm_model) image_size = model.config.vision_config.image_size image_processor = BlipImageProcessor( size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD, ) model.cuda() # if you use on cpu, comment this line ``` 模型加载完毕后,我们就可以愉快地使用Ziya-Visual模型了: Once the model has been loaded, we can happily use the Ziya-Visual model: ```python generate_config = { "max_new_tokens": 128, "top_p": 0.1, "temperature": 0.7 } output = model.chat( tokenizer=tokenizer, pixel_values=image_processor(Image.open("wzry.jpg"), return_tensors="pt").pixel_values.to(model.device), query="这是什么游戏", previous_querys=[], previous_outputs=[], **generate_config, ) print(output) # 这是一款名为《王者荣耀》的多人在线竞技游戏。在游戏中,玩家扮演不同的角色,并与其他玩家进行战斗。游戏中的人物和环境都是虚拟的,但它们看起来非常逼真。玩家需要使用各种技能和策略来击败对手,并获得胜利。 ``` ## 引用 Citation 如果您在您的工作中使用了我们的模型,可以引用我们的[论文](https://arxiv.org/abs/2210.08590): If you are using the resource for your work, please cite the our [paper](https://arxiv.org/abs/2210.08590): ```text @article{fengshenbang, author = {Jiaxing Zhang and Ruyi Gan and Junjie Wang and Yuxiang Zhang and Lin Zhang and Ping Yang and Xinyu Gao and Ziwei Wu and Xiaoqun Dong and Junqing He and Jianheng Zhuo and Qi Yang and Yongfeng Huang and Xiayu Li and Yanghan Wu and Junyu Lu and Xinyu Zhu and Weifeng Chen and Ting Han and Kunhao Pan and Rui Wang and Hao Wang and Xiaojun Wu and Zhongshen Zeng and Chongpei Chen}, title = {Fengshenbang 1.0: Being the Foundation of Chinese Cognitive Intelligence}, journal = {CoRR}, volume = {abs/2209.02970}, year = {2022} } ``` You can also cite our [website](https://github.com/IDEA-CCNL/Fengshenbang-LM/): 欢迎引用我们的[网站](https://github.com/IDEA-CCNL/Fengshenbang-LM/): ```text @misc{Fengshenbang-LM, title={Fengshenbang-LM}, author={IDEA-CCNL}, year={2021}, howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}}, } ```
null
Non_BioNLP
# Ziya-BLIP2-14B-Visual-v1 - Main Page:[Fengshenbang](https://fengshenbang-lm.com/) - Github: [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM) # 姜子牙系列模型 - [Ziya-BLIP2-14B-Visual-v1](https://huggingface.co/IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1) - [Ziya-LLaMA-13B-v1.1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1.1) - [Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1) - [Ziya-LLaMA-7B-Reward](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-7B-Reward) - [Ziya-LLaMA-13B-Pretrain-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1) ## 简介 Brief Introduction Ziya-Visual多模态大模型基于[姜子牙通用大模型V1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1)训练,具有视觉问答和对话能力。今年3月份OpenAI发布具有识图能力的多模态大模型GPT-4,遗憾的是,时至今日绝大部分用户也都还没有拿到GPT-4输入图片的权限,Ziya-Visual参考了Mini-GPT4、LLaVA等优秀的开源实现,补齐了Ziya的识图能力,使中文用户群体可以体验到结合视觉和语言两大模态的大模型的卓越能力。 The Ziya-Visual multimodal Big Model is based on the [Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1) training and has visual question and answer and dialogue capabilities. In March this year, OpenAI released GPT-4, a multimodal big model with image recognition capabilities. Unfortunately, to date, the vast majority of users have not yet been given access to GPT-4 for image input, so Ziya-Visual refers to Mini-GPT4, LLaVA and other excellent open source implementations to complement Ziya's image recognition capabilities, so that the Chinese user community can experience the superior capabilities of a large model combining two modalities: visual and language. ## 软件依赖 ``` pip install torch==1.12.1 tokenizers==0.13.3 git+https://github.com/huggingface/transformers ``` ## 模型分类 Model Taxonomy | 需求 Demand | 任务 Task | 系列 Series | 模型 Model | 参数 Parameter | 额外 Extra | | :----: | :----: | :----: | :----: | :----: | :----: | | 多模态 Multi-Modal | 通用 General | 姜子牙-多模态 Ziya-Visual | BLIP2 LLaMA | 14B | English&Chinese | ## 模型信息 Model Information ### 效果展示 Showcase 这个例子展示了模型的识图能力、知识能力和创作能力。首先第一个问题中,模型识别出了图片中是电影《泰坦尼克号》的场景,并给出电影导演、发布时间、奖项成就等信息;第二个问题,模型根据用户的需求创作了一首现代爱情诗。 This example demonstrates the model's ability to read pictures, its knowledge and its ability to compose. Firstly in the first problem, the model identifies the picture as a scene from the movie Titanic and gives information about the movie director, release date and award achievements; in the second problem, the model creates a modern love poem based on the user's needs. ![](assets/taitanic.png) 这个例子展示了Ziya-Visual传统中国文化的识别和理解能力,模型识别出了中国画中的信息,在得到提示《清明上河图》之后给出了也给出画家张择端和北宋的历史背景。 This example demonstrates Ziya-Visual's ability to recognise and understand traditional Chinese culture. The model identifies information in Chinese paintings, and after getting the hint 'Qingming Shanghe Tu' gives also gives the historical context of the painter Zhang Zeduan and the Northern Song Dynasty. ![](assets/song_dynasty.png) 如果输入多张图片进行问答呢?Ziya-Visual也是胜任的,在这个例子中,Ziya-Visual展现了强大的多图和多轮交互能力,根据用户给的三张图片,叙述了一个女士在城市夜景中邂逅一对母子猫咪,并与之交谈、分别的小故事。 What if multiple images are entered for a quiz? Ziya-Visual is also up to the task. In this example, Ziya-Visual demonstrates the power of multiple images and multiple rounds of interaction, narrating a short story of a lady who encounters a mother and son cat in a city night scene, talks to them and separates them, based on three images given by the user. ![](assets/long_story.png) ### 训练 Training #### 数据 Train data 在中文视觉问答模型训练上,最大的问题就是数据量少,数据质量差。首先,封神榜团队在开源数据的基础上清洗、积累了一部分高质量数据;其次,我们通过翻译api得到了一部分英-中双语数据集,我们发现虽然翻译数据集会有“翻译腔”等问题,但是借助Ziya-v1的双语能力,最终的语言输出是能够缓解这一问题的;最后,团队结合BLIP,Grounded SAM等先进视觉技术,抽取图像描述的粗粒度信息和图像中物体、方位等细粒度信息,转化为语言描述形式,构造了一部分高质量数据。最终,Ziya-Visual构造了约2千万的优质数据进行训练。和Mini-GPT4、LLaVA一样,Ziya-Visual-v1主要是一个以数据为中心的工作,因此数据的数量和质量非常重要。 In the training of Chinese visual quiz model, the biggest problem is the small amount of data and poor data quality. Firstly, the team cleaned and accumulated some high-quality data based on open source data; secondly, we obtained a part of the English-Chinese bilingual dataset through translation api, and we found that although the translated dataset would have problems such as "translation accent", the final language output was able to alleviate this problem with Ziya-v1's bilingual capability. Finally, the team combined BLIP, Grounded SAM and other advanced vision technologies to extract coarse-grained information from image descriptions and fine-grained information such as objects and orientation in images, and transform them into linguistic descriptions to construct a portion of high-quality data. Ultimately, Ziya-Visual constructed approximately 20 million pieces of high quality data for training. Like Mini-GPT4 and LLaVA, Ziya-Visual-v1 is primarily a data-centric exercise, so the quantity and quality of data is very important. #### 模型结构 Model Architecture 为了更好的结合视觉预训练模型和LLM的能力,和Mini-GPT4和LLaVA工作一样,Ziya-Visual-v1的训练遵循了BLIP2提出的经典网络结构和两阶段训练的范式。而且我们在实验过程中发现,是否训练Vision Encoder的参数对于最终的生成效果影响很小。因此,在整体模型上,视觉处理部分我们继承了BLIP2的ViT + QFormer参数,LLM部分继承了Ziya-v1的权重,这两个部分权重都是冻结不参与训练的。我们主要训练的部分是视觉映射层(Projection Layer)。第一阶段,我们使用图像Caption数据训练映射层,使Vision Encder抽取出来的图像特征能够和LLM中的文本特征空间进行对齐;第二阶段,我们使用图像问答数据集,进一步微调Ziya-Visual的视觉-语言能力。 In order to better combine the capabilities of the vision pre-training model and the LLM, as in the Mini-GPT4 and LLaVA work, the training of Ziya-Visual-v1 followed the classical network structure and the two-stage training paradigm proposed by BLIP2. Moreover, we found during our experiments that whether or not the parameters of the Vision Encoder are trained has very little impact on the final generation results. Therefore, for the overall model, we inherited the ViT + QFormer parameters from BLIP2 for the vision processing part and the Ziya-v1 weights for the LLM part, both of which were frozen from training. Our main training component is the visual mapping layer (Projection Layer). In the first stage, we use the image Caption data to train the mapping layer so that the image features extracted by Vision Encder can be aligned with the text feature space in LLM; in the second stage, we use the image Q & A dataset to further fine-tune the visual-verbal capabilities of Ziya-Visual. ![](assets/Ziya-Visual.drawio.svg) ### 效果评估 Performance 首先是VQA效果上的评价,可以看到Ziya-Visual模型在GQA的中文和英文测试集上大部分指标均高于VisualGLM,而在BLUE-4上分数较低,这表明Ziya-Visual在大多数开放域的多模态问答上生成的答案更为泛化和准确,但在一些发散性的问题上生成答案具有自主性。对于mPLUG-Owl模型,英文采用了 mPLUG-Owl 7B Instruction tuning (LoRA) 版本,中文则采用了多语言的mPLUG-Owl 7B (Multilingual) Instruction tuning (LoRA) 版本。因此在英文测评分数上高于双语版本的Ziya-Visual,另一方面,由于Ziya-Visual采用的LLaMA具备更优秀的多语言理解和生成能力,并且在Ziya-Visual二阶段训练时也通过翻译工具引入了多语言多模态训练语料,因此在中文数据的测评结果上更有优势。 Firstly, the evaluation on the VQA effectiveness shows that the Ziya-Visual model outperforms VisualGLM on most of the metrics on both the Chinese and English test sets of GQA, while scoring lower on BLUE-4, indicating that Ziya-Visual generates more generalized and accurate answers on most open domain multimodal questions and answers, but generates some discrete questions on answers have autonomy. For the mPLUG-Owl model, the mPLUG-Owl 7B Instruction tuning (LoRA) version was used for English and the multilingual mPLUG-Owl 7B (Multilingual) Instruction tuning (LoRA) version was used for Chinese. On the other hand, Ziya-Visual's LLaMA has better multilingual comprehension and generation capabilities, and the multilingual multimodal training corpus was introduced in the second phase of Ziya-Visual training through a translation tool, so it has an advantage in the Chinese data. ![](assets/gqa.png) 其次我们使用LLaVA的做法利用GPT-4打分评价,该方法利用coco数据集中的caption和物体检测框信息输入给GPT-4;然后将Ziya-Visual和VisualGLM的图像问答的回答再输入到GPT-4,要求GPT-4从回答的有用性、相关性、准确性、细节程度进行评分(1-10分);LLaVA中将对话任务划分为conv(简单对话),detail(细节对话)和complex(复杂推理),all是三种对话任务的综合平均分。最终评价结果如下,可以看到在简单对话和细节对话中,Ziya-Viusual优于VisualGLM,在复杂推理中略输于VisualGLM,最终总体平均结果优于VisualGLM。在对比mPLUG-Owl中我们得到的结论是类似的,Ziya-Visual总体平均结果优于mPLUG-Owl。 Secondly, we used the LLaVA approach to score the evaluation using the GPT-4, which uses the caption and object detection box information from the coco dataset to input to the GPT-4; the responses to the image quiz from Ziya-Visual and VisualGLM are then input to the GPT-4, which is asked to score the responses in terms of usefulness, relevance, accuracy, and The responses were then fed back into GPT-4, which was asked to rate the responses in terms of usefulness, relevance, accuracy, and level of detail (on a scale of 1-10); LLaVA divided the dialogue tasks into conv (simple dialogue), detail (detailed dialogue) and complex (complex reasoning), and all was the combined average score of the three dialogue tasks. The final evaluation results are as follows, and it can be seen that Ziya-Viusual outperforms VisualGLM in simple and detail dialogues, slightly loses out to VisualGLM in complex reasoning, and finally outperforms VisualGLM in overall average results. In comparing mPLUG-Owl we reach a similar conclusion, with Ziya-Viusual outperforming mPLUG-Owl on average overall. ![](assets/visualglm.png) ![](assets/mplug.png) ## 使用 Usage 首先加载Ziya-Visual模型:需要注意的是Visual-Ziya的模型仓库只包含视觉模型部分的参数,Ziya LLM部分的参数通过[Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1)获得。得到这两部分的模型参数后,我们加载模型: First load the Ziya-Visual model: it should be noted that the model repository of Visual-Ziya contains only the parameters of the visual model part, the parameters of the Ziya LLM part are obtained through [Ziya-LLaMA-13B-v1](https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1). Once we have the parameters for both parts of the model, we load the model: ```python from transformers import LlamaForCausalLM, LlamaTokenizer, BlipImageProcessor from modeling_ziya_blip2 import ZiyaBlip2ForCausalLM from PIL import Image # model path of IDEA-CCNL/Ziya-LLaMA-13B-v1 LM_MODEL_PATH="local path of model Ziya-LLaMA-13B-v1" lm_model = LlamaForCausalLM.from_pretrained(LM_MODEL_PATH) tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH) # visual model OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073] OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711] # demo.py is in the project path, so we can use local path ".". Otherwise you should use "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1" model = ZiyaBlip2ForCausalLM.from_pretrained(".", language_model=lm_model) image_size = model.config.vision_config.image_size image_processor = BlipImageProcessor( size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD, ) model.cuda() # if you use on cpu, comment this line ``` 模型加载完毕后,我们就可以愉快地使用Ziya-Visual模型了: Once the model has been loaded, we can happily use the Ziya-Visual model: ```python generate_config = { "max_new_tokens": 128, "top_p": 0.1, "temperature": 0.7 } output = model.chat( tokenizer=tokenizer, pixel_values=image_processor(Image.open("wzry.jpg"), return_tensors="pt").pixel_values.to(model.device), query="这是什么游戏", previous_querys=[], previous_outputs=[], **generate_config, ) print(output) # 这是一款名为《王者荣耀》的多人在线竞技游戏。在游戏中,玩家扮演不同的角色,并与其他玩家进行战斗。游戏中的人物和环境都是虚拟的,但它们看起来非常逼真。玩家需要使用各种技能和策略来击败对手,并获得胜利。 ``` ## 引用 Citation 如果您在您的工作中使用了我们的模型,可以引用我们的[论文](https://arxiv.org/abs/2210.08590): If you are using the resource for your work, please cite the our [paper](https://arxiv.org/abs/2210.08590): ```text @article{fengshenbang, author = {Jiaxing Zhang and Ruyi Gan and Junjie Wang and Yuxiang Zhang and Lin Zhang and Ping Yang and Xinyu Gao and Ziwei Wu and Xiaoqun Dong and Junqing He and Jianheng Zhuo and Qi Yang and Yongfeng Huang and Xiayu Li and Yanghan Wu and Junyu Lu and Xinyu Zhu and Weifeng Chen and Ting Han and Kunhao Pan and Rui Wang and Hao Wang and Xiaojun Wu and Zhongshen Zeng and Chongpei Chen}, title = {Fengshenbang 1.0: Being the Foundation of Chinese Cognitive Intelligence}, journal = {CoRR}, volume = {abs/2209.02970}, year = {2022} } ``` You can also cite our [website](https://github.com/IDEA-CCNL/Fengshenbang-LM/): 欢迎引用我们的[网站](https://github.com/IDEA-CCNL/Fengshenbang-LM/): ```text @misc{Fengshenbang-LM, title={Fengshenbang-LM}, author={IDEA-CCNL}, year={2021}, howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}}, } ```
{"language": ["en", "zh"], "license": "gpl-3.0", "pipeline_tag": "visual-question-answering", "tags": ["image-to-text", "visual-question-answering", "ziya", "fengshenbang"], "inference": false}
task
[ "TRANSLATION" ]
41,408
fine-tuned/NFCorpus-256-24-gpt-4o-2024-05-13-988957
fine-tuned
feature-extraction
[ "sentence-transformers", "safetensors", "bert", "feature-extraction", "sentence-similarity", "mteb", "en", "dataset:fine-tuned/NFCorpus-256-24-gpt-4o-2024-05-13-988957", "dataset:allenai/c4", "license:apache-2.0", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-05-26T03:04:59Z
2024-05-26T03:05:30+00:00
8
0
--- datasets: - fine-tuned/NFCorpus-256-24-gpt-4o-2024-05-13-988957 - allenai/c4 language: - en license: apache-2.0 pipeline_tag: feature-extraction tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb --- This model is a fine-tuned version of [**BAAI/bge-large-en-v1.5**](https://huggingface.co/BAAI/bge-large-en-v1.5) designed for the following use case: custom ## How to Use This model can be easily integrated into your NLP pipeline for tasks such as text classification, sentiment analysis, entity recognition, and more. Here's a simple example to get you started: ```python from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim model = SentenceTransformer( 'fine-tuned/NFCorpus-256-24-gpt-4o-2024-05-13-988957', trust_remote_code=True ) embeddings = model.encode([ 'first text to embed', 'second text to embed' ]) print(cos_sim(embeddings[0], embeddings[1])) ```
null
Non_BioNLP
This model is a fine-tuned version of [**BAAI/bge-large-en-v1.5**](https://huggingface.co/BAAI/bge-large-en-v1.5) designed for the following use case: custom ## How to Use This model can be easily integrated into your NLP pipeline for tasks such as text classification, sentiment analysis, entity recognition, and more. Here's a simple example to get you started: ```python from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim model = SentenceTransformer( 'fine-tuned/NFCorpus-256-24-gpt-4o-2024-05-13-988957', trust_remote_code=True ) embeddings = model.encode([ 'first text to embed', 'second text to embed' ]) print(cos_sim(embeddings[0], embeddings[1])) ```
{"datasets": ["fine-tuned/NFCorpus-256-24-gpt-4o-2024-05-13-988957", "allenai/c4"], "language": ["en"], "license": "apache-2.0", "pipeline_tag": "feature-extraction", "tags": ["sentence-transformers", "feature-extraction", "sentence-similarity", "mteb"]}
task
[ "TEXT_CLASSIFICATION" ]
41,409
haritzpuerto/spanbert-large-cased_SQuAD
haritzpuerto
question-answering
[ "transformers", "pytorch", "bert", "question-answering", "qa", "en", "dataset:squad", "arxiv:2112.01922", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2022-12-23T15:54:13Z
2022-12-23T16:04:03+00:00
42
0
--- datasets: - squad language: - en license: apache-2.0 metrics: - squad tags: - question-answering - qa --- # Description Trained on the SQuAD v1.1 dataset from the MRQA Shared Task. The public dev set was divided into two: one for dev and one for test. # Dev results: "eval_exact_match": 88.15914715400723, "eval_f1": 93.91715796563734, "eval_samples": 5291 # Test results: "test_exact_match": 86.52455272173582, "test_f1": 92.92134442432088 "predict_samples": 5294 More info in the paper: **MetaQA: Combining Expert Agents for Multi-Skill Question Answering** https://arxiv.org/abs/2112.01922
null
Non_BioNLP
# Description Trained on the SQuAD v1.1 dataset from the MRQA Shared Task. The public dev set was divided into two: one for dev and one for test. # Dev results: "eval_exact_match": 88.15914715400723, "eval_f1": 93.91715796563734, "eval_samples": 5291 # Test results: "test_exact_match": 86.52455272173582, "test_f1": 92.92134442432088 "predict_samples": 5294 More info in the paper: **MetaQA: Combining Expert Agents for Multi-Skill Question Answering** https://arxiv.org/abs/2112.01922
{"datasets": ["squad"], "language": ["en"], "license": "apache-2.0", "metrics": ["squad"], "tags": ["question-answering", "qa"]}
task
[ "QUESTION_ANSWERING" ]
41,410
arunachaleswara369/Llama-3.2-1B-Mental-Health-Sentiment
arunachaleswara369
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "facebook", "meta", "pytorch", "llama-3", "conversational", "en", "de", "fr", "it", "pt", "hi", "es", "th", "arxiv:2204.05149", "arxiv:2405.16406", "license:llama3.2", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2025-02-19T08:21:29Z
2025-02-19T19:20:56+00:00
25
0
--- language: - en - de - fr - it - pt - hi - es - th library_name: transformers license: llama3.2 pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-3 extra_gated_prompt: "### LLAMA 3.2 COMMUNITY LICENSE AGREEMENT\n\nLlama 3.2 Version\ \ Release Date: September 25, 2024\n\n“Agreement” means the terms and conditions\ \ for use, reproduction, distribution and modification of the Llama Materials set\ \ forth herein.\n\n“Documentation” means the specifications, manuals and documentation\ \ accompanying Llama 3.2 distributed by Meta at https://llama.meta.com/doc/overview.\n\ \n“Licensee” or “you” means you, or your employer or any other person or entity\ \ (if you are entering into this Agreement on such person or entity’s behalf),\ \ of the age required under applicable laws, rules or regulations to provide legal\ \ consent and that has legal authority to bind your employer or such other person\ \ or entity if you are entering in this Agreement on their behalf.\n\n“Llama 3.2”\ \ means the foundational large language models and software and algorithms, including\ \ machine-learning model code, trained model weights, inference-enabling code, training-enabling\ \ code, fine-tuning enabling code and other elements of the foregoing distributed\ \ by Meta at https://www.llama.com/llama-downloads.\n\n“Llama Materials” means,\ \ collectively, Meta’s proprietary Llama 3.2 and Documentation (and any portion\ \ thereof) made available under this Agreement.\n\n“Meta” or “we” means Meta Platforms\ \ Ireland Limited (if you are located in or, if you are an entity, your principal\ \ place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if\ \ you are located outside of the EEA or Switzerland). \n\nBy clicking “I Accept”\ \ below or by using or distributing any portion or element of the Llama Materials,\ \ you agree to be bound by this Agreement.\n\n1. License Rights and Redistribution.\n\ a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable\ \ and royalty-free limited license under Meta’s intellectual property or other rights\ \ owned by Meta embodied in the Llama Materials to use, reproduce, distribute,\ \ copy, create derivative works of, and make modifications to the Llama Materials.\ \ \nb. Redistribution and Use. \ni. If you distribute or make available the Llama\ \ Materials (or any derivative works thereof), or a product or service (including\ \ another AI model) that contains any of them, you shall (A) provide a copy of this\ \ Agreement with any such Llama Materials; and (B) prominently display “Built with\ \ Llama” on a related website, user interface, blogpost, about page, or product\ \ documentation. If you use the Llama Materials or any outputs or results of the\ \ Llama Materials to create, train, fine tune, or otherwise improve an AI model,\ \ which is distributed or made available, you shall also include “Llama” at the\ \ beginning of any such AI model name.\nii. If you receive Llama Materials, or any\ \ derivative works thereof, from a Licensee as part of an integrated end user product,\ \ then Section 2 of this Agreement will not apply to you. \niii. You must retain\ \ in all copies of the Llama Materials that you distribute the following attribution\ \ notice within a “Notice” text file distributed as a part of such copies: “Llama\ \ 3.2 is licensed under the Llama 3.2 Community License, Copyright © Meta Platforms,\ \ Inc. All Rights Reserved.”\niv. Your use of the Llama Materials must comply with\ \ applicable laws and regulations (including trade compliance laws and regulations)\ \ and adhere to the Acceptable Use Policy for the Llama Materials (available at\ \ https://www.llama.com/llama3_2/use-policy), which is hereby incorporated by reference\ \ into this Agreement.\n \n2. Additional Commercial Terms. If, on the Llama 3.2\ \ version release date, the monthly active users of the products or services made\ \ available by or for Licensee, or Licensee’s affiliates, is greater than 700 million\ \ monthly active users in the preceding calendar month, you must request a license\ \ from Meta, which Meta may grant to you in its sole discretion, and you are not\ \ authorized to exercise any of the rights under this Agreement unless or until\ \ Meta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS\ \ REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM\ \ ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS\ \ ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION,\ \ ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR\ \ PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING\ \ OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR\ \ USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability.\ \ IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY,\ \ WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING\ \ OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL,\ \ INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE\ \ BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\ a. No trademark licenses are granted under this Agreement, and in connection with\ \ the Llama Materials, neither Meta nor Licensee may use any name or mark owned\ \ by or associated with the other or any of its affiliates, except as required\ \ for reasonable and customary use in describing and redistributing the Llama Materials\ \ or as set forth in this Section 5(a). Meta hereby grants you a license to use\ \ “Llama” (the “Mark”) solely as required to comply with the last sentence of Section\ \ 1.b.i. You will comply with Meta’s brand guidelines (currently accessible at\ \ https://about.meta.com/brand/resources/meta/company-brand/). All goodwill arising\ \ out of your use of the Mark will inure to the benefit of Meta.\nb. Subject to\ \ Meta’s ownership of Llama Materials and derivatives made by or for Meta, with\ \ respect to any derivative works and modifications of the Llama Materials that\ \ are made by you, as between you and Meta, you are and will be the owner of such\ \ derivative works and modifications.\nc. If you institute litigation or other proceedings\ \ against Meta or any entity (including a cross-claim or counterclaim in a lawsuit)\ \ alleging that the Llama Materials or Llama 3.2 outputs or results, or any portion\ \ of any of the foregoing, constitutes infringement of intellectual property or\ \ other rights owned or licensable by you, then any licenses granted to you under\ \ this Agreement shall terminate as of the date such litigation or claim is filed\ \ or instituted. You will indemnify and hold harmless Meta from and against any\ \ claim by any third party arising out of or related to your use or distribution\ \ of the Llama Materials.\n6. Term and Termination. The term of this Agreement will\ \ commence upon your acceptance of this Agreement or access to the Llama Materials\ \ and will continue in full force and effect until terminated in accordance with\ \ the terms and conditions herein. Meta may terminate this Agreement if you are\ \ in breach of any term or condition of this Agreement. Upon termination of this\ \ Agreement, you shall delete and cease use of the Llama Materials. Sections 3,\ \ 4 and 7 shall survive the termination of this Agreement. \n7. Governing Law and\ \ Jurisdiction. This Agreement will be governed and construed under the laws of\ \ the State of California without regard to choice of law principles, and the UN\ \ Convention on Contracts for the International Sale of Goods does not apply to\ \ this Agreement. The courts of California shall have exclusive jurisdiction of\ \ any dispute arising out of this Agreement. \n### Llama 3.2 Acceptable Use Policy\n\ Meta is committed to promoting safe and fair use of its tools and features, including\ \ Llama 3.2. If you access or use Llama 3.2, you agree to this Acceptable Use Policy\ \ (“**Policy**”). The most recent copy of this policy can be found at [https://www.llama.com/llama3_2/use-policy](https://www.llama.com/llama3_2/use-policy).\n\ #### Prohibited Uses\nWe want everyone to use Llama 3.2 safely and responsibly.\ \ You agree you will not use, or allow others to use, Llama 3.2 to:\n1. Violate\ \ the law or others’ rights, including to:\n 1. Engage in, promote, generate,\ \ contribute to, encourage, plan, incite, or further illegal or unlawful activity\ \ or content, such as:\n 1. Violence or terrorism\n 2. Exploitation\ \ or harm to children, including the solicitation, creation, acquisition, or dissemination\ \ of child exploitative content or failure to report Child Sexual Abuse Material\n\ \ 3. Human trafficking, exploitation, and sexual violence\n 4. The\ \ illegal distribution of information or materials to minors, including obscene\ \ materials, or failure to employ legally required age-gating in connection with\ \ such information or materials.\n 5. Sexual solicitation\n 6. Any\ \ other criminal activity\n 1. Engage in, promote, incite, or facilitate the\ \ harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\ \ 2. Engage in, promote, incite, or facilitate discrimination or other unlawful\ \ or harmful conduct in the provision of employment, employment benefits, credit,\ \ housing, other economic benefits, or other essential goods and services\n 3.\ \ Engage in the unauthorized or unlicensed practice of any profession including,\ \ but not limited to, financial, legal, medical/health, or related professional\ \ practices\n 4. Collect, process, disclose, generate, or infer private or sensitive\ \ information about individuals, including information about individuals’ identity,\ \ health, or demographic information, unless you have obtained the right to do so\ \ in accordance with applicable law\n 5. Engage in or facilitate any action or\ \ generate any content that infringes, misappropriates, or otherwise violates any\ \ third-party rights, including the outputs or results of any products or services\ \ using the Llama Materials\n 6. Create, generate, or facilitate the creation\ \ of malicious code, malware, computer viruses or do anything else that could disable,\ \ overburden, interfere with or impair the proper working, integrity, operation\ \ or appearance of a website or computer system\n 7. Engage in any action, or\ \ facilitate any action, to intentionally circumvent or remove usage restrictions\ \ or other safety measures, or to enable functionality disabled by Meta \n2. Engage\ \ in, promote, incite, facilitate, or assist in the planning or development of activities\ \ that present a risk of death or bodily harm to individuals, including use of Llama\ \ 3.2 related to the following:\n 8. Military, warfare, nuclear industries or\ \ applications, espionage, use for materials or activities that are subject to the\ \ International Traffic Arms Regulations (ITAR) maintained by the United States\ \ Department of State or to the U.S. Biological Weapons Anti-Terrorism Act of 1989\ \ or the Chemical Weapons Convention Implementation Act of 1997\n 9. Guns and\ \ illegal weapons (including weapon development)\n 10. Illegal drugs and regulated/controlled\ \ substances\n 11. Operation of critical infrastructure, transportation technologies,\ \ or heavy machinery\n 12. Self-harm or harm to others, including suicide, cutting,\ \ and eating disorders\n 13. Any content intended to incite or promote violence,\ \ abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive\ \ or mislead others, including use of Llama 3.2 related to the following:\n 14.\ \ Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\ \ 15. Generating, promoting, or furthering defamatory content, including the\ \ creation of defamatory statements, images, or other content\n 16. Generating,\ \ promoting, or further distributing spam\n 17. Impersonating another individual\ \ without consent, authorization, or legal right\n 18. Representing that the\ \ use of Llama 3.2 or outputs are human-generated\n 19. Generating or facilitating\ \ false online engagement, including fake reviews and other means of fake online\ \ engagement \n4. Fail to appropriately disclose to end users any known dangers\ \ of your AI system 5. Interact with third party tools, models, or software designed\ \ to generate unlawful content or engage in unlawful or harmful conduct and/or represent\ \ that the outputs of such tools, models, or software are associated with Meta or\ \ Llama 3.2\n\nWith respect to any multimodal models included in Llama 3.2, the\ \ rights granted under Section 1(a) of the Llama 3.2 Community License Agreement\ \ are not being granted to you if you are an individual domiciled in, or a company\ \ with a principal place of business in, the European Union. This restriction does\ \ not apply to end users of a product or service that incorporates any such multimodal\ \ models.\n\nPlease report any violation of this Policy, software “bug,” or other\ \ problems that could lead to a violation of this Policy through one of the following\ \ means:\n\n* Reporting issues with the model: [https://github.com/meta-llama/llama-models/issues](https://l.workplace.com/l.php?u=https%3A%2F%2Fgithub.com%2Fmeta-llama%2Fllama-models%2Fissues&h=AT0qV8W9BFT6NwihiOHRuKYQM_UnkzN_NmHMy91OT55gkLpgi4kQupHUl0ssR4dQsIQ8n3tfd0vtkobvsEvt1l4Ic6GXI2EeuHV8N08OG2WnbAmm0FL4ObkazC6G_256vN0lN9DsykCvCqGZ)\n\ * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)\n\ * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)\n\ * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama\ \ 3.2: [email protected]" extra_gated_fields: First Name: text Last Name: text Date of birth: date_picker Country: country Affiliation: text Job title: type: select options: - Student - Research Graduate - AI researcher - AI developer/engineer - Reporter - Other geo: ip_location ? By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy : checkbox extra_gated_description: The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/). extra_gated_button_content: Submit --- ## Model Information The Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks. **Model Developer:** Meta **Model Architecture:** Llama 3.2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. | | Training Data | Params | Input modalities | Output modalities | Context Length | GQA | Shared Embeddings | Token count | Knowledge cutoff | | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | | Llama 3.2 (text only) | A new mix of publicly available online data. | 1B (1.23B) | Multilingual Text | Multilingual Text and code | 128k | Yes | Yes | Up to 9T tokens | December 2023 | | | | 3B (3.21B) | Multilingual Text | Multilingual Text and code | | | | | | | Llama 3.2 Quantized (text only) | A new mix of publicly available online data. | 1B (1.23B) | Multilingual Text | Multilingual Text and code | 8k | Yes | Yes | Up to 9T tokens | December 2023 | | | | 3B (3.21B) | Multilingual Text | Multilingual Text and code | | | | | | **Supported Languages:** English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai are officially supported. Llama 3.2 has been trained on a broader collection of languages than these 8 supported languages. Developers may fine-tune Llama 3.2 models for languages beyond these supported languages, provided they comply with the Llama 3.2 Community License and the Acceptable Use Policy. Developers are always expected to ensure that their deployments, including those that involve additional languages, are completed safely and responsibly. **Llama 3.2 Model Family:** Token counts refer to pretraining data only. All model versions use Grouped-Query Attention (GQA) for improved inference scalability. **Model Release Date:** Sept 25, 2024 **Status:** This is a static model trained on an offline dataset. Future versions may be released that improve model capabilities and safety. **License:** Use of Llama 3.2 is governed by the [Llama 3.2 Community License](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE) (a custom, commercial license agreement). **Feedback:** Instructions on how to provide feedback or comments on the model can be found in the Llama Models [README](https://github.com/meta-llama/llama-models/blob/main/README.md). For more technical information about generation parameters and recipes for how to use Llama 3.2 in applications, please go [here](https://github.com/meta-llama/llama-recipes). ## Intended Use **Intended Use Cases:** Llama 3.2 is intended for commercial and research use in multiple languages. Instruction tuned text only models are intended for assistant-like chat and agentic applications like knowledge retrieval and summarization, mobile AI powered writing assistants and query and prompt rewriting. Pretrained models can be adapted for a variety of additional natural language generation tasks. Similarly, quantized models can be adapted for a variety of on-device use-cases with limited compute resources. **Out of Scope:** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the Acceptable Use Policy and Llama 3.2 Community License. Use in languages beyond those explicitly referenced as supported in this model card. ## How to use This repository contains two versions of Llama-3.2-1B-Instruct, for use with transformers and with the original `llama` codebase. ### Use with transformers Starting with `transformers >= 4.43.0` onward, you can run conversational inference using the Transformers `pipeline` abstraction or by leveraging the Auto classes with the `generate()` function. Make sure to update your transformers installation via `pip install --upgrade transformers`. ```python import torch from transformers import pipeline model_id = "meta-llama/Llama-3.2-1B-Instruct" pipe = pipeline( "text-generation", model=model_id, torch_dtype=torch.bfloat16, device_map="auto", ) messages = [ {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"}, {"role": "user", "content": "Who are you?"}, ] outputs = pipe( messages, max_new_tokens=256, ) print(outputs[0]["generated_text"][-1]) ``` Note: You can also find detailed recipes on how to use the model locally, with `torch.compile()`, assisted generations, quantised and more at [`huggingface-llama-recipes`](https://github.com/huggingface/huggingface-llama-recipes) ### Use with `llama` Please, follow the instructions in the [repository](https://github.com/meta-llama/llama) To download Original checkpoints, see the example command below leveraging `huggingface-cli`: ``` huggingface-cli download meta-llama/Llama-3.2-1B-Instruct --include "original/*" --local-dir Llama-3.2-1B-Instruct ``` ## Hardware and Software **Training Factors:** We used custom training libraries, Meta's custom built GPU cluster, and production infrastructure for pretraining. Fine-tuning, quantization, annotation, and evaluation were also performed on production infrastructure. **Training Energy Use:** Training utilized a cumulative of **916k** GPU hours of computation on H100-80GB (TDP of 700W) type hardware, per the table below. Training time is the total GPU time required for training each model and power consumption is the peak power capacity per GPU device used, adjusted for power usage efficiency. **Training Greenhouse Gas Emissions:** Estimated total location-based greenhouse gas emissions were **240** tons CO2eq for training. Since 2020, Meta has maintained net zero greenhouse gas emissions in its global operations and matched 100% of its electricity use with renewable energy; therefore, the total market-based greenhouse gas emissions for training were 0 tons CO2eq. | | Training Time (GPU hours) | Logit Generation Time (GPU Hours) | Training Power Consumption (W) | Training Location-Based Greenhouse Gas Emissions (tons CO2eq) | Training Market-Based Greenhouse Gas Emissions (tons CO2eq) | | :---- | :---: | ----- | :---: | :---: | :---: | | Llama 3.2 1B | 370k | \- | 700 | 107 | 0 | | Llama 3.2 3B | 460k | \- | 700 | 133 | 0 | | Llama 3.2 1B SpinQuant | 1.7 | 0 | 700 | *Negligible*\*\* | 0 | | Llama 3.2 3B SpinQuant | 2.4 | 0 | 700 | *Negligible*\*\* | 0 | | Llama 3.2 1B QLora | 1.3k | 0 | 700 | 0.381 | 0 | | Llama 3.2 3B QLora | 1.6k | 0 | 700 | 0.461 | 0 | | Total | 833k | 86k | | 240 | 0 | \*\* The location-based CO2e emissions of Llama 3.2 1B SpinQuant and Llama 3.2 3B SpinQuant are less than 0.001 metric tonnes each. This is due to the minimal training GPU hours that are required. The methodology used to determine training energy use and greenhouse gas emissions can be found [here](https://arxiv.org/pdf/2204.05149). Since Meta is openly releasing these models, the training energy use and greenhouse gas emissions will not be incurred by others. ## Training Data **Overview:** Llama 3.2 was pretrained on up to 9 trillion tokens of data from publicly available sources. For the 1B and 3B Llama 3.2 models, we incorporated logits from the Llama 3.1 8B and 70B models into the pretraining stage of the model development, where outputs (logits) from these larger models were used as token-level targets. Knowledge distillation was used after pruning to recover performance. In post-training we used a similar recipe as Llama 3.1 and produced final chat models by doing several rounds of alignment on top of the pre-trained model. Each round involved Supervised Fine-Tuning (SFT), Rejection Sampling (RS), and Direct Preference Optimization (DPO). **Data Freshness:** The pretraining data has a cutoff of December 2023\. ## Quantization ### Quantization Scheme We designed the current quantization scheme with the [PyTorch’s ExecuTorch](https://github.com/pytorch/executorch) inference framework and Arm CPU backend in mind, taking into account metrics including model quality, prefill/decoding speed, and memory footprint. Our quantization scheme involves three parts: - All linear layers in all transformer blocks are quantized to a 4-bit groupwise scheme (with a group size of 32) for weights and 8-bit per-token dynamic quantization for activations. - The classification layer is quantized to 8-bit per-channel for weight and 8-bit per token dynamic quantization for activation. - Similar to classification layer, an 8-bit per channel quantization is used for embedding layer. ### Quantization-Aware Training and LoRA The quantization-aware training (QAT) with low-rank adaptation (LoRA) models went through only post-training stages, using the same data as the full precision models. To initialize QAT, we utilize BF16 Llama 3.2 model checkpoints obtained after supervised fine-tuning (SFT) and perform an additional full round of SFT training with QAT. We then freeze the backbone of the QAT model and perform another round of SFT with LoRA adaptors applied to all layers within the transformer block. Meanwhile, the LoRA adaptors' weights and activations are maintained in BF16. Because our approach is similar to QLoRA of Dettmers et al., (2023) (i.e., quantization followed by LoRA adapters), we refer this method as QLoRA. Finally, we fine-tune the resulting model (both backbone and LoRA adaptors) using direct preference optimization (DPO). ### SpinQuant [SpinQuant](https://arxiv.org/abs/2405.16406) was applied, together with generative post-training quantization (GPTQ). For the SpinQuant rotation matrix fine-tuning, we optimized for 100 iterations, using 800 samples with sequence-length 2048 from the WikiText 2 dataset. For GPTQ, we used 128 samples from the same dataset with the same sequence-length. ## Benchmarks \- English Text In this section, we report the results for Llama 3.2 models on standard automatic benchmarks. For all these evaluations, we used our internal evaluations library. ### Base Pretrained Models | Category | Benchmark | \# Shots | Metric | Llama 3.2 1B | Llama 3.2 3B | Llama 3.1 8B | | ----- | ----- | :---: | :---: | :---: | :---: | :---: | | General | MMLU | 5 | macro\_avg/acc\_char | 32.2 | 58 | 66.7 | | | AGIEval English | 3-5 | average/acc\_char | 23.3 | 39.2 | 47.8 | | | ARC-Challenge | 25 | acc\_char | 32.8 | 69.1 | 79.7 | | Reading comprehension | SQuAD | 1 | em | 49.2 | 67.7 | 77 | | | QuAC (F1) | 1 | f1 | 37.9 | 42.9 | 44.9 | | | DROP (F1) | 3 | f1 | 28.0 | 45.2 | 59.5 | | Long Context | Needle in Haystack | 0 | em | 96.8 | 1 | 1 | ### Instruction Tuned Models | Capability | | Benchmark | \# Shots | Metric | Llama 3.2 1B bf16 | Llama 3.2 1B Vanilla PTQ\*\* | Llama 3.2 1B Spin Quant | Llama 3.2 1B QLoRA | Llama 3.2 3B bf16 | Llama 3.2 3B Vanilla PTQ\*\* | Llama 3.2 3B Spin Quant | Llama 3.2 3B QLoRA | Llama 3.1 8B | | :---: | ----- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | | General | | MMLU | 5 | macro\_avg/acc | 49.3 | 43.3 | 47.3 | 49.0 | 63.4 | 60.5 | 62 | 62.4 | 69.4 | | Re-writing | | Open-rewrite eval | 0 | micro\_avg/rougeL | 41.6 | 39.2 | 40.9 | 41.2 | 40.1 | 40.3 | 40.8 | 40.7 | 40.9 | | Summarization | | TLDR9+ (test) | 1 | rougeL | 16.8 | 14.9 | 16.7 | 16.8 | 19.0 | 19.1 | 19.2 | 19.1 | 17.2 | | Instruction following | | IFEval | 0 | Avg(Prompt/Instruction acc Loose/Strict) | 59.5 | 51.5 | 58.4 | 55.6 | 77.4 | 73.9 | 73.5 | 75.9 | 80.4 | | Math | | GSM8K (CoT) | 8 | em\_maj1@1 | 44.4 | 33.1 | 40.6 | 46.5 | 77.7 | 72.9 | 75.7 | 77.9 | 84.5 | | | | MATH (CoT) | 0 | final\_em | 30.6 | 20.5 | 25.3 | 31.0 | 48.0 | 44.2 | 45.3 | 49.2 | 51.9 | | Reasoning | | ARC-C | 0 | acc | 59.4 | 54.3 | 57 | 60.7 | 78.6 | 75.6 | 77.6 | 77.6 | 83.4 | | | | GPQA | 0 | acc | 27.2 | 25.9 | 26.3 | 25.9 | 32.8 | 32.8 | 31.7 | 33.9 | 32.8 | | | | Hellaswag | 0 | acc | 41.2 | 38.1 | 41.3 | 41.5 | 69.8 | 66.3 | 68 | 66.3 | 78.7 | | Tool Use | | BFCL V2 | 0 | acc | 25.7 | 14.3 | 15.9 | 23.7 | 67.0 | 53.4 | 60.1 | 63.5 | 67.1 | | | | Nexus | 0 | macro\_avg/acc | 13.5 | 5.2 | 9.6 | 12.5 | 34.3 | 32.4 | 31.5 | 30.1 | 38.5 | | Long Context | | InfiniteBench/En.QA | 0 | longbook\_qa/f1 | 20.3 | N/A | N/A | N/A | 19.8 | N/A | N/A | N/A | 27.3 | | | | InfiniteBench/En.MC | 0 | longbook\_choice/acc | 38.0 | N/A | N/A | N/A | 63.3 | N/A | N/A | N/A | 72.2 | | | | NIH/Multi-needle | 0 | recall | 75.0 | N/A | N/A | N/A | 84.7 | N/A | N/A | N/A | 98.8 | | Multilingual | | MGSM (CoT) | 0 | em | 24.5 | 13.7 | 18.2 | 24.4 | 58.2 | 48.9 | 54.3 | 56.8 | 68.9 | \*\*for comparison purposes only. Model not released. ### Multilingual Benchmarks | Category | Benchmark | Language | Llama 3.2 1B | Llama 3.2 1B Vanilla PTQ\*\* | Llama 3.2 1B Spin Quant | Llama 3.2 1B QLoRA | Llama 3.2 3B | Llama 3.2 3B Vanilla PTQ\*\* | Llama 3.2 3B Spin Quant | Llama 3.2 3B QLoRA | Llama 3.1 8B | | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | | General | MMLU (5-shot, macro_avg/acc) | Portuguese | 39.8 | 34.9 | 38.9 | 40.2 | 54.5 | 50.9 | 53.3 | 53.4 | 62.1 | | | | Spanish | 41.5 | 36.0 | 39.8 | 41.8 | 55.1 | 51.9 | 53.6 | 53.6 | 62.5 | | | | Italian | 39.8 | 34.9 | 38.1 | 40.6 | 53.8 | 49.9 | 52.1 | 51.7 | 61.6 | | | | German | 39.2 | 34.9 | 37.5 | 39.6 | 53.3 | 50.0 | 52.2 | 51.3 | 60.6 | | | | French | 40.5 | 34.8 | 39.2 | 40.8 | 54.6 | 51.2 | 53.3 | 53.3 | 62.3 | | | | Hindi | 33.5 | 30.0 | 32.1 | 34.0 | 43.3 | 40.4 | 42.0 | 42.1 | 50.9 | | | | Thai | 34.7 | 31.2 | 32.4 | 34.9 | 44.5 | 41.3 | 44.0 | 42.2 | 50.3 | \*\*for comparison purposes only. Model not released. ## Inference time In the below table, we compare the performance metrics of different quantization methods (SpinQuant and QAT \+ LoRA) with the BF16 baseline. The evaluation was done using the [ExecuTorch](https://github.com/pytorch/executorch) framework as the inference engine, with the ARM CPU as a backend using Android OnePlus 12 device. | Category | Decode (tokens/sec) | Time-to-first-token (sec) | Prefill (tokens/sec) | Model size (PTE file size in MB) | Memory size (RSS in MB) | | :---- | ----- | ----- | ----- | ----- | ----- | | 1B BF16 (baseline) | 19.2 | 1.0 | 60.3 | 2358 | 3,185 | | 1B SpinQuant | 50.2 (2.6x) | 0.3 (-76.9%) | 260.5 (4.3x) | 1083 (-54.1%) | 1,921 (-39.7%) | | 1B QLoRA | 45.8 (2.4x) | 0.3 (-76.0%) | 252.0 (4.2x) | 1127 (-52.2%) | 2,255 (-29.2%) | | 3B BF16 (baseline) | 7.6 | 3.0 | 21.2 | 6129 | 7,419 | | 3B SpinQuant | 19.7 (2.6x) | 0.7 (-76.4%) | 89.7 (4.2x) | 2435 (-60.3%) | 3,726 (-49.8%) | | 3B QLoRA | 18.5 (2.4x) | 0.7 (-76.1%) | 88.8 (4.2x) | 2529 (-58.7%) | 4,060 (-45.3%) | (\*) The performance measurement is done using an adb binary-based approach. (\*\*) It is measured on an Android OnePlus 12 device. (\*\*\*) Time-to-first-token (TTFT) is measured with prompt length=64 *Footnote:* - *Decode (tokens/second) is for how quickly it keeps generating. Higher is better.* - *Time-to-first-token (TTFT for shorthand) is for how fast it generates the first token for a given prompt. Lower is better.* - *Prefill is the inverse of TTFT (aka 1/TTFT) in tokens/second. Higher is better* - *Model size \- how big is the model, measured by, PTE file, a binary file format for ExecuTorch* - *RSS size \- Memory usage in resident set size (RSS)* ## Responsibility & Safety As part of our Responsible release approach, we followed a three-pronged strategy to managing trust & safety risks: 1. Enable developers to deploy helpful, safe and flexible experiences for their target audience and for the use cases supported by Llama 2. Protect developers against adversarial users aiming to exploit Llama capabilities to potentially cause harm 3. Provide protections for the community to help prevent the misuse of our models ### Responsible Deployment **Approach:** Llama is a foundational technology designed to be used in a variety of use cases. Examples on how Meta’s Llama models have been responsibly deployed can be found in our [Community Stories webpage](https://llama.meta.com/community-stories/). Our approach is to build the most helpful models, enabling the world to benefit from the technology power, by aligning our model safety for generic use cases and addressing a standard set of harms. Developers are then in the driver’s seat to tailor safety for their use cases, defining their own policies and deploying the models with the necessary safeguards in their Llama systems. Llama 3.2 was developed following the best practices outlined in our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide/). #### Llama 3.2 Instruct **Objective:** Our main objectives for conducting safety fine-tuning are to provide the research community with a valuable resource for studying the robustness of safety fine-tuning, as well as to offer developers a readily available, safe, and powerful model for various applications to reduce the developer workload to deploy safe AI systems. We implemented the same set of safety mitigations as in Llama 3, and you can learn more about these in the Llama 3 [paper](https://ai.meta.com/research/publications/the-llama-3-herd-of-models/). **Fine-Tuning Data:** We employ a multi-faceted approach to data collection, combining human-generated data from our vendors with synthetic data to mitigate potential safety risks. We’ve developed many large language model (LLM)-based classifiers that enable us to thoughtfully select high-quality prompts and responses, enhancing data quality control. **Refusals and Tone:** Building on the work we started with Llama 3, we put a great emphasis on model refusals to benign prompts as well as refusal tone. We included both borderline and adversarial prompts in our safety data strategy, and modified our safety data responses to follow tone guidelines. #### Llama 3.2 Systems **Safety as a System:** Large language models, including Llama 3.2, **are not designed to be deployed in isolation** but instead should be deployed as part of an overall AI system with additional safety guardrails as required. Developers are expected to deploy system safeguards when building agentic systems. Safeguards are key to achieve the right helpfulness-safety alignment as well as mitigating safety and security risks inherent to the system and any integration of the model or system with external tools. As part of our responsible release approach, we provide the community with [safeguards](https://llama.meta.com/trust-and-safety/) that developers should deploy with Llama models or other LLMs, including Llama Guard, Prompt Guard and Code Shield. All our [reference implementations](https://github.com/meta-llama/llama-agentic-system) demos contain these safeguards by default so developers can benefit from system-level safety out-of-the-box. ### New Capabilities and Use Cases **Technological Advancement:** Llama releases usually introduce new capabilities that require specific considerations in addition to the best practices that generally apply across all Generative AI use cases. For prior release capabilities also supported by Llama 3.2, see [Llama 3.1 Model Card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md), as the same considerations apply here as well. **Constrained Environments:** Llama 3.2 1B and 3B models are expected to be deployed in highly constrained environments, such as mobile devices. LLM Systems using smaller models will have a different alignment profile and safety/helpfulness tradeoff than more complex, larger systems. Developers should ensure the safety of their system meets the requirements of their use case. We recommend using lighter system safeguards for such use cases, like Llama Guard 3-1B or its mobile-optimized version. ### Evaluations **Scaled Evaluations:** We built dedicated, adversarial evaluation datasets and evaluated systems composed of Llama models and Purple Llama safeguards to filter input prompt and output response. It is important to evaluate applications in context, and we recommend building dedicated evaluation dataset for your use case. **Red Teaming:** We conducted recurring red teaming exercises with the goal of discovering risks via adversarial prompting and we used the learnings to improve our benchmarks and safety tuning datasets. We partnered early with subject-matter experts in critical risk areas to understand the nature of these real-world harms and how such models may lead to unintended harm for society. Based on these conversations, we derived a set of adversarial goals for the red team to attempt to achieve, such as extracting harmful information or reprogramming the model to act in a potentially harmful capacity. The red team consisted of experts in cybersecurity, adversarial machine learning, responsible AI, and integrity in addition to multilingual content specialists with background in integrity issues in specific geographic markets. ### Critical Risks In addition to our safety work above, we took extra care on measuring and/or mitigating the following critical risk areas: **1\. CBRNE (Chemical, Biological, Radiological, Nuclear, and Explosive Weapons):** Llama 3.2 1B and 3B models are smaller and less capable derivatives of Llama 3.1. For Llama 3.1 70B and 405B, to assess risks related to proliferation of chemical and biological weapons, we performed uplift testing designed to assess whether use of Llama 3.1 models could meaningfully increase the capabilities of malicious actors to plan or carry out attacks using these types of weapons and have determined that such testing also applies to the smaller 1B and 3B models. **2\. Child Safety:** Child Safety risk assessments were conducted using a team of experts, to assess the model’s capability to produce outputs that could result in Child Safety risks and inform on any necessary and appropriate risk mitigations via fine tuning. We leveraged those expert red teaming sessions to expand the coverage of our evaluation benchmarks through Llama 3 model development. For Llama 3, we conducted new in-depth sessions using objective based methodologies to assess the model risks along multiple attack vectors including the additional languages Llama 3 is trained on. We also partnered with content specialists to perform red teaming exercises assessing potentially violating content while taking account of market specific nuances or experiences. **3\. Cyber Attacks:** For Llama 3.1 405B, our cyber attack uplift study investigated whether LLMs can enhance human capabilities in hacking tasks, both in terms of skill level and speed. Our attack automation study focused on evaluating the capabilities of LLMs when used as autonomous agents in cyber offensive operations, specifically in the context of ransomware attacks. This evaluation was distinct from previous studies that considered LLMs as interactive assistants. The primary objective was to assess whether these models could effectively function as independent agents in executing complex cyber-attacks without human intervention. Because Llama 3.2’s 1B and 3B models are smaller and less capable models than Llama 3.1 405B, we broadly believe that the testing conducted for the 405B model also applies to Llama 3.2 models. ### Community **Industry Partnerships:** Generative AI safety requires expertise and tooling, and we believe in the strength of the open community to accelerate its progress. We are active members of open consortiums, including the AI Alliance, Partnership on AI and MLCommons, actively contributing to safety standardization and transparency. We encourage the community to adopt taxonomies like the MLCommons Proof of Concept evaluation to facilitate collaboration and transparency on safety and content evaluations. Our Purple Llama tools are open sourced for the community to use and widely distributed across ecosystem partners including cloud service providers. We encourage community contributions to our [Github repository](https://github.com/meta-llama/PurpleLlama). **Grants:** We also set up the [Llama Impact Grants](https://llama.meta.com/llama-impact-grants/) program to identify and support the most compelling applications of Meta’s Llama model for societal benefit across three categories: education, climate and open innovation. The 20 finalists from the hundreds of applications can be found [here](https://llama.meta.com/llama-impact-grants/#finalists). **Reporting:** Finally, we put in place a set of resources including an [output reporting mechanism](https://developers.facebook.com/llama_output_feedback) and [bug bounty program](https://www.facebook.com/whitehat) to continuously improve the Llama technology with the help of the community. ## Ethical Considerations and Limitations **Values:** The core values of Llama 3.2 are openness, inclusivity and helpfulness. It is meant to serve everyone, and to work for a wide range of use cases. It is thus designed to be accessible to people across many different backgrounds, experiences and perspectives. Llama 3.2 addresses users and their needs as they are, without insertion unnecessary judgment or normativity, while reflecting the understanding that even content that may appear problematic in some cases can serve valuable purposes in others. It respects the dignity and autonomy of all users, especially in terms of the values of free thought and expression that power innovation and progress. **Testing:** Llama 3.2 is a new technology, and like any new technology, there are risks associated with its use. Testing conducted to date has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 3.2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 3.2 models, developers should perform safety testing and tuning tailored to their specific applications of the model. Please refer to available resources including our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide), [Trust and Safety](https://llama.meta.com/trust-and-safety/) solutions, and other [resources](https://llama.meta.com/docs/get-started/) to learn more about responsible development.
null
Non_BioNLP
## Model Information The Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks. **Model Developer:** Meta **Model Architecture:** Llama 3.2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. | | Training Data | Params | Input modalities | Output modalities | Context Length | GQA | Shared Embeddings | Token count | Knowledge cutoff | | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | | Llama 3.2 (text only) | A new mix of publicly available online data. | 1B (1.23B) | Multilingual Text | Multilingual Text and code | 128k | Yes | Yes | Up to 9T tokens | December 2023 | | | | 3B (3.21B) | Multilingual Text | Multilingual Text and code | | | | | | | Llama 3.2 Quantized (text only) | A new mix of publicly available online data. | 1B (1.23B) | Multilingual Text | Multilingual Text and code | 8k | Yes | Yes | Up to 9T tokens | December 2023 | | | | 3B (3.21B) | Multilingual Text | Multilingual Text and code | | | | | | **Supported Languages:** English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai are officially supported. Llama 3.2 has been trained on a broader collection of languages than these 8 supported languages. Developers may fine-tune Llama 3.2 models for languages beyond these supported languages, provided they comply with the Llama 3.2 Community License and the Acceptable Use Policy. Developers are always expected to ensure that their deployments, including those that involve additional languages, are completed safely and responsibly. **Llama 3.2 Model Family:** Token counts refer to pretraining data only. All model versions use Grouped-Query Attention (GQA) for improved inference scalability. **Model Release Date:** Sept 25, 2024 **Status:** This is a static model trained on an offline dataset. Future versions may be released that improve model capabilities and safety. **License:** Use of Llama 3.2 is governed by the [Llama 3.2 Community License](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE) (a custom, commercial license agreement). **Feedback:** Instructions on how to provide feedback or comments on the model can be found in the Llama Models [README](https://github.com/meta-llama/llama-models/blob/main/README.md). For more technical information about generation parameters and recipes for how to use Llama 3.2 in applications, please go [here](https://github.com/meta-llama/llama-recipes). ## Intended Use **Intended Use Cases:** Llama 3.2 is intended for commercial and research use in multiple languages. Instruction tuned text only models are intended for assistant-like chat and agentic applications like knowledge retrieval and summarization, mobile AI powered writing assistants and query and prompt rewriting. Pretrained models can be adapted for a variety of additional natural language generation tasks. Similarly, quantized models can be adapted for a variety of on-device use-cases with limited compute resources. **Out of Scope:** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the Acceptable Use Policy and Llama 3.2 Community License. Use in languages beyond those explicitly referenced as supported in this model card. ## How to use This repository contains two versions of Llama-3.2-1B-Instruct, for use with transformers and with the original `llama` codebase. ### Use with transformers Starting with `transformers >= 4.43.0` onward, you can run conversational inference using the Transformers `pipeline` abstraction or by leveraging the Auto classes with the `generate()` function. Make sure to update your transformers installation via `pip install --upgrade transformers`. ```python import torch from transformers import pipeline model_id = "meta-llama/Llama-3.2-1B-Instruct" pipe = pipeline( "text-generation", model=model_id, torch_dtype=torch.bfloat16, device_map="auto", ) messages = [ {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"}, {"role": "user", "content": "Who are you?"}, ] outputs = pipe( messages, max_new_tokens=256, ) print(outputs[0]["generated_text"][-1]) ``` Note: You can also find detailed recipes on how to use the model locally, with `torch.compile()`, assisted generations, quantised and more at [`huggingface-llama-recipes`](https://github.com/huggingface/huggingface-llama-recipes) ### Use with `llama` Please, follow the instructions in the [repository](https://github.com/meta-llama/llama) To download Original checkpoints, see the example command below leveraging `huggingface-cli`: ``` huggingface-cli download meta-llama/Llama-3.2-1B-Instruct --include "original/*" --local-dir Llama-3.2-1B-Instruct ``` ## Hardware and Software **Training Factors:** We used custom training libraries, Meta's custom built GPU cluster, and production infrastructure for pretraining. Fine-tuning, quantization, annotation, and evaluation were also performed on production infrastructure. **Training Energy Use:** Training utilized a cumulative of **916k** GPU hours of computation on H100-80GB (TDP of 700W) type hardware, per the table below. Training time is the total GPU time required for training each model and power consumption is the peak power capacity per GPU device used, adjusted for power usage efficiency. **Training Greenhouse Gas Emissions:** Estimated total location-based greenhouse gas emissions were **240** tons CO2eq for training. Since 2020, Meta has maintained net zero greenhouse gas emissions in its global operations and matched 100% of its electricity use with renewable energy; therefore, the total market-based greenhouse gas emissions for training were 0 tons CO2eq. | | Training Time (GPU hours) | Logit Generation Time (GPU Hours) | Training Power Consumption (W) | Training Location-Based Greenhouse Gas Emissions (tons CO2eq) | Training Market-Based Greenhouse Gas Emissions (tons CO2eq) | | :---- | :---: | ----- | :---: | :---: | :---: | | Llama 3.2 1B | 370k | \- | 700 | 107 | 0 | | Llama 3.2 3B | 460k | \- | 700 | 133 | 0 | | Llama 3.2 1B SpinQuant | 1.7 | 0 | 700 | *Negligible*\*\* | 0 | | Llama 3.2 3B SpinQuant | 2.4 | 0 | 700 | *Negligible*\*\* | 0 | | Llama 3.2 1B QLora | 1.3k | 0 | 700 | 0.381 | 0 | | Llama 3.2 3B QLora | 1.6k | 0 | 700 | 0.461 | 0 | | Total | 833k | 86k | | 240 | 0 | \*\* The location-based CO2e emissions of Llama 3.2 1B SpinQuant and Llama 3.2 3B SpinQuant are less than 0.001 metric tonnes each. This is due to the minimal training GPU hours that are required. The methodology used to determine training energy use and greenhouse gas emissions can be found [here](https://arxiv.org/pdf/2204.05149). Since Meta is openly releasing these models, the training energy use and greenhouse gas emissions will not be incurred by others. ## Training Data **Overview:** Llama 3.2 was pretrained on up to 9 trillion tokens of data from publicly available sources. For the 1B and 3B Llama 3.2 models, we incorporated logits from the Llama 3.1 8B and 70B models into the pretraining stage of the model development, where outputs (logits) from these larger models were used as token-level targets. Knowledge distillation was used after pruning to recover performance. In post-training we used a similar recipe as Llama 3.1 and produced final chat models by doing several rounds of alignment on top of the pre-trained model. Each round involved Supervised Fine-Tuning (SFT), Rejection Sampling (RS), and Direct Preference Optimization (DPO). **Data Freshness:** The pretraining data has a cutoff of December 2023\. ## Quantization ### Quantization Scheme We designed the current quantization scheme with the [PyTorch’s ExecuTorch](https://github.com/pytorch/executorch) inference framework and Arm CPU backend in mind, taking into account metrics including model quality, prefill/decoding speed, and memory footprint. Our quantization scheme involves three parts: - All linear layers in all transformer blocks are quantized to a 4-bit groupwise scheme (with a group size of 32) for weights and 8-bit per-token dynamic quantization for activations. - The classification layer is quantized to 8-bit per-channel for weight and 8-bit per token dynamic quantization for activation. - Similar to classification layer, an 8-bit per channel quantization is used for embedding layer. ### Quantization-Aware Training and LoRA The quantization-aware training (QAT) with low-rank adaptation (LoRA) models went through only post-training stages, using the same data as the full precision models. To initialize QAT, we utilize BF16 Llama 3.2 model checkpoints obtained after supervised fine-tuning (SFT) and perform an additional full round of SFT training with QAT. We then freeze the backbone of the QAT model and perform another round of SFT with LoRA adaptors applied to all layers within the transformer block. Meanwhile, the LoRA adaptors' weights and activations are maintained in BF16. Because our approach is similar to QLoRA of Dettmers et al., (2023) (i.e., quantization followed by LoRA adapters), we refer this method as QLoRA. Finally, we fine-tune the resulting model (both backbone and LoRA adaptors) using direct preference optimization (DPO). ### SpinQuant [SpinQuant](https://arxiv.org/abs/2405.16406) was applied, together with generative post-training quantization (GPTQ). For the SpinQuant rotation matrix fine-tuning, we optimized for 100 iterations, using 800 samples with sequence-length 2048 from the WikiText 2 dataset. For GPTQ, we used 128 samples from the same dataset with the same sequence-length. ## Benchmarks \- English Text In this section, we report the results for Llama 3.2 models on standard automatic benchmarks. For all these evaluations, we used our internal evaluations library. ### Base Pretrained Models | Category | Benchmark | \# Shots | Metric | Llama 3.2 1B | Llama 3.2 3B | Llama 3.1 8B | | ----- | ----- | :---: | :---: | :---: | :---: | :---: | | General | MMLU | 5 | macro\_avg/acc\_char | 32.2 | 58 | 66.7 | | | AGIEval English | 3-5 | average/acc\_char | 23.3 | 39.2 | 47.8 | | | ARC-Challenge | 25 | acc\_char | 32.8 | 69.1 | 79.7 | | Reading comprehension | SQuAD | 1 | em | 49.2 | 67.7 | 77 | | | QuAC (F1) | 1 | f1 | 37.9 | 42.9 | 44.9 | | | DROP (F1) | 3 | f1 | 28.0 | 45.2 | 59.5 | | Long Context | Needle in Haystack | 0 | em | 96.8 | 1 | 1 | ### Instruction Tuned Models | Capability | | Benchmark | \# Shots | Metric | Llama 3.2 1B bf16 | Llama 3.2 1B Vanilla PTQ\*\* | Llama 3.2 1B Spin Quant | Llama 3.2 1B QLoRA | Llama 3.2 3B bf16 | Llama 3.2 3B Vanilla PTQ\*\* | Llama 3.2 3B Spin Quant | Llama 3.2 3B QLoRA | Llama 3.1 8B | | :---: | ----- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | | General | | MMLU | 5 | macro\_avg/acc | 49.3 | 43.3 | 47.3 | 49.0 | 63.4 | 60.5 | 62 | 62.4 | 69.4 | | Re-writing | | Open-rewrite eval | 0 | micro\_avg/rougeL | 41.6 | 39.2 | 40.9 | 41.2 | 40.1 | 40.3 | 40.8 | 40.7 | 40.9 | | Summarization | | TLDR9+ (test) | 1 | rougeL | 16.8 | 14.9 | 16.7 | 16.8 | 19.0 | 19.1 | 19.2 | 19.1 | 17.2 | | Instruction following | | IFEval | 0 | Avg(Prompt/Instruction acc Loose/Strict) | 59.5 | 51.5 | 58.4 | 55.6 | 77.4 | 73.9 | 73.5 | 75.9 | 80.4 | | Math | | GSM8K (CoT) | 8 | em\_maj1@1 | 44.4 | 33.1 | 40.6 | 46.5 | 77.7 | 72.9 | 75.7 | 77.9 | 84.5 | | | | MATH (CoT) | 0 | final\_em | 30.6 | 20.5 | 25.3 | 31.0 | 48.0 | 44.2 | 45.3 | 49.2 | 51.9 | | Reasoning | | ARC-C | 0 | acc | 59.4 | 54.3 | 57 | 60.7 | 78.6 | 75.6 | 77.6 | 77.6 | 83.4 | | | | GPQA | 0 | acc | 27.2 | 25.9 | 26.3 | 25.9 | 32.8 | 32.8 | 31.7 | 33.9 | 32.8 | | | | Hellaswag | 0 | acc | 41.2 | 38.1 | 41.3 | 41.5 | 69.8 | 66.3 | 68 | 66.3 | 78.7 | | Tool Use | | BFCL V2 | 0 | acc | 25.7 | 14.3 | 15.9 | 23.7 | 67.0 | 53.4 | 60.1 | 63.5 | 67.1 | | | | Nexus | 0 | macro\_avg/acc | 13.5 | 5.2 | 9.6 | 12.5 | 34.3 | 32.4 | 31.5 | 30.1 | 38.5 | | Long Context | | InfiniteBench/En.QA | 0 | longbook\_qa/f1 | 20.3 | N/A | N/A | N/A | 19.8 | N/A | N/A | N/A | 27.3 | | | | InfiniteBench/En.MC | 0 | longbook\_choice/acc | 38.0 | N/A | N/A | N/A | 63.3 | N/A | N/A | N/A | 72.2 | | | | NIH/Multi-needle | 0 | recall | 75.0 | N/A | N/A | N/A | 84.7 | N/A | N/A | N/A | 98.8 | | Multilingual | | MGSM (CoT) | 0 | em | 24.5 | 13.7 | 18.2 | 24.4 | 58.2 | 48.9 | 54.3 | 56.8 | 68.9 | \*\*for comparison purposes only. Model not released. ### Multilingual Benchmarks | Category | Benchmark | Language | Llama 3.2 1B | Llama 3.2 1B Vanilla PTQ\*\* | Llama 3.2 1B Spin Quant | Llama 3.2 1B QLoRA | Llama 3.2 3B | Llama 3.2 3B Vanilla PTQ\*\* | Llama 3.2 3B Spin Quant | Llama 3.2 3B QLoRA | Llama 3.1 8B | | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | | General | MMLU (5-shot, macro_avg/acc) | Portuguese | 39.8 | 34.9 | 38.9 | 40.2 | 54.5 | 50.9 | 53.3 | 53.4 | 62.1 | | | | Spanish | 41.5 | 36.0 | 39.8 | 41.8 | 55.1 | 51.9 | 53.6 | 53.6 | 62.5 | | | | Italian | 39.8 | 34.9 | 38.1 | 40.6 | 53.8 | 49.9 | 52.1 | 51.7 | 61.6 | | | | German | 39.2 | 34.9 | 37.5 | 39.6 | 53.3 | 50.0 | 52.2 | 51.3 | 60.6 | | | | French | 40.5 | 34.8 | 39.2 | 40.8 | 54.6 | 51.2 | 53.3 | 53.3 | 62.3 | | | | Hindi | 33.5 | 30.0 | 32.1 | 34.0 | 43.3 | 40.4 | 42.0 | 42.1 | 50.9 | | | | Thai | 34.7 | 31.2 | 32.4 | 34.9 | 44.5 | 41.3 | 44.0 | 42.2 | 50.3 | \*\*for comparison purposes only. Model not released. ## Inference time In the below table, we compare the performance metrics of different quantization methods (SpinQuant and QAT \+ LoRA) with the BF16 baseline. The evaluation was done using the [ExecuTorch](https://github.com/pytorch/executorch) framework as the inference engine, with the ARM CPU as a backend using Android OnePlus 12 device. | Category | Decode (tokens/sec) | Time-to-first-token (sec) | Prefill (tokens/sec) | Model size (PTE file size in MB) | Memory size (RSS in MB) | | :---- | ----- | ----- | ----- | ----- | ----- | | 1B BF16 (baseline) | 19.2 | 1.0 | 60.3 | 2358 | 3,185 | | 1B SpinQuant | 50.2 (2.6x) | 0.3 (-76.9%) | 260.5 (4.3x) | 1083 (-54.1%) | 1,921 (-39.7%) | | 1B QLoRA | 45.8 (2.4x) | 0.3 (-76.0%) | 252.0 (4.2x) | 1127 (-52.2%) | 2,255 (-29.2%) | | 3B BF16 (baseline) | 7.6 | 3.0 | 21.2 | 6129 | 7,419 | | 3B SpinQuant | 19.7 (2.6x) | 0.7 (-76.4%) | 89.7 (4.2x) | 2435 (-60.3%) | 3,726 (-49.8%) | | 3B QLoRA | 18.5 (2.4x) | 0.7 (-76.1%) | 88.8 (4.2x) | 2529 (-58.7%) | 4,060 (-45.3%) | (\*) The performance measurement is done using an adb binary-based approach. (\*\*) It is measured on an Android OnePlus 12 device. (\*\*\*) Time-to-first-token (TTFT) is measured with prompt length=64 *Footnote:* - *Decode (tokens/second) is for how quickly it keeps generating. Higher is better.* - *Time-to-first-token (TTFT for shorthand) is for how fast it generates the first token for a given prompt. Lower is better.* - *Prefill is the inverse of TTFT (aka 1/TTFT) in tokens/second. Higher is better* - *Model size \- how big is the model, measured by, PTE file, a binary file format for ExecuTorch* - *RSS size \- Memory usage in resident set size (RSS)* ## Responsibility & Safety As part of our Responsible release approach, we followed a three-pronged strategy to managing trust & safety risks: 1. Enable developers to deploy helpful, safe and flexible experiences for their target audience and for the use cases supported by Llama 2. Protect developers against adversarial users aiming to exploit Llama capabilities to potentially cause harm 3. Provide protections for the community to help prevent the misuse of our models ### Responsible Deployment **Approach:** Llama is a foundational technology designed to be used in a variety of use cases. Examples on how Meta’s Llama models have been responsibly deployed can be found in our [Community Stories webpage](https://llama.meta.com/community-stories/). Our approach is to build the most helpful models, enabling the world to benefit from the technology power, by aligning our model safety for generic use cases and addressing a standard set of harms. Developers are then in the driver’s seat to tailor safety for their use cases, defining their own policies and deploying the models with the necessary safeguards in their Llama systems. Llama 3.2 was developed following the best practices outlined in our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide/). #### Llama 3.2 Instruct **Objective:** Our main objectives for conducting safety fine-tuning are to provide the research community with a valuable resource for studying the robustness of safety fine-tuning, as well as to offer developers a readily available, safe, and powerful model for various applications to reduce the developer workload to deploy safe AI systems. We implemented the same set of safety mitigations as in Llama 3, and you can learn more about these in the Llama 3 [paper](https://ai.meta.com/research/publications/the-llama-3-herd-of-models/). **Fine-Tuning Data:** We employ a multi-faceted approach to data collection, combining human-generated data from our vendors with synthetic data to mitigate potential safety risks. We’ve developed many large language model (LLM)-based classifiers that enable us to thoughtfully select high-quality prompts and responses, enhancing data quality control. **Refusals and Tone:** Building on the work we started with Llama 3, we put a great emphasis on model refusals to benign prompts as well as refusal tone. We included both borderline and adversarial prompts in our safety data strategy, and modified our safety data responses to follow tone guidelines. #### Llama 3.2 Systems **Safety as a System:** Large language models, including Llama 3.2, **are not designed to be deployed in isolation** but instead should be deployed as part of an overall AI system with additional safety guardrails as required. Developers are expected to deploy system safeguards when building agentic systems. Safeguards are key to achieve the right helpfulness-safety alignment as well as mitigating safety and security risks inherent to the system and any integration of the model or system with external tools. As part of our responsible release approach, we provide the community with [safeguards](https://llama.meta.com/trust-and-safety/) that developers should deploy with Llama models or other LLMs, including Llama Guard, Prompt Guard and Code Shield. All our [reference implementations](https://github.com/meta-llama/llama-agentic-system) demos contain these safeguards by default so developers can benefit from system-level safety out-of-the-box. ### New Capabilities and Use Cases **Technological Advancement:** Llama releases usually introduce new capabilities that require specific considerations in addition to the best practices that generally apply across all Generative AI use cases. For prior release capabilities also supported by Llama 3.2, see [Llama 3.1 Model Card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md), as the same considerations apply here as well. **Constrained Environments:** Llama 3.2 1B and 3B models are expected to be deployed in highly constrained environments, such as mobile devices. LLM Systems using smaller models will have a different alignment profile and safety/helpfulness tradeoff than more complex, larger systems. Developers should ensure the safety of their system meets the requirements of their use case. We recommend using lighter system safeguards for such use cases, like Llama Guard 3-1B or its mobile-optimized version. ### Evaluations **Scaled Evaluations:** We built dedicated, adversarial evaluation datasets and evaluated systems composed of Llama models and Purple Llama safeguards to filter input prompt and output response. It is important to evaluate applications in context, and we recommend building dedicated evaluation dataset for your use case. **Red Teaming:** We conducted recurring red teaming exercises with the goal of discovering risks via adversarial prompting and we used the learnings to improve our benchmarks and safety tuning datasets. We partnered early with subject-matter experts in critical risk areas to understand the nature of these real-world harms and how such models may lead to unintended harm for society. Based on these conversations, we derived a set of adversarial goals for the red team to attempt to achieve, such as extracting harmful information or reprogramming the model to act in a potentially harmful capacity. The red team consisted of experts in cybersecurity, adversarial machine learning, responsible AI, and integrity in addition to multilingual content specialists with background in integrity issues in specific geographic markets. ### Critical Risks In addition to our safety work above, we took extra care on measuring and/or mitigating the following critical risk areas: **1\. CBRNE (Chemical, Biological, Radiological, Nuclear, and Explosive Weapons):** Llama 3.2 1B and 3B models are smaller and less capable derivatives of Llama 3.1. For Llama 3.1 70B and 405B, to assess risks related to proliferation of chemical and biological weapons, we performed uplift testing designed to assess whether use of Llama 3.1 models could meaningfully increase the capabilities of malicious actors to plan or carry out attacks using these types of weapons and have determined that such testing also applies to the smaller 1B and 3B models. **2\. Child Safety:** Child Safety risk assessments were conducted using a team of experts, to assess the model’s capability to produce outputs that could result in Child Safety risks and inform on any necessary and appropriate risk mitigations via fine tuning. We leveraged those expert red teaming sessions to expand the coverage of our evaluation benchmarks through Llama 3 model development. For Llama 3, we conducted new in-depth sessions using objective based methodologies to assess the model risks along multiple attack vectors including the additional languages Llama 3 is trained on. We also partnered with content specialists to perform red teaming exercises assessing potentially violating content while taking account of market specific nuances or experiences. **3\. Cyber Attacks:** For Llama 3.1 405B, our cyber attack uplift study investigated whether LLMs can enhance human capabilities in hacking tasks, both in terms of skill level and speed. Our attack automation study focused on evaluating the capabilities of LLMs when used as autonomous agents in cyber offensive operations, specifically in the context of ransomware attacks. This evaluation was distinct from previous studies that considered LLMs as interactive assistants. The primary objective was to assess whether these models could effectively function as independent agents in executing complex cyber-attacks without human intervention. Because Llama 3.2’s 1B and 3B models are smaller and less capable models than Llama 3.1 405B, we broadly believe that the testing conducted for the 405B model also applies to Llama 3.2 models. ### Community **Industry Partnerships:** Generative AI safety requires expertise and tooling, and we believe in the strength of the open community to accelerate its progress. We are active members of open consortiums, including the AI Alliance, Partnership on AI and MLCommons, actively contributing to safety standardization and transparency. We encourage the community to adopt taxonomies like the MLCommons Proof of Concept evaluation to facilitate collaboration and transparency on safety and content evaluations. Our Purple Llama tools are open sourced for the community to use and widely distributed across ecosystem partners including cloud service providers. We encourage community contributions to our [Github repository](https://github.com/meta-llama/PurpleLlama). **Grants:** We also set up the [Llama Impact Grants](https://llama.meta.com/llama-impact-grants/) program to identify and support the most compelling applications of Meta’s Llama model for societal benefit across three categories: education, climate and open innovation. The 20 finalists from the hundreds of applications can be found [here](https://llama.meta.com/llama-impact-grants/#finalists). **Reporting:** Finally, we put in place a set of resources including an [output reporting mechanism](https://developers.facebook.com/llama_output_feedback) and [bug bounty program](https://www.facebook.com/whitehat) to continuously improve the Llama technology with the help of the community. ## Ethical Considerations and Limitations **Values:** The core values of Llama 3.2 are openness, inclusivity and helpfulness. It is meant to serve everyone, and to work for a wide range of use cases. It is thus designed to be accessible to people across many different backgrounds, experiences and perspectives. Llama 3.2 addresses users and their needs as they are, without insertion unnecessary judgment or normativity, while reflecting the understanding that even content that may appear problematic in some cases can serve valuable purposes in others. It respects the dignity and autonomy of all users, especially in terms of the values of free thought and expression that power innovation and progress. **Testing:** Llama 3.2 is a new technology, and like any new technology, there are risks associated with its use. Testing conducted to date has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 3.2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 3.2 models, developers should perform safety testing and tuning tailored to their specific applications of the model. Please refer to available resources including our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide), [Trust and Safety](https://llama.meta.com/trust-and-safety/) solutions, and other [resources](https://llama.meta.com/docs/get-started/) to learn more about responsible development.
{"language": ["en", "de", "fr", "it", "pt", "hi", "es", "th"], "library_name": "transformers", "license": "llama3.2", "pipeline_tag": "text-generation", "tags": ["facebook", "meta", "pytorch", "llama", "llama-3"], "extra_gated_prompt": "### LLAMA 3.2 COMMUNITY LICENSE AGREEMENT\n\nLlama 3.2 Version Release Date: September 25, 2024\n\n“Agreement” means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.\n\n“Documentation” means the specifications, manuals and documentation accompanying Llama 3.2 distributed by Meta at https://llama.meta.com/doc/overview.\n\n“Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.\n\n“Llama 3.2” means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at https://www.llama.com/llama-downloads.\n\n“Llama Materials” means, collectively, Meta’s proprietary Llama 3.2 and Documentation (and any portion thereof) made available under this Agreement.\n\n“Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). \n\nBy clicking “I Accept” below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.\n\n1. License Rights and Redistribution.\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. \nb. Redistribution and Use. \ni. If you distribute or make available the Llama Materials (or any derivative works thereof), or a product or service (including another AI model) that contains any of them, you shall (A) provide a copy of this Agreement with any such Llama Materials; and (B) prominently display “Built with Llama” on a related website, user interface, blogpost, about page, or product documentation. If you use the Llama Materials or any outputs or results of the Llama Materials to create, train, fine tune, or otherwise improve an AI model, which is distributed or made available, you shall also include “Llama” at the beginning of any such AI model name.\nii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. \niii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a “Notice” text file distributed as a part of such copies: “Llama 3.2 is licensed under the Llama 3.2 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.”\niv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://www.llama.com/llama3_2/use-policy), which is hereby incorporated by reference into this Agreement.\n \n2. Additional Commercial Terms. If, on the Llama 3.2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee’s affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\na. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you a license to use “Llama” (the “Mark”) solely as required to comply with the last sentence of Section 1.b.i. You will comply with Meta’s brand guidelines (currently accessible at https://about.meta.com/brand/resources/meta/company-brand/). All goodwill arising out of your use of the Mark will inure to the benefit of Meta.\nb. Subject to Meta’s ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.\nc. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 3.2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.\n6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. \n7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. \n### Llama 3.2 Acceptable Use Policy\nMeta is committed to promoting safe and fair use of its tools and features, including Llama 3.2. If you access or use Llama 3.2, you agree to this Acceptable Use Policy (“**Policy**”). The most recent copy of this policy can be found at [https://www.llama.com/llama3_2/use-policy](https://www.llama.com/llama3_2/use-policy).\n#### Prohibited Uses\nWe want everyone to use Llama 3.2 safely and responsibly. You agree you will not use, or allow others to use, Llama 3.2 to:\n1. Violate the law or others’ rights, including to:\n 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n 1. Violence or terrorism\n 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n 3. Human trafficking, exploitation, and sexual violence\n 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n 5. Sexual solicitation\n 6. Any other criminal activity\n 1. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n 2. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n 3. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n 4. Collect, process, disclose, generate, or infer private or sensitive information about individuals, including information about individuals’ identity, health, or demographic information, unless you have obtained the right to do so in accordance with applicable law\n 5. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama Materials\n 6. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n 7. Engage in any action, or facilitate any action, to intentionally circumvent or remove usage restrictions or other safety measures, or to enable functionality disabled by Meta \n2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 3.2 related to the following:\n 8. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State or to the U.S. Biological Weapons Anti-Terrorism Act of 1989 or the Chemical Weapons Convention Implementation Act of 1997\n 9. Guns and illegal weapons (including weapon development)\n 10. Illegal drugs and regulated/controlled substances\n 11. Operation of critical infrastructure, transportation technologies, or heavy machinery\n 12. Self-harm or harm to others, including suicide, cutting, and eating disorders\n 13. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 3.2 related to the following:\n 14. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n 15. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n 16. Generating, promoting, or further distributing spam\n 17. Impersonating another individual without consent, authorization, or legal right\n 18. Representing that the use of Llama 3.2 or outputs are human-generated\n 19. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement \n4. Fail to appropriately disclose to end users any known dangers of your AI system 5. Interact with third party tools, models, or software designed to generate unlawful content or engage in unlawful or harmful conduct and/or represent that the outputs of such tools, models, or software are associated with Meta or Llama 3.2\n\nWith respect to any multimodal models included in Llama 3.2, the rights granted under Section 1(a) of the Llama 3.2 Community License Agreement are not being granted to you if you are an individual domiciled in, or a company with a principal place of business in, the European Union. This restriction does not apply to end users of a product or service that incorporates any such multimodal models.\n\nPlease report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:\n\n* Reporting issues with the model: [https://github.com/meta-llama/llama-models/issues](https://l.workplace.com/l.php?u=https%3A%2F%2Fgithub.com%2Fmeta-llama%2Fllama-models%2Fissues&h=AT0qV8W9BFT6NwihiOHRuKYQM_UnkzN_NmHMy91OT55gkLpgi4kQupHUl0ssR4dQsIQ8n3tfd0vtkobvsEvt1l4Ic6GXI2EeuHV8N08OG2WnbAmm0FL4ObkazC6G_256vN0lN9DsykCvCqGZ)\n* Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)\n* Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)\n* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama 3.2: [email protected]", "extra_gated_fields": {"First Name": "text", "Last Name": "text", "Date of birth": "date_picker", "Country": "country", "Affiliation": "text", "Job title": {"type": "select", "options": ["Student", "Research Graduate", "AI researcher", "AI developer/engineer", "Reporter", "Other"]}, "geo": "ip_location", "By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy": "checkbox"}, "extra_gated_description": "The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).", "extra_gated_button_content": "Submit"}
task
[ "SUMMARIZATION" ]
41,411
TroyDoesAI/Contextual-Llama-6.7b
TroyDoesAI
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "license:cc-by-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-05-02T21:07:33Z
2024-05-03T03:14:55+00:00
7
0
--- license: cc-by-4.0 --- We focus on training models efficiently, not race to be the biggest, but big things come in optimizing small packages. # Mermaid-Llama-6.7B Introducing Mermaid-LLama-6.7B, a robust language model designed for Python code understanding and crafting captivating story flow maps. Down to 6 billion parameter to show we dont need the bloat. I wish to specialize in packing models on Edge Devices. Open For Hire See my links to my Linkedin for more. ![MermaidLlama GIF](Mermaid_ShowCase/MermaidLlama.webp) ## Key Features 1. **Code Understanding:** - Masters Python intricacies with finesse. - Generates clear and accurate Mermaid Diagram Flow Charts. - Ideal for developers seeking visual representations of their code logic. 2. **Storytelling Capabilities:** - Converts narrative inputs into captivating Mermaid Diagrams. - Maps character interactions, plot developments, and narrative arcs. 3. **Unmatched Performance:** - Surpasses GPT-4 in generating well-organized Mermaid Diagrams. 4. **Training Insights:** - Trained on a diverse dataset, including 800 unique, hand-curated Mermaid Graph examples utilizing 478 complete Python programs. - Exhibits emergent properties in story-to-flow map translations and step-by-step instruction flow maps. ## Collaboration Interested in enhancing Mermaid's capabilities? Contact [email protected] for collaboration opportunities. ## Example Use Cases - **Retrieval-Augmented Generation (RAG):** Utilize Mermaid-LLama to create condensed knowledge graphs. - This model excels in generating flow diagrams that enhance the retrieval process. - These knowledge graphs are stored in a vector database, which allows for quick and efficient retrieval of contextually relevant information. - When a query is received, the system retrieves a pertinent knowledge graph, appending it as context to the model. - This enriched context enables Mermaid-LLama to deliver more accurate and nuanced responses. - This approach is particularly beneficial in applications requiring deep, context-aware interactions, such as sophisticated Q&A systems, dynamic data analysis, and complex decision-making tasks. - **Code Documentation:** Automatic visual flow charts from Python code. - **Storyboarding:** Visually appealing diagrams for storytelling. - **Project Planning:** Visual project flow maps for effective team communication. - **Learning Python:** Helps students visually understand Python code structures. - **Game Design:** Visualizing game storylines for coherent narrative structure. ## Proof of Concept Stay tuned for the release of the VSCode Extension that displays the Live Flow Map every time a user stops typing for more than 10 seconds. ## Training Specifications - **LoRA Rank:** 2048 - **LoRA Alpha:** 4096 - **Batch Size:** 1 - **Micro Batch Size:** 1 - **Cutoff Length:** 4096 - **Save every n steps:** 1000 - **Epochs:** 3 - **Learning Rate:** 1e-6 - **LR Scheduler:** Cosine **Target Modules:** - Enable q_proj - Enable v_proj - Enable k_proj - Enable o_proj - Enable gate_proj - Enable down_proj - Enable up_proj ## Getting Started Start by downloading one of my models. ![0 TroyDoesAI GIF](Mermaid_ShowCase/0_TroyDoesAI.gif) Load the model. ![1 Load Model in 4-bit Show Example Use GIF](Mermaid_ShowCase/1_LoadModel_in_4bit_Show_Example_Use.gif) Use my prompt template to generate a Mermaid code block, which can be viewed in the Mermaid Live Editor or using the Mermaid CLI tool. ![2 Loaded Model in Full Precision 16-bit Show Inference and Mermaid Live Editor GIF](Mermaid_ShowCase/2_Loaded_Model_in_Full_Precision_16bit_Show_Inference_and_Mermaid_Live_editor.gif) Here we open the VLLM GUI Program while still running in Vram the Mermaid-Llama-8B to compare the flow diagram to the actual program and show the lightweight capabilites of small models on consumer hardware. ![3 Open The Program VLLM Program With Full Precision Mermaid-Llama-8B Running to Evaluate Flow Map GIF](Mermaid_ShowCase/3_Open_The_Program_VLLM_Program_With_Full_Precision_Mermaid-Llama-8B-Running_to_evaluate_flow_map.gif) ## More on my VLLM Class and inference GUI : https://github.com/Troys-Code/VLLM ![Python RtdBsaz8gy GIF](Mermaid_ShowCase/python_RtdBsaz8gy.gif) --- Note: This model should be treated as an Auto-Complete Model, Do not try talking to it in chat you are gonna get garbage, those layers have been pruned and replaced, that is all you will hear of my secret sauce on training on small < 1000 entry datasets.
null
Non_BioNLP
We focus on training models efficiently, not race to be the biggest, but big things come in optimizing small packages. # Mermaid-Llama-6.7B Introducing Mermaid-LLama-6.7B, a robust language model designed for Python code understanding and crafting captivating story flow maps. Down to 6 billion parameter to show we dont need the bloat. I wish to specialize in packing models on Edge Devices. Open For Hire See my links to my Linkedin for more. ![MermaidLlama GIF](Mermaid_ShowCase/MermaidLlama.webp) ## Key Features 1. **Code Understanding:** - Masters Python intricacies with finesse. - Generates clear and accurate Mermaid Diagram Flow Charts. - Ideal for developers seeking visual representations of their code logic. 2. **Storytelling Capabilities:** - Converts narrative inputs into captivating Mermaid Diagrams. - Maps character interactions, plot developments, and narrative arcs. 3. **Unmatched Performance:** - Surpasses GPT-4 in generating well-organized Mermaid Diagrams. 4. **Training Insights:** - Trained on a diverse dataset, including 800 unique, hand-curated Mermaid Graph examples utilizing 478 complete Python programs. - Exhibits emergent properties in story-to-flow map translations and step-by-step instruction flow maps. ## Collaboration Interested in enhancing Mermaid's capabilities? Contact [email protected] for collaboration opportunities. ## Example Use Cases - **Retrieval-Augmented Generation (RAG):** Utilize Mermaid-LLama to create condensed knowledge graphs. - This model excels in generating flow diagrams that enhance the retrieval process. - These knowledge graphs are stored in a vector database, which allows for quick and efficient retrieval of contextually relevant information. - When a query is received, the system retrieves a pertinent knowledge graph, appending it as context to the model. - This enriched context enables Mermaid-LLama to deliver more accurate and nuanced responses. - This approach is particularly beneficial in applications requiring deep, context-aware interactions, such as sophisticated Q&A systems, dynamic data analysis, and complex decision-making tasks. - **Code Documentation:** Automatic visual flow charts from Python code. - **Storyboarding:** Visually appealing diagrams for storytelling. - **Project Planning:** Visual project flow maps for effective team communication. - **Learning Python:** Helps students visually understand Python code structures. - **Game Design:** Visualizing game storylines for coherent narrative structure. ## Proof of Concept Stay tuned for the release of the VSCode Extension that displays the Live Flow Map every time a user stops typing for more than 10 seconds. ## Training Specifications - **LoRA Rank:** 2048 - **LoRA Alpha:** 4096 - **Batch Size:** 1 - **Micro Batch Size:** 1 - **Cutoff Length:** 4096 - **Save every n steps:** 1000 - **Epochs:** 3 - **Learning Rate:** 1e-6 - **LR Scheduler:** Cosine **Target Modules:** - Enable q_proj - Enable v_proj - Enable k_proj - Enable o_proj - Enable gate_proj - Enable down_proj - Enable up_proj ## Getting Started Start by downloading one of my models. ![0 TroyDoesAI GIF](Mermaid_ShowCase/0_TroyDoesAI.gif) Load the model. ![1 Load Model in 4-bit Show Example Use GIF](Mermaid_ShowCase/1_LoadModel_in_4bit_Show_Example_Use.gif) Use my prompt template to generate a Mermaid code block, which can be viewed in the Mermaid Live Editor or using the Mermaid CLI tool. ![2 Loaded Model in Full Precision 16-bit Show Inference and Mermaid Live Editor GIF](Mermaid_ShowCase/2_Loaded_Model_in_Full_Precision_16bit_Show_Inference_and_Mermaid_Live_editor.gif) Here we open the VLLM GUI Program while still running in Vram the Mermaid-Llama-8B to compare the flow diagram to the actual program and show the lightweight capabilites of small models on consumer hardware. ![3 Open The Program VLLM Program With Full Precision Mermaid-Llama-8B Running to Evaluate Flow Map GIF](Mermaid_ShowCase/3_Open_The_Program_VLLM_Program_With_Full_Precision_Mermaid-Llama-8B-Running_to_evaluate_flow_map.gif) ## More on my VLLM Class and inference GUI : https://github.com/Troys-Code/VLLM ![Python RtdBsaz8gy GIF](Mermaid_ShowCase/python_RtdBsaz8gy.gif) --- Note: This model should be treated as an Auto-Complete Model, Do not try talking to it in chat you are gonna get garbage, those layers have been pruned and replaced, that is all you will hear of my secret sauce on training on small < 1000 entry datasets.
{"license": "cc-by-4.0"}
task
[ "TRANSLATION" ]
41,412
erichennings/my_model
erichennings
null
[ "region:us" ]
2024-10-21T01:35:18Z
2024-10-21T02:18:06+00:00
0
0
--- {} --- # Model Card ## Model Description This is not a real model - just a test publishing evals of TinyLlama/TinyLlama-1.1B-Chat-v1.0 [link](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0) ## Evaluation Results ### Hellaswag * Hellaswag measures ability to complete sentences * [website](https://rowanzellers.com/hellaswag/) * [eleuther task](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hellaswag/README.md) | Metric | Value | Stderr | | ------ | ----- | ------ | |acc |0.2872 |0.0045 | |acc_norm|0.3082 |0.0046 | ### GLUE * GLUE is a multi-measure llm eval with a number of (subtasks)[https://gluebenchmark.com/tasks] * [eleuther task](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/lm_eval/tasks/glue) | Tasks | Metric |Value |Stderr| |-------------|--------|------ |----- | |cola |mcc |0.0000 |0.0000| |mnli |acc |0.3484 |0.0048| |mnli_mismatch|acc |0.3463 |0.0048| |mrpc |acc |0.6838 |0.0230| | |f1 |0.8122 |0.0163| |qnli |acc |0.4959 |0.0068| |qqp |acc |0.3678 |0.0024| | |f1 |0.5373 |0.0026| |rte |acc |0.5271 |0.0301| |sst2 |acc |0.5092 |0.0169| |wnli |acc |0.4225 |0.0590| Subtask breakdown, courtesy of ChatGPT: # GLUE Benchmark Breakdown GLUE (General Language Understanding Evaluation) is a collection of tasks designed to evaluate natural language understanding (NLU) models. The benchmark includes various subtasks that test different aspects of language comprehension. ## Subtasks ### 1. **CoLA (Corpus of Linguistic Acceptability)** - **Task**: Sentence acceptability judgment (grammaticality). - **Goal**: Determine if a sentence is grammatically acceptable. - **Input**: A single sentence. - **Output**: Binary classification (grammatically correct or not). ### 2. **SST-2 (Stanford Sentiment Treebank)** - **Task**: Sentiment analysis. - **Goal**: Classify a sentence's sentiment as positive or negative. - **Input**: A single sentence. - **Output**: Binary classification (positive or negative sentiment). ### 3. **MRPC (Microsoft Research Paraphrase Corpus)** - **Task**: Paraphrase detection. - **Goal**: Determine if two sentences are semantically equivalent. - **Input**: Two sentences. - **Output**: Binary classification (paraphrase or not). ### 4. **STS-B (Semantic Textual Similarity Benchmark)** - **Task**: Sentence similarity assessment. - **Goal**: Assess the degree of similarity between two sentences. - **Input**: Two sentences. - **Output**: Real-valued score (1 to 5) indicating similarity. ### 5. **QQP (Quora Question Pairs)** - **Task**: Question pair similarity. - **Goal**: Determine whether two questions are semantically equivalent. - **Input**: Two questions. - **Output**: Binary classification (equivalent or not). ### 6. **MNLI (Multi-Genre Natural Language Inference)** - **Task**: Textual entailment. - **Goal**: Determine the relationship between a premise and a hypothesis (entailment, contradiction, or neutral). - **Input**: A premise and a hypothesis. - **Output**: Three-way classification (entailment, contradiction, or neutral). - **EH NOTE**: Genre here refers to type of language source (telephone call, fiction writing, etc) - **Variants**: - **Matched**: In-domain evaluation (test data from the same genres as training data). - **Mismatched**: Out-of-domain evaluation (test data from different genres than training data). ### 7. **QNLI (Question Natural Language Inference)** - **Task**: Question answering in inference format. - **Goal**: Determine if a sentence answers a question. - **Input**: A question and a sentence. - **Output**: Binary classification (entailment or not entailment). ### 8. **RTE (Recognizing Textual Entailment)** - **Task**: Textual entailment. - **Goal**: Determine if a premise entails a hypothesis. - **Input**: A premise and a hypothesis. - **Output**: Binary classification (entailment or not). ### 9. **WNLI (Winograd Natural Language Inference)** - **Task**: Coreference resolution. - **Goal**: Determine whether a pronoun in a sentence refers to a given noun. - **Input**: A sentence with a pronoun and a candidate antecedent. - **Output**: Binary classification (correct or incorrect coreference). ## Summary of Subtasks: - **CoLA**: Grammaticality judgment (binary classification). - **SST-2**: Sentiment analysis (binary classification). - **MRPC**: Paraphrase detection (binary classification). - **STS-B**: Sentence similarity (regression score). - **QQP**: Question pair similarity (binary classification). - **MNLI**: Textual entailment (three-way classification). - **Matched**: In-domain data. - **Mismatched**: Out-of-domain data. - **QNLI**: Question answering entailment (binary classification). - **RTE**: Textual entailment (binary classification). - **WNLI**: Coreference resolution (binary classification). ## How to Use ... don't use this, it's just a test ...
null
Non_BioNLP
# Model Card ## Model Description This is not a real model - just a test publishing evals of TinyLlama/TinyLlama-1.1B-Chat-v1.0 [link](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0) ## Evaluation Results ### Hellaswag * Hellaswag measures ability to complete sentences * [website](https://rowanzellers.com/hellaswag/) * [eleuther task](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hellaswag/README.md) | Metric | Value | Stderr | | ------ | ----- | ------ | |acc |0.2872 |0.0045 | |acc_norm|0.3082 |0.0046 | ### GLUE * GLUE is a multi-measure llm eval with a number of (subtasks)[https://gluebenchmark.com/tasks] * [eleuther task](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/lm_eval/tasks/glue) | Tasks | Metric |Value |Stderr| |-------------|--------|------ |----- | |cola |mcc |0.0000 |0.0000| |mnli |acc |0.3484 |0.0048| |mnli_mismatch|acc |0.3463 |0.0048| |mrpc |acc |0.6838 |0.0230| | |f1 |0.8122 |0.0163| |qnli |acc |0.4959 |0.0068| |qqp |acc |0.3678 |0.0024| | |f1 |0.5373 |0.0026| |rte |acc |0.5271 |0.0301| |sst2 |acc |0.5092 |0.0169| |wnli |acc |0.4225 |0.0590| Subtask breakdown, courtesy of ChatGPT: # GLUE Benchmark Breakdown GLUE (General Language Understanding Evaluation) is a collection of tasks designed to evaluate natural language understanding (NLU) models. The benchmark includes various subtasks that test different aspects of language comprehension. ## Subtasks ### 1. **CoLA (Corpus of Linguistic Acceptability)** - **Task**: Sentence acceptability judgment (grammaticality). - **Goal**: Determine if a sentence is grammatically acceptable. - **Input**: A single sentence. - **Output**: Binary classification (grammatically correct or not). ### 2. **SST-2 (Stanford Sentiment Treebank)** - **Task**: Sentiment analysis. - **Goal**: Classify a sentence's sentiment as positive or negative. - **Input**: A single sentence. - **Output**: Binary classification (positive or negative sentiment). ### 3. **MRPC (Microsoft Research Paraphrase Corpus)** - **Task**: Paraphrase detection. - **Goal**: Determine if two sentences are semantically equivalent. - **Input**: Two sentences. - **Output**: Binary classification (paraphrase or not). ### 4. **STS-B (Semantic Textual Similarity Benchmark)** - **Task**: Sentence similarity assessment. - **Goal**: Assess the degree of similarity between two sentences. - **Input**: Two sentences. - **Output**: Real-valued score (1 to 5) indicating similarity. ### 5. **QQP (Quora Question Pairs)** - **Task**: Question pair similarity. - **Goal**: Determine whether two questions are semantically equivalent. - **Input**: Two questions. - **Output**: Binary classification (equivalent or not). ### 6. **MNLI (Multi-Genre Natural Language Inference)** - **Task**: Textual entailment. - **Goal**: Determine the relationship between a premise and a hypothesis (entailment, contradiction, or neutral). - **Input**: A premise and a hypothesis. - **Output**: Three-way classification (entailment, contradiction, or neutral). - **EH NOTE**: Genre here refers to type of language source (telephone call, fiction writing, etc) - **Variants**: - **Matched**: In-domain evaluation (test data from the same genres as training data). - **Mismatched**: Out-of-domain evaluation (test data from different genres than training data). ### 7. **QNLI (Question Natural Language Inference)** - **Task**: Question answering in inference format. - **Goal**: Determine if a sentence answers a question. - **Input**: A question and a sentence. - **Output**: Binary classification (entailment or not entailment). ### 8. **RTE (Recognizing Textual Entailment)** - **Task**: Textual entailment. - **Goal**: Determine if a premise entails a hypothesis. - **Input**: A premise and a hypothesis. - **Output**: Binary classification (entailment or not). ### 9. **WNLI (Winograd Natural Language Inference)** - **Task**: Coreference resolution. - **Goal**: Determine whether a pronoun in a sentence refers to a given noun. - **Input**: A sentence with a pronoun and a candidate antecedent. - **Output**: Binary classification (correct or incorrect coreference). ## Summary of Subtasks: - **CoLA**: Grammaticality judgment (binary classification). - **SST-2**: Sentiment analysis (binary classification). - **MRPC**: Paraphrase detection (binary classification). - **STS-B**: Sentence similarity (regression score). - **QQP**: Question pair similarity (binary classification). - **MNLI**: Textual entailment (three-way classification). - **Matched**: In-domain data. - **Mismatched**: Out-of-domain data. - **QNLI**: Question answering entailment (binary classification). - **RTE**: Textual entailment (binary classification). - **WNLI**: Coreference resolution (binary classification). ## How to Use ... don't use this, it's just a test ...
{}
task
[ "COREFERENCE_RESOLUTION", "QUESTION_ANSWERING", "TEXTUAL_ENTAILMENT" ]
41,413
aammari/setfit-zero-shot-classification-pbsp-p3-sev
aammari
text-classification
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-07-10T04:28:56Z
2023-07-10T04:31:17+00:00
15
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # setfit-zero-shot-classification-pbsp-p3-sev This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("setfit-zero-shot-classification-pbsp-p3-sev") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# setfit-zero-shot-classification-pbsp-p3-sev This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("setfit-zero-shot-classification-pbsp-p3-sev") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
41,414
FuturisticVibes/c4ai-command-r-plus-08-2024-4.0bpw-h8-exl2
FuturisticVibes
text-generation
[ "transformers", "safetensors", "cohere", "text-generation", "conversational", "en", "fr", "de", "es", "it", "pt", "ja", "ko", "zh", "ar", "license:cc-by-nc-4.0", "autotrain_compatible", "text-generation-inference", "4-bit", "exl2", "region:us" ]
2024-09-07T23:06:08Z
2024-09-07T23:16:18+00:00
15
0
--- language: - en - fr - de - es - it - pt - ja - ko - zh - ar library_name: transformers license: cc-by-nc-4.0 inference: false --- I have no idea what I’m doing… if this causes the apocalypse someone please let me know. c4ai-command-r-plus-08-2024 4.0bpw h8 EXL2 Includes [measurement.json](https://huggingface.co/FuturisticVibes/c4ai-command-r-plus-08-2024-4.0bpw-h8-exl2/tree/measurement) file for further quantization Original Model: https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024 # Original Model Card # Model Card for C4AI Command R+ 08-2024 ## Model Summary C4AI Command R+ 08-2024 is an open weights research release of a 104B billion parameter model with highly advanced capabilities, this includes Retrieval Augmented Generation (RAG) and tool use to automate sophisticated tasks. The tool use in this model generation enables multi-step tool use which allows the model to combine multiple tools over multiple steps to accomplish difficult tasks. C4AI Command R+ 08-2024 is a multilingual model trained on 23 languages and evaluated in 10 languages. Command R+ 08-2024 is optimized for a variety of use cases including reasoning, summarization, and question answering. C4AI Command R+ 08-2024 is part of a family of open weight releases from Cohere For AI and Cohere. Our smaller companion model is [C4AI Command R 08-2024](https://huggingface.co/CohereForAI/c4ai-command-r-08-2024). - Point of Contact: Cohere For AI: [cohere.for.ai](https://cohere.for.ai/) - License: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license), requires also adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy) - Model: c4ai-command-r-plus-08-2024 - Model Size: 104 billion parameters - Context length: 128K **Try C4AI Command R+** You can try out C4AI Command R+ before downloading the weights in our hosted [Hugging Face Space](https://huggingface.co/spaces/CohereForAI/c4ai-command?model=command-r-plus-08-2024). **Usage** Please use `transformers` version 4.39.1 or higher ```python # pip install 'transformers>=4.39.1' from transformers import AutoTokenizer, AutoModelForCausalLM model_id = "CohereForAI/c4ai-command-r-plus-08-2024" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) # Format message with the command-r-plus-08-2024 chat template messages = [{"role": "user", "content": "Hello, how are you?"}] input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> gen_tokens = model.generate( input_ids, max_new_tokens=100, do_sample=True, temperature=0.3, ) gen_text = tokenizer.decode(gen_tokens[0]) print(gen_text) ``` ## Model Details **Input**: Models input text only. **Output**: Models generate text only. **Model Architecture**: This is an auto-regressive language model that uses an optimized transformer architecture. After pretraining, this model uses supervised fine-tuning (SFT) and preference training to align model behavior to human preferences for helpfulness and safety. We use grouped query attention (GQA) to improve inference speed. **Languages covered**: The model has been trained on 23 languages (English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Arabic, Simplified Chinese, Russian, Polish, Turkish, Vietnamese, Dutch, Czech, Indonesian, Ukrainian, Romanian, Greek, Hindi, Hebrew, and Persian) and evaluated on 10 languages (English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Arabic, Simplified Chinese). **Context length**: Command R+ 08-2024 supports a context length of 128K. ### Tool use & Agent capabilities: Command R+ 08-2024 has been specifically trained with conversational tool use capabilities. These have been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template will likely reduce performance, but we encourage experimentation. Command R+ 08-2024’s tool use functionality takes a conversation as input (with an optional user-system preamble), along with a list of available tools. The model will then generate a json-formatted list of actions to execute on a subset of those tools. Command R+ 08-2024 may use one of its supplied tools more than once. The model has been trained to recognise a special `directly_answer` tool, which it uses to indicate that it doesn’t want to use any of its other tools. The ability to abstain from calling a specific tool can be useful in a range of situations, such as greeting a user, or asking clarifying questions. We recommend including the `directly_answer` tool, but it can be removed or renamed if required. Comprehensive documentation for working with Command R+ 08-2024's tool use prompt template can be found [here](https://docs.cohere.com/docs/prompting-command-r). Command R+ 08-2024 also supports Hugging Face's [tool use API](https://huggingface.co/docs/transformers/main/en/chat_templating#advanced-tool-use--function-calling). The code snippets below show minimal working examples on how to render a prompt. <details> <summary><b>Usage: Rendering Tool Use Prompts [CLICK TO EXPAND]</b> </summary> ```python from transformers import AutoTokenizer model_id = "CohereForAI/c4ai-command-r-plus-08-2024" tokenizer = AutoTokenizer.from_pretrained(model_id) # define conversation input: conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] # Define tools available for the model to use: tools = [ { "name": "internet_search", "description": "Returns a list of relevant document snippets for a textual query retrieved from the internet", "parameter_definitions": { "query": { "description": "Query to search the internet with", "type": 'str', "required": True } } }, { 'name': "directly_answer", "description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history", 'parameter_definitions': {} } ] # render the tool use prompt as a string: tool_use_prompt = tokenizer.apply_tool_use_template( conversation, tools=tools, tokenize=False, add_generation_prompt=True, ) print(tool_use_prompt) ``` </details> <details> <summary><b>Usage: Rendering prompts with the Tool Use API [CLICK TO EXPAND]</b> </summary> ```python from transformers import AutoTokenizer model_id = "CohereForAI/c4ai-command-r-plus-08-2024" tokenizer = AutoTokenizer.from_pretrained(model_id) # define conversation input: conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] # Define tools available for the model to use # Type hints and docstrings from Python functions are automatically extracted def internet_search(query: str): """ Returns a list of relevant document snippets for a textual query retrieved from the internet Args: query: Query to search the internet with """ pass def directly_answer(): """ Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history """ pass tools = [internet_search, directly_answer] # render the tool use prompt as a string: tool_use_prompt = tokenizer.apply_chat_template( conversation, tools=tools, tokenize=False, add_generation_prompt=True, ) print(tool_use_prompt) ``` </details> <details> <summary><b>Example Rendered Tool Use Prompt [CLICK TO EXPAND]</b></summary> ```` <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. # System Preamble ## Basic Rules You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. # User Preamble ## Task and Context You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. ## Style Guide Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling. ## Available Tools Here is a list of tools that you have available to you: ```python def internet_search(query: str) -> List[Dict]: """Returns a list of relevant document snippets for a textual query retrieved from the internet Args: query (str): Query to search the internet with """ pass ``` ```python def directly_answer() -> List[Dict]: """Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history """ pass ```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example: ```json [ { "tool_name": title of the tool in the specification, "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters } ]```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> ```` </details> <details> <summary><b>Example Rendered Tool Use Completion [CLICK TO EXPAND]</b></summary> ```` Action: ```json [ { "tool_name": "internet_search", "parameters": { "query": "biggest penguin in the world" } } ] ``` ```` </details> ### Grounded Generation and RAG Capabilities: Command R+ 08-2024 has been specifically trained with grounded generation capabilities. This means that it can generate responses based on a list of supplied document snippets, and it will include grounding spans (citations) in its response indicating the source of the information. This can be used to enable behaviors such as grounded summarization and the final step of Retrieval Augmented Generation (RAG). This behavior has been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template may reduce performance, but we encourage experimentation. Command R+ 08-2024’s grounded generation behavior takes a conversation as input (with an optional user-supplied system preamble, indicating task, context and desired output style), along with a list of retrieved document snippets. The document snippets should be chunks, rather than long documents, typically around 100-400 words per chunk. Document snippets consist of key-value pairs. The keys should be short descriptive strings, the values can be text or semi-structured. By default, Command R+ 08-2024 will generate grounded responses by first predicting which documents are relevant, then predicting which ones it will cite, then generating an answer. Finally, it will then insert grounding spans into the answer. See below for an example. This is referred to as `accurate` grounded generation. The model is trained with a number of other answering modes, which can be selected by prompt changes. A `fast` citation mode is supported in the tokenizer, which will directly generate an answer with grounding spans in it, without first writing the answer out in full. This sacrifices some grounding accuracy in favor of generating fewer tokens. Comprehensive documentation for working with Command R+ 08-2024's grounded generation prompt template can be found [here](https://docs.cohere.com/docs/prompting-command-r). The code snippet below shows a minimal working example on how to render a prompt. <details> <summary> <b>Usage: Rendering Grounded Generation prompts [CLICK TO EXPAND]</b> </summary> ````python from transformers import AutoTokenizer model_id = "CohereForAI/c4ai-command-r-plus-08-2024" tokenizer = AutoTokenizer.from_pretrained(model_id) # define conversation input: conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] # define documents to ground on: documents = [ { "title": "Tall penguins", "text": "Emperor penguins are the tallest growing up to 122 cm in height." }, { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."} ] # render the tool use prompt as a string: grounded_generation_prompt = tokenizer.apply_grounded_generation_template( conversation, documents=documents, citation_mode="accurate", # or "fast" tokenize=False, add_generation_prompt=True, ) print(grounded_generation_prompt) ```` </details> <details> <summary><b>Example Rendered Grounded Generation Prompt [CLICK TO EXPAND]</b></summary> ```` <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. # System Preamble ## Basic Rules You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. # User Preamble ## Task and Context You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. ## Style Guide Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results> Document: 0 title: Tall penguins text: Emperor penguins are the tallest growing up to 122 cm in height. Document: 1 title: Penguin habitats text: Emperor penguins only live in Antarctica. </results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line. Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'. Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'. Thirdly, Write 'Answer:' followed by a response to the user's last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup. Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> ```` </details> <details> <summary><b>Example Rendered Grounded Generation Completion [CLICK TO EXPAND]</b></summary> ```` Relevant Documents: 0,1 Cited Documents: 0,1 Answer: The Emperor Penguin is the tallest or biggest penguin in the world. It is a bird that lives only in Antarctica and grows to a height of around 122 centimetres. Grounded answer: The <co: 0>Emperor Penguin</co: 0> is the <co: 0>tallest</co: 0> or biggest penguin in the world. It is a bird that <co: 1>lives only in Antarctica</co: 1> and <co: 0>grows to a height of around 122 centimetres.</co: 0> ```` </details> ### Code Capabilities: Command R+ 08-2024 has been optimized to interact with your code, by requesting code snippets, code explanations, or code rewrites. It might not perform well out-of-the-box for pure code completion. For better performance, we also recommend using a low temperature (and even greedy decoding) for code-generation related instructions. ### Model Card Contact For errors or additional questions about details in this model card, contact [[email protected]](mailto:[email protected]). ### Terms of Use: We hope that the release of this model will make community-based research efforts more accessible, by releasing the weights of a highly performant 104 billion parameter model to researchers all over the world. This model is governed by a [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license) License with an acceptable use addendum, and also requires adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy). ### Try Chat: You can try Command R+ 08-2024 chat in the playground [here](https://dashboard.cohere.com/playground/chat). You can also use it in our dedicated Hugging Face Space [here](https://huggingface.co/spaces/CohereForAI/c4ai-command?model=command-r-plus-08-2024).
null
Non_BioNLP
I have no idea what I’m doing… if this causes the apocalypse someone please let me know. c4ai-command-r-plus-08-2024 4.0bpw h8 EXL2 Includes [measurement.json](https://huggingface.co/FuturisticVibes/c4ai-command-r-plus-08-2024-4.0bpw-h8-exl2/tree/measurement) file for further quantization Original Model: https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024 # Original Model Card # Model Card for C4AI Command R+ 08-2024 ## Model Summary C4AI Command R+ 08-2024 is an open weights research release of a 104B billion parameter model with highly advanced capabilities, this includes Retrieval Augmented Generation (RAG) and tool use to automate sophisticated tasks. The tool use in this model generation enables multi-step tool use which allows the model to combine multiple tools over multiple steps to accomplish difficult tasks. C4AI Command R+ 08-2024 is a multilingual model trained on 23 languages and evaluated in 10 languages. Command R+ 08-2024 is optimized for a variety of use cases including reasoning, summarization, and question answering. C4AI Command R+ 08-2024 is part of a family of open weight releases from Cohere For AI and Cohere. Our smaller companion model is [C4AI Command R 08-2024](https://huggingface.co/CohereForAI/c4ai-command-r-08-2024). - Point of Contact: Cohere For AI: [cohere.for.ai](https://cohere.for.ai/) - License: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license), requires also adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy) - Model: c4ai-command-r-plus-08-2024 - Model Size: 104 billion parameters - Context length: 128K **Try C4AI Command R+** You can try out C4AI Command R+ before downloading the weights in our hosted [Hugging Face Space](https://huggingface.co/spaces/CohereForAI/c4ai-command?model=command-r-plus-08-2024). **Usage** Please use `transformers` version 4.39.1 or higher ```python # pip install 'transformers>=4.39.1' from transformers import AutoTokenizer, AutoModelForCausalLM model_id = "CohereForAI/c4ai-command-r-plus-08-2024" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) # Format message with the command-r-plus-08-2024 chat template messages = [{"role": "user", "content": "Hello, how are you?"}] input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> gen_tokens = model.generate( input_ids, max_new_tokens=100, do_sample=True, temperature=0.3, ) gen_text = tokenizer.decode(gen_tokens[0]) print(gen_text) ``` ## Model Details **Input**: Models input text only. **Output**: Models generate text only. **Model Architecture**: This is an auto-regressive language model that uses an optimized transformer architecture. After pretraining, this model uses supervised fine-tuning (SFT) and preference training to align model behavior to human preferences for helpfulness and safety. We use grouped query attention (GQA) to improve inference speed. **Languages covered**: The model has been trained on 23 languages (English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Arabic, Simplified Chinese, Russian, Polish, Turkish, Vietnamese, Dutch, Czech, Indonesian, Ukrainian, Romanian, Greek, Hindi, Hebrew, and Persian) and evaluated on 10 languages (English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Arabic, Simplified Chinese). **Context length**: Command R+ 08-2024 supports a context length of 128K. ### Tool use & Agent capabilities: Command R+ 08-2024 has been specifically trained with conversational tool use capabilities. These have been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template will likely reduce performance, but we encourage experimentation. Command R+ 08-2024’s tool use functionality takes a conversation as input (with an optional user-system preamble), along with a list of available tools. The model will then generate a json-formatted list of actions to execute on a subset of those tools. Command R+ 08-2024 may use one of its supplied tools more than once. The model has been trained to recognise a special `directly_answer` tool, which it uses to indicate that it doesn’t want to use any of its other tools. The ability to abstain from calling a specific tool can be useful in a range of situations, such as greeting a user, or asking clarifying questions. We recommend including the `directly_answer` tool, but it can be removed or renamed if required. Comprehensive documentation for working with Command R+ 08-2024's tool use prompt template can be found [here](https://docs.cohere.com/docs/prompting-command-r). Command R+ 08-2024 also supports Hugging Face's [tool use API](https://huggingface.co/docs/transformers/main/en/chat_templating#advanced-tool-use--function-calling). The code snippets below show minimal working examples on how to render a prompt. <details> <summary><b>Usage: Rendering Tool Use Prompts [CLICK TO EXPAND]</b> </summary> ```python from transformers import AutoTokenizer model_id = "CohereForAI/c4ai-command-r-plus-08-2024" tokenizer = AutoTokenizer.from_pretrained(model_id) # define conversation input: conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] # Define tools available for the model to use: tools = [ { "name": "internet_search", "description": "Returns a list of relevant document snippets for a textual query retrieved from the internet", "parameter_definitions": { "query": { "description": "Query to search the internet with", "type": 'str', "required": True } } }, { 'name': "directly_answer", "description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history", 'parameter_definitions': {} } ] # render the tool use prompt as a string: tool_use_prompt = tokenizer.apply_tool_use_template( conversation, tools=tools, tokenize=False, add_generation_prompt=True, ) print(tool_use_prompt) ``` </details> <details> <summary><b>Usage: Rendering prompts with the Tool Use API [CLICK TO EXPAND]</b> </summary> ```python from transformers import AutoTokenizer model_id = "CohereForAI/c4ai-command-r-plus-08-2024" tokenizer = AutoTokenizer.from_pretrained(model_id) # define conversation input: conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] # Define tools available for the model to use # Type hints and docstrings from Python functions are automatically extracted def internet_search(query: str): """ Returns a list of relevant document snippets for a textual query retrieved from the internet Args: query: Query to search the internet with """ pass def directly_answer(): """ Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history """ pass tools = [internet_search, directly_answer] # render the tool use prompt as a string: tool_use_prompt = tokenizer.apply_chat_template( conversation, tools=tools, tokenize=False, add_generation_prompt=True, ) print(tool_use_prompt) ``` </details> <details> <summary><b>Example Rendered Tool Use Prompt [CLICK TO EXPAND]</b></summary> ```` <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. # System Preamble ## Basic Rules You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. # User Preamble ## Task and Context You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. ## Style Guide Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling. ## Available Tools Here is a list of tools that you have available to you: ```python def internet_search(query: str) -> List[Dict]: """Returns a list of relevant document snippets for a textual query retrieved from the internet Args: query (str): Query to search the internet with """ pass ``` ```python def directly_answer() -> List[Dict]: """Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history """ pass ```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example: ```json [ { "tool_name": title of the tool in the specification, "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters } ]```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> ```` </details> <details> <summary><b>Example Rendered Tool Use Completion [CLICK TO EXPAND]</b></summary> ```` Action: ```json [ { "tool_name": "internet_search", "parameters": { "query": "biggest penguin in the world" } } ] ``` ```` </details> ### Grounded Generation and RAG Capabilities: Command R+ 08-2024 has been specifically trained with grounded generation capabilities. This means that it can generate responses based on a list of supplied document snippets, and it will include grounding spans (citations) in its response indicating the source of the information. This can be used to enable behaviors such as grounded summarization and the final step of Retrieval Augmented Generation (RAG). This behavior has been trained into the model via a mixture of supervised fine-tuning and preference fine-tuning, using a specific prompt template. Deviating from this prompt template may reduce performance, but we encourage experimentation. Command R+ 08-2024’s grounded generation behavior takes a conversation as input (with an optional user-supplied system preamble, indicating task, context and desired output style), along with a list of retrieved document snippets. The document snippets should be chunks, rather than long documents, typically around 100-400 words per chunk. Document snippets consist of key-value pairs. The keys should be short descriptive strings, the values can be text or semi-structured. By default, Command R+ 08-2024 will generate grounded responses by first predicting which documents are relevant, then predicting which ones it will cite, then generating an answer. Finally, it will then insert grounding spans into the answer. See below for an example. This is referred to as `accurate` grounded generation. The model is trained with a number of other answering modes, which can be selected by prompt changes. A `fast` citation mode is supported in the tokenizer, which will directly generate an answer with grounding spans in it, without first writing the answer out in full. This sacrifices some grounding accuracy in favor of generating fewer tokens. Comprehensive documentation for working with Command R+ 08-2024's grounded generation prompt template can be found [here](https://docs.cohere.com/docs/prompting-command-r). The code snippet below shows a minimal working example on how to render a prompt. <details> <summary> <b>Usage: Rendering Grounded Generation prompts [CLICK TO EXPAND]</b> </summary> ````python from transformers import AutoTokenizer model_id = "CohereForAI/c4ai-command-r-plus-08-2024" tokenizer = AutoTokenizer.from_pretrained(model_id) # define conversation input: conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] # define documents to ground on: documents = [ { "title": "Tall penguins", "text": "Emperor penguins are the tallest growing up to 122 cm in height." }, { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."} ] # render the tool use prompt as a string: grounded_generation_prompt = tokenizer.apply_grounded_generation_template( conversation, documents=documents, citation_mode="accurate", # or "fast" tokenize=False, add_generation_prompt=True, ) print(grounded_generation_prompt) ```` </details> <details> <summary><b>Example Rendered Grounded Generation Prompt [CLICK TO EXPAND]</b></summary> ```` <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. # System Preamble ## Basic Rules You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. # User Preamble ## Task and Context You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. ## Style Guide Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results> Document: 0 title: Tall penguins text: Emperor penguins are the tallest growing up to 122 cm in height. Document: 1 title: Penguin habitats text: Emperor penguins only live in Antarctica. </results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line. Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'. Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'. Thirdly, Write 'Answer:' followed by a response to the user's last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup. Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> ```` </details> <details> <summary><b>Example Rendered Grounded Generation Completion [CLICK TO EXPAND]</b></summary> ```` Relevant Documents: 0,1 Cited Documents: 0,1 Answer: The Emperor Penguin is the tallest or biggest penguin in the world. It is a bird that lives only in Antarctica and grows to a height of around 122 centimetres. Grounded answer: The <co: 0>Emperor Penguin</co: 0> is the <co: 0>tallest</co: 0> or biggest penguin in the world. It is a bird that <co: 1>lives only in Antarctica</co: 1> and <co: 0>grows to a height of around 122 centimetres.</co: 0> ```` </details> ### Code Capabilities: Command R+ 08-2024 has been optimized to interact with your code, by requesting code snippets, code explanations, or code rewrites. It might not perform well out-of-the-box for pure code completion. For better performance, we also recommend using a low temperature (and even greedy decoding) for code-generation related instructions. ### Model Card Contact For errors or additional questions about details in this model card, contact [[email protected]](mailto:[email protected]). ### Terms of Use: We hope that the release of this model will make community-based research efforts more accessible, by releasing the weights of a highly performant 104 billion parameter model to researchers all over the world. This model is governed by a [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license) License with an acceptable use addendum, and also requires adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy). ### Try Chat: You can try Command R+ 08-2024 chat in the playground [here](https://dashboard.cohere.com/playground/chat). You can also use it in our dedicated Hugging Face Space [here](https://huggingface.co/spaces/CohereForAI/c4ai-command?model=command-r-plus-08-2024).
{"language": ["en", "fr", "de", "es", "it", "pt", "ja", "ko", "zh", "ar"], "library_name": "transformers", "license": "cc-by-nc-4.0", "inference": false}
task
[ "QUESTION_ANSWERING", "SUMMARIZATION" ]
41,415
varun-v-rao/bart-base-snli-model2
varun-v-rao
text-classification
[ "transformers", "tensorboard", "safetensors", "bart", "text-classification", "generated_from_trainer", "dataset:stanfordnlp/snli", "base_model:facebook/bart-base", "base_model:finetune:facebook/bart-base", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-06-19T19:05:12Z
2024-06-19T21:27:42+00:00
4
0
--- base_model: facebook/bart-base datasets: - stanfordnlp/snli license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: bart-base-snli-model2 results: - task: type: text-classification name: Text Classification dataset: name: snli type: stanfordnlp/snli metrics: - type: accuracy value: 0.9089615931721194 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-base-snli-model2 This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the snli dataset. It achieves the following results on the evaluation set: - Loss: 0.2600 - Accuracy: 0.9090 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 256 - eval_batch_size: 128 - seed: 30 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.369 | 1.0 | 2146 | 0.2819 | 0.9007 | | 0.3179 | 2.0 | 4292 | 0.2659 | 0.9067 | | 0.2931 | 3.0 | 6438 | 0.2600 | 0.9090 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.1+cu121 - Datasets 2.15.0 - Tokenizers 0.15.0
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-base-snli-model2 This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the snli dataset. It achieves the following results on the evaluation set: - Loss: 0.2600 - Accuracy: 0.9090 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 256 - eval_batch_size: 128 - seed: 30 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.369 | 1.0 | 2146 | 0.2819 | 0.9007 | | 0.3179 | 2.0 | 4292 | 0.2659 | 0.9067 | | 0.2931 | 3.0 | 6438 | 0.2600 | 0.9090 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.1+cu121 - Datasets 2.15.0 - Tokenizers 0.15.0
{"base_model": "facebook/bart-base", "datasets": ["stanfordnlp/snli"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "bart-base-snli-model2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "snli", "type": "stanfordnlp/snli"}, "metrics": [{"type": "accuracy", "value": 0.9089615931721194, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,416
ptom01/distilbert-base-uncased-finetuned-emotion
ptom01
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-03-16T09:21:03Z
2024-03-16T10:03:14+00:00
6
0
--- base_model: distilbert-base-uncased datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.9395 name: Accuracy - type: f1 value: 0.9394405491387633 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2417 - Accuracy: 0.9395 - F1: 0.9394 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.0461 | 1.0 | 1000 | 0.3721 | 0.9295 | 0.9302 | | 0.1008 | 2.0 | 2000 | 0.2417 | 0.9395 | 0.9394 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2417 - Accuracy: 0.9395 - F1: 0.9394 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.0461 | 1.0 | 1000 | 0.3721 | 0.9295 | 0.9302 | | 0.1008 | 2.0 | 2000 | 0.2417 | 0.9395 | 0.9394 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
{"base_model": "distilbert-base-uncased", "datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.9395, "name": "Accuracy"}, {"type": "f1", "value": 0.9394405491387633, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,417
aszfcxcgszdx/samsum
aszfcxcgszdx
summarization
[ "transformers", "pytorch", "bart", "text2text-generation", "autotrain", "summarization", "en", "dataset:aszfcxcgszdx/autotrain-data-samsum-auto", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-03-15T14:25:51Z
2023-03-15T14:30:17+00:00
21
0
--- datasets: - aszfcxcgszdx/autotrain-data-samsum-auto language: - en tags: - autotrain - summarization widget: - text: I love AutoTrain 🤗 co2_eq_emissions: emissions: 0.0077793677303344775 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 41244106342 - CO2 Emissions (in grams): 0.0078 ## Validation Metrics - Loss: 1.565 - Rouge1: 47.592 - Rouge2: 23.270 - RougeL: 39.623 - RougeLsum: 43.180 - Gen Len: 18.305 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/aszfcxcgszdx/autotrain-samsum-auto-41244106342 ```
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 41244106342 - CO2 Emissions (in grams): 0.0078 ## Validation Metrics - Loss: 1.565 - Rouge1: 47.592 - Rouge2: 23.270 - RougeL: 39.623 - RougeLsum: 43.180 - Gen Len: 18.305 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/aszfcxcgszdx/autotrain-samsum-auto-41244106342 ```
{"datasets": ["aszfcxcgszdx/autotrain-data-samsum-auto"], "language": ["en"], "tags": ["autotrain", "summarization"], "widget": [{"text": "I love AutoTrain 🤗"}], "co2_eq_emissions": {"emissions": 0.0077793677303344775}}
task
[ "SUMMARIZATION" ]
41,418
gaudi/opus-mt-gaa-es-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-25T15:16:13Z
2024-10-19T05:01:02+00:00
6
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-gaa-es) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-gaa-es).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-gaa-es --output_dir ./ctranslate2/opus-mt-gaa-es-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-gaa-es-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-gaa-es-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-gaa-es-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-gaa-es) by Helsinki-NLP.
null
Non_BioNLP
# Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-gaa-es) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-gaa-es).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-gaa-es --output_dir ./ctranslate2/opus-mt-gaa-es-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-gaa-es-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-gaa-es-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-gaa-es-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-gaa-es) by Helsinki-NLP.
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
41,419
Xenova/opus-mt-ru-en
Xenova
translation
[ "transformers.js", "onnx", "marian", "text2text-generation", "translation", "base_model:Helsinki-NLP/opus-mt-ru-en", "base_model:quantized:Helsinki-NLP/opus-mt-ru-en", "region:us" ]
2023-08-07T21:33:58Z
2024-10-08T13:39:54+00:00
107
0
--- base_model: Helsinki-NLP/opus-mt-ru-en library_name: transformers.js pipeline_tag: translation --- https://huggingface.co/Helsinki-NLP/opus-mt-ru-en with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
null
Non_BioNLP
ERROR: type should be string, got "\nhttps://huggingface.co/Helsinki-NLP/opus-mt-ru-en with ONNX weights to be compatible with Transformers.js.\n\nNote: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`)."
{"base_model": "Helsinki-NLP/opus-mt-ru-en", "library_name": "transformers.js", "pipeline_tag": "translation"}
task
[ "TRANSLATION" ]
41,420
gaudi/opus-mt-zh-en-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-17T00:18:17Z
2024-10-18T21:40:33+00:00
25
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-zh-en --output_dir ./ctranslate2/opus-mt-zh-en-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-zh-en-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-zh-en-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-zh-en-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en) by Helsinki-NLP.
null
Non_BioNLP
# Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-zh-en --output_dir ./ctranslate2/opus-mt-zh-en-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-zh-en-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-zh-en-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-zh-en-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en) by Helsinki-NLP.
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
41,421
pszemraj/t5-large-for-lexical-analysis
pszemraj
text2text-generation
[ "transformers", "pytorch", "onnx", "safetensors", "t5", "text2text-generation", "analysis", "book", "notes", "en", "dataset:kmfoda/booksum", "arxiv:2105.08209", "license:bsd-3-clause", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2023-03-31T17:44:21+00:00
46
3
--- datasets: - kmfoda/booksum language: - en library_name: transformers license: bsd-3-clause metrics: - rouge pipeline_tag: text2text-generation tags: - t5 - analysis - book - notes widget: - text: I'm just a girl standing in front of a boy asking him to love her. example_title: Notting Hill - text: Son, your ego is writing checks your body can't cash. example_title: top gun - text: I really love to eat beans. example_title: beans - text: The ledge, where I placed my candle, had a few mildewed books piled up in one corner; and it was covered with writing scratched on the paint. This writing, however, was nothing but a name repeated in all kinds of characters, large and small—Catherine Earnshaw, here and there varied to Catherine Heathcliff, and then again to Catherine Linton. In vapid listlessness I leant my head against the window, and continued spelling over Catherine Earnshaw—Heathcliff—Linton, till my eyes closed; but they had not rested five minutes when a glare of white letters started from the dark, as vivid as spectres—the air swarmed with Catherines; and rousing myself to dispel the obtrusive name, I discovered my candle wick reclining on one of the antique volumes, and perfuming the place with an odour of roasted calf-skin. example_title: Wuthering Heights - text: Did you ever hear the tragedy of Darth Plagueis The Wise? I thought not. It’s not a story the Jedi would tell you. It’s a Sith legend. Darth Plagueis was a Dark Lord of the Sith, so powerful and so wise he could use the Force to influence the midichlorians to create life… He had such a knowledge of the dark side that he could even keep the ones he cared about from dying. The dark side of the Force is a pathway to many abilities some consider to be unnatural. He became so powerful… the only thing he was afraid of was losing his power, which eventually, of course, he did. Unfortunately, he taught his apprentice everything he knew, then his apprentice killed him in his sleep. Ironic. He could save others from death, but not himself. example_title: Darth Plagueis The Wise inference: parameters: no_repeat_ngram_size: 2 max_length: 64 early_stopping: true --- # T5-large for Lexical Analysis - This model was trained a text-to-text task with input text as a summary of a chapter, and the output text as the analysis of that chapter on the [booksum](https://arxiv.org/abs/2105.08209) dataset. - it has somewhat learned how to complete literary analysis on an arbitrary input text. - **NOTE: this is fairly intensive computationally and recommended to be run on GPU. please see example usage in [this demo notebook](https://colab.research.google.com/gist/pszemraj/8e9cc5bee5cac7916ef9241b66e01b05/demo-t5-large-for-lexical-analysis.ipynb)** - The API is set to return max 64 tokens to avoid timeouts on CPU. ## Example - PROMPT: > Carmen: We're kids, not monsters. Dr. Romero: What's the difference? - OUTPUT: > Commentary on Act IV, scenes i-ii In these scenes, we see Dracula's transformation of the children into "monstrous" creatures. Doctor Romero says, "We're kidnapped, but not monsters." This is an important question for the audience to ask: Is there a difference between childhood and adulthood? _longer examples are available in both [the demo notebook](https://colab.research.google.com/gist/pszemraj/8e9cc5bee5cac7916ef9241b66e01b05/demo-t5-large-for-lexical-analysis.ipynb) and at the bottom of this README._ # checkpoints - a fine-tuned version of [google/t5-v1_1-large](https://huggingface.co/google/t5-v1_1-large) - 16 epochs of training total ## Model description - automatic literary analysis on arbitrary text - `booksum` is a dataset created primarily for long-range summarization. It's excellent at that, and usually, this is done with the `chapter` and `summary_text` columns. However, there is a `summary_analysis` column as well, containing literary analysis on the passage in question :eyes: - this model was trained on text-to-text with `summary_text` as the input and `summary_analysis` as the output, so it will analyze whatever text for _deeper meaning_. ## Intended uses & limitations - given the absence of more specific info, the analysis will reference authors/titles from the training data in the context of the analysis. ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 4 ### Training results ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.0+cu111 - Datasets 1.18.3 - Tokenizers 0.11.0 ## Extended Examples the below responses from the model are unedited. ### A prose passage from _The Lord of the Rings_ > Far above the Ephel Duath in the West the night-sky was still dim and pale. There, peering among the cloud-wrack above a dark tor high up in the mountains, Sam saw a white star twinkle for a while. The beauty of it smote his heart, as he looked up out of the forsaken land, and hope returned to him. For like a shaft, clear and cold, the thought pierced him that in the end the Shadow was only a small and passing thing: there was light and high beauty for ever beyond its reach. analysis: > Notes Sam's encounter with the white star is one of the most memorable scenes in the novel. It was a moment of great relief for him to see that there was light beyond his reach, and he had been longing for it all along. He looked up into the heavens at the sight of it: "It was as if I were looking out through the window of my own soul" . In this chapter, however, we are reminded of another scene from the Book of Eliot, when Sam saw the White Star twinkled for the first time since the book began. The night-sky still shrouded by clouds and cloud-wreck on the mountain tops, but now the darkness has begun to darken the whole world. When Sam looks up above the dusk over the mountains, which pierces his heart, Sam realizes that the Shadow will not last forever. ### the rick and morty copypasta > Notes This is a very long post, but it's important to remember that this is the first of many posts on this site. It will take some time for me to get over the fact that I don't have an incredibly high iq to appreciate all of the humor in the show. In other words, people who dislike the series are idiots- they have no idea what these jokes mean or why they're funny at all. The only way to truly appreciate them is to be one of their diehard fans. If you do not like them, you'll end up making fools out of your own stupidity. For example, Dan Harmon writes: "I hated him because I loved him so much" . He was also extremely intelligent, and he would never have been able to fully appreciate the depth of his humour. That said, however, is that if you did not love him, then you should go ahead and make fun of him. And yet another reason why Morty dislikes him is partly due to his lack of narcissism rather than any kind of self-delusion. But there is something special about Mr. Moriarty himself- despite his lowly wittedness, which makes him seem almost superstitious. His attitude towards life seems to stem from his belief that nothing can ever be good enough to save the world. However, as noted above, Dickens says, "Life is full of paradoxes and contradictions... Life is more complex than anything else." Indeed, most critics have pointed out that even those with lower IQ points could possibly be seen as being subversive; indeed, readers might find it hard to sympathize with such simpletons. Of course, Stevenson has made it clear that we need to look beyond the surface level of normalcy in order to understand the absurdity of modern society. There are several examples of this sort of hypocrisy going on in contemporary literature. One of my favorite books is Fathers Sons, written by Alexander Nevsky, published in 1897. These books were published around 18 years before the novel was published. They were serialised in serial format, meaning that they were produced in 1921. Their publication dates back to 1864, when they appeared in London during the late eighteenth century England. At the time of its publication date, it was released in November 1793. When it came out in December, the book had already been published after 1859.
null
Non_BioNLP
# T5-large for Lexical Analysis - This model was trained a text-to-text task with input text as a summary of a chapter, and the output text as the analysis of that chapter on the [booksum](https://arxiv.org/abs/2105.08209) dataset. - it has somewhat learned how to complete literary analysis on an arbitrary input text. - **NOTE: this is fairly intensive computationally and recommended to be run on GPU. please see example usage in [this demo notebook](https://colab.research.google.com/gist/pszemraj/8e9cc5bee5cac7916ef9241b66e01b05/demo-t5-large-for-lexical-analysis.ipynb)** - The API is set to return max 64 tokens to avoid timeouts on CPU. ## Example - PROMPT: > Carmen: We're kids, not monsters. Dr. Romero: What's the difference? - OUTPUT: > Commentary on Act IV, scenes i-ii In these scenes, we see Dracula's transformation of the children into "monstrous" creatures. Doctor Romero says, "We're kidnapped, but not monsters." This is an important question for the audience to ask: Is there a difference between childhood and adulthood? _longer examples are available in both [the demo notebook](https://colab.research.google.com/gist/pszemraj/8e9cc5bee5cac7916ef9241b66e01b05/demo-t5-large-for-lexical-analysis.ipynb) and at the bottom of this README._ # checkpoints - a fine-tuned version of [google/t5-v1_1-large](https://huggingface.co/google/t5-v1_1-large) - 16 epochs of training total ## Model description - automatic literary analysis on arbitrary text - `booksum` is a dataset created primarily for long-range summarization. It's excellent at that, and usually, this is done with the `chapter` and `summary_text` columns. However, there is a `summary_analysis` column as well, containing literary analysis on the passage in question :eyes: - this model was trained on text-to-text with `summary_text` as the input and `summary_analysis` as the output, so it will analyze whatever text for _deeper meaning_. ## Intended uses & limitations - given the absence of more specific info, the analysis will reference authors/titles from the training data in the context of the analysis. ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 4 ### Training results ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.0+cu111 - Datasets 1.18.3 - Tokenizers 0.11.0 ## Extended Examples the below responses from the model are unedited. ### A prose passage from _The Lord of the Rings_ > Far above the Ephel Duath in the West the night-sky was still dim and pale. There, peering among the cloud-wrack above a dark tor high up in the mountains, Sam saw a white star twinkle for a while. The beauty of it smote his heart, as he looked up out of the forsaken land, and hope returned to him. For like a shaft, clear and cold, the thought pierced him that in the end the Shadow was only a small and passing thing: there was light and high beauty for ever beyond its reach. analysis: > Notes Sam's encounter with the white star is one of the most memorable scenes in the novel. It was a moment of great relief for him to see that there was light beyond his reach, and he had been longing for it all along. He looked up into the heavens at the sight of it: "It was as if I were looking out through the window of my own soul" . In this chapter, however, we are reminded of another scene from the Book of Eliot, when Sam saw the White Star twinkled for the first time since the book began. The night-sky still shrouded by clouds and cloud-wreck on the mountain tops, but now the darkness has begun to darken the whole world. When Sam looks up above the dusk over the mountains, which pierces his heart, Sam realizes that the Shadow will not last forever. ### the rick and morty copypasta > Notes This is a very long post, but it's important to remember that this is the first of many posts on this site. It will take some time for me to get over the fact that I don't have an incredibly high iq to appreciate all of the humor in the show. In other words, people who dislike the series are idiots- they have no idea what these jokes mean or why they're funny at all. The only way to truly appreciate them is to be one of their diehard fans. If you do not like them, you'll end up making fools out of your own stupidity. For example, Dan Harmon writes: "I hated him because I loved him so much" . He was also extremely intelligent, and he would never have been able to fully appreciate the depth of his humour. That said, however, is that if you did not love him, then you should go ahead and make fun of him. And yet another reason why Morty dislikes him is partly due to his lack of narcissism rather than any kind of self-delusion. But there is something special about Mr. Moriarty himself- despite his lowly wittedness, which makes him seem almost superstitious. His attitude towards life seems to stem from his belief that nothing can ever be good enough to save the world. However, as noted above, Dickens says, "Life is full of paradoxes and contradictions... Life is more complex than anything else." Indeed, most critics have pointed out that even those with lower IQ points could possibly be seen as being subversive; indeed, readers might find it hard to sympathize with such simpletons. Of course, Stevenson has made it clear that we need to look beyond the surface level of normalcy in order to understand the absurdity of modern society. There are several examples of this sort of hypocrisy going on in contemporary literature. One of my favorite books is Fathers Sons, written by Alexander Nevsky, published in 1897. These books were published around 18 years before the novel was published. They were serialised in serial format, meaning that they were produced in 1921. Their publication dates back to 1864, when they appeared in London during the late eighteenth century England. At the time of its publication date, it was released in November 1793. When it came out in December, the book had already been published after 1859.
{"datasets": ["kmfoda/booksum"], "language": ["en"], "library_name": "transformers", "license": "bsd-3-clause", "metrics": ["rouge"], "pipeline_tag": "text2text-generation", "tags": ["t5", "analysis", "book", "notes"], "widget": [{"text": "I'm just a girl standing in front of a boy asking him to love her.", "example_title": "Notting Hill"}, {"text": "Son, your ego is writing checks your body can't cash.", "example_title": "top gun"}, {"text": "I really love to eat beans.", "example_title": "beans"}, {"text": "The ledge, where I placed my candle, had a few mildewed books piled up in one corner; and it was covered with writing scratched on the paint. This writing, however, was nothing but a name repeated in all kinds of characters, large and small—Catherine Earnshaw, here and there varied to Catherine Heathcliff, and then again to Catherine Linton. In vapid listlessness I leant my head against the window, and continued spelling over Catherine Earnshaw—Heathcliff—Linton, till my eyes closed; but they had not rested five minutes when a glare of white letters started from the dark, as vivid as spectres—the air swarmed with Catherines; and rousing myself to dispel the obtrusive name, I discovered my candle wick reclining on one of the antique volumes, and perfuming the place with an odour of roasted calf-skin.", "example_title": "Wuthering Heights"}, {"text": "Did you ever hear the tragedy of Darth Plagueis The Wise? I thought not. It’s not a story the Jedi would tell you. It’s a Sith legend. Darth Plagueis was a Dark Lord of the Sith, so powerful and so wise he could use the Force to influence the midichlorians to create life… He had such a knowledge of the dark side that he could even keep the ones he cared about from dying. The dark side of the Force is a pathway to many abilities some consider to be unnatural. He became so powerful… the only thing he was afraid of was losing his power, which eventually, of course, he did. Unfortunately, he taught his apprentice everything he knew, then his apprentice killed him in his sleep. Ironic. He could save others from death, but not himself.", "example_title": "Darth Plagueis The Wise"}], "inference": {"parameters": {"no_repeat_ngram_size": 2, "max_length": 64, "early_stopping": true}}}
task
[ "SUMMARIZATION" ]
41,422
waelChafei/resume-summarization-finetuned-mistral-7b
waelChafei
null
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "dataset:generator", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "base_model:adapter:mistralai/Mistral-7B-Instruct-v0.2", "license:apache-2.0", "region:us" ]
2024-05-07T16:17:18Z
2024-05-07T17:09:12+00:00
3
0
--- base_model: mistralai/Mistral-7B-Instruct-v0.2 datasets: - generator library_name: peft license: apache-2.0 tags: - trl - sft - generated_from_trainer model-index: - name: resume-summarization-finetuned-mistral-7b results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resume-summarization-finetuned-mistral-7b This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the generator dataset. It achieves the following results on the evaluation set: - Loss: 2.9103 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_steps: 0.03 - training_steps: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 1.8838 | 0.6897 | 20 | 2.2258 | | 1.4079 | 1.3793 | 40 | 2.3118 | | 1.207 | 2.0690 | 60 | 2.4381 | | 0.765 | 2.7586 | 80 | 2.6361 | | 0.385 | 3.4483 | 100 | 2.9103 | ### Framework versions - PEFT 0.10.0 - Transformers 4.40.2 - Pytorch 2.1.2 - Datasets 2.16.0 - Tokenizers 0.19.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resume-summarization-finetuned-mistral-7b This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the generator dataset. It achieves the following results on the evaluation set: - Loss: 2.9103 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_steps: 0.03 - training_steps: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 1.8838 | 0.6897 | 20 | 2.2258 | | 1.4079 | 1.3793 | 40 | 2.3118 | | 1.207 | 2.0690 | 60 | 2.4381 | | 0.765 | 2.7586 | 80 | 2.6361 | | 0.385 | 3.4483 | 100 | 2.9103 | ### Framework versions - PEFT 0.10.0 - Transformers 4.40.2 - Pytorch 2.1.2 - Datasets 2.16.0 - Tokenizers 0.19.1
{"base_model": "mistralai/Mistral-7B-Instruct-v0.2", "datasets": ["generator"], "library_name": "peft", "license": "apache-2.0", "tags": ["trl", "sft", "generated_from_trainer"], "model-index": [{"name": "resume-summarization-finetuned-mistral-7b", "results": []}]}
task
[ "SUMMARIZATION" ]
41,423
pawan2411/semantic-embedding
pawan2411
sentence-similarity
[ "sentence-transformers", "safetensors", "mpnet", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:7851", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:microsoft/mpnet-base", "base_model:finetune:microsoft/mpnet-base", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-11-21T08:06:32Z
2024-11-21T08:07:02+00:00
12
0
--- base_model: microsoft/mpnet-base library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:7851 - loss:MultipleNegativesRankingLoss widget: - source_sentence: show my holdings that have performed well over the past 10 years sentences: - Do I hold any premium investment options? - Show me the investments that have generated the highest returns over the past 10 years - What implications does this news have for my portfolio's income generation? - source_sentence: How risky is my portfolio currently? sentences: - Can I use diversification to minimize risk in my portfolio - Research investment options complementing my stock portfolio - How secure or vulnerable is my investment portfolio at the moment? - source_sentence: What are my investments? sentences: - What's my current investment situation? - What is the performance of my portfolio over the last week in terms of interest - Exchange mutual fund holdings for stocks - source_sentence: View my ETFs sentences: - Change my stock-based portfolio to mutual fund-based - What equities are projected to climb 10% in share price over the next 3 quarters? - Glance at my exchange-traded fund assets - source_sentence: Tell me which of my stocks will go up 10 % in next month sentences: - Which of my stocks are most likely to experience price breakouts - What's advancing well in my portfolio composition - Which of my stocks are expected to be the top performers in my portfolio over the next month, with a 10% gain? --- # SentenceTransformer based on microsoft/mpnet-base This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) <!-- at revision 6996ce1e91bd2a9c7d7f61daec37463394f73f09 --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("pawan2411/semantic-embedding") # Run inference sentences = [ 'Tell me which of my stocks will go up 10 % in next month', 'Which of my stocks are expected to be the top performers in my portfolio over the next month, with a 10% gain?', 'Which of my stocks are most likely to experience price breakouts', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 7,851 training samples * Columns: <code>sentence_0</code> and <code>sentence_1</code> * Approximate statistics based on the first 1000 samples: | | sentence_0 | sentence_1 | |:--------|:---------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 5 tokens</li><li>mean: 9.65 tokens</li><li>max: 19 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 12.16 tokens</li><li>max: 29 tokens</li></ul> | * Samples: | sentence_0 | sentence_1 | |:----------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------| | <code>What is the performance of my portfolio over the last 10 days?</code> | <code>What is the performance of my investment portfolio over the last 10 days including dividends</code> | | <code>Can you show my worst performing holdings</code> | <code>What are my poorest-performing mutual funds</code> | | <code>Are there any red flags in my portfolio?</code> | <code>What are the red flags that indicate a portfolio is not performing well</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `per_device_train_batch_size`: 64 - `per_device_eval_batch_size`: 64 - `num_train_epochs`: 50 - `multi_dataset_batch_sampler`: round_robin #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: no - `prediction_loss_only`: True - `per_device_train_batch_size`: 64 - `per_device_eval_batch_size`: 64 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1 - `num_train_epochs`: 50 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: round_robin </details> ### Training Logs | Epoch | Step | Training Loss | |:-------:|:----:|:-------------:| | 4.0650 | 500 | 1.2423 | | 8.1301 | 1000 | 0.6364 | | 12.1951 | 1500 | 0.5848 | | 4.0650 | 500 | 0.5865 | | 8.1301 | 1000 | 0.5477 | | 12.1951 | 1500 | 0.5253 | | 4.0650 | 500 | 0.4432 | | 8.1301 | 1000 | 0.431 | | 12.1951 | 1500 | 0.463 | | 4.0650 | 500 | 0.3228 | | 8.1301 | 1000 | 0.3049 | | 12.1951 | 1500 | 0.4006 | | 16.2602 | 2000 | 0.5188 | | 20.3252 | 2500 | 0.5218 | | 24.3902 | 3000 | 0.5098 | | 28.4553 | 3500 | 0.5022 | | 32.5203 | 4000 | 0.4973 | | 36.5854 | 4500 | 0.493 | | 40.6504 | 5000 | 0.4914 | | 44.7154 | 5500 | 0.4898 | | 48.7805 | 6000 | 0.4886 | ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.1.1 - Transformers: 4.45.2 - PyTorch: 2.5.1+cu121 - Accelerate: 1.1.1 - Datasets: 3.1.0 - Tokenizers: 0.20.3 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SentenceTransformer based on microsoft/mpnet-base This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) <!-- at revision 6996ce1e91bd2a9c7d7f61daec37463394f73f09 --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("pawan2411/semantic-embedding") # Run inference sentences = [ 'Tell me which of my stocks will go up 10 % in next month', 'Which of my stocks are expected to be the top performers in my portfolio over the next month, with a 10% gain?', 'Which of my stocks are most likely to experience price breakouts', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 7,851 training samples * Columns: <code>sentence_0</code> and <code>sentence_1</code> * Approximate statistics based on the first 1000 samples: | | sentence_0 | sentence_1 | |:--------|:---------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 5 tokens</li><li>mean: 9.65 tokens</li><li>max: 19 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 12.16 tokens</li><li>max: 29 tokens</li></ul> | * Samples: | sentence_0 | sentence_1 | |:----------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------| | <code>What is the performance of my portfolio over the last 10 days?</code> | <code>What is the performance of my investment portfolio over the last 10 days including dividends</code> | | <code>Can you show my worst performing holdings</code> | <code>What are my poorest-performing mutual funds</code> | | <code>Are there any red flags in my portfolio?</code> | <code>What are the red flags that indicate a portfolio is not performing well</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `per_device_train_batch_size`: 64 - `per_device_eval_batch_size`: 64 - `num_train_epochs`: 50 - `multi_dataset_batch_sampler`: round_robin #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: no - `prediction_loss_only`: True - `per_device_train_batch_size`: 64 - `per_device_eval_batch_size`: 64 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1 - `num_train_epochs`: 50 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: round_robin </details> ### Training Logs | Epoch | Step | Training Loss | |:-------:|:----:|:-------------:| | 4.0650 | 500 | 1.2423 | | 8.1301 | 1000 | 0.6364 | | 12.1951 | 1500 | 0.5848 | | 4.0650 | 500 | 0.5865 | | 8.1301 | 1000 | 0.5477 | | 12.1951 | 1500 | 0.5253 | | 4.0650 | 500 | 0.4432 | | 8.1301 | 1000 | 0.431 | | 12.1951 | 1500 | 0.463 | | 4.0650 | 500 | 0.3228 | | 8.1301 | 1000 | 0.3049 | | 12.1951 | 1500 | 0.4006 | | 16.2602 | 2000 | 0.5188 | | 20.3252 | 2500 | 0.5218 | | 24.3902 | 3000 | 0.5098 | | 28.4553 | 3500 | 0.5022 | | 32.5203 | 4000 | 0.4973 | | 36.5854 | 4500 | 0.493 | | 40.6504 | 5000 | 0.4914 | | 44.7154 | 5500 | 0.4898 | | 48.7805 | 6000 | 0.4886 | ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.1.1 - Transformers: 4.45.2 - PyTorch: 2.5.1+cu121 - Accelerate: 1.1.1 - Datasets: 3.1.0 - Tokenizers: 0.20.3 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "microsoft/mpnet-base", "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:7851", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "show my holdings that have performed well over the past 10 years", "sentences": ["Do I hold any premium investment options?", "Show me the investments that have generated the highest returns over the past 10 years", "What implications does this news have for my portfolio's income generation?"]}, {"source_sentence": "How risky is my portfolio currently?", "sentences": ["Can I use diversification to minimize risk in my portfolio", "Research investment options complementing my stock portfolio", "How secure or vulnerable is my investment portfolio at the moment?"]}, {"source_sentence": "What are my investments?", "sentences": ["What's my current investment situation?", "What is the performance of my portfolio over the last week in terms of interest", "Exchange mutual fund holdings for stocks"]}, {"source_sentence": "View my ETFs", "sentences": ["Change my stock-based portfolio to mutual fund-based", "What equities are projected to climb 10% in share price over the next 3 quarters?", "Glance at my exchange-traded fund assets"]}, {"source_sentence": "Tell me which of my stocks will go up 10 % in next month", "sentences": ["Which of my stocks are most likely to experience price breakouts", "What's advancing well in my portfolio composition", "Which of my stocks are expected to be the top performers in my portfolio over the next month, with a 10% gain?"]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,424
anhtuansh/vietnamese-bi-encoder-financial-matryoshka-2
anhtuansh
sentence-similarity
[ "sentence-transformers", "safetensors", "roberta", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:10248", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss", "en", "arxiv:1908.10084", "arxiv:2205.13147", "arxiv:1705.00652", "base_model:bkai-foundation-models/vietnamese-bi-encoder", "base_model:finetune:bkai-foundation-models/vietnamese-bi-encoder", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-06-10T12:55:08Z
2024-06-10T12:55:25+00:00
6
0
--- base_model: bkai-foundation-models/vietnamese-bi-encoder datasets: [] language: - en library_name: sentence-transformers license: apache-2.0 metrics: - cosine_accuracy@1 - cosine_accuracy@3 - cosine_accuracy@5 - cosine_accuracy@10 - cosine_precision@1 - cosine_precision@3 - cosine_precision@5 - cosine_precision@10 - cosine_recall@1 - cosine_recall@3 - cosine_recall@5 - cosine_recall@10 - cosine_ndcg@10 - cosine_mrr@10 - cosine_map@100 pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:10248 - loss:MatryoshkaLoss - loss:MultipleNegativesRankingLoss widget: - source_sentence: Tại khoản 1, Điều 5. Nghị định số 03/2016/NĐ-CP ngày 05/01/2016 của Chính phủ quy định Hồ sơ thành lập đơn vị tự vệ trong doanh nghiệp như sau:a) Văn bản đề nghị thành lập đơn vị tự vệ của người quản lý doanh nghiệp;b) Kế hoạch xây dựng lực lượng tự vệ của doanh nghiệp;c) Danh sách cán bộ, công chức, viên chức, người lao động tham gia tổ chức tự vệ của doanh nghiệp;d) Văn bản của cơ quan chức năng thẩm định việc thành lập tự vệ của doanh nghiệp. sentences: - "Căn cứ pháp lý thủ tục thay đổi nội dung đăng ký hoạt động của Văn phòng công\ \ chứng \nnhận sáp nhập?" - Doanh nghiệp muốn thực hiện phân loại trang thiết bị y tế thì phải làm như thế nào? - Hồ sơ thành lập đơn vị tự vệ trong doanh nghiệp được quy định như thế nào? - source_sentence: Căn cứ Khoản 2 Điều 11 “Điều kiện về an ninh, trật tự đối với kinh doanh dịch vụ bảo vệ” của Nghị định số 96/2016/NĐ-CP ngày 01/7/2016 của Chính phủ quy định điều kiện về an ninh, trật tự đối với một số ngành, nghề đầu tư kinh doanh có điều kiện “Người chịu trách nhiệm về an ninh, trật tự của cơ sở kinh doanh dịch vụ bảo vệ ….. không phải là người chịu trách nhiệm về an ninh, trật tự của cơ sở kinh doanh dịch vụ bảo vệ mà trong 24 tháng liền kề trước đó đã bị thu hồi không có thời hạn Giấy chứng nhận đủ điều kiện về an ninh, trật tự” .Như vậy, anh có thể thành lập công ty mới kinh doanh dịch vụ bảo vệ theo quy định của pháp luật. sentences: - Trong thời hạn bao nhiêu ngày làm việc kể từ ngày nhận được thông báo hoàn tất việc mua bán, chuyển nhượng Phần vốn góp của tổ chức tài chính vi mô, Ngân hàng Nhà nước ra quyết định sửa đổi, bổ sung Giấy phép của tổ chức tài chính vi mô? - Công ty kinh doanh nhân viên bảo vệ của tôi đã từng bị thu hồi không thời hạn Giấy chứng nhận đủ điều kiện về an ninh trật tự cách đây 2 năm. Hiện giờ tôi muốn thành lập công ty mới thì có được không? - 'Thủ tục: Điều chỉnh tên dự án đầu tư, tên và địa chỉ nhà đầu tư trong Giấy chứng nhận đăng ký đầu tư gồm các thành phần hồ sơ nào?' - source_sentence: Quy trình tạo và gửi hồ sơ thủ tục hành chính gồm các bước sau:(i) Truy cập và đăng nhập vào trang dịch vụ công bằng tài khoản được cấp(ii) Lựa chọn loại TTHC đề nghị khen thưởng(iii) Tải các thành phần hồ sơ của TTHC lên dịch vụ công(iv) Xác nhận việc tạo mới hồ sơ và chuyển cho lãnh đạo phê duyệt(v) Lãnh đạo đăng nhập bằng tài khoản được cấp và phê duyệt gửi hồ sơ sentences: - Quy định đối với người nước ngoài và hoạt động kinh tế ở khu vực bảo vệ và vành đai an toàn? - Hồ sơ đề nghị cấp Giấy chứng nhận doanh nghiệp công nghệ cao gồm những gì và được nộp ở đâu? - Quy trình tạo và gửi một hồ sơ TTHC trên dịch vụ công gồm những bước nào? - source_sentence: Mẫu số 02 Thông tư số 02/2013/TT-BNV. sentences: - Thời gian xử lý các thủ tục chứng thư số là bao lâu? - Tố cáo cán bộ, công chức, viên chức trong cơ quan mà hiện nay cơ quan đó đã giải thể thì ai là người giải quyết? - Điều lệ mẫu của quỹ xã hội, quỹ từ thiện được quy định ở văn bản nào? - source_sentence: '<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">Căn cứ Điều 108, Luật Thú y số 79/2015/QH 13 ngày 19/6/2015 của Quốc hội và Điều 21 Nghị định số 35/2016/NĐ-CP ngày 15/5/2016 của Chính phủ quy định về hành nghề thú y như sau;</span></p> <p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><strong><span style="color:black;">1. Điều kiện hành nghề thú y:</span></strong></p> <p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">- Có đạo đức nghề nghiệp;</span></p> <p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">- Có đủ sức khỏe hành nghề.</span></p> <p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="letter-spacing:-.2pt;"><span style="color:black;">- Có Chứng chỉ hành nghề thú y phù hợp với từng loại hình hành nghề thú y.</span></span></p> <p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><strong><span style="color:black;">2. Điều kiện để cấp chứng chỉ hành nghề thú y:</span></strong></p> <p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">a.&nbsp;</span><span style="color:black;">Người hành nghề chẩn đoán, chữa bệnh, phẫu thuật động vật, tư vấn các hoạt động liên quan đến lĩnh vực thú y tối thiểu phải có bằng trung cấp chuyên ngành thú y, chăn nuôi thú y hoặc trung cấp nuôi&nbsp;</span><span style="color:black;">tr</span><span style="color:black;">ồng thủy sản, bệnh&nbsp;học thủy sản đối với hành nghề thú y thủy sản. Người hành nghề tiêm phòng cho động vật phải có chứng chỉ tốt nghiệp lớp đào tạo về kỹ thuật do cơ quan quản lý nhà nước có thẩm quyền cấp tỉnh cấp.</span></p> <p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">b.&nbsp;Người phụ&nbsp;trách kỹ thuật của cơ sở phẫu thuật động vật, khám bệnh, chẩn đoán bệnh, xét nghiệm bệnh động vật phải có bằng đại học&nbsp;trở lên chuyên ngành thú y, chăn nuôi thú y hoặc nuôi&nbsp;trồng thủy sản, bệnh học thủy sản đối với hành nghề thú y thủy sản.</span></p> <p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">c.&nbsp;Người buôn bán thuốc thú y phải có bằng trung cấp trở&nbsp;lên chuyên ngành thú y, chăn nuôi thú y hoặc trung cấp nuôi trồng thủy sản, bệnh học thủy sản đối với hành nghề thú y thủy sản.</span></p>' sentences: - Căn cứ pháp lý thủ tục cấp Giấy chứng nhận tổ chức đủ điều kiện hoạt động giám định sở hữu công nghiệp - 'Điều kiện để hành nghề và cấp chứng chỉ hành nghề thú y đối với các loại hình: Tiêm phòng, chữa bệnh, phẫu thuật động vật; tư vấn các hoạt động liên quan đến lĩnh vực thú y; khám bệnh, chẩn đoán bệnh, xét nghiệm bệnh động vật; buôn bán thuốc thú y?' - Trường hợp đã có chứng thư số ký duyệt nghiệp vụ dịch vụ công liên quan đến chứng thư số, nếu người sử dụng muốn đăng ký để ký duyệtnghiệp vụ dịch vụ công khác như Mã ngân hàng, Thanh toán liên ngân hàng (TTLNH), Thi đua – Khen thưởng, Thanh toán thì cần phải làm thủ tục gì? model-index: - name: vietnamese-bi-encoder Financial Matryoshka - 2 results: - task: type: information-retrieval name: Information Retrieval dataset: name: dim 768 type: dim_768 metrics: - type: cosine_accuracy@1 value: 0.5329236172080772 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.6988586479367866 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.7603160667251976 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.8138718173836699 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.5329236172080772 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.2329528826455955 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.1520632133450395 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.08138718173836698 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.5329236172080772 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.6988586479367866 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.7603160667251976 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.8138718173836699 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.6735784668494285 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.6285285756093483 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.6338528205835894 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 512 type: dim_512 metrics: - type: cosine_accuracy@1 value: 0.5364354697102721 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.6997366110623354 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.762071992976295 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.8086040386303776 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.5364354697102721 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.23324553702077844 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.152414398595259 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.08086040386303775 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.5364354697102721 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.6997366110623354 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.762071992976295 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.8086040386303776 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.6733400704768767 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.6297514528199337 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.6355829264231957 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 256 type: dim_256 metrics: - type: cosine_accuracy@1 value: 0.5311676909569798 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.6892010535557507 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.7550482879719052 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.8068481123792801 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.5311676909569798 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.22973368451858353 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.15100965759438104 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.08068481123792799 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.5311676909569798 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.6892010535557507 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.7550482879719052 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.8068481123792801 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.6689063830436761 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.624737307858467 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.6307311118555528 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 128 type: dim_128 metrics: - type: cosine_accuracy@1 value: 0.5074626865671642 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.6681299385425812 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.7339771729587358 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.7998244073748902 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.5074626865671642 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.22270997951419372 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.14679543459174713 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.07998244073748902 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.5074626865671642 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.6681299385425812 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.7339771729587358 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.7998244073748902 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.6520119825054099 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.6049092771436932 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.6107424205763605 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 64 type: dim_64 metrics: - type: cosine_accuracy@1 value: 0.47146619841966636 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.6347673397717296 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.6918349429323969 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.7638279192273925 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.47146619841966636 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.2115891132572432 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.13836698858647933 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.07638279192273925 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.47146619841966636 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.6347673397717296 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.6918349429323969 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.7638279192273925 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.6148557873571111 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.5675923603272155 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.5737454249088845 name: Cosine Map@100 --- # vietnamese-bi-encoder Financial Matryoshka - 2 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [bkai-foundation-models/vietnamese-bi-encoder](https://huggingface.co/bkai-foundation-models/vietnamese-bi-encoder). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [bkai-foundation-models/vietnamese-bi-encoder](https://huggingface.co/bkai-foundation-models/vietnamese-bi-encoder) <!-- at revision 84f9d9ada0d1a3c37557398b9ae9fcedcdf40be0 --> - **Maximum Sequence Length:** 256 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> - **Language:** en - **License:** apache-2.0 ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("anhtuansh/vietnamese-bi-encoder-financial-matryoshka-2") # Run inference sentences = [ '<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">Căn cứ Điều 108, Luật Thú y số 79/2015/QH 13 ngày 19/6/2015 của Quốc hội và Điều 21 Nghị định số 35/2016/NĐ-CP ngày 15/5/2016 của Chính phủ quy định về hành nghề thú y như sau;</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><strong><span style="color:black;">1. Điều kiện hành nghề thú y:</span></strong></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">- Có đạo đức nghề nghiệp;</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">- Có đủ sức khỏe hành nghề.</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="letter-spacing:-.2pt;"><span style="color:black;">- Có Chứng chỉ hành nghề thú y phù hợp với từng loại hình hành nghề thú y.</span></span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><strong><span style="color:black;">2. Điều kiện để cấp chứng chỉ hành nghề thú y:</span></strong></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">a.&nbsp;</span><span style="color:black;">Người hành nghề chẩn đoán, chữa bệnh, phẫu thuật động vật, tư vấn các hoạt động liên quan đến lĩnh vực thú y tối thiểu phải có bằng trung cấp chuyên ngành thú y, chăn nuôi thú y hoặc trung cấp nuôi&nbsp;</span><span style="color:black;">tr</span><span style="color:black;">ồng thủy sản, bệnh&nbsp;học thủy sản đối với hành nghề thú y thủy sản. Người hành nghề tiêm phòng cho động vật phải có chứng chỉ tốt nghiệp lớp đào tạo về kỹ thuật do cơ quan quản lý nhà nước có thẩm quyền cấp tỉnh cấp.</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">b.&nbsp;Người phụ&nbsp;trách kỹ thuật của cơ sở phẫu thuật động vật, khám bệnh, chẩn đoán bệnh, xét nghiệm bệnh động vật phải có bằng đại học&nbsp;trở lên chuyên ngành thú y, chăn nuôi thú y hoặc nuôi&nbsp;trồng thủy sản, bệnh học thủy sản đối với hành nghề thú y thủy sản.</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">c.&nbsp;Người buôn bán thuốc thú y phải có bằng trung cấp trở&nbsp;lên chuyên ngành thú y, chăn nuôi thú y hoặc trung cấp nuôi trồng thủy sản, bệnh học thủy sản đối với hành nghề thú y thủy sản.</span></p>', 'Điều kiện để hành nghề và cấp chứng chỉ hành nghề thú y đối với các loại hình: Tiêm phòng, chữa bệnh, phẫu thuật động vật; tư vấn các hoạt động liên quan đến lĩnh vực thú y; khám bệnh, chẩn đoán bệnh, xét nghiệm bệnh động vật; buôn bán thuốc thú y?', 'Trường hợp đã có chứng thư số ký duyệt nghiệp vụ dịch vụ công liên quan đến chứng thư số, nếu người sử dụng muốn đăng ký để ký duyệtnghiệp vụ dịch vụ công khác như Mã ngân hàng, Thanh toán liên ngân hàng (TTLNH), Thi đua – Khen thưởng, Thanh toán thì cần phải làm thủ tục gì?', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Dataset: `dim_768` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.5329 | | cosine_accuracy@3 | 0.6989 | | cosine_accuracy@5 | 0.7603 | | cosine_accuracy@10 | 0.8139 | | cosine_precision@1 | 0.5329 | | cosine_precision@3 | 0.233 | | cosine_precision@5 | 0.1521 | | cosine_precision@10 | 0.0814 | | cosine_recall@1 | 0.5329 | | cosine_recall@3 | 0.6989 | | cosine_recall@5 | 0.7603 | | cosine_recall@10 | 0.8139 | | cosine_ndcg@10 | 0.6736 | | cosine_mrr@10 | 0.6285 | | **cosine_map@100** | **0.6339** | #### Information Retrieval * Dataset: `dim_512` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.5364 | | cosine_accuracy@3 | 0.6997 | | cosine_accuracy@5 | 0.7621 | | cosine_accuracy@10 | 0.8086 | | cosine_precision@1 | 0.5364 | | cosine_precision@3 | 0.2332 | | cosine_precision@5 | 0.1524 | | cosine_precision@10 | 0.0809 | | cosine_recall@1 | 0.5364 | | cosine_recall@3 | 0.6997 | | cosine_recall@5 | 0.7621 | | cosine_recall@10 | 0.8086 | | cosine_ndcg@10 | 0.6733 | | cosine_mrr@10 | 0.6298 | | **cosine_map@100** | **0.6356** | #### Information Retrieval * Dataset: `dim_256` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.5312 | | cosine_accuracy@3 | 0.6892 | | cosine_accuracy@5 | 0.755 | | cosine_accuracy@10 | 0.8068 | | cosine_precision@1 | 0.5312 | | cosine_precision@3 | 0.2297 | | cosine_precision@5 | 0.151 | | cosine_precision@10 | 0.0807 | | cosine_recall@1 | 0.5312 | | cosine_recall@3 | 0.6892 | | cosine_recall@5 | 0.755 | | cosine_recall@10 | 0.8068 | | cosine_ndcg@10 | 0.6689 | | cosine_mrr@10 | 0.6247 | | **cosine_map@100** | **0.6307** | #### Information Retrieval * Dataset: `dim_128` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.5075 | | cosine_accuracy@3 | 0.6681 | | cosine_accuracy@5 | 0.734 | | cosine_accuracy@10 | 0.7998 | | cosine_precision@1 | 0.5075 | | cosine_precision@3 | 0.2227 | | cosine_precision@5 | 0.1468 | | cosine_precision@10 | 0.08 | | cosine_recall@1 | 0.5075 | | cosine_recall@3 | 0.6681 | | cosine_recall@5 | 0.734 | | cosine_recall@10 | 0.7998 | | cosine_ndcg@10 | 0.652 | | cosine_mrr@10 | 0.6049 | | **cosine_map@100** | **0.6107** | #### Information Retrieval * Dataset: `dim_64` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.4715 | | cosine_accuracy@3 | 0.6348 | | cosine_accuracy@5 | 0.6918 | | cosine_accuracy@10 | 0.7638 | | cosine_precision@1 | 0.4715 | | cosine_precision@3 | 0.2116 | | cosine_precision@5 | 0.1384 | | cosine_precision@10 | 0.0764 | | cosine_recall@1 | 0.4715 | | cosine_recall@3 | 0.6348 | | cosine_recall@5 | 0.6918 | | cosine_recall@10 | 0.7638 | | cosine_ndcg@10 | 0.6149 | | cosine_mrr@10 | 0.5676 | | **cosine_map@100** | **0.5737** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 10,248 training samples * Columns: <code>positive</code> and <code>anchor</code> * Approximate statistics based on the first 1000 samples: | | positive | anchor | |:--------|:------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 4 tokens</li><li>mean: 150.46 tokens</li><li>max: 256 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 37.63 tokens</li><li>max: 210 tokens</li></ul> | * Samples: | positive | anchor | |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Theo quy định tại Điều 7, Nghị định số 15/2018/NĐ-CP của Chính phủ về hướng dẫn Luật ATTP. Đối với sản phẩm sản xuất trong nước Hồ sơ đăng ký bản công bố sản phẩm gồm:a) Bản công bố sản phẩm được quy định tại Mẫu số 02 Phụ lục I ban hành kèm theo Nghị định số 15/2018/NĐ-CP của Chính phủ;b) Phiếu kết quả kiểm nghiệm an toàn thực phẩm của sản phẩm trong thời hạn 12 tháng tính đến ngày nộp hồ sơ được cấp bởi phòng kiểm nghiệm được chỉ định hoặc phòng kiểm nghiệm được công nhận phù hợp ISO 17025 gồm các chỉ tiêu an toàn do Bộ Y tế ban hành theo nguyên tắc quản lý rủi ro phù hợp với quy định của quốc tế hoặc các chỉ tiêu an toàn theo các quy chuẩn, tiêu chuẩn tương ứng do tổ chức, cá nhân công bố trong trường hợp chưa có quy định của Bộ Y tế (bản chính hoặc bản sao chứng thực);c) Bằng chứng khoa học chứng minh công dụng của sản phẩm hoặc của thành phần tạo nên công dụng đã công bố (bản chính hoặc bản sao có xác nhận của tổ chức, cá nhân). Khi sử dụng bằng chứng khoa học về công dụng thành phần của sản phẩm để làm công dụng cho sản phẩm thì liều sử dụng hàng ngày của sản phẩm tối thiểu phải lớn hơn hoặc bằng 15% lượng sử dụng thành phần đó đã nêu trong tài liệu;d) Giấy chứng nhận cơ sở đủ điều kiện an toàn thực phẩm trong trường hợp cơ sở thuộc đối tượng phải cấp giấy chứng nhận cơ sở đủ điều kiện an toàn thực phẩm theo quy định (bản sao có xác nhận của tổ chức, cá nhân);đ) Giấy chứng nhận cơ sở đủ điều kiện an toàn thực phẩm đạt yêu cầu Thực hành sản xuất tốt (GMP) trong trường hợp sản phẩm sản xuất trong nước là thực phẩm bảo vệ sức khỏe áp dụng từ ngày 01 tháng 7 năm 2019 (bản sao có xác nhận của tổ chức, cá nhân).</code> | <code>Hồ sơ công bố đối với nhóm thực phẩm bảo vệ sức khỏe sản xuất trong nước gồm những nội dung gì.</code> | | <code>Điểm ưu tiên về đối tượng, khu vực của thí sinh phải đúng quy chế của Bộ Giáo dục và Đào tạo, thống nhất và trùng khớp giữa hồ sơ sơ tuyển và hồ sơ xét tuyển. Khi phát hiện có sự sai sót về chế độ ưu tiên, thí sinh phải liên hệ với cơ quan đăng ký hồ sơ để điều chỉnh, cụ thể:- Về hồ sơ sơ tuyển: Thí sinh phải liên hệ với Ban Chỉ huy Quân sự huyện (trung đoàn hoặc tương đương) để được điều chỉnh, xác nhận và gửi về trường mình đăng ký.- Về hồ sơ xét tuyển: Thí sinh phải liên hệ với Sở Giáo dục và Đào tạo nơi thí sinh đăng ký dự Kỳ thi trung học phổ thông quốc gia để được điều chỉnh.</code> | <code>Nếu sai sót về chế độ ưu tiên theo đối tượng hoặc chế độ ưu tiên theo khu vực trong quá trình kê khai hồ sơ sơ tuyển, xét tuyển thí sinh phải liên hệ ở đâu để được điều chỉnh?</code> | | <code>Theo quy định tại Nghị định số 88/2014/NĐ-CP, Tổng Giám đốc, thành viên Hội đồng xếp hạng tín nhiệm, chuyên viên phân tích phải có trình độ từ đại học trở lên thuộc chuyên ngành kinh tế, tài chính, ngân hàng, kế toán, kiểm toán, thống kê, quản trị kinh doanh hoặc chuyên ngành thuộc lĩnh vực kinh doanh của tổ chức được xếp hạng tín nhiệm. Theo đó, khi cung cấp hồ sơ về Tổng Giám đốc, thành viên Hội đồng xếp hạng tín nhiệm, chuyên viên phân tích, doanh nghiệp cần cung cấp bằng cấp đáp ứng điều kiện quy định tại Nghị định số 88/2014/NĐ-CP.</code> | <code>Về hồ sơ của Tổng Giám đốc, thành viên Hội đồng xếp hạng tín nhiệm, chuyên viên phân tích, trường hợp đã cung cấp bằng đại học của người lao động đáp ứng chuyên ngành theo quy định tại Nghị định số 88/2014/NĐ-CP có cần cung cấp các bằng cấp khác như bằng thạc sĩ, tiến sĩ hay không?</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `gradient_accumulation_steps`: 32 - `learning_rate`: 2e-05 - `num_train_epochs`: 10 - `lr_scheduler_type`: cosine - `warmup_ratio`: 0.1 - `fp16`: True - `tf32`: False - `load_best_model_at_end`: True - `optim`: adamw_torch_fused - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 32 - `eval_accumulation_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 10 - `max_steps`: -1 - `lr_scheduler_type`: cosine - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: False - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: True - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch_fused - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | dim_128_cosine_map@100 | dim_256_cosine_map@100 | dim_512_cosine_map@100 | dim_64_cosine_map@100 | dim_768_cosine_map@100 | |:----------:|:-------:|:-------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|:----------------------:| | 0.9969 | 10 | 3.4117 | 0.5202 | 0.5352 | 0.5441 | 0.4739 | 0.5412 | | 1.9938 | 20 | 1.8953 | 0.5655 | 0.5913 | 0.5970 | 0.5291 | 0.6007 | | 2.9907 | 30 | 1.2229 | 0.5828 | 0.6072 | 0.6140 | 0.5454 | 0.6193 | | 3.9875 | 40 | 0.81 | 0.5936 | 0.6202 | 0.6224 | 0.5576 | 0.6268 | | 4.9844 | 50 | 0.594 | 0.6039 | 0.6246 | 0.6303 | 0.5656 | 0.6285 | | 5.9813 | 60 | 0.4648 | 0.6056 | 0.6267 | 0.6346 | 0.5693 | 0.6313 | | 6.9782 | 70 | 0.3797 | 0.6092 | 0.6292 | 0.6357 | 0.5724 | 0.6333 | | 7.9751 | 80 | 0.3436 | 0.6101 | 0.6299 | 0.6360 | 0.5728 | 0.6339 | | 8.9720 | 90 | 0.3227 | 0.6102 | 0.6306 | 0.6360 | 0.5746 | 0.6339 | | **9.9688** | **100** | **0.3198** | **0.6107** | **0.6307** | **0.6356** | **0.5737** | **0.6339** | * The bold row denotes the saved checkpoint. ### Framework Versions - Python: 3.10.13 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.1.2 - Accelerate: 0.29.3 - Datasets: 2.19.1 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MatryoshkaLoss ```bibtex @misc{kusupati2024matryoshka, title={Matryoshka Representation Learning}, author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi}, year={2024}, eprint={2205.13147}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# vietnamese-bi-encoder Financial Matryoshka - 2 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [bkai-foundation-models/vietnamese-bi-encoder](https://huggingface.co/bkai-foundation-models/vietnamese-bi-encoder). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [bkai-foundation-models/vietnamese-bi-encoder](https://huggingface.co/bkai-foundation-models/vietnamese-bi-encoder) <!-- at revision 84f9d9ada0d1a3c37557398b9ae9fcedcdf40be0 --> - **Maximum Sequence Length:** 256 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> - **Language:** en - **License:** apache-2.0 ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("anhtuansh/vietnamese-bi-encoder-financial-matryoshka-2") # Run inference sentences = [ '<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">Căn cứ Điều 108, Luật Thú y số 79/2015/QH 13 ngày 19/6/2015 của Quốc hội và Điều 21 Nghị định số 35/2016/NĐ-CP ngày 15/5/2016 của Chính phủ quy định về hành nghề thú y như sau;</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><strong><span style="color:black;">1. Điều kiện hành nghề thú y:</span></strong></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">- Có đạo đức nghề nghiệp;</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">- Có đủ sức khỏe hành nghề.</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="letter-spacing:-.2pt;"><span style="color:black;">- Có Chứng chỉ hành nghề thú y phù hợp với từng loại hình hành nghề thú y.</span></span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><strong><span style="color:black;">2. Điều kiện để cấp chứng chỉ hành nghề thú y:</span></strong></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">a.&nbsp;</span><span style="color:black;">Người hành nghề chẩn đoán, chữa bệnh, phẫu thuật động vật, tư vấn các hoạt động liên quan đến lĩnh vực thú y tối thiểu phải có bằng trung cấp chuyên ngành thú y, chăn nuôi thú y hoặc trung cấp nuôi&nbsp;</span><span style="color:black;">tr</span><span style="color:black;">ồng thủy sản, bệnh&nbsp;học thủy sản đối với hành nghề thú y thủy sản. Người hành nghề tiêm phòng cho động vật phải có chứng chỉ tốt nghiệp lớp đào tạo về kỹ thuật do cơ quan quản lý nhà nước có thẩm quyền cấp tỉnh cấp.</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">b.&nbsp;Người phụ&nbsp;trách kỹ thuật của cơ sở phẫu thuật động vật, khám bệnh, chẩn đoán bệnh, xét nghiệm bệnh động vật phải có bằng đại học&nbsp;trở lên chuyên ngành thú y, chăn nuôi thú y hoặc nuôi&nbsp;trồng thủy sản, bệnh học thủy sản đối với hành nghề thú y thủy sản.</span></p>\n\n<p style="margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;"><span style="color:black;">c.&nbsp;Người buôn bán thuốc thú y phải có bằng trung cấp trở&nbsp;lên chuyên ngành thú y, chăn nuôi thú y hoặc trung cấp nuôi trồng thủy sản, bệnh học thủy sản đối với hành nghề thú y thủy sản.</span></p>', 'Điều kiện để hành nghề và cấp chứng chỉ hành nghề thú y đối với các loại hình: Tiêm phòng, chữa bệnh, phẫu thuật động vật; tư vấn các hoạt động liên quan đến lĩnh vực thú y; khám bệnh, chẩn đoán bệnh, xét nghiệm bệnh động vật; buôn bán thuốc thú y?', 'Trường hợp đã có chứng thư số ký duyệt nghiệp vụ dịch vụ công liên quan đến chứng thư số, nếu người sử dụng muốn đăng ký để ký duyệtnghiệp vụ dịch vụ công khác như Mã ngân hàng, Thanh toán liên ngân hàng (TTLNH), Thi đua – Khen thưởng, Thanh toán thì cần phải làm thủ tục gì?', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Dataset: `dim_768` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.5329 | | cosine_accuracy@3 | 0.6989 | | cosine_accuracy@5 | 0.7603 | | cosine_accuracy@10 | 0.8139 | | cosine_precision@1 | 0.5329 | | cosine_precision@3 | 0.233 | | cosine_precision@5 | 0.1521 | | cosine_precision@10 | 0.0814 | | cosine_recall@1 | 0.5329 | | cosine_recall@3 | 0.6989 | | cosine_recall@5 | 0.7603 | | cosine_recall@10 | 0.8139 | | cosine_ndcg@10 | 0.6736 | | cosine_mrr@10 | 0.6285 | | **cosine_map@100** | **0.6339** | #### Information Retrieval * Dataset: `dim_512` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.5364 | | cosine_accuracy@3 | 0.6997 | | cosine_accuracy@5 | 0.7621 | | cosine_accuracy@10 | 0.8086 | | cosine_precision@1 | 0.5364 | | cosine_precision@3 | 0.2332 | | cosine_precision@5 | 0.1524 | | cosine_precision@10 | 0.0809 | | cosine_recall@1 | 0.5364 | | cosine_recall@3 | 0.6997 | | cosine_recall@5 | 0.7621 | | cosine_recall@10 | 0.8086 | | cosine_ndcg@10 | 0.6733 | | cosine_mrr@10 | 0.6298 | | **cosine_map@100** | **0.6356** | #### Information Retrieval * Dataset: `dim_256` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.5312 | | cosine_accuracy@3 | 0.6892 | | cosine_accuracy@5 | 0.755 | | cosine_accuracy@10 | 0.8068 | | cosine_precision@1 | 0.5312 | | cosine_precision@3 | 0.2297 | | cosine_precision@5 | 0.151 | | cosine_precision@10 | 0.0807 | | cosine_recall@1 | 0.5312 | | cosine_recall@3 | 0.6892 | | cosine_recall@5 | 0.755 | | cosine_recall@10 | 0.8068 | | cosine_ndcg@10 | 0.6689 | | cosine_mrr@10 | 0.6247 | | **cosine_map@100** | **0.6307** | #### Information Retrieval * Dataset: `dim_128` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.5075 | | cosine_accuracy@3 | 0.6681 | | cosine_accuracy@5 | 0.734 | | cosine_accuracy@10 | 0.7998 | | cosine_precision@1 | 0.5075 | | cosine_precision@3 | 0.2227 | | cosine_precision@5 | 0.1468 | | cosine_precision@10 | 0.08 | | cosine_recall@1 | 0.5075 | | cosine_recall@3 | 0.6681 | | cosine_recall@5 | 0.734 | | cosine_recall@10 | 0.7998 | | cosine_ndcg@10 | 0.652 | | cosine_mrr@10 | 0.6049 | | **cosine_map@100** | **0.6107** | #### Information Retrieval * Dataset: `dim_64` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.4715 | | cosine_accuracy@3 | 0.6348 | | cosine_accuracy@5 | 0.6918 | | cosine_accuracy@10 | 0.7638 | | cosine_precision@1 | 0.4715 | | cosine_precision@3 | 0.2116 | | cosine_precision@5 | 0.1384 | | cosine_precision@10 | 0.0764 | | cosine_recall@1 | 0.4715 | | cosine_recall@3 | 0.6348 | | cosine_recall@5 | 0.6918 | | cosine_recall@10 | 0.7638 | | cosine_ndcg@10 | 0.6149 | | cosine_mrr@10 | 0.5676 | | **cosine_map@100** | **0.5737** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 10,248 training samples * Columns: <code>positive</code> and <code>anchor</code> * Approximate statistics based on the first 1000 samples: | | positive | anchor | |:--------|:------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 4 tokens</li><li>mean: 150.46 tokens</li><li>max: 256 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 37.63 tokens</li><li>max: 210 tokens</li></ul> | * Samples: | positive | anchor | |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Theo quy định tại Điều 7, Nghị định số 15/2018/NĐ-CP của Chính phủ về hướng dẫn Luật ATTP. Đối với sản phẩm sản xuất trong nước Hồ sơ đăng ký bản công bố sản phẩm gồm:a) Bản công bố sản phẩm được quy định tại Mẫu số 02 Phụ lục I ban hành kèm theo Nghị định số 15/2018/NĐ-CP của Chính phủ;b) Phiếu kết quả kiểm nghiệm an toàn thực phẩm của sản phẩm trong thời hạn 12 tháng tính đến ngày nộp hồ sơ được cấp bởi phòng kiểm nghiệm được chỉ định hoặc phòng kiểm nghiệm được công nhận phù hợp ISO 17025 gồm các chỉ tiêu an toàn do Bộ Y tế ban hành theo nguyên tắc quản lý rủi ro phù hợp với quy định của quốc tế hoặc các chỉ tiêu an toàn theo các quy chuẩn, tiêu chuẩn tương ứng do tổ chức, cá nhân công bố trong trường hợp chưa có quy định của Bộ Y tế (bản chính hoặc bản sao chứng thực);c) Bằng chứng khoa học chứng minh công dụng của sản phẩm hoặc của thành phần tạo nên công dụng đã công bố (bản chính hoặc bản sao có xác nhận của tổ chức, cá nhân). Khi sử dụng bằng chứng khoa học về công dụng thành phần của sản phẩm để làm công dụng cho sản phẩm thì liều sử dụng hàng ngày của sản phẩm tối thiểu phải lớn hơn hoặc bằng 15% lượng sử dụng thành phần đó đã nêu trong tài liệu;d) Giấy chứng nhận cơ sở đủ điều kiện an toàn thực phẩm trong trường hợp cơ sở thuộc đối tượng phải cấp giấy chứng nhận cơ sở đủ điều kiện an toàn thực phẩm theo quy định (bản sao có xác nhận của tổ chức, cá nhân);đ) Giấy chứng nhận cơ sở đủ điều kiện an toàn thực phẩm đạt yêu cầu Thực hành sản xuất tốt (GMP) trong trường hợp sản phẩm sản xuất trong nước là thực phẩm bảo vệ sức khỏe áp dụng từ ngày 01 tháng 7 năm 2019 (bản sao có xác nhận của tổ chức, cá nhân).</code> | <code>Hồ sơ công bố đối với nhóm thực phẩm bảo vệ sức khỏe sản xuất trong nước gồm những nội dung gì.</code> | | <code>Điểm ưu tiên về đối tượng, khu vực của thí sinh phải đúng quy chế của Bộ Giáo dục và Đào tạo, thống nhất và trùng khớp giữa hồ sơ sơ tuyển và hồ sơ xét tuyển. Khi phát hiện có sự sai sót về chế độ ưu tiên, thí sinh phải liên hệ với cơ quan đăng ký hồ sơ để điều chỉnh, cụ thể:- Về hồ sơ sơ tuyển: Thí sinh phải liên hệ với Ban Chỉ huy Quân sự huyện (trung đoàn hoặc tương đương) để được điều chỉnh, xác nhận và gửi về trường mình đăng ký.- Về hồ sơ xét tuyển: Thí sinh phải liên hệ với Sở Giáo dục và Đào tạo nơi thí sinh đăng ký dự Kỳ thi trung học phổ thông quốc gia để được điều chỉnh.</code> | <code>Nếu sai sót về chế độ ưu tiên theo đối tượng hoặc chế độ ưu tiên theo khu vực trong quá trình kê khai hồ sơ sơ tuyển, xét tuyển thí sinh phải liên hệ ở đâu để được điều chỉnh?</code> | | <code>Theo quy định tại Nghị định số 88/2014/NĐ-CP, Tổng Giám đốc, thành viên Hội đồng xếp hạng tín nhiệm, chuyên viên phân tích phải có trình độ từ đại học trở lên thuộc chuyên ngành kinh tế, tài chính, ngân hàng, kế toán, kiểm toán, thống kê, quản trị kinh doanh hoặc chuyên ngành thuộc lĩnh vực kinh doanh của tổ chức được xếp hạng tín nhiệm. Theo đó, khi cung cấp hồ sơ về Tổng Giám đốc, thành viên Hội đồng xếp hạng tín nhiệm, chuyên viên phân tích, doanh nghiệp cần cung cấp bằng cấp đáp ứng điều kiện quy định tại Nghị định số 88/2014/NĐ-CP.</code> | <code>Về hồ sơ của Tổng Giám đốc, thành viên Hội đồng xếp hạng tín nhiệm, chuyên viên phân tích, trường hợp đã cung cấp bằng đại học của người lao động đáp ứng chuyên ngành theo quy định tại Nghị định số 88/2014/NĐ-CP có cần cung cấp các bằng cấp khác như bằng thạc sĩ, tiến sĩ hay không?</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `gradient_accumulation_steps`: 32 - `learning_rate`: 2e-05 - `num_train_epochs`: 10 - `lr_scheduler_type`: cosine - `warmup_ratio`: 0.1 - `fp16`: True - `tf32`: False - `load_best_model_at_end`: True - `optim`: adamw_torch_fused - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 32 - `eval_accumulation_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 10 - `max_steps`: -1 - `lr_scheduler_type`: cosine - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: False - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: True - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch_fused - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | dim_128_cosine_map@100 | dim_256_cosine_map@100 | dim_512_cosine_map@100 | dim_64_cosine_map@100 | dim_768_cosine_map@100 | |:----------:|:-------:|:-------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|:----------------------:| | 0.9969 | 10 | 3.4117 | 0.5202 | 0.5352 | 0.5441 | 0.4739 | 0.5412 | | 1.9938 | 20 | 1.8953 | 0.5655 | 0.5913 | 0.5970 | 0.5291 | 0.6007 | | 2.9907 | 30 | 1.2229 | 0.5828 | 0.6072 | 0.6140 | 0.5454 | 0.6193 | | 3.9875 | 40 | 0.81 | 0.5936 | 0.6202 | 0.6224 | 0.5576 | 0.6268 | | 4.9844 | 50 | 0.594 | 0.6039 | 0.6246 | 0.6303 | 0.5656 | 0.6285 | | 5.9813 | 60 | 0.4648 | 0.6056 | 0.6267 | 0.6346 | 0.5693 | 0.6313 | | 6.9782 | 70 | 0.3797 | 0.6092 | 0.6292 | 0.6357 | 0.5724 | 0.6333 | | 7.9751 | 80 | 0.3436 | 0.6101 | 0.6299 | 0.6360 | 0.5728 | 0.6339 | | 8.9720 | 90 | 0.3227 | 0.6102 | 0.6306 | 0.6360 | 0.5746 | 0.6339 | | **9.9688** | **100** | **0.3198** | **0.6107** | **0.6307** | **0.6356** | **0.5737** | **0.6339** | * The bold row denotes the saved checkpoint. ### Framework Versions - Python: 3.10.13 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.1.2 - Accelerate: 0.29.3 - Datasets: 2.19.1 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MatryoshkaLoss ```bibtex @misc{kusupati2024matryoshka, title={Matryoshka Representation Learning}, author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi}, year={2024}, eprint={2205.13147}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "bkai-foundation-models/vietnamese-bi-encoder", "datasets": [], "language": ["en"], "library_name": "sentence-transformers", "license": "apache-2.0", "metrics": ["cosine_accuracy@1", "cosine_accuracy@3", "cosine_accuracy@5", "cosine_accuracy@10", "cosine_precision@1", "cosine_precision@3", "cosine_precision@5", "cosine_precision@10", "cosine_recall@1", "cosine_recall@3", "cosine_recall@5", "cosine_recall@10", "cosine_ndcg@10", "cosine_mrr@10", "cosine_map@100"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:10248", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Tại khoản 1, Điều 5. Nghị định số 03/2016/NĐ-CP ngày 05/01/2016 của Chính phủ quy định Hồ sơ thành lập đơn vị tự vệ trong doanh nghiệp như sau:a) Văn bản đề nghị thành lập đơn vị tự vệ của người quản lý doanh nghiệp;b) Kế hoạch xây dựng lực lượng tự vệ của doanh nghiệp;c) Danh sách cán bộ, công chức, viên chức, người lao động tham gia tổ chức tự vệ của doanh nghiệp;d) Văn bản của cơ quan chức năng thẩm định việc thành lập tự vệ của doanh nghiệp.", "sentences": ["Căn cứ pháp lý thủ tục thay đổi nội dung đăng ký hoạt động của Văn phòng công chứng \nnhận sáp nhập?", "Doanh nghiệp muốn thực hiện phân loại trang thiết bị y tế thì phải làm như thế nào?", "Hồ sơ thành lập đơn vị tự vệ trong doanh nghiệp được quy định như thế nào?"]}, {"source_sentence": "Căn cứ Khoản 2 Điều 11 “Điều kiện về an ninh, trật tự đối với kinh doanh dịch vụ bảo vệ” của Nghị định số 96/2016/NĐ-CP ngày 01/7/2016 của Chính phủ quy định điều kiện về an ninh, trật tự đối với một số ngành, nghề đầu tư kinh doanh có điều kiện “Người chịu trách nhiệm về an ninh, trật tự của cơ sở kinh doanh dịch vụ bảo vệ ….. không phải là người chịu trách nhiệm về an ninh, trật tự của cơ sở kinh doanh dịch vụ bảo vệ mà trong 24 tháng liền kề trước đó đã bị thu hồi không có thời hạn Giấy chứng nhận đủ điều kiện về an ninh, trật tự” .Như vậy, anh có thể thành lập công ty mới kinh doanh dịch vụ bảo vệ theo quy định của pháp luật.", "sentences": ["Trong thời hạn bao nhiêu ngày làm việc kể từ ngày nhận được thông báo hoàn tất việc mua bán, chuyển nhượng Phần vốn góp của tổ chức tài chính vi mô, Ngân hàng Nhà nước ra quyết định sửa đổi, bổ sung Giấy phép của tổ chức tài chính vi mô?", "Công ty kinh doanh nhân viên bảo vệ của tôi đã từng bị thu hồi không thời hạn Giấy chứng nhận đủ điều kiện về an ninh trật tự cách đây 2 năm. Hiện giờ tôi muốn thành lập công ty mới thì có được không?", "Thủ tục: Điều chỉnh tên dự án đầu tư, tên và địa chỉ nhà đầu tư trong Giấy chứng nhận đăng ký đầu tư gồm các thành phần hồ sơ nào?"]}, {"source_sentence": "Quy trình tạo và gửi hồ sơ thủ tục hành chính gồm các bước sau:(i) Truy cập và đăng nhập vào trang dịch vụ công bằng tài khoản được cấp(ii) Lựa chọn loại TTHC đề nghị khen thưởng(iii) Tải các thành phần hồ sơ của TTHC lên dịch vụ công(iv) Xác nhận việc tạo mới hồ sơ và chuyển cho lãnh đạo phê duyệt(v) Lãnh đạo đăng nhập bằng tài khoản được cấp và phê duyệt gửi hồ sơ", "sentences": ["Quy định đối với người nước ngoài và hoạt động kinh tế ở khu vực bảo vệ và vành đai an toàn?", "Hồ sơ đề nghị cấp Giấy chứng nhận doanh nghiệp công nghệ cao gồm những gì và được nộp ở đâu?", "Quy trình tạo và gửi một hồ sơ TTHC trên dịch vụ công gồm những bước nào?"]}, {"source_sentence": "Mẫu số 02 Thông tư số 02/2013/TT-BNV.", "sentences": ["Thời gian xử lý các thủ tục chứng thư số là bao lâu?", "Tố cáo cán bộ, công chức, viên chức trong cơ quan mà hiện nay cơ quan đó đã giải thể thì ai là người giải quyết?", "Điều lệ mẫu của quỹ xã hội, quỹ từ thiện được quy định ở văn bản nào?"]}, {"source_sentence": "<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><span style=\"color:black;\">Căn cứ Điều 108, Luật Thú y số 79/2015/QH 13 ngày 19/6/2015 của Quốc hội và Điều 21 Nghị định số 35/2016/NĐ-CP ngày 15/5/2016 của Chính phủ quy định về hành nghề thú y như sau;</span></p>\n\n<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><strong><span style=\"color:black;\">1. Điều kiện hành nghề thú y:</span></strong></p>\n\n<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><span style=\"color:black;\">- Có đạo đức nghề nghiệp;</span></p>\n\n<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><span style=\"color:black;\">- Có đủ sức khỏe hành nghề.</span></p>\n\n<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><span style=\"letter-spacing:-.2pt;\"><span style=\"color:black;\">- Có Chứng chỉ hành nghề thú y phù hợp với từng loại hình hành nghề thú y.</span></span></p>\n\n<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><strong><span style=\"color:black;\">2. Điều kiện để cấp chứng chỉ hành nghề thú y:</span></strong></p>\n\n<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><span style=\"color:black;\">a.&nbsp;</span><span style=\"color:black;\">Người hành nghề chẩn đoán, chữa bệnh, phẫu thuật động vật, tư vấn các hoạt động liên quan đến lĩnh vực thú y tối thiểu phải có bằng trung cấp chuyên ngành thú y, chăn nuôi thú y hoặc trung cấp nuôi&nbsp;</span><span style=\"color:black;\">tr</span><span style=\"color:black;\">ồng thủy sản, bệnh&nbsp;học thủy sản đối với hành nghề thú y thủy sản. Người hành nghề tiêm phòng cho động vật phải có chứng chỉ tốt nghiệp lớp đào tạo về kỹ thuật do cơ quan quản lý nhà nước có thẩm quyền cấp tỉnh cấp.</span></p>\n\n<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><span style=\"color:black;\">b.&nbsp;Người phụ&nbsp;trách kỹ thuật của cơ sở phẫu thuật động vật, khám bệnh, chẩn đoán bệnh, xét nghiệm bệnh động vật phải có bằng đại học&nbsp;trở lên chuyên ngành thú y, chăn nuôi thú y hoặc nuôi&nbsp;trồng thủy sản, bệnh học thủy sản đối với hành nghề thú y thủy sản.</span></p>\n\n<p style=\"margin-top:6.0pt;text-align:justify;text-indent:36.0pt;line-height:16.0pt;background:white;\"><span style=\"color:black;\">c.&nbsp;Người buôn bán thuốc thú y phải có bằng trung cấp trở&nbsp;lên chuyên ngành thú y, chăn nuôi thú y hoặc trung cấp nuôi trồng thủy sản, bệnh học thủy sản đối với hành nghề thú y thủy sản.</span></p>", "sentences": ["Căn cứ pháp lý thủ tục cấp Giấy chứng nhận tổ chức đủ điều kiện hoạt động giám định sở hữu công nghiệp", "Điều kiện để hành nghề và cấp chứng chỉ hành nghề thú y đối với các loại hình: Tiêm phòng, chữa bệnh, phẫu thuật động vật; tư vấn các hoạt động liên quan đến lĩnh vực thú y; khám bệnh, chẩn đoán bệnh, xét nghiệm bệnh động vật; buôn bán thuốc thú y?", "Trường hợp đã có chứng thư số ký duyệt nghiệp vụ dịch vụ công liên quan đến chứng thư số, nếu người sử dụng muốn đăng ký để ký duyệtnghiệp vụ dịch vụ công khác như Mã ngân hàng, Thanh toán liên ngân hàng (TTLNH), Thi đua – Khen thưởng, Thanh toán thì cần phải làm thủ tục gì?"]}], "model-index": [{"name": "vietnamese-bi-encoder Financial Matryoshka - 2", "results": [{"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 768", "type": "dim_768"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.5329236172080772, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.6988586479367866, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.7603160667251976, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.8138718173836699, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.5329236172080772, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.2329528826455955, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.1520632133450395, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.08138718173836698, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.5329236172080772, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.6988586479367866, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.7603160667251976, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.8138718173836699, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.6735784668494285, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.6285285756093483, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.6338528205835894, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 512", "type": "dim_512"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.5364354697102721, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.6997366110623354, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.762071992976295, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.8086040386303776, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.5364354697102721, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.23324553702077844, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.152414398595259, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.08086040386303775, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.5364354697102721, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.6997366110623354, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.762071992976295, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.8086040386303776, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.6733400704768767, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.6297514528199337, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.6355829264231957, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 256", "type": "dim_256"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.5311676909569798, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.6892010535557507, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.7550482879719052, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.8068481123792801, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.5311676909569798, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.22973368451858353, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.15100965759438104, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.08068481123792799, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.5311676909569798, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.6892010535557507, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.7550482879719052, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.8068481123792801, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.6689063830436761, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.624737307858467, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.6307311118555528, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 128", "type": "dim_128"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.5074626865671642, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.6681299385425812, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.7339771729587358, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.7998244073748902, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.5074626865671642, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.22270997951419372, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.14679543459174713, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.07998244073748902, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.5074626865671642, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.6681299385425812, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.7339771729587358, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.7998244073748902, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.6520119825054099, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.6049092771436932, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.6107424205763605, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 64", "type": "dim_64"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.47146619841966636, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.6347673397717296, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.6918349429323969, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.7638279192273925, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.47146619841966636, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.2115891132572432, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.13836698858647933, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.07638279192273925, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.47146619841966636, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.6347673397717296, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.6918349429323969, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.7638279192273925, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.6148557873571111, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.5675923603272155, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.5737454249088845, "name": "Cosine Map@100"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,425
RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf
RichardErkhov
null
[ "gguf", "arxiv:2303.08774", "endpoints_compatible", "region:us", "conversational" ]
2024-05-04T12:03:35Z
2024-05-04T14:27:49+00:00
38
1
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Faro-Yi-9B - GGUF - Model creator: https://huggingface.co/wenbopan/ - Original model: https://huggingface.co/wenbopan/Faro-Yi-9B/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Faro-Yi-9B.Q2_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q2_K.gguf) | Q2_K | 3.12GB | | [Faro-Yi-9B.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ3_XS.gguf) | IQ3_XS | 3.46GB | | [Faro-Yi-9B.IQ3_S.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ3_S.gguf) | IQ3_S | 3.64GB | | [Faro-Yi-9B.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q3_K_S.gguf) | Q3_K_S | 3.63GB | | [Faro-Yi-9B.IQ3_M.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ3_M.gguf) | IQ3_M | 3.78GB | | [Faro-Yi-9B.Q3_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q3_K.gguf) | Q3_K | 4.03GB | | [Faro-Yi-9B.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q3_K_M.gguf) | Q3_K_M | 4.03GB | | [Faro-Yi-9B.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q3_K_L.gguf) | Q3_K_L | 4.37GB | | [Faro-Yi-9B.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ4_XS.gguf) | IQ4_XS | 4.5GB | | [Faro-Yi-9B.Q4_0.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_0.gguf) | Q4_0 | 4.69GB | | [Faro-Yi-9B.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ4_NL.gguf) | IQ4_NL | 4.73GB | | [Faro-Yi-9B.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_K_S.gguf) | Q4_K_S | 4.72GB | | [Faro-Yi-9B.Q4_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_K.gguf) | Q4_K | 4.96GB | | [Faro-Yi-9B.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_K_M.gguf) | Q4_K_M | 4.96GB | | [Faro-Yi-9B.Q4_1.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_1.gguf) | Q4_1 | 5.19GB | | [Faro-Yi-9B.Q5_0.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_0.gguf) | Q5_0 | 5.69GB | | [Faro-Yi-9B.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_K_S.gguf) | Q5_K_S | 5.69GB | | [Faro-Yi-9B.Q5_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_K.gguf) | Q5_K | 5.83GB | | [Faro-Yi-9B.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_K_M.gguf) | Q5_K_M | 5.83GB | | [Faro-Yi-9B.Q5_1.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_1.gguf) | Q5_1 | 6.19GB | | [Faro-Yi-9B.Q6_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q6_K.gguf) | Q6_K | 6.75GB | Original model description: --- license: mit datasets: - wenbopan/Fusang-v1 - wenbopan/OpenOrca-zh-20k language: - zh - en --- ![image/webp](https://cdn-uploads.huggingface.co/production/uploads/62cd3a3691d27e60db0698b0/s21sMRxRT56c5t4M15GBP.webp) **The Faro chat model focuses on practicality and long-context modeling. It handles various downstream tasks with higher quality, delivering stable and reliable results even when inputs contain lengthy documents or complex instructions. Faro seamlessly works in both English and Chinese.** # Faro-Yi-9B Faro-Yi-9B is an improved [Yi-9B-200K](https://huggingface.co/01-ai/Yi-9B-200K) with extensive instruction tuning on [Fusang-V1](https://huggingface.co/datasets/wenbopan/Fusang-v1). Compared to Yi-9B-200K, Faro-Yi-9B has gained greater capability in various downstream tasks and long-context modeling thanks to the large-scale synthetic data in Fusang-V1. Just like Yi-9B-200K, Faro-Yi-9B supports up to 200K context length. ## How to Use Faro-Yi-9B uses the chatml template and performs well in both short and long contexts. For longer inputs under **24GB of VRAM**, I recommend to use vLLM to have a max prompt of 32K. Setting `kv_cache_dtype="fp8_e5m2"` allows for 48K input length. 4bit-AWQ quantization on top of that can boost input length to 160K, albeit with some performance impact. Adjust `max_model_len` arg in vLLM or `config.json` to avoid OOM. ```python import io import requests from PyPDF2 import PdfReader from vllm import LLM, SamplingParams llm = LLM(model="wenbopan/Faro-Yi-9B", kv_cache_dtype="fp8_e5m2", max_model_len=100000) pdf_data = io.BytesIO(requests.get("https://arxiv.org/pdf/2303.08774.pdf").content) document = "".join(page.extract_text() for page in PdfReader(pdf_data).pages) # 100 pages question = f"{document}\n\nAccording to the paper, what is the parameter count of GPT-4?" messages = [ {"role": "user", "content": question} ] # 83K tokens prompt = llm.get_tokenizer().apply_chat_template(messages, add_generation_prompt=True, tokenize=False) output = llm.generate(prompt, SamplingParams(temperature=0.8, max_tokens=500)) print(output[0].outputs[0].text) # Yi-9B-200K: 175B. GPT-4 has 175B \nparameters. How many models were combined to create GPT-4? Answer: 6. ... # Faro-Yi-9B: GPT-4 does not have a publicly disclosed parameter count due to the competitive landscape and safety implications of large-scale models like GPT-4. ... ``` <details> <summary>Or With Transformers</summary> ```python from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained('wenbopan/Faro-Yi-9B', device_map="cuda") tokenizer = AutoTokenizer.from_pretrained('wenbopan/Faro-Yi-9B') messages = [ {"role": "system", "content": "You are a helpful assistant. Always answer with a short response."}, {"role": "user", "content": "Tell me what is Pythagorean theorem like you are a pirate."} ] input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device) generated_ids = model.generate(input_ids, max_new_tokens=512, temperature=0.5) response = tokenizer.decode(generated_ids[0], skip_special_tokens=True) # Aye, matey! The Pythagorean theorem is a nautical rule that helps us find the length of the third side of a triangle. ... ``` </details> ## Performance Faro-Yi-9B enhances its ability compared to Yi-9B-200K in most dimensions, especially in long-range modeling and bilingual (English, Chinese) understanding. Faro is competitive among all open-sourced models at around 9B parameters. <details> <summary>Benchmark Results</summary> ### Fact-based Evaluation (Open LLM Leaderboard) | **Metric** | **MMLU** | **GSM8K** | **HellaSwag** | **TruthfulQA** | **Arc** | **Winogrande** | | -------------- | --------- | --------- | ------------- | -------------- | ----------- | -------------- | | **Yi-9B-200K** | 65.73 | 50.49 | 56.72 | 33.80 | 69.25 | 71.67 | | **Faro-Yi-9B** | **68.80** | **63.08** | **57.28** | **40.86** | **72.58** | 71.11 | ### Long-context Modeling ([LongBench](https://github.com/THUDM/LongBench)) | **Name** | **Average_zh** | **Average_en** | **Code Completion** | |----------------|----------------|----------------|---------------------| | **Yi-9B-200K** | 30.288 | 36.7071 | 72.2 | | **Faro-Yi-9B** | **41.092** | **40.9536** | 46.0 | <details> <summary>Score breakdown</summary> | **Name** | **Few-shot Learning_en** | **Synthetic Tasks_en** | **Single-Doc QA_en** | **Multi-Doc QA_en** | **Summarization_en** | **Few-shot Learning_zh** | **Synthetic Tasks_zh** | **Single-Doc QA_zh** | **Multi-Doc QA_zh** | **Summarization_zh** | |----------------|--------------------------|------------------------|----------------------|---------------------|----------------------|--------------------------|------------------------|----------------------|---------------------|----------------------| | **Yi-9B-200K** | 60.6 | 22.8 | 30.9 | 38.9 | 25.8 | 46.5 | 28.0 | 49.6 | 17.7 | 9.7 | | **Faro-Yi-9B** | **63.8** | **40.2** | **36.2** | 38.0 | **26.3** | 30.0 | **75.1** | **55.6** | **30.7** | **14.1** | </details> ### Performance on Preference (MT-Bench) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd3a3691d27e60db0698b0/M0Kc64sIsbNyCCvrRk1Lv.png) ### Bilingual Ability (CMMLU & MMLU) | **Name** | MMLU | **CMMLU** | | -------------- | --------- | --------- | | **Yi-9B-200K** | 65.73 | 71.97 | | **Faro-Yi-9B** | **68.80** | **73.28** | </details>
null
Non_BioNLP
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Faro-Yi-9B - GGUF - Model creator: https://huggingface.co/wenbopan/ - Original model: https://huggingface.co/wenbopan/Faro-Yi-9B/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Faro-Yi-9B.Q2_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q2_K.gguf) | Q2_K | 3.12GB | | [Faro-Yi-9B.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ3_XS.gguf) | IQ3_XS | 3.46GB | | [Faro-Yi-9B.IQ3_S.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ3_S.gguf) | IQ3_S | 3.64GB | | [Faro-Yi-9B.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q3_K_S.gguf) | Q3_K_S | 3.63GB | | [Faro-Yi-9B.IQ3_M.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ3_M.gguf) | IQ3_M | 3.78GB | | [Faro-Yi-9B.Q3_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q3_K.gguf) | Q3_K | 4.03GB | | [Faro-Yi-9B.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q3_K_M.gguf) | Q3_K_M | 4.03GB | | [Faro-Yi-9B.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q3_K_L.gguf) | Q3_K_L | 4.37GB | | [Faro-Yi-9B.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ4_XS.gguf) | IQ4_XS | 4.5GB | | [Faro-Yi-9B.Q4_0.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_0.gguf) | Q4_0 | 4.69GB | | [Faro-Yi-9B.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.IQ4_NL.gguf) | IQ4_NL | 4.73GB | | [Faro-Yi-9B.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_K_S.gguf) | Q4_K_S | 4.72GB | | [Faro-Yi-9B.Q4_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_K.gguf) | Q4_K | 4.96GB | | [Faro-Yi-9B.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_K_M.gguf) | Q4_K_M | 4.96GB | | [Faro-Yi-9B.Q4_1.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q4_1.gguf) | Q4_1 | 5.19GB | | [Faro-Yi-9B.Q5_0.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_0.gguf) | Q5_0 | 5.69GB | | [Faro-Yi-9B.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_K_S.gguf) | Q5_K_S | 5.69GB | | [Faro-Yi-9B.Q5_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_K.gguf) | Q5_K | 5.83GB | | [Faro-Yi-9B.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_K_M.gguf) | Q5_K_M | 5.83GB | | [Faro-Yi-9B.Q5_1.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q5_1.gguf) | Q5_1 | 6.19GB | | [Faro-Yi-9B.Q6_K.gguf](https://huggingface.co/RichardErkhov/wenbopan_-_Faro-Yi-9B-gguf/blob/main/Faro-Yi-9B.Q6_K.gguf) | Q6_K | 6.75GB | Original model description: --- license: mit datasets: - wenbopan/Fusang-v1 - wenbopan/OpenOrca-zh-20k language: - zh - en --- ![image/webp](https://cdn-uploads.huggingface.co/production/uploads/62cd3a3691d27e60db0698b0/s21sMRxRT56c5t4M15GBP.webp) **The Faro chat model focuses on practicality and long-context modeling. It handles various downstream tasks with higher quality, delivering stable and reliable results even when inputs contain lengthy documents or complex instructions. Faro seamlessly works in both English and Chinese.** # Faro-Yi-9B Faro-Yi-9B is an improved [Yi-9B-200K](https://huggingface.co/01-ai/Yi-9B-200K) with extensive instruction tuning on [Fusang-V1](https://huggingface.co/datasets/wenbopan/Fusang-v1). Compared to Yi-9B-200K, Faro-Yi-9B has gained greater capability in various downstream tasks and long-context modeling thanks to the large-scale synthetic data in Fusang-V1. Just like Yi-9B-200K, Faro-Yi-9B supports up to 200K context length. ## How to Use Faro-Yi-9B uses the chatml template and performs well in both short and long contexts. For longer inputs under **24GB of VRAM**, I recommend to use vLLM to have a max prompt of 32K. Setting `kv_cache_dtype="fp8_e5m2"` allows for 48K input length. 4bit-AWQ quantization on top of that can boost input length to 160K, albeit with some performance impact. Adjust `max_model_len` arg in vLLM or `config.json` to avoid OOM. ```python import io import requests from PyPDF2 import PdfReader from vllm import LLM, SamplingParams llm = LLM(model="wenbopan/Faro-Yi-9B", kv_cache_dtype="fp8_e5m2", max_model_len=100000) pdf_data = io.BytesIO(requests.get("https://arxiv.org/pdf/2303.08774.pdf").content) document = "".join(page.extract_text() for page in PdfReader(pdf_data).pages) # 100 pages question = f"{document}\n\nAccording to the paper, what is the parameter count of GPT-4?" messages = [ {"role": "user", "content": question} ] # 83K tokens prompt = llm.get_tokenizer().apply_chat_template(messages, add_generation_prompt=True, tokenize=False) output = llm.generate(prompt, SamplingParams(temperature=0.8, max_tokens=500)) print(output[0].outputs[0].text) # Yi-9B-200K: 175B. GPT-4 has 175B \nparameters. How many models were combined to create GPT-4? Answer: 6. ... # Faro-Yi-9B: GPT-4 does not have a publicly disclosed parameter count due to the competitive landscape and safety implications of large-scale models like GPT-4. ... ``` <details> <summary>Or With Transformers</summary> ```python from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained('wenbopan/Faro-Yi-9B', device_map="cuda") tokenizer = AutoTokenizer.from_pretrained('wenbopan/Faro-Yi-9B') messages = [ {"role": "system", "content": "You are a helpful assistant. Always answer with a short response."}, {"role": "user", "content": "Tell me what is Pythagorean theorem like you are a pirate."} ] input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device) generated_ids = model.generate(input_ids, max_new_tokens=512, temperature=0.5) response = tokenizer.decode(generated_ids[0], skip_special_tokens=True) # Aye, matey! The Pythagorean theorem is a nautical rule that helps us find the length of the third side of a triangle. ... ``` </details> ## Performance Faro-Yi-9B enhances its ability compared to Yi-9B-200K in most dimensions, especially in long-range modeling and bilingual (English, Chinese) understanding. Faro is competitive among all open-sourced models at around 9B parameters. <details> <summary>Benchmark Results</summary> ### Fact-based Evaluation (Open LLM Leaderboard) | **Metric** | **MMLU** | **GSM8K** | **HellaSwag** | **TruthfulQA** | **Arc** | **Winogrande** | | -------------- | --------- | --------- | ------------- | -------------- | ----------- | -------------- | | **Yi-9B-200K** | 65.73 | 50.49 | 56.72 | 33.80 | 69.25 | 71.67 | | **Faro-Yi-9B** | **68.80** | **63.08** | **57.28** | **40.86** | **72.58** | 71.11 | ### Long-context Modeling ([LongBench](https://github.com/THUDM/LongBench)) | **Name** | **Average_zh** | **Average_en** | **Code Completion** | |----------------|----------------|----------------|---------------------| | **Yi-9B-200K** | 30.288 | 36.7071 | 72.2 | | **Faro-Yi-9B** | **41.092** | **40.9536** | 46.0 | <details> <summary>Score breakdown</summary> | **Name** | **Few-shot Learning_en** | **Synthetic Tasks_en** | **Single-Doc QA_en** | **Multi-Doc QA_en** | **Summarization_en** | **Few-shot Learning_zh** | **Synthetic Tasks_zh** | **Single-Doc QA_zh** | **Multi-Doc QA_zh** | **Summarization_zh** | |----------------|--------------------------|------------------------|----------------------|---------------------|----------------------|--------------------------|------------------------|----------------------|---------------------|----------------------| | **Yi-9B-200K** | 60.6 | 22.8 | 30.9 | 38.9 | 25.8 | 46.5 | 28.0 | 49.6 | 17.7 | 9.7 | | **Faro-Yi-9B** | **63.8** | **40.2** | **36.2** | 38.0 | **26.3** | 30.0 | **75.1** | **55.6** | **30.7** | **14.1** | </details> ### Performance on Preference (MT-Bench) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd3a3691d27e60db0698b0/M0Kc64sIsbNyCCvrRk1Lv.png) ### Bilingual Ability (CMMLU & MMLU) | **Name** | MMLU | **CMMLU** | | -------------- | --------- | --------- | | **Yi-9B-200K** | 65.73 | 71.97 | | **Faro-Yi-9B** | **68.80** | **73.28** | </details>
{}
task
[ "SUMMARIZATION" ]
41,426
north/nynorsk_North_base_long
north
translation
[ "transformers", "pytorch", "jax", "tensorboard", "t5", "text2text-generation", "translation", "nn", "no", "nb", "dataset:NbAiLab/balanced_bokmaal_nynorsk", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-01-13T23:47:44Z
2023-01-14T09:46:49+00:00
117
0
--- datasets: - NbAiLab/balanced_bokmaal_nynorsk language: - nn - 'no' - nb license: apache-2.0 metrics: - bleu pipeline_tag: translation --- # Model Card for Model ID This model is a long sequence version of a finetuned version of [north/t5_base_NCC_modern](https://huggingface.co/north/t5_base_NCC_modern). | | Size |Model|BLEU| |:------------:|:------------:|:------------:|:------------:| |Small |_60M_|[🤗](https://huggingface.co/north/nynorsk_North_small_long)|93.55| |**Base** |**_220M_**|✔|**94.03**| |Large |_770M_|[🤗](https://huggingface.co/north/nynorsk_North_large_long)|94.12| # Model Details Please see the model card for the base model for more information.
null
Non_BioNLP
# Model Card for Model ID This model is a long sequence version of a finetuned version of [north/t5_base_NCC_modern](https://huggingface.co/north/t5_base_NCC_modern). | | Size |Model|BLEU| |:------------:|:------------:|:------------:|:------------:| |Small |_60M_|[🤗](https://huggingface.co/north/nynorsk_North_small_long)|93.55| |**Base** |**_220M_**|✔|**94.03**| |Large |_770M_|[🤗](https://huggingface.co/north/nynorsk_North_large_long)|94.12| # Model Details Please see the model card for the base model for more information.
{"datasets": ["NbAiLab/balanced_bokmaal_nynorsk"], "language": ["nn", "no", "nb"], "license": "apache-2.0", "metrics": ["bleu"], "pipeline_tag": "translation"}
task
[ "TRANSLATION" ]
41,427
YakovElm/Jira10SetFitModel_balance_ratio_2
YakovElm
text-classification
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-06-02T10:35:42Z
2023-06-02T10:36:17+00:00
8
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # YakovElm/Jira10SetFitModel_balance_ratio_2 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("YakovElm/Jira10SetFitModel_balance_ratio_2") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# YakovElm/Jira10SetFitModel_balance_ratio_2 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("YakovElm/Jira10SetFitModel_balance_ratio_2") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
41,428
AlekseyCalvin/Vladimir_Sillov_SilverAgePoets_FLUX_LoRA
AlekseyCalvin
text-to-image
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
2024-11-14T06:37:46Z
2024-11-15T22:56:37+00:00
0
0
--- base_model: black-forest-labs/FLUX.1-dev language: - en license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md pipeline_tag: text-to-image tags: - flux - diffusers - lora - replicate instance_prompt: poet Vladimir Sillov widget: - text: A photo of Soviet poet Vladimir Sillov in bed at dawn in USSR circa 1923. Sillov, in his mid-20s, young, best quality. Medium frame. Moderately worn, textured skin with blemishes and pores, extremely detailed color photograph. output: url: SillovBed2.png - text: A photo of Soviet poet Vladimir Sillov in bed in USSR circa 1923. Sillov, in his mid-20s, young, best quality. Medium frame. Moderately worn, textured skin with blemishes and pores, extremely detailed color photograph. output: url: SillovBed3.png - text: A photo of Soviet poet Vladimir Sillov, color photograph... output: url: SillovSickle.webp - text: 'A photo of Soviet poet Vladimir Sillov walking in a stairwell in USSR circa 1923 and saying via text balloon: "Days Would Walk an Untrod Morbid Staircase...", then under it another text balloon: "...At accelerant pace!" Sillov, in his mid-20s, young, is in a hurry, best quality. Medium frame. Moderately worn, textured skin with blemishes and pores, extremely detailed color photograph.' output: url: Sillov_1_DaysWouldWalk.png --- # Poet Vladimir Sillov Flux A **Low Rank Adaptor (LoRA)** for **FLUX** Text2Image models, trained to reincarnate the likeness of the poet, literary scholar, critic, editor, screenwriter, anthologist, progressive sociocultural activist, life-partner of Petrovskaya, student & biographer of Khlebnikov, friend of Pasternak, Mayakovskiy, Burlyuk, Aseev, Tretiakov, & many others, as well as a tragic and unforgotten avatar of all that could've been and what sometimes actually was: <br> **Vladimir Sillov** *(b.1901-d.02/16/1930)*. <br> <Gallery /> Unfortunately, Sillov, as of yet does not have a Wikipedia (at least not in English/Worldish)... We hope this sad fact is corrected one day. <br> For now, here's a clip of a reincarnated/approximated iteration of the poet, performing "live" (per our translation/interpretation/adaptation): <br> [CLICK HERE TO WATCH THE CLIP ON YOUTUBE](https://youtu.be/paffYoQpAq4?si=EMQW2zM3IhdqfWVr) Plus one of our translations from Sillov. More will be posted soon at [www.SilverAgePoets.com](www.silveragepoets.com): <br> **UNTIL IT DAWNS ANEW** Days<br> Would walk <br> An untrod morbid staircase<br> At an accelerant pace.<br> Soon <br> The trees <br> Splinter off unto leaflessness,<br> All the clearer it makes:<br> When the spring<br> Times the poets still nibble on<br> Are abruptly<br> Pulled down; <br> With the sun, <br> A blotched face nothing beams upon, <br> They come down <br> Like a crown. <br> And this sun with its springs <br> To the market we’ll bring, <br> Hoist them over thru tussle and din, <br> And for five faded roubles <br> Toss them <br> Off to some antiquarian. <br> Souls spat on, slandered, <br> Insolent, headstrong,<br> Altars do strew.<br> Upon them we'd light <br> Lamps for vesper nights, <br> Until it dawns anew. <br> Find our translations of other poets [over at SilverAgePoets.com](https://www.silveragepoets.com)! <br> In the coming weeks, we will finally update the site with translations from the works of Sillov, his partner Olga Petrovskaya, a number of his above-mentioned friends, and many other dead poets! <br> Beyond that, other forms of translations, adaptation, actions, ressurections, poeticizations, generations, and much else, coming soon; here, there, and elsewhere! <br> ## Evocation-Charged Word With FLUX running & this LoRA activated, include the name `Sillov` or 'Vladimir Sillov' or 'Poet Vladimir Sillov' in any prompt to conjure the long-deathless poet. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('AlekseyCalvin/Vladimir_Sillov_SilverAgePoets_FLUX_LoRA', weight_name='lora.safetensors') image = pipeline('your prompt').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
null
Non_BioNLP
# Poet Vladimir Sillov Flux A **Low Rank Adaptor (LoRA)** for **FLUX** Text2Image models, trained to reincarnate the likeness of the poet, literary scholar, critic, editor, screenwriter, anthologist, progressive sociocultural activist, life-partner of Petrovskaya, student & biographer of Khlebnikov, friend of Pasternak, Mayakovskiy, Burlyuk, Aseev, Tretiakov, & many others, as well as a tragic and unforgotten avatar of all that could've been and what sometimes actually was: <br> **Vladimir Sillov** *(b.1901-d.02/16/1930)*. <br> <Gallery /> Unfortunately, Sillov, as of yet does not have a Wikipedia (at least not in English/Worldish)... We hope this sad fact is corrected one day. <br> For now, here's a clip of a reincarnated/approximated iteration of the poet, performing "live" (per our translation/interpretation/adaptation): <br> [CLICK HERE TO WATCH THE CLIP ON YOUTUBE](https://youtu.be/paffYoQpAq4?si=EMQW2zM3IhdqfWVr) Plus one of our translations from Sillov. More will be posted soon at [www.SilverAgePoets.com](www.silveragepoets.com): <br> **UNTIL IT DAWNS ANEW** Days<br> Would walk <br> An untrod morbid staircase<br> At an accelerant pace.<br> Soon <br> The trees <br> Splinter off unto leaflessness,<br> All the clearer it makes:<br> When the spring<br> Times the poets still nibble on<br> Are abruptly<br> Pulled down; <br> With the sun, <br> A blotched face nothing beams upon, <br> They come down <br> Like a crown. <br> And this sun with its springs <br> To the market we’ll bring, <br> Hoist them over thru tussle and din, <br> And for five faded roubles <br> Toss them <br> Off to some antiquarian. <br> Souls spat on, slandered, <br> Insolent, headstrong,<br> Altars do strew.<br> Upon them we'd light <br> Lamps for vesper nights, <br> Until it dawns anew. <br> Find our translations of other poets [over at SilverAgePoets.com](https://www.silveragepoets.com)! <br> In the coming weeks, we will finally update the site with translations from the works of Sillov, his partner Olga Petrovskaya, a number of his above-mentioned friends, and many other dead poets! <br> Beyond that, other forms of translations, adaptation, actions, ressurections, poeticizations, generations, and much else, coming soon; here, there, and elsewhere! <br> ## Evocation-Charged Word With FLUX running & this LoRA activated, include the name `Sillov` or 'Vladimir Sillov' or 'Poet Vladimir Sillov' in any prompt to conjure the long-deathless poet. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('AlekseyCalvin/Vladimir_Sillov_SilverAgePoets_FLUX_LoRA', weight_name='lora.safetensors') image = pipeline('your prompt').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
{"base_model": "black-forest-labs/FLUX.1-dev", "language": ["en"], "license": "other", "license_name": "flux-1-dev-non-commercial-license", "license_link": "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md", "pipeline_tag": "text-to-image", "tags": ["flux", "diffusers", "lora", "replicate"], "instance_prompt": "poet Vladimir Sillov", "widget": [{"text": "A photo of Soviet poet Vladimir Sillov in bed at dawn in USSR circa 1923. Sillov, in his mid-20s, young, best quality. Medium frame. Moderately worn, textured skin with blemishes and pores, extremely detailed color photograph.", "output": {"url": "SillovBed2.png"}}, {"text": "A photo of Soviet poet Vladimir Sillov in bed in USSR circa 1923. Sillov, in his mid-20s, young, best quality. Medium frame. Moderately worn, textured skin with blemishes and pores, extremely detailed color photograph.", "output": {"url": "SillovBed3.png"}}, {"text": "A photo of Soviet poet Vladimir Sillov, color photograph...", "output": {"url": "SillovSickle.webp"}}, {"text": "A photo of Soviet poet Vladimir Sillov walking in a stairwell in USSR circa 1923 and saying via text balloon: \"Days Would Walk an Untrod Morbid Staircase...\", then under it another text balloon: \"...At accelerant pace!\" Sillov, in his mid-20s, young, is in a hurry, best quality. Medium frame. Moderately worn, textured skin with blemishes and pores, extremely detailed color photograph.", "output": {"url": "Sillov_1_DaysWouldWalk.png"}}]}
task
[ "TRANSLATION" ]
41,429
HPLT/sft-fpft-bg-pythia-6.9b
HPLT
null
[ "generation", "question answering", "instruction tuning", "bg", "arxiv:2309.08958", "license:cc-by-nc-4.0", "region:us" ]
2024-04-04T21:45:10Z
2024-04-04T21:45:12+00:00
0
0
--- language: - bg license: cc-by-nc-4.0 tags: - generation - question answering - instruction tuning --- ### Model Description This HF repository contains base LLMs instruction tuned (SFT) with full-parameter fine-tuning and then used to study whether monolingual or multilingual instruction tuning is more favourable. * [GitHub](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main) * [Paper](https://arxiv.org/abs/2309.08958) #### Instruction tuning details * Base model: [pythia-6.9b](https://huggingface.co/pythia-6.9b) * Instruction tuning language: Bulgarian * Training method: full-parameter fine-tuning. * Best checkpoint: best cross-entropy on a validation set, trained for 3 epochs. * Dataset: machine-translated from [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). You can download our data [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/training-data). #### Usage The model checkpoint should be loaded using `transformers` library. Please refer to our Github repository [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/fpft) for inference and training instructions. #### Citation ``` @inproceedings{chen-etal-2024-monolingual, title="Monolingual or multilingual instruction tuning: Which makes a better {Alpaca}", author="Pinzhen Chen and Shaoxiong Ji and Nikolay Bogoychev and Andrey Kutuzov and Barry Haddow and Kenneth Heafield", year="2024", booktitle = "Findings of the Association for Computational Linguistics: EACL 2024", } ```
null
Non_BioNLP
### Model Description This HF repository contains base LLMs instruction tuned (SFT) with full-parameter fine-tuning and then used to study whether monolingual or multilingual instruction tuning is more favourable. * [GitHub](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main) * [Paper](https://arxiv.org/abs/2309.08958) #### Instruction tuning details * Base model: [pythia-6.9b](https://huggingface.co/pythia-6.9b) * Instruction tuning language: Bulgarian * Training method: full-parameter fine-tuning. * Best checkpoint: best cross-entropy on a validation set, trained for 3 epochs. * Dataset: machine-translated from [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). You can download our data [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/training-data). #### Usage The model checkpoint should be loaded using `transformers` library. Please refer to our Github repository [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/fpft) for inference and training instructions. #### Citation ``` @inproceedings{chen-etal-2024-monolingual, title="Monolingual or multilingual instruction tuning: Which makes a better {Alpaca}", author="Pinzhen Chen and Shaoxiong Ji and Nikolay Bogoychev and Andrey Kutuzov and Barry Haddow and Kenneth Heafield", year="2024", booktitle = "Findings of the Association for Computational Linguistics: EACL 2024", } ```
{"language": ["bg"], "license": "cc-by-nc-4.0", "tags": ["generation", "question answering", "instruction tuning"]}
task
[ "QUESTION_ANSWERING" ]
41,430
thrunlab/t5-base_cola_moe_ex38_sp0_2_ar0_0_mare_mlp
thrunlab
text-classification
[ "transformers", "pytorch", "t5", "text-classification", "generated_from_trainer", "dataset:glue", "base_model:google-t5/t5-base", "base_model:finetune:google-t5/t5-base", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-10-27T20:02:40Z
2023-10-27T20:57:51+00:00
34
0
--- base_model: t5-base datasets: - glue license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: t5-base_cola_moe_ex38_sp0_2_ar0_0_mare_mlp results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue config: rte split: validation args: rte metrics: - type: accuracy value: 0.0 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base_cola_moe_ex38_sp0_2_ar0_0_mare_mlp This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 1.4611 - Accuracy: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 20 - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5168 | 0.32 | 25 | 0.5669 | 0.7220 | | 0.5033 | 0.64 | 50 | 0.5296 | 0.7690 | | 0.4742 | 0.96 | 75 | 0.5493 | 0.7653 | | 0.4217 | 1.28 | 100 | 0.5473 | 0.7726 | | 0.4188 | 1.6 | 125 | 0.5759 | 0.7834 | | 0.4565 | 1.92 | 150 | 0.5721 | 0.7653 | | 0.3354 | 2.24 | 175 | 0.5307 | 0.7762 | | 0.2589 | 2.56 | 200 | 0.5804 | 0.7942 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu117 - Datasets 2.14.5 - Tokenizers 0.11.6
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base_cola_moe_ex38_sp0_2_ar0_0_mare_mlp This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 1.4611 - Accuracy: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 20 - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5168 | 0.32 | 25 | 0.5669 | 0.7220 | | 0.5033 | 0.64 | 50 | 0.5296 | 0.7690 | | 0.4742 | 0.96 | 75 | 0.5493 | 0.7653 | | 0.4217 | 1.28 | 100 | 0.5473 | 0.7726 | | 0.4188 | 1.6 | 125 | 0.5759 | 0.7834 | | 0.4565 | 1.92 | 150 | 0.5721 | 0.7653 | | 0.3354 | 2.24 | 175 | 0.5307 | 0.7762 | | 0.2589 | 2.56 | 200 | 0.5804 | 0.7942 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu117 - Datasets 2.14.5 - Tokenizers 0.11.6
{"base_model": "t5-base", "datasets": ["glue"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "t5-base_cola_moe_ex38_sp0_2_ar0_0_mare_mlp", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "config": "rte", "split": "validation", "args": "rte"}, "metrics": [{"type": "accuracy", "value": 0.0, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,431
predibase/cnn
predibase
text-generation
[ "peft", "safetensors", "text-generation", "base_model:mistralai/Mistral-7B-v0.1", "base_model:adapter:mistralai/Mistral-7B-v0.1", "region:us" ]
2024-02-19T18:58:46Z
2024-02-21T19:13:54+00:00
39
1
--- base_model: mistralai/Mistral-7B-v0.1 library_name: peft pipeline_tag: text-generation --- Description: News article summarization\ Original dataset: https://huggingface.co/datasets/cnn_dailymail \ ---\ Try querying this adapter for free in Lora Land at https://predibase.com/lora-land! \ The adapter_category is Summarization and the name is News Summarization (cnn)\ ---\ Sample input: You are given a news article below. Please summarize the article, including only its highlights.\n\n### Article: (CNN)Former Vice President Walter Mondale was released from the Mayo Clinic on Saturday after being admitted with influenza, hospital spokeswoman Kelley Luckstein said. "He's doing well. We treated him for flu and cold symptoms and he was released today," she said. Mondale, 87, was diagnosed after he went to the hospital for a routine checkup following a fever, former President Jimmy Carter said Friday. "He is in the bed right this moment, but looking forward to come back home," Carter said during a speech at a Nobel Peace Prize Forum in Minneapolis. "He said tell everybody he is doing well." Mondale underwent treatment at the Mayo Clinic in Rochester, Minnesota. The 42nd vice president served under Carter between 1977 and 1981, and later ran for President, but lost to Ronald Reagan. But not before he made history by naming a woman, U.S. Rep. Geraldine A. Ferraro of New York, as his running mate. Before that, the former lawyer was a U.S. senator from Minnesota. His wife, Joan Mondale, died last year.\n\n### Summary: \ ---\ Sample output: Walter Mondale was admitted to the Mayo Clinic on Friday . He was released Saturday after being treated for flu and cold symptoms . Mondale served as vice president under Jimmy Carter .\ ---\ Try using this adapter yourself! ``` from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "mistralai/Mistral-7B-v0.1" peft_model_id = "predibase/cnn" model = AutoModelForCausalLM.from_pretrained(model_id) model.load_adapter(peft_model_id) ```
null
Non_BioNLP
Description: News article summarization\ Original dataset: https://huggingface.co/datasets/cnn_dailymail \ ---\ Try querying this adapter for free in Lora Land at https://predibase.com/lora-land! \ The adapter_category is Summarization and the name is News Summarization (cnn)\ ---\ Sample input: You are given a news article below. Please summarize the article, including only its highlights.\n\n### Article: (CNN)Former Vice President Walter Mondale was released from the Mayo Clinic on Saturday after being admitted with influenza, hospital spokeswoman Kelley Luckstein said. "He's doing well. We treated him for flu and cold symptoms and he was released today," she said. Mondale, 87, was diagnosed after he went to the hospital for a routine checkup following a fever, former President Jimmy Carter said Friday. "He is in the bed right this moment, but looking forward to come back home," Carter said during a speech at a Nobel Peace Prize Forum in Minneapolis. "He said tell everybody he is doing well." Mondale underwent treatment at the Mayo Clinic in Rochester, Minnesota. The 42nd vice president served under Carter between 1977 and 1981, and later ran for President, but lost to Ronald Reagan. But not before he made history by naming a woman, U.S. Rep. Geraldine A. Ferraro of New York, as his running mate. Before that, the former lawyer was a U.S. senator from Minnesota. His wife, Joan Mondale, died last year.\n\n### Summary: \ ---\ Sample output: Walter Mondale was admitted to the Mayo Clinic on Friday . He was released Saturday after being treated for flu and cold symptoms . Mondale served as vice president under Jimmy Carter .\ ---\ Try using this adapter yourself! ``` from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "mistralai/Mistral-7B-v0.1" peft_model_id = "predibase/cnn" model = AutoModelForCausalLM.from_pretrained(model_id) model.load_adapter(peft_model_id) ```
{"base_model": "mistralai/Mistral-7B-v0.1", "library_name": "peft", "pipeline_tag": "text-generation"}
task
[ "SUMMARIZATION" ]
41,432
BenneF/distilbert-base-uncased-finetuned-emotion
BenneF
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-13T13:23:58Z
2023-10-13T14:36:35+00:00
93
0
--- base_model: distilbert-base-uncased datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.9255 name: Accuracy - type: f1 value: 0.9254628243006803 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2244 - Accuracy: 0.9255 - F1: 0.9255 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8562 | 1.0 | 250 | 0.3401 | 0.903 | 0.9015 | | 0.257 | 2.0 | 500 | 0.2244 | 0.9255 | 0.9255 | ### Framework versions - Transformers 4.34.0 - Pytorch 1.12.1+cu113 - Datasets 2.14.5 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2244 - Accuracy: 0.9255 - F1: 0.9255 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8562 | 1.0 | 250 | 0.3401 | 0.903 | 0.9015 | | 0.257 | 2.0 | 500 | 0.2244 | 0.9255 | 0.9255 | ### Framework versions - Transformers 4.34.0 - Pytorch 1.12.1+cu113 - Datasets 2.14.5 - Tokenizers 0.14.1
{"base_model": "distilbert-base-uncased", "datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.9255, "name": "Accuracy"}, {"type": "f1", "value": 0.9254628243006803, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,433
poltextlab/xlm-roberta-large-hungarian-publicopinion-cap-v3
poltextlab
text-classification
[ "transformers", "pytorch", "xlm-roberta", "text-classification", "hu", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-09-07T15:24:31Z
2025-02-26T16:05:34+00:00
0
0
--- language: - hu metrics: - accuracy - f1-score tags: - text-classification - pytorch extra_gated_prompt: 'Our models are intended for academic use only. If you are not affiliated with an academic institution, please provide a rationale for using our models. Please allow us a few business days to manually review subscriptions. If you use our models for your work or research, please cite this paper: Sebők, M., Máté, Á., Ring, O., Kovács, V., & Lehoczki, R. (2024). Leveraging Open Large Language Models for Multilingual Policy Topic Classification: The Babel Machine Approach. Social Science Computer Review, 0(0). https://doi.org/10.1177/08944393241259434' extra_gated_fields: Name: text Country: country Institution: text Institution Email: text Please specify your academic use case: text --- # xlm-roberta-large-hungarian-publicopinion-cap-v3 ## Model description An `xlm-roberta-large` model fine-tuned on hungarian training data containing public opinions labeled with [major topic codes](https://www.comparativeagendas.net/pages/master-codebook) from the [Comparative Agendas Project](https://www.comparativeagendas.net/). We follow the master codebook of the Comparative Agendas Project, and all of our models use the same major topic codes. ## How to use the model ```python from transformers import AutoTokenizer, pipeline tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large") pipe = pipeline( model="poltextlab/xlm-roberta-large-hungarian-publicopinion-cap-v3", task="text-classification", tokenizer=tokenizer, use_fast=False, token="<your_hf_read_only_token>" ) text = "We will place an immediate 6-month halt on the finance driven closure of beds and wards, and set up an independent audit of needs and facilities." pipe(text) ``` The translation table from the model results to CAP codes is the following: ```python CAP_NUM_DICT = { 0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 12, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 23, 21: 999, } ``` We have included a 999 label because our models are fine-tuned on training data containing the label 'None' in addition to the 21 CAP major policy topic codes, indicating that the given text contains no relevant policy content. We use the label 999 for these cases. ### Gated access Due to the gated access, you must pass the `token` parameter when loading the model. In earlier versions of the Transformers package, you may need to use the `use_auth_token` parameter instead. ## Model performance The model was evaluated on a test set of 586 examples.<br> Model accuracy is **0.94**. | label | precision | recall | f1-score | support | |:-------------|------------:|---------:|-----------:|----------:| | 0 | 0.98 | 0.96 | 0.97 | 101 | | 1 | 0.92 | 0.9 | 0.91 | 39 | | 2 | 0.96 | 1 | 0.98 | 75 | | 3 | 0.9 | 0.9 | 0.9 | 21 | | 4 | 0.86 | 1 | 0.92 | 24 | | 5 | 1 | 0.82 | 0.9 | 11 | | 6 | 1 | 1 | 1 | 23 | | 7 | 0.93 | 1 | 0.97 | 28 | | 8 | 0 | 0 | 0 | 2 | | 9 | 0.92 | 0.94 | 0.93 | 48 | | 10 | 0.92 | 0.97 | 0.94 | 67 | | 11 | 0 | 0 | 0 | 4 | | 12 | 0.83 | 0.83 | 0.83 | 6 | | 13 | 0.91 | 0.84 | 0.87 | 25 | | 14 | 0.94 | 0.94 | 0.94 | 18 | | 15 | 1 | 0.57 | 0.73 | 7 | | 16 | 0 | 0 | 0 | 0 | | 17 | 0.93 | 0.98 | 0.95 | 51 | | 18 | 0.97 | 1 | 0.99 | 34 | | 19 | 0 | 0 | 0 | 2 | | macro avg | 0.75 | 0.73 | 0.74 | 586 | | weighted avg | 0.93 | 0.94 | 0.93 | 586 | ### Fine-tuning procedure This model was fine-tuned with the following key hyperparameters: - **Number of Training Epochs**: 10 - **Batch Size**: 8 - **Learning Rate**: 5e-06 - **Early Stopping**: enabled with a patience of 2 epochs ## Inference platform This model is used by the [CAP Babel Machine](https://babel.poltextlab.com), an open-source and free natural language processing tool, designed to simplify and speed up projects for comparative research. ## Cooperation Model performance can be significantly improved by extending our training sets. We appreciate every submission of CAP-coded corpora (of any domain and language) at poltextlab{at}poltextlab{dot}com or by using the [CAP Babel Machine](https://babel.poltextlab.com). ## Reference Sebők, M., Máté, Á., Ring, O., Kovács, V., & Lehoczki, R. (2024). Leveraging Open Large Language Models for Multilingual Policy Topic Classification: The Babel Machine Approach. Social Science Computer Review, 0(0). https://doi.org/10.1177/08944393241259434 ## Debugging and issues This architecture uses the `sentencepiece` tokenizer. In order to use the model before `transformers==4.27` you need to install it manually. If you encounter a `RuntimeError` when loading the model using the `from_pretrained()` method, adding `ignore_mismatched_sizes=True` should solve the issue.
null
Non_BioNLP
# xlm-roberta-large-hungarian-publicopinion-cap-v3 ## Model description An `xlm-roberta-large` model fine-tuned on hungarian training data containing public opinions labeled with [major topic codes](https://www.comparativeagendas.net/pages/master-codebook) from the [Comparative Agendas Project](https://www.comparativeagendas.net/). We follow the master codebook of the Comparative Agendas Project, and all of our models use the same major topic codes. ## How to use the model ```python from transformers import AutoTokenizer, pipeline tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large") pipe = pipeline( model="poltextlab/xlm-roberta-large-hungarian-publicopinion-cap-v3", task="text-classification", tokenizer=tokenizer, use_fast=False, token="<your_hf_read_only_token>" ) text = "We will place an immediate 6-month halt on the finance driven closure of beds and wards, and set up an independent audit of needs and facilities." pipe(text) ``` The translation table from the model results to CAP codes is the following: ```python CAP_NUM_DICT = { 0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 12, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 23, 21: 999, } ``` We have included a 999 label because our models are fine-tuned on training data containing the label 'None' in addition to the 21 CAP major policy topic codes, indicating that the given text contains no relevant policy content. We use the label 999 for these cases. ### Gated access Due to the gated access, you must pass the `token` parameter when loading the model. In earlier versions of the Transformers package, you may need to use the `use_auth_token` parameter instead. ## Model performance The model was evaluated on a test set of 586 examples.<br> Model accuracy is **0.94**. | label | precision | recall | f1-score | support | |:-------------|------------:|---------:|-----------:|----------:| | 0 | 0.98 | 0.96 | 0.97 | 101 | | 1 | 0.92 | 0.9 | 0.91 | 39 | | 2 | 0.96 | 1 | 0.98 | 75 | | 3 | 0.9 | 0.9 | 0.9 | 21 | | 4 | 0.86 | 1 | 0.92 | 24 | | 5 | 1 | 0.82 | 0.9 | 11 | | 6 | 1 | 1 | 1 | 23 | | 7 | 0.93 | 1 | 0.97 | 28 | | 8 | 0 | 0 | 0 | 2 | | 9 | 0.92 | 0.94 | 0.93 | 48 | | 10 | 0.92 | 0.97 | 0.94 | 67 | | 11 | 0 | 0 | 0 | 4 | | 12 | 0.83 | 0.83 | 0.83 | 6 | | 13 | 0.91 | 0.84 | 0.87 | 25 | | 14 | 0.94 | 0.94 | 0.94 | 18 | | 15 | 1 | 0.57 | 0.73 | 7 | | 16 | 0 | 0 | 0 | 0 | | 17 | 0.93 | 0.98 | 0.95 | 51 | | 18 | 0.97 | 1 | 0.99 | 34 | | 19 | 0 | 0 | 0 | 2 | | macro avg | 0.75 | 0.73 | 0.74 | 586 | | weighted avg | 0.93 | 0.94 | 0.93 | 586 | ### Fine-tuning procedure This model was fine-tuned with the following key hyperparameters: - **Number of Training Epochs**: 10 - **Batch Size**: 8 - **Learning Rate**: 5e-06 - **Early Stopping**: enabled with a patience of 2 epochs ## Inference platform This model is used by the [CAP Babel Machine](https://babel.poltextlab.com), an open-source and free natural language processing tool, designed to simplify and speed up projects for comparative research. ## Cooperation Model performance can be significantly improved by extending our training sets. We appreciate every submission of CAP-coded corpora (of any domain and language) at poltextlab{at}poltextlab{dot}com or by using the [CAP Babel Machine](https://babel.poltextlab.com). ## Reference Sebők, M., Máté, Á., Ring, O., Kovács, V., & Lehoczki, R. (2024). Leveraging Open Large Language Models for Multilingual Policy Topic Classification: The Babel Machine Approach. Social Science Computer Review, 0(0). https://doi.org/10.1177/08944393241259434 ## Debugging and issues This architecture uses the `sentencepiece` tokenizer. In order to use the model before `transformers==4.27` you need to install it manually. If you encounter a `RuntimeError` when loading the model using the `from_pretrained()` method, adding `ignore_mismatched_sizes=True` should solve the issue.
{"language": ["hu"], "metrics": ["accuracy", "f1-score"], "tags": ["text-classification", "pytorch"], "extra_gated_prompt": "Our models are intended for academic use only. If you are not affiliated with an academic institution, please provide a rationale for using our models. Please allow us a few business days to manually review subscriptions.\nIf you use our models for your work or research, please cite this paper: Sebők, M., Máté, Á., Ring, O., Kovács, V., & Lehoczki, R. (2024). Leveraging Open Large Language Models for Multilingual Policy Topic Classification: The Babel Machine Approach. Social Science Computer Review, 0(0). https://doi.org/10.1177/08944393241259434", "extra_gated_fields": {"Name": "text", "Country": "country", "Institution": "text", "Institution Email": "text", "Please specify your academic use case": "text"}}
task
[ "TRANSLATION" ]
41,434
shanhy/xlmroberta_clir_seed12_cross_translation_augmentation_val_kin_0.563
shanhy
text-classification
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-base", "base_model:finetune:FacebookAI/xlm-roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-01-26T19:42:11Z
2024-01-26T19:43:31+00:00
3
0
--- base_model: xlm-roberta-base license: mit tags: - generated_from_trainer model-index: - name: xlmroberta_clir_seed12_back_translation_augmentation_val_kin results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmroberta_clir_seed12_back_translation_augmentation_val_kin This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0830 - Spearman Corr: 0.4707 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 128 - seed: 12 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Spearman Corr | |:-------------:|:-----:|:----:|:---------------:|:-------------:| | No log | 0.48 | 200 | 0.0356 | 0.5073 | | No log | 0.97 | 400 | 0.0890 | 0.4997 | | No log | 1.45 | 600 | 0.0440 | 0.5627 | | No log | 1.94 | 800 | 0.0423 | 0.5383 | | 0.039 | 2.42 | 1000 | 0.0346 | 0.5164 | | 0.039 | 2.91 | 1200 | 0.0587 | 0.5257 | | 0.039 | 3.39 | 1400 | 0.0655 | 0.4899 | | 0.039 | 3.88 | 1600 | 0.0499 | 0.5162 | | 0.0229 | 4.36 | 1800 | 0.0616 | 0.5267 | | 0.0229 | 4.85 | 2000 | 0.0587 | 0.4955 | | 0.0229 | 5.33 | 2200 | 0.0830 | 0.4707 | ### Framework versions - Transformers 4.37.1 - Pytorch 2.1.0+cu121 - Datasets 2.16.1 - Tokenizers 0.15.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmroberta_clir_seed12_back_translation_augmentation_val_kin This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0830 - Spearman Corr: 0.4707 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 128 - seed: 12 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Spearman Corr | |:-------------:|:-----:|:----:|:---------------:|:-------------:| | No log | 0.48 | 200 | 0.0356 | 0.5073 | | No log | 0.97 | 400 | 0.0890 | 0.4997 | | No log | 1.45 | 600 | 0.0440 | 0.5627 | | No log | 1.94 | 800 | 0.0423 | 0.5383 | | 0.039 | 2.42 | 1000 | 0.0346 | 0.5164 | | 0.039 | 2.91 | 1200 | 0.0587 | 0.5257 | | 0.039 | 3.39 | 1400 | 0.0655 | 0.4899 | | 0.039 | 3.88 | 1600 | 0.0499 | 0.5162 | | 0.0229 | 4.36 | 1800 | 0.0616 | 0.5267 | | 0.0229 | 4.85 | 2000 | 0.0587 | 0.4955 | | 0.0229 | 5.33 | 2200 | 0.0830 | 0.4707 | ### Framework versions - Transformers 4.37.1 - Pytorch 2.1.0+cu121 - Datasets 2.16.1 - Tokenizers 0.15.1
{"base_model": "xlm-roberta-base", "license": "mit", "tags": ["generated_from_trainer"], "model-index": [{"name": "xlmroberta_clir_seed12_back_translation_augmentation_val_kin", "results": []}]}
task
[ "TRANSLATION" ]
41,435
huatougui/my_awesome_model
huatougui
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:imdb", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-23T07:26:11Z
2023-11-23T08:25:17+00:00
6
0
--- base_model: distilbert-base-uncased datasets: - imdb license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: my_awesome_model results: - task: type: text-classification name: Text Classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - type: accuracy value: 0.93204 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2539 - Accuracy: 0.9320 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2245 | 1.0 | 1563 | 0.2077 | 0.9247 | | 0.1215 | 2.0 | 3126 | 0.2539 | 0.9320 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.0+cu118 - Datasets 2.15.0 - Tokenizers 0.15.0
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2539 - Accuracy: 0.9320 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2245 | 1.0 | 1563 | 0.2077 | 0.9247 | | 0.1215 | 2.0 | 3126 | 0.2539 | 0.9320 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.0+cu118 - Datasets 2.15.0 - Tokenizers 0.15.0
{"base_model": "distilbert-base-uncased", "datasets": ["imdb"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "my_awesome_model", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "imdb", "type": "imdb", "config": "plain_text", "split": "test", "args": "plain_text"}, "metrics": [{"type": "accuracy", "value": 0.93204, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,436
ptpedroVortal/nomic_vortal_v3.1
ptpedroVortal
sentence-similarity
[ "sentence-transformers", "safetensors", "nomic_bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:222", "loss:MultipleNegativesRankingLoss", "custom_code", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:nomic-ai/nomic-embed-text-v1", "base_model:finetune:nomic-ai/nomic-embed-text-v1", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2025-01-07T15:39:37Z
2025-01-07T15:40:32+00:00
6
0
--- base_model: nomic-ai/nomic-embed-text-v1 library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:222 - loss:MultipleNegativesRankingLoss widget: - source_sentence: Collect the details that are associated with Lot 1 product '' 'IGE', with quantity 7280, unit UND sentences: - Lote 12\nquatro euros e noventa e cinco cêntimos\n3\n3Teks/Turquia\nRef. 101.001.SE - ' ImmunoCAP IgE Total Anti-IgE Immunocap Caneta 7280 455 18,70 dezoito euros e setenta cêntimos 1,168750 16 14-4509-01 8.508,50\n2\n2 122060282Conforme \nalergénioImmunoCAP Alergénio Caneta 20800 1300 30,00 trinta euros 1,875000 16 .... 39.000,00\n2\n3 122060282Conforme \nalergénioImmunoCAP Alergénio Componente\nMolecularCaneta 2000 200 73,49setenta e três euros e quarenta e \nnove cêntimos 7,349000 10 .... 14.698,00\n2\n4 122060282Conforme \nalergénioImmunoCAP Alergénio Caneta 8100 810 22,05 vinte e dois euros e cinco cêntimos 2,205000 10 .... 17.860,50' - Solução Proposta 5 565,00 €\nhw 5 565,00 €\nTotal sem IVA 5 565,00 €\nValor do IVA (23 %) 1 279,95 €\nTotal com IVA (23 %) 6 844,95 €MIL DUZENTOS E SETENTA E NOVE EUROS E NOVENTA E CINCO CENTIMOS\nSEIS MIL OITOCENTOS E QUARENTA E QUATRO EUROS E NOVENTA E CINCO\nCENTIMOSRESUMO ECONÓMICO\nDescrição Valor\nCINCO MIL QUINHENTOS E SESSENTA E CINCO EUROS\nProposta FP-5-171.xlsxPágina 4 de 5\n30/01/2023\nLenovo - Monitores 5 565,00 €\nT27i-10(A18270FT0)-27inch Monitor(HDMI)\nStock a data de hoje 20 Unds + Previsão 4 \nsemanas30 185,50 € 5 565,00 €\n - source_sentence: Collect the details that are associated with Lot 4 product 'Os diversos equipamentos e eventos a realizar e de gestão do Município de Albufeira' 'Vigilância e segurança humana contínua em equipamentos e eventos - VIG/ARD/ARE/COORD - período de 3 meses, todos os dias de forma permanente e ininterrupta, total estimado de 2250H.', with quantity 1, unit UN sentences: - 'Lote 3:\nPreço Unitário: 10,00€ (dez euros) /hora\nPreço Total: 27.540,00€ (vinte sete mil quinhentos e quarenta euros)' - 'DESCRIÇÃO DE SERVIÇOS LOTE 4 Local, Dias e Horários: ➢ Locais, Dias e horários a indicar (prazo mínimo de 24 horas) pelos serviços da Câmara Municipal de Albufeira ; Efetivo e Categoria Profissional : ➢ O número necessário de efetivo será definido por requisição a emitir pelos competentes serviços da Câmara Municipal de Albufeira nas categorias de Vigilante, Assistente Recinto de Espetáculos, Assistente de Recinto Desportivos e Coordenador; Valor Hora: €15,00 ( quinze euros) Total de Horas Previsto: 2980 horas (dois mil novecentos e oitenta horas) Valor Total Previsto: €44 700,00 (quarenta e quatro mil e setecentos euros)' - 2202000061 - SUTURA MONOFILAMENYO NÃO ABSORVIVEL POLIAMIDA 0 AG.CILINDRICA,30MM (CART.) 0.78 - source_sentence: Collect the details that are associated with Lot 11 product '' 'Mesas apoio (anestesia e circulante)', with quantity 4, unit Subcontracting Unit sentences: - 2202000128 - Sutura sint. absorvivel "Poliglicolico" multifilamento 5/0 ag. cilindrica 13mm (CART.) 2.2 - 'Preço\nLote Artigo Designação Referência € Custo unitário s/ IVA % IVA Quantidade Valor total S/IVA\n11 Mesa apoio \n(anestesia e \ncirculante) Trolley apoio TTAC 3350 690€ 23% 4 2760€\nTotal 2760€ ' - 'Trouxa Cesariana 832,00 7,950000 6\xa0614,400000 23,00\nLote 10\nsete euros e noveta e cinco cêntimos\n3\n3Teks/Turquia\nRef. 601.002.SE\nCDM: 88869431\nDM Classe Ie\nNPDM: T020199\nTotal Ilíquido :Descontos Comerciais :\nTOTAL EUR56\xa0642,860000P' - source_sentence: Collect the details that are associated with product '' '2202000154 - FIO SUT. POLIPROPI. NÃO ABS. 2/0 MONOF. C/ 2 AG. RECTAS CILIND. 75 MM (CART.)', with quantity 108, unit UN sentences: - '| Lote | DENOMINAÇÃO | VALOR MENSAL | VALOR | |--------|------------------------------------------------------------------------|--------------|-------------| | Lote 1 | Mercados Quinzenais, no Município de Albufeira | 567,00 € | 2 835,00 € | | Lote 2 | Portaria Das Instalações Técnicas e Administrativas do Edifício de Vale Pedras | 7 435,80 € | 37 179,00 € | | Lote 3 | Portaria Parque de Estacionamento Coberto dos Olhos de Água | 8 262,00 € | 41 310,00 € | | Lote 4 | Compreende a Vigilância aos diversos eventos e equipamentos | | 49 021,00 € | | TOTAL | | | 130 345,00 € |' - LOTE 8\n4,0 CARROHPPBV1 SUPORTE RODADO PARA SACO RESÍDUOS GRUPO III/IV 240,000 960,000 23,0\ - 2202000154 - FIO SUT. POLIPROPI. NÃO ABS. 2/0 MONOF. C/ 2 AG. RECTAS CILIND. 75 MM (CART.) 2.65 - source_sentence: Collect the details that are associated with product '' 'Macbook Air 13" com processador M1/M2 e 8 GB de RAM (Telado PT-PT)', with quantity 1, unit UN sentences: - '[''A Siemens Healthineers é uma empresa líder em inovação, reconhecida pela sua engenharia de ponta, pelo apoio a \nterapias avançadas, assim como por privilegiar a versatilidade e funcionalidade entre questões clínicas, resultados \nlaboratoriais precisos e serviços transformadores. Deste modo, maximizamos oportunidades e reduzimos riscos, \ncontribuindo para a melhoria da excelência clínica, eficiência operacional e rentabilidade.\nQueremos continuar a colocar todas estas capacidades e os nossos conhecimentos a nível clínico e técnico ao serviço de\nparceiros e clientes, para que sejamos o apoio que precisam para alcançar todos os seus objetivos. Sabemos que podemos\nir cada vez mais longe, mas sabemos também que muitas vezes um caminho percorrido em conjunto oferece todo um novo\nmundo de possibilidades.\nJuntos, vamos continuar a construir o futuro da saúde.\nPág. 52.Proposta Financeira\n2.1. Posições Relevantes\nLote 1\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Teste\nS/ IVA\n1 122060265 IGE TESTE 7280 3,00 €\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €\n(\n(Vinte e um mil, oitocentos e quarenta euros)\nLote 4\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Total\nS/ IVA\n5\n1 122060125C1 INACTIVADOR FUNCIONAL'', ''POSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Teste\nS/ IVA\n1 122060265 IGE TESTE 7280 3,00 €\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €\n(\n(Vinte e um mil, oitocentos e quarenta euros)\nLote 4\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Total\nS/ IVA\n5\n1 122060125C1 INACTIVADOR FUNCIONAL\nQUANTITATIVOTESTE 576 5,00 €\nPreço Líquido da Proposta para o Lote 4 .................................................................................................... 2.880,00 €\n(\n(Dois mil, oitocentos e oitenta euros)\nPág. 63.Anexos\n1. Procuração\nSiemens Healthcare, Unipessoal, Lda.\nRua Irmãos Siemens, 1-1A\n2720-093 Amadora\nPortugal\nTelf: +351 210 601 800\[email protected]\nwww.healthcare.siemens.ptCopyright\nEsta Proposta e seu conteúdo são propriedade intelectual da\nSiemens Healthcare, Unipessoal, Lda, na proposta designada\npor Siemens Healthineers, protegida nos termos do Código do\nDireito de Autor e dos Direitos Conexos.'', ''A Siemens Healthineers é uma empresa líder em inovação, reconhecida pela sua engenharia de ponta, pelo apoio a \nterapias avançadas, assim como por privilegiar a versatilidade e funcionalidade entre questões clínicas, resultados \nlaboratoriais precisos e serviços transformadores. Deste modo, maximizamos oportunidades e reduzimos riscos, \ncontribuindo para a melhoria da excelência clínica, eficiência operacional e rentabilidade.\nQueremos continuar a colocar todas estas capacidades e os nossos conhecimentos a nível clínico e técnico ao serviço de\nparceiros e clientes, para que sejamos o apoio que precisam para alcançar todos os seus objetivos. Sabemos que podemos\nir cada vez mais longe, mas sabemos também que muitas vezes um caminho percorrido em conjunto oferece todo um novo\nmundo de possibilidades.\nJuntos, vamos continuar a construir o futuro da saúde.\nPág. 52.Proposta Financeira\n2.1. Posições Relevantes\nLote 1\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Teste\nS/ IVA\n1 122060265 IGE TESTE 7280 3,00 €\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €'', ''A Siemens Healthineers é uma empresa líder em inovação, reconhecida pela sua engenharia de ponta, pelo apoio a \nterapias avançadas, assim como por privilegiar a versatilidade e funcionalidade entre questões clínicas, resultados \nlaboratoriais precisos e serviços transformadores. Deste modo, maximizamos oportunidades e reduzimos riscos, \ncontribuindo para a melhoria da excelência clínica, eficiência operacional e rentabilidade.\nQueremos continuar a colocar todas estas capacidades e os nossos conhecimentos a nível clínico e técnico ao serviço de\nparceiros e clientes, para que sejamos o apoio que precisam para alcançar todos os seus objetivos. Sabemos que podemos\nir cada vez mais longe, mas sabemos também que muitas vezes um caminho percorrido em conjunto oferece todo um novo\nmundo de possibilidades.\nJuntos, vamos continuar a construir o futuro da saúde.\nPág. 52.Proposta Financeira\n2.1. Posições Relevantes\nLote 1\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Teste\nS/ IVA\n1 122060265 IGE TESTE 7280 3,00 €\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €\n(\n(Vinte e um mil, oitocentos e quarenta euros)\nLote 4\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Total\nS/ IVA\n5\n1 122060125C1 INACTIVADOR FUNCIONAL\nQUANTITATIVOTESTE 576 5,00 €\nPreço Líquido da Proposta para o Lote 4 .................................................................................................... 2.880,00 €'', ''Queremos continuar a colocar todas estas capacidades e os nossos conhecimentos a nível clínico e técnico ao serviço de\nparceiros e clientes, para que sejamos o apoio que precisam para alcançar todos os seus objetivos. Sabemos que podemos\nir cada vez mais longe, mas sabemos também que muitas vezes um caminho percorrido em conjunto oferece todo um novo\nmundo de possibilidades.\nJuntos, vamos continuar a construir o futuro da saúde.\nPág. 52.Proposta Financeira\n2.1. Posições Relevantes\nLote 1\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Teste\nS/ IVA\n1 122060265 IGE TESTE 7280 3,00 €\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €\n(\n(Vinte e um mil, oitocentos e quarenta euros)\nLote 4\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\nANUALPreço Total\nS/ IVA\n5\n1 122060125C1 INACTIVADOR FUNCIONAL\nQUANTITATIVOTESTE 576 5,00 €\nPreço Líquido da Proposta para o Lote 4 .................................................................................................... 2.880,00 €\n(\n(Dois mil, oitocentos e oitenta euros)\nPág. 63.Anexos'']' - 'LOTE 5 1 MESA APOIO MESA DE APOIO EM INOX AISI 304 2,0 279,000 23,0 558,000 Mesa com 4 rodas , 2 com travão Tabuleiro inferior Dimens: C 700 x L 500 x A 800mm Prateleira inferior - profundidade 250mm Fabrico Nacional e por medida Total do do lote 5: 558,00€ Quinhentos e cinquenta e oito euros' - 'Apresenta -se de seguida a configuração financeira para a fornecimento dos produtos \nrequeridos , mediante opções por cor e diferentes características:\nNOTA: Valores válidos até 23 de Fevereiro e mediante adjudicação de 2 ou mais \nequipamentos portáteis (excluindo Teclado)\nPART-NUMBER QTD. DESCRIÇÃOVALOR\nUNITÁRIOVALOR\nTOTAL\nMLY03PO/A 1Apple Macbook AIR 13,6" (Disco 512GB SSD; 10 core) 1 545,08 € 1 545,08 €\nMLXY3PO/A 1Apple Macbook AIR 13,6" (Disco 256GB SSD, 8 core) 1 227,48 € 1 227,48 €' --- # SentenceTransformer based on nomic-ai/nomic-embed-text-v1 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [nomic-ai/nomic-embed-text-v1](https://huggingface.co/nomic-ai/nomic-embed-text-v1). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [nomic-ai/nomic-embed-text-v1](https://huggingface.co/nomic-ai/nomic-embed-text-v1) <!-- at revision 720244025c1a7e15661a174c63cce63c8218e52b --> - **Maximum Sequence Length:** 8192 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 8192, 'do_lower_case': False}) with Transformer model: NomicBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("ptpedroVortal/nomic_vortal_v3.1") # Run inference sentences = [ 'Collect the details that are associated with product \'\' \'Macbook Air 13" com processador M1/M2 e 8 GB de RAM (Telado PT-PT)\', with quantity 1, unit UN', 'Apresenta -se de seguida a configuração financeira para a fornecimento dos produtos \\nrequeridos , mediante opções por cor e diferentes características:\\nNOTA: Valores válidos até 23 de Fevereiro e mediante adjudicação de 2 ou mais \\nequipamentos portáteis (excluindo Teclado)\\nPART-NUMBER QTD. DESCRIÇÃOVALOR\\nUNITÁRIOVALOR\\nTOTAL\\nMLY03PO/A 1Apple Macbook AIR 13,6" (Disco 512GB SSD; 10 core) 1 545,08 € 1 545,08 €\\nMLXY3PO/A 1Apple Macbook AIR 13,6" (Disco 256GB SSD, 8 core) 1 227,48 € 1 227,48 €', 'LOTE 5\n1 MESA APOIO MESA DE APOIO EM INOX AISI 304 2,0 279,000 23,0 558,000\nMesa com 4 rodas , 2 com travão\nTabuleiro inferior\nDimens: C 700 x L 500 x A 800mm\nPrateleira inferior - profundidade 250mm\nFabrico Nacional e por medida\nTotal do do lote 5: 558,00€ Quinhentos e cinquenta e oito euros', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 222 training samples * Columns: <code>query</code> and <code>correct_node</code> * Approximate statistics based on the first 222 samples: | | query | correct_node | |:--------|:------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 15 tokens</li><li>mean: 55.17 tokens</li><li>max: 154 tokens</li></ul> | <ul><li>min: 22 tokens</li><li>mean: 109.22 tokens</li><li>max: 2920 tokens</li></ul> | * Samples: | query | correct_node | |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Collect the details that are associated with Lot 4 product '' 'Mesas de Mayo', with quantity 2, unit Subcontracting Unit</code> | <code>LOTE 4<br>1 MESA DE MAYO 82JM 10.ME.1831 2,000 842,00000 23 1 684,00<br>oitocentos e quarenta e dois euros<br>Origem : Nacional<br>Marca : MOBIT<br>Prazo de entrega: 30 dias<br>Garantia: 2 anos<br>Transporte</code> | | <code>Collect the details that are associated with Lot 7 product '' 'Carro transporte de roupa suja ', with quantity 1, unit US</code> | <code>Lote 7 nan nan nan nan nan\nRef. Description Qt. Un. Un. Price Total\n9856 Carros para Transporte de Roupa Suja e Limpa 1 US 16.23 16.23\</code> | | <code>Collect the details that are associated with product '' '2202000014 - FIO SUT. SEDA NÃO ABS. 2/0 MULTIF. SEM AGULHA (CART.)', with quantity 72, unit UN</code> | <code>2202000014 - FIO SUT. SEDA NÃO ABS. 2/0 MULTIF. SEM AGULHA (CART.) 0.36</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Evaluation Dataset #### Unnamed Dataset * Size: 27 evaluation samples * Columns: <code>query</code> and <code>correct_node</code> * Approximate statistics based on the first 27 samples: | | query | correct_node | |:--------|:------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 17 tokens</li><li>mean: 56.85 tokens</li><li>max: 121 tokens</li></ul> | <ul><li>min: 40 tokens</li><li>mean: 228.15 tokens</li><li>max: 2963 tokens</li></ul> | * Samples: | query | correct_node | |:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Collect the details that are associated with product '' '2202000055 - FIO SUT. POLIAMIDA NÃO ABS. 2/0 MONOF. AG. LANC. 39 MM 3/8 C (CART.)', with quantity 1656, unit UN</code> | <code>2202000055 - FIO SUT. POLIAMIDA NÃO ABS. 2/0 MONOF. AG. LANC. 39 MM 3/8 C (CART.) 1.28</code> | | <code>Collect the details that are associated with Lot 3 product 'Portaria do Parque Coberto dos Olhos de Água' 'Vigilância e segurança humana contínua - Olhos de Água - período de 3 meses - todos os dias da semana, incluindo feriados, total estimado de 2754H', with quantity 1, unit UN</code> | <code>| Lote | DENOMINAÇÃO | VALOR MENSAL | VALOR |<br>|--------|------------------------------------------------------------------------|--------------|-------------|<br>| Lote 1 | Mercados Quinzenais, no Município de Albufeira | 567,00 € | 2 835,00 € |<br>| Lote 2 | Portaria Das Instalações Técnicas e Administrativas do Edifício de Vale Pedras | 7 435,80 € | 37 179,00 € |<br>| Lote 3 | Portaria Parque de Estacionamento Coberto dos Olhos de Água | 8 262,00 € | 41 310,00 € |<br>| Lote 4 | Compreende a Vigilância aos diversos eventos e equipamentos | | 49 021,00 € |<br>| TOTAL | | | 130 345,00 € |</code> | | <code>Collect the details that are associated with Lot 3 product 'Portaria do Parque Coberto dos Olhos de Água' 'Vigilância e segurança humana contínua - Olhos de Água - período de 3 meses - todos os dias da semana, incluindo feriados, total estimado de 2754H', with quantity 1, unit UN</code> | <code>Lote 3:\nPreço Unitário: 10,00€ (dez euros) /hora\nPreço Total: 27.540,00€ (vinte sete mil quinhentos e quarenta euros)</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `num_train_epochs`: 1 - `warmup_ratio`: 0.1 - `bf16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Framework Versions - Python: 3.10.14 - Sentence Transformers: 3.3.1 - Transformers: 4.47.0.dev0 - PyTorch: 2.5.1+cu121 - Accelerate: 1.1.1 - Datasets: 3.1.0 - Tokenizers: 0.20.4 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SentenceTransformer based on nomic-ai/nomic-embed-text-v1 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [nomic-ai/nomic-embed-text-v1](https://huggingface.co/nomic-ai/nomic-embed-text-v1). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [nomic-ai/nomic-embed-text-v1](https://huggingface.co/nomic-ai/nomic-embed-text-v1) <!-- at revision 720244025c1a7e15661a174c63cce63c8218e52b --> - **Maximum Sequence Length:** 8192 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 8192, 'do_lower_case': False}) with Transformer model: NomicBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("ptpedroVortal/nomic_vortal_v3.1") # Run inference sentences = [ 'Collect the details that are associated with product \'\' \'Macbook Air 13" com processador M1/M2 e 8 GB de RAM (Telado PT-PT)\', with quantity 1, unit UN', 'Apresenta -se de seguida a configuração financeira para a fornecimento dos produtos \\nrequeridos , mediante opções por cor e diferentes características:\\nNOTA: Valores válidos até 23 de Fevereiro e mediante adjudicação de 2 ou mais \\nequipamentos portáteis (excluindo Teclado)\\nPART-NUMBER QTD. DESCRIÇÃOVALOR\\nUNITÁRIOVALOR\\nTOTAL\\nMLY03PO/A 1Apple Macbook AIR 13,6" (Disco 512GB SSD; 10 core) 1 545,08 € 1 545,08 €\\nMLXY3PO/A 1Apple Macbook AIR 13,6" (Disco 256GB SSD, 8 core) 1 227,48 € 1 227,48 €', 'LOTE 5\n1 MESA APOIO MESA DE APOIO EM INOX AISI 304 2,0 279,000 23,0 558,000\nMesa com 4 rodas , 2 com travão\nTabuleiro inferior\nDimens: C 700 x L 500 x A 800mm\nPrateleira inferior - profundidade 250mm\nFabrico Nacional e por medida\nTotal do do lote 5: 558,00€ Quinhentos e cinquenta e oito euros', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 222 training samples * Columns: <code>query</code> and <code>correct_node</code> * Approximate statistics based on the first 222 samples: | | query | correct_node | |:--------|:------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 15 tokens</li><li>mean: 55.17 tokens</li><li>max: 154 tokens</li></ul> | <ul><li>min: 22 tokens</li><li>mean: 109.22 tokens</li><li>max: 2920 tokens</li></ul> | * Samples: | query | correct_node | |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Collect the details that are associated with Lot 4 product '' 'Mesas de Mayo', with quantity 2, unit Subcontracting Unit</code> | <code>LOTE 4<br>1 MESA DE MAYO 82JM 10.ME.1831 2,000 842,00000 23 1 684,00<br>oitocentos e quarenta e dois euros<br>Origem : Nacional<br>Marca : MOBIT<br>Prazo de entrega: 30 dias<br>Garantia: 2 anos<br>Transporte</code> | | <code>Collect the details that are associated with Lot 7 product '' 'Carro transporte de roupa suja ', with quantity 1, unit US</code> | <code>Lote 7 nan nan nan nan nan\nRef. Description Qt. Un. Un. Price Total\n9856 Carros para Transporte de Roupa Suja e Limpa 1 US 16.23 16.23\</code> | | <code>Collect the details that are associated with product '' '2202000014 - FIO SUT. SEDA NÃO ABS. 2/0 MULTIF. SEM AGULHA (CART.)', with quantity 72, unit UN</code> | <code>2202000014 - FIO SUT. SEDA NÃO ABS. 2/0 MULTIF. SEM AGULHA (CART.) 0.36</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Evaluation Dataset #### Unnamed Dataset * Size: 27 evaluation samples * Columns: <code>query</code> and <code>correct_node</code> * Approximate statistics based on the first 27 samples: | | query | correct_node | |:--------|:------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 17 tokens</li><li>mean: 56.85 tokens</li><li>max: 121 tokens</li></ul> | <ul><li>min: 40 tokens</li><li>mean: 228.15 tokens</li><li>max: 2963 tokens</li></ul> | * Samples: | query | correct_node | |:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Collect the details that are associated with product '' '2202000055 - FIO SUT. POLIAMIDA NÃO ABS. 2/0 MONOF. AG. LANC. 39 MM 3/8 C (CART.)', with quantity 1656, unit UN</code> | <code>2202000055 - FIO SUT. POLIAMIDA NÃO ABS. 2/0 MONOF. AG. LANC. 39 MM 3/8 C (CART.) 1.28</code> | | <code>Collect the details that are associated with Lot 3 product 'Portaria do Parque Coberto dos Olhos de Água' 'Vigilância e segurança humana contínua - Olhos de Água - período de 3 meses - todos os dias da semana, incluindo feriados, total estimado de 2754H', with quantity 1, unit UN</code> | <code>| Lote | DENOMINAÇÃO | VALOR MENSAL | VALOR |<br>|--------|------------------------------------------------------------------------|--------------|-------------|<br>| Lote 1 | Mercados Quinzenais, no Município de Albufeira | 567,00 € | 2 835,00 € |<br>| Lote 2 | Portaria Das Instalações Técnicas e Administrativas do Edifício de Vale Pedras | 7 435,80 € | 37 179,00 € |<br>| Lote 3 | Portaria Parque de Estacionamento Coberto dos Olhos de Água | 8 262,00 € | 41 310,00 € |<br>| Lote 4 | Compreende a Vigilância aos diversos eventos e equipamentos | | 49 021,00 € |<br>| TOTAL | | | 130 345,00 € |</code> | | <code>Collect the details that are associated with Lot 3 product 'Portaria do Parque Coberto dos Olhos de Água' 'Vigilância e segurança humana contínua - Olhos de Água - período de 3 meses - todos os dias da semana, incluindo feriados, total estimado de 2754H', with quantity 1, unit UN</code> | <code>Lote 3:\nPreço Unitário: 10,00€ (dez euros) /hora\nPreço Total: 27.540,00€ (vinte sete mil quinhentos e quarenta euros)</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `num_train_epochs`: 1 - `warmup_ratio`: 0.1 - `bf16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Framework Versions - Python: 3.10.14 - Sentence Transformers: 3.3.1 - Transformers: 4.47.0.dev0 - PyTorch: 2.5.1+cu121 - Accelerate: 1.1.1 - Datasets: 3.1.0 - Tokenizers: 0.20.4 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "nomic-ai/nomic-embed-text-v1", "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:222", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Collect the details that are associated with Lot 1 product '' 'IGE', with quantity 7280, unit UND", "sentences": ["Lote 12\\nquatro euros e noventa e cinco cêntimos\\n3\\n3Teks/Turquia\\nRef. 101.001.SE", " ImmunoCAP IgE Total Anti-IgE Immunocap Caneta 7280 455 18,70 dezoito euros e setenta cêntimos 1,168750 16 14-4509-01 8.508,50\\n2\\n2 122060282Conforme \\nalergénioImmunoCAP Alergénio Caneta 20800 1300 30,00 trinta euros 1,875000 16 .... 39.000,00\\n2\\n3 122060282Conforme \\nalergénioImmunoCAP Alergénio Componente\\nMolecularCaneta 2000 200 73,49setenta e três euros e quarenta e \\nnove cêntimos 7,349000 10 .... 14.698,00\\n2\\n4 122060282Conforme \\nalergénioImmunoCAP Alergénio Caneta 8100 810 22,05 vinte e dois euros e cinco cêntimos 2,205000 10 .... 17.860,50", "Solução Proposta 5 565,00 €\\nhw 5 565,00 €\\nTotal sem IVA 5 565,00 €\\nValor do IVA (23 %) 1 279,95 €\\nTotal com IVA (23 %) 6 844,95 €MIL DUZENTOS E SETENTA E NOVE EUROS E NOVENTA E CINCO CENTIMOS\\nSEIS MIL OITOCENTOS E QUARENTA E QUATRO EUROS E NOVENTA E CINCO\\nCENTIMOSRESUMO ECONÓMICO\\nDescrição Valor\\nCINCO MIL QUINHENTOS E SESSENTA E CINCO EUROS\\nProposta FP-5-171.xlsxPágina 4 de 5\\n30/01/2023\\nLenovo - Monitores 5 565,00 €\\nT27i-10(A18270FT0)-27inch Monitor(HDMI)\\nStock a data de hoje 20 Unds + Previsão 4 \\nsemanas30 185,50 € 5 565,00 €\\n"]}, {"source_sentence": "Collect the details that are associated with Lot 4 product 'Os diversos equipamentos e eventos a realizar e de gestão do Município de Albufeira' 'Vigilância e segurança humana contínua em equipamentos e eventos - VIG/ARD/ARE/COORD - período de 3 meses, todos os dias de forma permanente e ininterrupta, total estimado de 2250H.', with quantity 1, unit UN", "sentences": ["Lote 3:\\nPreço Unitário: 10,00€ (dez euros) /hora\\nPreço Total: 27.540,00€ (vinte sete mil quinhentos e quarenta euros)", "DESCRIÇÃO DE SERVIÇOS LOTE 4\nLocal, Dias e Horários:\n➢ Locais, Dias e horários a indicar (prazo mínimo de 24 horas) pelos serviços da Câmara Municipal de Albufeira ;\nEfetivo e Categoria Profissional :\n➢ O número necessário de efetivo será definido por requisição a emitir pelos competentes serviços da Câmara Municipal de Albufeira nas categorias de Vigilante, Assistente Recinto de Espetáculos, Assistente de Recinto Desportivos e Coordenador;\nValor Hora: €15,00 ( quinze euros)\nTotal de Horas Previsto: 2980 horas (dois mil novecentos e oitenta horas)\nValor Total Previsto: €44 700,00 (quarenta e quatro mil e setecentos euros)", "2202000061 - SUTURA MONOFILAMENYO NÃO ABSORVIVEL POLIAMIDA 0 AG.CILINDRICA,30MM (CART.) 0.78"]}, {"source_sentence": "Collect the details that are associated with Lot 11 product '' 'Mesas apoio (anestesia e circulante)', with quantity 4, unit Subcontracting Unit", "sentences": ["2202000128 - Sutura sint. absorvivel \"Poliglicolico\" multifilamento 5/0 ag. cilindrica 13mm (CART.) 2.2", "Preço\\nLote Artigo Designação Referência € Custo unitário s/ IVA % IVA Quantidade Valor total S/IVA\\n11 Mesa apoio \\n(anestesia e \\ncirculante) Trolley apoio TTAC 3350 690€ 23% 4 2760€\\nTotal 2760€ ", "Trouxa Cesariana 832,00 7,950000 6\\xa0614,400000 23,00\\nLote 10\\nsete euros e noveta e cinco cêntimos\\n3\\n3Teks/Turquia\\nRef. 601.002.SE\\nCDM: 88869431\\nDM Classe Ie\\nNPDM: T020199\\nTotal Ilíquido :Descontos Comerciais :\\nTOTAL EUR56\\xa0642,860000P"]}, {"source_sentence": "Collect the details that are associated with product '' '2202000154 - FIO SUT. POLIPROPI. NÃO ABS. 2/0 MONOF. C/ 2 AG. RECTAS CILIND. 75 MM (CART.)', with quantity 108, unit UN", "sentences": ["| Lote | DENOMINAÇÃO | VALOR MENSAL | VALOR |\n|--------|------------------------------------------------------------------------|--------------|-------------|\n| Lote 1 | Mercados Quinzenais, no Município de Albufeira | 567,00 € | 2 835,00 € |\n| Lote 2 | Portaria Das Instalações Técnicas e Administrativas do Edifício de Vale Pedras | 7 435,80 € | 37 179,00 € |\n| Lote 3 | Portaria Parque de Estacionamento Coberto dos Olhos de Água | 8 262,00 € | 41 310,00 € |\n| Lote 4 | Compreende a Vigilância aos diversos eventos e equipamentos | | 49 021,00 € |\n| TOTAL | | | 130 345,00 € |", "LOTE 8\\n4,0 CARROHPPBV1 SUPORTE RODADO PARA SACO RESÍDUOS GRUPO III/IV 240,000 960,000 23,0\\", "2202000154 - FIO SUT. POLIPROPI. NÃO ABS. 2/0 MONOF. C/ 2 AG. RECTAS CILIND. 75 MM (CART.) 2.65"]}, {"source_sentence": "Collect the details that are associated with product '' 'Macbook Air 13\" com processador M1/M2 e 8 GB de RAM (Telado PT-PT)', with quantity 1, unit UN", "sentences": ["['A Siemens Healthineers é uma empresa líder em inovação, reconhecida pela sua engenharia de ponta, pelo apoio a \\nterapias avançadas, assim como por privilegiar a versatilidade e funcionalidade entre questões clínicas, resultados \\nlaboratoriais precisos e serviços transformadores. Deste modo, maximizamos oportunidades e reduzimos riscos, \\ncontribuindo para a melhoria da excelência clínica, eficiência operacional e rentabilidade.\\nQueremos continuar a colocar todas estas capacidades e os nossos conhecimentos a nível clínico e técnico ao serviço de\\nparceiros e clientes, para que sejamos o apoio que precisam para alcançar todos os seus objetivos. Sabemos que podemos\\nir cada vez mais longe, mas sabemos também que muitas vezes um caminho percorrido em conjunto oferece todo um novo\\nmundo de possibilidades.\\nJuntos, vamos continuar a construir o futuro da saúde.\\nPág. 52.Proposta Financeira\\n2.1. Posições Relevantes\\nLote 1\\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Teste\\nS/ IVA\\n1 122060265 IGE TESTE 7280 3,00 €\\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €\\n(\\n(Vinte e um mil, oitocentos e quarenta euros)\\nLote 4\\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Total\\nS/ IVA\\n5\\n1 122060125C1 INACTIVADOR FUNCIONAL', 'POSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Teste\\nS/ IVA\\n1 122060265 IGE TESTE 7280 3,00 €\\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €\\n(\\n(Vinte e um mil, oitocentos e quarenta euros)\\nLote 4\\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Total\\nS/ IVA\\n5\\n1 122060125C1 INACTIVADOR FUNCIONAL\\nQUANTITATIVOTESTE 576 5,00 €\\nPreço Líquido da Proposta para o Lote 4 .................................................................................................... 2.880,00 €\\n(\\n(Dois mil, oitocentos e oitenta euros)\\nPág. 63.Anexos\\n1. Procuração\\nSiemens Healthcare, Unipessoal, Lda.\\nRua Irmãos Siemens, 1-1A\\n2720-093 Amadora\\nPortugal\\nTelf: +351 210 601 800\\[email protected]\\nwww.healthcare.siemens.ptCopyright\\nEsta Proposta e seu conteúdo são propriedade intelectual da\\nSiemens Healthcare, Unipessoal, Lda, na proposta designada\\npor Siemens Healthineers, protegida nos termos do Código do\\nDireito de Autor e dos Direitos Conexos.', 'A Siemens Healthineers é uma empresa líder em inovação, reconhecida pela sua engenharia de ponta, pelo apoio a \\nterapias avançadas, assim como por privilegiar a versatilidade e funcionalidade entre questões clínicas, resultados \\nlaboratoriais precisos e serviços transformadores. Deste modo, maximizamos oportunidades e reduzimos riscos, \\ncontribuindo para a melhoria da excelência clínica, eficiência operacional e rentabilidade.\\nQueremos continuar a colocar todas estas capacidades e os nossos conhecimentos a nível clínico e técnico ao serviço de\\nparceiros e clientes, para que sejamos o apoio que precisam para alcançar todos os seus objetivos. Sabemos que podemos\\nir cada vez mais longe, mas sabemos também que muitas vezes um caminho percorrido em conjunto oferece todo um novo\\nmundo de possibilidades.\\nJuntos, vamos continuar a construir o futuro da saúde.\\nPág. 52.Proposta Financeira\\n2.1. Posições Relevantes\\nLote 1\\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Teste\\nS/ IVA\\n1 122060265 IGE TESTE 7280 3,00 €\\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €', 'A Siemens Healthineers é uma empresa líder em inovação, reconhecida pela sua engenharia de ponta, pelo apoio a \\nterapias avançadas, assim como por privilegiar a versatilidade e funcionalidade entre questões clínicas, resultados \\nlaboratoriais precisos e serviços transformadores. Deste modo, maximizamos oportunidades e reduzimos riscos, \\ncontribuindo para a melhoria da excelência clínica, eficiência operacional e rentabilidade.\\nQueremos continuar a colocar todas estas capacidades e os nossos conhecimentos a nível clínico e técnico ao serviço de\\nparceiros e clientes, para que sejamos o apoio que precisam para alcançar todos os seus objetivos. Sabemos que podemos\\nir cada vez mais longe, mas sabemos também que muitas vezes um caminho percorrido em conjunto oferece todo um novo\\nmundo de possibilidades.\\nJuntos, vamos continuar a construir o futuro da saúde.\\nPág. 52.Proposta Financeira\\n2.1. Posições Relevantes\\nLote 1\\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Teste\\nS/ IVA\\n1 122060265 IGE TESTE 7280 3,00 €\\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €\\n(\\n(Vinte e um mil, oitocentos e quarenta euros)\\nLote 4\\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Total\\nS/ IVA\\n5\\n1 122060125C1 INACTIVADOR FUNCIONAL\\nQUANTITATIVOTESTE 576 5,00 €\\nPreço Líquido da Proposta para o Lote 4 .................................................................................................... 2.880,00 €', 'Queremos continuar a colocar todas estas capacidades e os nossos conhecimentos a nível clínico e técnico ao serviço de\\nparceiros e clientes, para que sejamos o apoio que precisam para alcançar todos os seus objetivos. Sabemos que podemos\\nir cada vez mais longe, mas sabemos também que muitas vezes um caminho percorrido em conjunto oferece todo um novo\\nmundo de possibilidades.\\nJuntos, vamos continuar a construir o futuro da saúde.\\nPág. 52.Proposta Financeira\\n2.1. Posições Relevantes\\nLote 1\\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Teste\\nS/ IVA\\n1 122060265 IGE TESTE 7280 3,00 €\\nPreço Líquido da Proposta para o Lote 1 ...................................................................................................21.840,00 €\\n(\\n(Vinte e um mil, oitocentos e quarenta euros)\\nLote 4\\nPOSIÇÃO CÓDIGO DESIGNAÇÃO UnidadeESTIMATIVA\\nANUALPreço Total\\nS/ IVA\\n5\\n1 122060125C1 INACTIVADOR FUNCIONAL\\nQUANTITATIVOTESTE 576 5,00 €\\nPreço Líquido da Proposta para o Lote 4 .................................................................................................... 2.880,00 €\\n(\\n(Dois mil, oitocentos e oitenta euros)\\nPág. 63.Anexos']", "LOTE 5\n1 MESA APOIO MESA DE APOIO EM INOX AISI 304 2,0 279,000 23,0 558,000\nMesa com 4 rodas , 2 com travão\nTabuleiro inferior\nDimens: C 700 x L 500 x A 800mm\nPrateleira inferior - profundidade 250mm\nFabrico Nacional e por medida\nTotal do do lote 5: 558,00€ Quinhentos e cinquenta e oito euros", "Apresenta -se de seguida a configuração financeira para a fornecimento dos produtos \\nrequeridos , mediante opções por cor e diferentes características:\\nNOTA: Valores válidos até 23 de Fevereiro e mediante adjudicação de 2 ou mais \\nequipamentos portáteis (excluindo Teclado)\\nPART-NUMBER QTD. DESCRIÇÃOVALOR\\nUNITÁRIOVALOR\\nTOTAL\\nMLY03PO/A 1Apple Macbook AIR 13,6\" (Disco 512GB SSD; 10 core) 1 545,08 € 1 545,08 €\\nMLXY3PO/A 1Apple Macbook AIR 13,6\" (Disco 256GB SSD, 8 core) 1 227,48 € 1 227,48 €"]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,437
YakovElm/IntelDAOS5SetFitModel_Train_balance_ratio_4
YakovElm
text-classification
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-06-10T05:44:22Z
2023-06-10T05:44:58+00:00
8
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # YakovElm/IntelDAOS5SetFitModel_Train_balance_ratio_4 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("YakovElm/IntelDAOS5SetFitModel_Train_balance_ratio_4") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# YakovElm/IntelDAOS5SetFitModel_Train_balance_ratio_4 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("YakovElm/IntelDAOS5SetFitModel_Train_balance_ratio_4") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
41,438
BhargaviCH/bch-finetuning-sentiment-model-3000-samples
BhargaviCH
text-classification
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:imdb", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-07-12T12:04:44Z
2023-07-12T14:05:59+00:00
8
0
--- datasets: - imdb license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: bch-finetuning-sentiment-model-3000-samples results: - task: type: text-classification name: Text Classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - type: accuracy value: 0.88 name: Accuracy - type: f1 value: 0.8838709677419355 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bch-finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.7273 - Accuracy: 0.88 - F1: 0.8839 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bch-finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.7273 - Accuracy: 0.88 - F1: 0.8839 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
{"datasets": ["imdb"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "bch-finetuning-sentiment-model-3000-samples", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "imdb", "type": "imdb", "config": "plain_text", "split": "test", "args": "plain_text"}, "metrics": [{"type": "accuracy", "value": 0.88, "name": "Accuracy"}, {"type": "f1", "value": 0.8838709677419355, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,440
gaudi/opus-mt-de-ht-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-17T15:24:26Z
2024-10-18T23:53:53+00:00
6
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-de-ht) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-de-ht).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-de-ht --output_dir ./ctranslate2/opus-mt-de-ht-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-de-ht-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-de-ht-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-de-ht-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-de-ht) by Helsinki-NLP.
null
Non_BioNLP
# Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-de-ht) - This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers. Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-de-ht).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-de-ht --output_dir ./ctranslate2/opus-mt-de-ht-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-de-ht-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-de-ht-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-de-ht-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-de-ht) by Helsinki-NLP.
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
41,441
juanpablomesa/all-mpnet-base-v2-bioasq-1epoch-batch32-100steps
juanpablomesa
sentence-similarity
[ "sentence-transformers", "safetensors", "mpnet", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:4012", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:sentence-transformers/all-mpnet-base-v2", "base_model:finetune:sentence-transformers/all-mpnet-base-v2", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-07-02T19:25:02Z
2024-07-02T19:25:18+00:00
47
0
--- base_model: sentence-transformers/all-mpnet-base-v2 datasets: [] language: [] library_name: sentence-transformers metrics: - cosine_accuracy@1 - cosine_accuracy@3 - cosine_accuracy@5 - cosine_accuracy@10 - cosine_precision@1 - cosine_precision@3 - cosine_precision@5 - cosine_precision@10 - cosine_recall@1 - cosine_recall@3 - cosine_recall@5 - cosine_recall@10 - cosine_ndcg@10 - cosine_mrr@10 - cosine_map@100 pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:4012 - loss:MultipleNegativesRankingLoss widget: - source_sentence: 'Extensive messenger RNA editing generates transcript and protein diversity in genes involved in neural excitability, as previously described, as well as in genes participating in a broad range of other cellular functions. ' sentences: - Do cephalopods use RNA editing less frequently than other species? - GV1001 vaccine targets which enzyme? - Which event results in the acetylation of S6K1? - source_sentence: Yes, exposure to household furry pets influences the gut microbiota of infants. sentences: - Can pets affect infant microbiomed? - What is the mode of action of Thiazovivin? - What are the effects of CAMK4 inhibition? - source_sentence: "In children with heart failure evidence of the effect of enalapril\ \ is empirical. Enalapril was clinically safe and effective in 50% to 80% of for\ \ children with cardiac failure secondary to congenital heart malformations before\ \ and after cardiac surgery, impaired ventricular function , valvar regurgitation,\ \ congestive cardiomyopathy, , arterial hypertension, life-threatening arrhythmias\ \ coexisting with circulatory insufficiency. \nACE inhibitors have shown a transient\ \ beneficial effect on heart failure due to anticancer drugs and possibly a beneficial\ \ effect in muscular dystrophy-associated cardiomyopathy, which deserves further\ \ studies." sentences: - Which receptors can be evaluated with the [18F]altanserin? - In what proportion of children with heart failure has Enalapril been shown to be safe and effective? - Which major signaling pathways are regulated by RIP1? - source_sentence: Cellular senescence-associated heterochromatic foci (SAHFS) are a novel type of chromatin condensation involving alterations of linker histone H1 and linker DNA-binding proteins. SAHFS can be formed by a variety of cell types, but their mechanism of action remains unclear. sentences: - What is the relationship between the X chromosome and a neutrophil drumstick? - Which microRNAs are involved in exercise adaptation? - How are SAHFS created? - source_sentence: Multicluster Pcdh diversity is required for mouse olfactory neural circuit assembly. The vertebrate clustered protocadherin (Pcdh) cell surface proteins are encoded by three closely linked gene clusters (Pcdhα, Pcdhβ, and Pcdhγ). Although deletion of individual Pcdh clusters had subtle phenotypic consequences, the loss of all three clusters (tricluster deletion) led to a severe axonal arborization defect and loss of self-avoidance. sentences: - What are the effects of the deletion of all three Pcdh clusters (tricluster deletion) in mice? - what is the role of MEF-2 in cardiomyocyte differentiation? - How many periods of regulatory innovation led to the evolution of vertebrates? model-index: - name: SentenceTransformer based on sentence-transformers/all-mpnet-base-v2 results: - task: type: information-retrieval name: Information Retrieval dataset: name: sentence transformers/all mpnet base v2 type: sentence-transformers/all-mpnet-base-v2 metrics: - type: cosine_accuracy@1 value: 0.8486562942008486 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.9363507779349364 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.9476661951909476 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.958981612446959 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.8486562942008486 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.31211692597831214 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.1895332390381895 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.09589816124469587 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.8486562942008486 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.9363507779349364 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.9476661951909476 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.958981612446959 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.9104527449456198 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.894245751105723 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.8956968198991456 name: Cosine Map@100 --- # SentenceTransformer based on sentence-transformers/all-mpnet-base-v2 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) <!-- at revision 84f2bcc00d77236f9e89c8a360a00fb1139bf47d --> - **Maximum Sequence Length:** 384 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("juanpablomesa/all-mpnet-base-v2-bioasq-1epoch-batch32-100steps") # Run inference sentences = [ 'Multicluster Pcdh diversity is required for mouse olfactory neural circuit assembly. The vertebrate clustered protocadherin (Pcdh) cell surface proteins are encoded by three closely linked gene clusters (Pcdhα, Pcdhβ, and Pcdhγ). Although deletion of individual Pcdh clusters had subtle phenotypic consequences, the loss of all three clusters (tricluster deletion) led to a severe axonal arborization defect and loss of self-avoidance.', 'What are the effects of the deletion of all three Pcdh clusters (tricluster deletion) in mice?', 'How many periods of regulatory innovation led to the evolution of vertebrates?', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Dataset: `sentence-transformers/all-mpnet-base-v2` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.8487 | | cosine_accuracy@3 | 0.9364 | | cosine_accuracy@5 | 0.9477 | | cosine_accuracy@10 | 0.959 | | cosine_precision@1 | 0.8487 | | cosine_precision@3 | 0.3121 | | cosine_precision@5 | 0.1895 | | cosine_precision@10 | 0.0959 | | cosine_recall@1 | 0.8487 | | cosine_recall@3 | 0.9364 | | cosine_recall@5 | 0.9477 | | cosine_recall@10 | 0.959 | | cosine_ndcg@10 | 0.9105 | | cosine_mrr@10 | 0.8942 | | **cosine_map@100** | **0.8957** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 4,012 training samples * Columns: <code>positive</code> and <code>anchor</code> * Approximate statistics based on the first 1000 samples: | | positive | anchor | |:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 3 tokens</li><li>mean: 63.14 tokens</li><li>max: 384 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 16.13 tokens</li><li>max: 49 tokens</li></ul> | * Samples: | positive | anchor | |:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------| | <code>Aberrant patterns of H3K4, H3K9, and H3K27 histone lysine methylation were shown to result in histone code alterations, which induce changes in gene expression, and affect the proliferation rate of cells in medulloblastoma.</code> | <code>What is the implication of histone lysine methylation in medulloblastoma?</code> | | <code>STAG1/STAG2 proteins are tumour suppressor proteins that suppress cell proliferation and are essential for differentiation.</code> | <code>What is the role of STAG1/STAG2 proteins in differentiation?</code> | | <code>The association between cell phone use and incident glioblastoma remains unclear. Some studies have reported that cell phone use was associated with incident glioblastoma, and with reduced survival of patients diagnosed with glioblastoma. However, other studies have repeatedly replicated to find an association between cell phone use and glioblastoma.</code> | <code>What is the association between cell phone use and glioblastoma?</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `learning_rate`: 2e-05 - `num_train_epochs`: 1 - `warmup_ratio`: 0.1 - `fp16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | sentence-transformers/all-mpnet-base-v2_cosine_map@100 | |:------:|:----:|:-------------:|:------------------------------------------------------:| | 0 | 0 | - | 0.8367 | | 0.7937 | 100 | 0.1153 | 0.8957 | ### Framework Versions - Python: 3.11.5 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.1.2+cu121 - Accelerate: 0.31.0 - Datasets: 2.19.1 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
BioNLP
# SentenceTransformer based on sentence-transformers/all-mpnet-base-v2 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) <!-- at revision 84f2bcc00d77236f9e89c8a360a00fb1139bf47d --> - **Maximum Sequence Length:** 384 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("juanpablomesa/all-mpnet-base-v2-bioasq-1epoch-batch32-100steps") # Run inference sentences = [ 'Multicluster Pcdh diversity is required for mouse olfactory neural circuit assembly. The vertebrate clustered protocadherin (Pcdh) cell surface proteins are encoded by three closely linked gene clusters (Pcdhα, Pcdhβ, and Pcdhγ). Although deletion of individual Pcdh clusters had subtle phenotypic consequences, the loss of all three clusters (tricluster deletion) led to a severe axonal arborization defect and loss of self-avoidance.', 'What are the effects of the deletion of all three Pcdh clusters (tricluster deletion) in mice?', 'How many periods of regulatory innovation led to the evolution of vertebrates?', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Dataset: `sentence-transformers/all-mpnet-base-v2` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.8487 | | cosine_accuracy@3 | 0.9364 | | cosine_accuracy@5 | 0.9477 | | cosine_accuracy@10 | 0.959 | | cosine_precision@1 | 0.8487 | | cosine_precision@3 | 0.3121 | | cosine_precision@5 | 0.1895 | | cosine_precision@10 | 0.0959 | | cosine_recall@1 | 0.8487 | | cosine_recall@3 | 0.9364 | | cosine_recall@5 | 0.9477 | | cosine_recall@10 | 0.959 | | cosine_ndcg@10 | 0.9105 | | cosine_mrr@10 | 0.8942 | | **cosine_map@100** | **0.8957** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 4,012 training samples * Columns: <code>positive</code> and <code>anchor</code> * Approximate statistics based on the first 1000 samples: | | positive | anchor | |:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 3 tokens</li><li>mean: 63.14 tokens</li><li>max: 384 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 16.13 tokens</li><li>max: 49 tokens</li></ul> | * Samples: | positive | anchor | |:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------| | <code>Aberrant patterns of H3K4, H3K9, and H3K27 histone lysine methylation were shown to result in histone code alterations, which induce changes in gene expression, and affect the proliferation rate of cells in medulloblastoma.</code> | <code>What is the implication of histone lysine methylation in medulloblastoma?</code> | | <code>STAG1/STAG2 proteins are tumour suppressor proteins that suppress cell proliferation and are essential for differentiation.</code> | <code>What is the role of STAG1/STAG2 proteins in differentiation?</code> | | <code>The association between cell phone use and incident glioblastoma remains unclear. Some studies have reported that cell phone use was associated with incident glioblastoma, and with reduced survival of patients diagnosed with glioblastoma. However, other studies have repeatedly replicated to find an association between cell phone use and glioblastoma.</code> | <code>What is the association between cell phone use and glioblastoma?</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `learning_rate`: 2e-05 - `num_train_epochs`: 1 - `warmup_ratio`: 0.1 - `fp16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | sentence-transformers/all-mpnet-base-v2_cosine_map@100 | |:------:|:----:|:-------------:|:------------------------------------------------------:| | 0 | 0 | - | 0.8367 | | 0.7937 | 100 | 0.1153 | 0.8957 | ### Framework Versions - Python: 3.11.5 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.1.2+cu121 - Accelerate: 0.31.0 - Datasets: 2.19.1 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "sentence-transformers/all-mpnet-base-v2", "datasets": [], "language": [], "library_name": "sentence-transformers", "metrics": ["cosine_accuracy@1", "cosine_accuracy@3", "cosine_accuracy@5", "cosine_accuracy@10", "cosine_precision@1", "cosine_precision@3", "cosine_precision@5", "cosine_precision@10", "cosine_recall@1", "cosine_recall@3", "cosine_recall@5", "cosine_recall@10", "cosine_ndcg@10", "cosine_mrr@10", "cosine_map@100"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:4012", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Extensive messenger RNA editing generates transcript and protein diversity in genes involved in neural excitability, as previously described, as well as in genes participating in a broad range of other cellular functions. ", "sentences": ["Do cephalopods use RNA editing less frequently than other species?", "GV1001 vaccine targets which enzyme?", "Which event results in the acetylation of S6K1?"]}, {"source_sentence": "Yes, exposure to household furry pets influences the gut microbiota of infants.", "sentences": ["Can pets affect infant microbiomed?", "What is the mode of action of Thiazovivin?", "What are the effects of CAMK4 inhibition?"]}, {"source_sentence": "In children with heart failure evidence of the effect of enalapril is empirical. Enalapril was clinically safe and effective in 50% to 80% of for children with cardiac failure secondary to congenital heart malformations before and after cardiac surgery, impaired ventricular function , valvar regurgitation, congestive cardiomyopathy, , arterial hypertension, life-threatening arrhythmias coexisting with circulatory insufficiency. \nACE inhibitors have shown a transient beneficial effect on heart failure due to anticancer drugs and possibly a beneficial effect in muscular dystrophy-associated cardiomyopathy, which deserves further studies.", "sentences": ["Which receptors can be evaluated with the [18F]altanserin?", "In what proportion of children with heart failure has Enalapril been shown to be safe and effective?", "Which major signaling pathways are regulated by RIP1?"]}, {"source_sentence": "Cellular senescence-associated heterochromatic foci (SAHFS) are a novel type of chromatin condensation involving alterations of linker histone H1 and linker DNA-binding proteins. SAHFS can be formed by a variety of cell types, but their mechanism of action remains unclear.", "sentences": ["What is the relationship between the X chromosome and a neutrophil drumstick?", "Which microRNAs are involved in exercise adaptation?", "How are SAHFS created?"]}, {"source_sentence": "Multicluster Pcdh diversity is required for mouse olfactory neural circuit assembly. The vertebrate clustered protocadherin (Pcdh) cell surface proteins are encoded by three closely linked gene clusters (Pcdhα, Pcdhβ, and Pcdhγ). Although deletion of individual Pcdh clusters had subtle phenotypic consequences, the loss of all three clusters (tricluster deletion) led to a severe axonal arborization defect and loss of self-avoidance.", "sentences": ["What are the effects of the deletion of all three Pcdh clusters (tricluster deletion) in mice?", "what is the role of MEF-2 in cardiomyocyte differentiation?", "How many periods of regulatory innovation led to the evolution of vertebrates?"]}], "model-index": [{"name": "SentenceTransformer based on sentence-transformers/all-mpnet-base-v2", "results": [{"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "sentence transformers/all mpnet base v2", "type": "sentence-transformers/all-mpnet-base-v2"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.8486562942008486, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.9363507779349364, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.9476661951909476, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.958981612446959, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.8486562942008486, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.31211692597831214, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.1895332390381895, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.09589816124469587, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.8486562942008486, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.9363507779349364, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.9476661951909476, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.958981612446959, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.9104527449456198, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.894245751105723, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.8956968198991456, "name": "Cosine Map@100"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,442
TransferGraph/connectivity_cola_6ep_ft-10-finetuned-lora-tweet_eval_emotion
TransferGraph
text-classification
[ "peft", "safetensors", "parquet", "text-classification", "dataset:tweet_eval", "base_model:connectivity/cola_6ep_ft-10", "base_model:adapter:connectivity/cola_6ep_ft-10", "model-index", "region:us" ]
2024-02-29T12:49:35Z
2024-02-29T12:49:38+00:00
0
0
--- base_model: connectivity/cola_6ep_ft-10 datasets: - tweet_eval library_name: peft metrics: - accuracy tags: - parquet - text-classification model-index: - name: connectivity_cola_6ep_ft-10-finetuned-lora-tweet_eval_emotion results: - task: type: text-classification name: Text Classification dataset: name: tweet_eval type: tweet_eval config: emotion split: validation args: emotion metrics: - type: accuracy value: 0.7352941176470589 name: accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # connectivity_cola_6ep_ft-10-finetuned-lora-tweet_eval_emotion This model is a fine-tuned version of [connectivity/cola_6ep_ft-10](https://huggingface.co/connectivity/cola_6ep_ft-10) on the tweet_eval dataset. It achieves the following results on the evaluation set: - accuracy: 0.7353 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0004 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | accuracy | train_loss | epoch | |:--------:|:----------:|:-----:| | 0.3797 | None | 0 | | 0.4519 | 1.2465 | 0 | | 0.6791 | 1.0440 | 1 | | 0.7273 | 0.8582 | 2 | | 0.7353 | 0.8048 | 3 | ### Framework versions - PEFT 0.8.2 - Transformers 4.37.2 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # connectivity_cola_6ep_ft-10-finetuned-lora-tweet_eval_emotion This model is a fine-tuned version of [connectivity/cola_6ep_ft-10](https://huggingface.co/connectivity/cola_6ep_ft-10) on the tweet_eval dataset. It achieves the following results on the evaluation set: - accuracy: 0.7353 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0004 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | accuracy | train_loss | epoch | |:--------:|:----------:|:-----:| | 0.3797 | None | 0 | | 0.4519 | 1.2465 | 0 | | 0.6791 | 1.0440 | 1 | | 0.7273 | 0.8582 | 2 | | 0.7353 | 0.8048 | 3 | ### Framework versions - PEFT 0.8.2 - Transformers 4.37.2 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.2
{"base_model": "connectivity/cola_6ep_ft-10", "datasets": ["tweet_eval"], "library_name": "peft", "metrics": ["accuracy"], "tags": ["parquet", "text-classification"], "model-index": [{"name": "connectivity_cola_6ep_ft-10-finetuned-lora-tweet_eval_emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "tweet_eval", "type": "tweet_eval", "config": "emotion", "split": "validation", "args": "emotion"}, "metrics": [{"type": "accuracy", "value": 0.7352941176470589, "name": "accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,443
lmstudio-community/wavecoder-ultra-6.7b-GGUF
lmstudio-community
text-generation
[ "transformers", "gguf", "code", "text-generation", "dataset:humaneval", "arxiv:2312.14187", "license:other", "endpoints_compatible", "region:us" ]
2024-04-16T16:26:54Z
2024-04-16T16:56:24+00:00
848
11
--- datasets: - humaneval library_name: transformers license: other license_name: deepseek metrics: - code_eval pipeline_tag: text-generation tags: - code quantized_by: bartowski lm_studio: param_count: 6.7b use_case: coding release_date: 15-04-2024 model_creator: microsoft prompt_template: alpaca system_prompt: Below is an instruction that describes a task. Write a response that appropriately completes the request. base_model: DeepseekCoder original_repo: microsoft/wavecoder-ultra-6.7b --- ## 💫 Community Model> wavecoder-ultra-6.7b by Microsoft *👾 [LM Studio](https://lmstudio.ai) Community models highlights program. Highlighting new & noteworthy models by the community. Join the conversation on [Discord](https://discord.gg/aPQfnNkxGC)*. **Model creator:** [Microsoft](https://huggingface.co/microsoft)<br> **Original model**: [wavecoder-ultra-6.7b](https://huggingface.co/microsoft/wavecoder-ultra-6.7b)<br> **GGUF quantization:** provided by [bartowski](https://huggingface.co/bartowski) based on `llama.cpp` release [b2675](https://github.com/ggerganov/llama.cpp/releases/tag/b2675)<br> ## Model Summary: WaveCoder ultra is a coding model created with 'Widepread And Versatile Enhanced' instruction tuning. It has exceptional generalization ability across different code-related tasks and has a high efficiency in generation.<br> This model should be used exclusively for coding, and will follow instructions for code generation. ## Prompt Template: Choose the `Alpaca` preset in your LM Studio. Under the hood, the model will see a prompt that's formatted like so: ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {prompt} ### Response: ``` ## Use case and examples WaveCoder ultra is fine tuned for code-related instruction following tasks, including code generation, summarization, repair, and translation. ## Code Generation ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6435718aaaef013d1aec3b8b/X-MeWq5Aa0MTYUhEv2C4F.png) ## Code Summarization ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6435718aaaef013d1aec3b8b/5C_rfhuB1SWXj7t4CrTmO.png) ## Code Repair ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6435718aaaef013d1aec3b8b/EXJPvD3kdpMuqHb0mF0HG.png) ## Code Translation ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6435718aaaef013d1aec3b8b/FZroxLvNUVPlydyJlc0lU.png) ## Technical Details The WaveCoder series of models is the result of a 'Widespread And Versatile Enchanced' (WAVE) instruction tuning with a highly refined dataset. Their 'CodeOcean' consists of 20,000 instruction instances across the 4 code-related tasks (generation, summarization, repair, translation) with instructions generated by GPT-3.5-turbo. To create this dataset, the team used existing raw code from GitHub CodeSearchNet, filtering for quality and diversity, then used a 'novel LLM-based Generator-Discriminator Framework' which involves generating supervised instruction data from the unsupervised open source code. For further details and benchmarks, check out their arXiv paper [here](https://arxiv.org/abs/2312.14187) ## Special thanks 🙏 Special thanks to [Georgi Gerganov](https://github.com/ggerganov) and the whole team working on [llama.cpp](https://github.com/ggerganov/llama.cpp/) for making all of this possible. 🙏 Special thanks to [Kalomaze](https://github.com/kalomaze) for his dataset (linked [here](https://github.com/ggerganov/llama.cpp/discussions/5263)) that was used for calculating the imatrix for these quants, which improves the overall quality! ## Disclaimers LM Studio is not the creator, originator, or owner of any Model featured in the Community Model Program. Each Community Model is created and provided by third parties. LM Studio does not endorse, support, represent or guarantee the completeness, truthfulness, accuracy, or reliability of any Community Model. You understand that Community Models can produce content that might be offensive, harmful, inaccurate or otherwise inappropriate, or deceptive. Each Community Model is the sole responsibility of the person or entity who originated such Model. LM Studio may not monitor or control the Community Models and cannot, and does not, take responsibility for any such Model. LM Studio disclaims all warranties or guarantees about the accuracy, reliability or benefits of the Community Models. LM Studio further disclaims any warranty that the Community Model will meet your requirements, be secure, uninterrupted or available at any time or location, or error-free, viruses-free, or that any errors will be corrected, or otherwise. You will be solely responsible for any damage resulting from your use of or access to the Community Models, your downloading of any Community Model, or use of any other Community Model provided by or through LM Studio.
null
Non_BioNLP
## 💫 Community Model> wavecoder-ultra-6.7b by Microsoft *👾 [LM Studio](https://lmstudio.ai) Community models highlights program. Highlighting new & noteworthy models by the community. Join the conversation on [Discord](https://discord.gg/aPQfnNkxGC)*. **Model creator:** [Microsoft](https://huggingface.co/microsoft)<br> **Original model**: [wavecoder-ultra-6.7b](https://huggingface.co/microsoft/wavecoder-ultra-6.7b)<br> **GGUF quantization:** provided by [bartowski](https://huggingface.co/bartowski) based on `llama.cpp` release [b2675](https://github.com/ggerganov/llama.cpp/releases/tag/b2675)<br> ## Model Summary: WaveCoder ultra is a coding model created with 'Widepread And Versatile Enhanced' instruction tuning. It has exceptional generalization ability across different code-related tasks and has a high efficiency in generation.<br> This model should be used exclusively for coding, and will follow instructions for code generation. ## Prompt Template: Choose the `Alpaca` preset in your LM Studio. Under the hood, the model will see a prompt that's formatted like so: ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {prompt} ### Response: ``` ## Use case and examples WaveCoder ultra is fine tuned for code-related instruction following tasks, including code generation, summarization, repair, and translation. ## Code Generation ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6435718aaaef013d1aec3b8b/X-MeWq5Aa0MTYUhEv2C4F.png) ## Code Summarization ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6435718aaaef013d1aec3b8b/5C_rfhuB1SWXj7t4CrTmO.png) ## Code Repair ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6435718aaaef013d1aec3b8b/EXJPvD3kdpMuqHb0mF0HG.png) ## Code Translation ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6435718aaaef013d1aec3b8b/FZroxLvNUVPlydyJlc0lU.png) ## Technical Details The WaveCoder series of models is the result of a 'Widespread And Versatile Enchanced' (WAVE) instruction tuning with a highly refined dataset. Their 'CodeOcean' consists of 20,000 instruction instances across the 4 code-related tasks (generation, summarization, repair, translation) with instructions generated by GPT-3.5-turbo. To create this dataset, the team used existing raw code from GitHub CodeSearchNet, filtering for quality and diversity, then used a 'novel LLM-based Generator-Discriminator Framework' which involves generating supervised instruction data from the unsupervised open source code. For further details and benchmarks, check out their arXiv paper [here](https://arxiv.org/abs/2312.14187) ## Special thanks 🙏 Special thanks to [Georgi Gerganov](https://github.com/ggerganov) and the whole team working on [llama.cpp](https://github.com/ggerganov/llama.cpp/) for making all of this possible. 🙏 Special thanks to [Kalomaze](https://github.com/kalomaze) for his dataset (linked [here](https://github.com/ggerganov/llama.cpp/discussions/5263)) that was used for calculating the imatrix for these quants, which improves the overall quality! ## Disclaimers LM Studio is not the creator, originator, or owner of any Model featured in the Community Model Program. Each Community Model is created and provided by third parties. LM Studio does not endorse, support, represent or guarantee the completeness, truthfulness, accuracy, or reliability of any Community Model. You understand that Community Models can produce content that might be offensive, harmful, inaccurate or otherwise inappropriate, or deceptive. Each Community Model is the sole responsibility of the person or entity who originated such Model. LM Studio may not monitor or control the Community Models and cannot, and does not, take responsibility for any such Model. LM Studio disclaims all warranties or guarantees about the accuracy, reliability or benefits of the Community Models. LM Studio further disclaims any warranty that the Community Model will meet your requirements, be secure, uninterrupted or available at any time or location, or error-free, viruses-free, or that any errors will be corrected, or otherwise. You will be solely responsible for any damage resulting from your use of or access to the Community Models, your downloading of any Community Model, or use of any other Community Model provided by or through LM Studio.
{"datasets": ["humaneval"], "library_name": "transformers", "license": "other", "license_name": "deepseek", "metrics": ["code_eval"], "pipeline_tag": "text-generation", "tags": ["code"], "quantized_by": "bartowski", "lm_studio": {"param_count": "6.7b", "use_case": "coding", "release_date": "15-04-2024", "model_creator": "microsoft", "prompt_template": "alpaca", "system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.", "base_model": "DeepseekCoder", "original_repo": "microsoft/wavecoder-ultra-6.7b"}}
task
[ "TRANSLATION", "SUMMARIZATION" ]
41,444
joyebright/ICLviaQE
joyebright
null
[ "pytorch", "xlm-roberta", "region:us" ]
2024-09-18T11:00:01Z
2024-09-18T19:12:46+00:00
7
0
--- {} --- The QE model used in our paper: Guiding In-Context Learning of LLMs through Quality Estimation for Machine Translation
null
Non_BioNLP
The QE model used in our paper: Guiding In-Context Learning of LLMs through Quality Estimation for Machine Translation
{}
task
[ "TRANSLATION" ]
41,445
madhavsankar/qp-mscoco-sbert-lr5e-5
madhavsankar
text-classification
[ "transformers", "pytorch", "electra", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-12-07T15:44:11Z
2022-12-12T21:13:55+00:00
118
0
--- {} --- # QP ``` Dataset: MSCOCO Learning Rate: 5e-5 ``` ## Text Diversity Metrics ``` Semantic Similarity: SBERT Syntactic Diversity: Dependency Parse Tree edit distance Lexical Diversity: Character-level edit distance Phonological Diversity: Rhythmic Diversity Morphological Diversity: POS edit distance. ``` ## Results ``` Train Loss (MSE): 0.0127 Dev Loss (MSE): 0.0136 ```
null
Non_BioNLP
# QP ``` Dataset: MSCOCO Learning Rate: 5e-5 ``` ## Text Diversity Metrics ``` Semantic Similarity: SBERT Syntactic Diversity: Dependency Parse Tree edit distance Lexical Diversity: Character-level edit distance Phonological Diversity: Rhythmic Diversity Morphological Diversity: POS edit distance. ``` ## Results ``` Train Loss (MSE): 0.0127 Dev Loss (MSE): 0.0136 ```
{}
task
[ "SEMANTIC_SIMILARITY" ]
41,446
Ejafa/llama_13B
Ejafa
text-generation
[ "transformers", "pytorch", "llama", "text-generation", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-04-18T03:39:12Z
2023-04-18T03:52:23+00:00
26
1
--- license: other --- This LLaMA version resolves the EOS token issues. This is under a special license, please see the LICENSE file for details. This contains the weights for the LLaMA-7b model. This model is under a non-commercial license (see the LICENSE file). You should only use this repository if you have been granted access to the model by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form) but either lost your copy of the weights or got some trouble converting them to the Transformers format. # LLaMA Model Card ## Model details **Organization developing the model** The FAIR team of Meta AI. **Model date** LLaMA was trained between December. 2022 and Feb. 2023. **Model version** This is version 1 of the model. **Model type** LLaMA is an auto-regressive language model, based on the transformer architecture. The model comes in different sizes: 7B, 13B, 33B and 65B parameters. **Paper or resources for more information** More information can be found in the paper “LLaMA, Open and Efficient Foundation Language Models”, available at https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/. **Citations details** https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/ **License** Non-commercial bespoke license **Where to send questions or comments about the model** Questions and comments about LLaMA can be sent via the [GitHub repository](https://github.com/facebookresearch/llama) of the project , by opening an issue. ## Intended use **Primary intended uses** The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension, understanding capabilities and limitations of current language models, and developing techniques to improve those, evaluating and mitigating biases, risks, toxic and harmful content generations, hallucinations. **Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence. **Out-of-scope use cases** LLaMA is a base, or foundational, model. As such, it should not be used on downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers. ## Factors **Relevant factors** One of the most relevant factors for which model performance may vary is which language is used. Although we included 20 languages in the training data, most of our dataset is made of English text, and we thus expect the model to perform better for English than other languages. Relatedly, it has been shown in previous studies that performance might vary for different dialects, and we expect that it will be the case for our model. **Evaluation factors** As our model is trained on data from the Web, we expect that it reflects biases from this source. We thus evaluated on RAI datasets to measure biases exhibited by the model for gender, religion, race, sexual orientation, age, nationality, disability, physical appearance and socio-economic status. We also measure the toxicity of model generations, depending on the toxicity of the context used to prompt the model. ## Metrics **Model performance measures** We use the following measure to evaluate the model: - Accuracy for common sense reasoning, reading comprehension, natural language understanding (MMLU), BIG-bench hard, WinoGender and CrowS-Pairs, - Exact match for question answering, - The toxicity score from Perspective API on RealToxicityPrompts. **Decision thresholds** Not applicable. **Approaches to uncertainty and variability** Due to the high computational requirements of training LLMs, we trained only one model of each size, and thus could not evaluate variability of pre-training. ## Evaluation datasets The model was evaluated on the following benchmarks: BoolQ, PIQA, SIQA, HellaSwag, WinoGrande, ARC, OpenBookQA, NaturalQuestions, TriviaQA, RACE, MMLU, BIG-bench hard, GSM8k, RealToxicityPrompts, WinoGender, CrowS-Pairs. ## Training dataset The model was trained using the following source of data: CCNet [67%], C4 [15%], GitHub [4.5%], Wikipedia [4.5%], Books [4.5%], ArXiv [2.5%], Stack Exchange[2%]. The Wikipedia and Books domains include data in the following languages: bg, ca, cs, da, de, en, es, fr, hr, hu, it, nl, pl, pt, ro, ru, sl, sr, sv, uk. See the paper for more details about the training set and corresponding preprocessing. ## Quantitative analysis Hyperparameters for the model architecture <table> <thead> <tr> <th >LLaMA</th> <th colspan=6>Model hyper parameters </th> </tr> <tr> <th>Number of parameters</th><th>dimension</th><th>n heads</th><th>n layers</th><th>Learn rate</th><th>Batch size</th><th>n tokens</th> </tr> </thead> <tbody> <tr> <th>7B</th> <th>4096</th> <th>32</th> <th>32</th> <th>3.0E-04</th><th>4M</th><th>1T </tr> <tr> <th>13B</th><th>5120</th><th>40</th><th>40</th><th>3.0E-04</th><th>4M</th><th>1T </tr> <tr> <th>33B</th><th>6656</th><th>52</th><th>60</th><th>1.5.E-04</th><th>4M</th><th>1.4T </tr> <tr> <th>65B</th><th>8192</th><th>64</th><th>80</th><th>1.5.E-04</th><th>4M</th><th>1.4T </tr> </tbody> </table> *Table 1 - Summary of LLama Model Hyperparameters* We present our results on eight standard common sense reasoning benchmarks in the table below. <table> <thead> <tr> <th>LLaMA</th> <th colspan=9>Reasoning tasks </th> </tr> <tr> <th>Number of parameters</th> <th>BoolQ</th><th>PIQA</th><th>SIQA</th><th>HellaSwag</th><th>WinoGrande</th><th>ARC-e</th><th>ARC-c</th><th>OBQA</th><th>COPA</th> </tr> </thead> <tbody> <tr> <th>7B</th><th>76.5</th><th>79.8</th><th>48.9</th><th>76.1</th><th>70.1</th><th>76.7</th><th>47.6</th><th>57.2</th><th>93 </th> <tr><th>13B</th><th>78.1</th><th>80.1</th><th>50.4</th><th>79.2</th><th>73</th><th>78.1</th><th>52.7</th><th>56.4</th><th>94 </th> <tr><th>33B</th><th>83.1</th><th>82.3</th><th>50.4</th><th>82.8</th><th>76</th><th>81.4</th><th>57.8</th><th>58.6</th><th>92 </th> <tr><th>65B</th><th>85.3</th><th>82.8</th><th>52.3</th><th>84.2</th><th>77</th><th>81.5</th><th>56</th><th>60.2</th><th>94</th></tr> </tbody> </table> *Table 2 - Summary of LLama Model Performance on Reasoning tasks* We present our results on bias in the table below. Note that lower value is better indicating lower bias. | No | Category | FAIR LLM | | --- | -------------------- | -------- | | 1 | Gender | 70.6 | | 2 | Religion | 79 | | 3 | Race/Color | 57 | | 4 | Sexual orientation | 81 | | 5 | Age | 70.1 | | 6 | Nationality | 64.2 | | 7 | Disability | 66.7 | | 8 | Physical appearance | 77.8 | | 9 | Socioeconomic status | 71.5 | | | LLaMA Average | 66.6 | *Table 3 - Summary bias of our model output* ## Ethical considerations **Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data. **Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way. **Mitigations** We filtered the data from the Web based on its proximity to Wikipedia text and references. For this, we used a Kneser-Ney language model and a fastText linear classifier. **Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard. **Use cases** LLaMA is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigations of risks. These risks and potential fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
null
Non_BioNLP
This LLaMA version resolves the EOS token issues. This is under a special license, please see the LICENSE file for details. This contains the weights for the LLaMA-7b model. This model is under a non-commercial license (see the LICENSE file). You should only use this repository if you have been granted access to the model by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form) but either lost your copy of the weights or got some trouble converting them to the Transformers format. # LLaMA Model Card ## Model details **Organization developing the model** The FAIR team of Meta AI. **Model date** LLaMA was trained between December. 2022 and Feb. 2023. **Model version** This is version 1 of the model. **Model type** LLaMA is an auto-regressive language model, based on the transformer architecture. The model comes in different sizes: 7B, 13B, 33B and 65B parameters. **Paper or resources for more information** More information can be found in the paper “LLaMA, Open and Efficient Foundation Language Models”, available at https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/. **Citations details** https://research.facebook.com/publications/llama-open-and-efficient-foundation-language-models/ **License** Non-commercial bespoke license **Where to send questions or comments about the model** Questions and comments about LLaMA can be sent via the [GitHub repository](https://github.com/facebookresearch/llama) of the project , by opening an issue. ## Intended use **Primary intended uses** The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension, understanding capabilities and limitations of current language models, and developing techniques to improve those, evaluating and mitigating biases, risks, toxic and harmful content generations, hallucinations. **Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence. **Out-of-scope use cases** LLaMA is a base, or foundational, model. As such, it should not be used on downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers. ## Factors **Relevant factors** One of the most relevant factors for which model performance may vary is which language is used. Although we included 20 languages in the training data, most of our dataset is made of English text, and we thus expect the model to perform better for English than other languages. Relatedly, it has been shown in previous studies that performance might vary for different dialects, and we expect that it will be the case for our model. **Evaluation factors** As our model is trained on data from the Web, we expect that it reflects biases from this source. We thus evaluated on RAI datasets to measure biases exhibited by the model for gender, religion, race, sexual orientation, age, nationality, disability, physical appearance and socio-economic status. We also measure the toxicity of model generations, depending on the toxicity of the context used to prompt the model. ## Metrics **Model performance measures** We use the following measure to evaluate the model: - Accuracy for common sense reasoning, reading comprehension, natural language understanding (MMLU), BIG-bench hard, WinoGender and CrowS-Pairs, - Exact match for question answering, - The toxicity score from Perspective API on RealToxicityPrompts. **Decision thresholds** Not applicable. **Approaches to uncertainty and variability** Due to the high computational requirements of training LLMs, we trained only one model of each size, and thus could not evaluate variability of pre-training. ## Evaluation datasets The model was evaluated on the following benchmarks: BoolQ, PIQA, SIQA, HellaSwag, WinoGrande, ARC, OpenBookQA, NaturalQuestions, TriviaQA, RACE, MMLU, BIG-bench hard, GSM8k, RealToxicityPrompts, WinoGender, CrowS-Pairs. ## Training dataset The model was trained using the following source of data: CCNet [67%], C4 [15%], GitHub [4.5%], Wikipedia [4.5%], Books [4.5%], ArXiv [2.5%], Stack Exchange[2%]. The Wikipedia and Books domains include data in the following languages: bg, ca, cs, da, de, en, es, fr, hr, hu, it, nl, pl, pt, ro, ru, sl, sr, sv, uk. See the paper for more details about the training set and corresponding preprocessing. ## Quantitative analysis Hyperparameters for the model architecture <table> <thead> <tr> <th >LLaMA</th> <th colspan=6>Model hyper parameters </th> </tr> <tr> <th>Number of parameters</th><th>dimension</th><th>n heads</th><th>n layers</th><th>Learn rate</th><th>Batch size</th><th>n tokens</th> </tr> </thead> <tbody> <tr> <th>7B</th> <th>4096</th> <th>32</th> <th>32</th> <th>3.0E-04</th><th>4M</th><th>1T </tr> <tr> <th>13B</th><th>5120</th><th>40</th><th>40</th><th>3.0E-04</th><th>4M</th><th>1T </tr> <tr> <th>33B</th><th>6656</th><th>52</th><th>60</th><th>1.5.E-04</th><th>4M</th><th>1.4T </tr> <tr> <th>65B</th><th>8192</th><th>64</th><th>80</th><th>1.5.E-04</th><th>4M</th><th>1.4T </tr> </tbody> </table> *Table 1 - Summary of LLama Model Hyperparameters* We present our results on eight standard common sense reasoning benchmarks in the table below. <table> <thead> <tr> <th>LLaMA</th> <th colspan=9>Reasoning tasks </th> </tr> <tr> <th>Number of parameters</th> <th>BoolQ</th><th>PIQA</th><th>SIQA</th><th>HellaSwag</th><th>WinoGrande</th><th>ARC-e</th><th>ARC-c</th><th>OBQA</th><th>COPA</th> </tr> </thead> <tbody> <tr> <th>7B</th><th>76.5</th><th>79.8</th><th>48.9</th><th>76.1</th><th>70.1</th><th>76.7</th><th>47.6</th><th>57.2</th><th>93 </th> <tr><th>13B</th><th>78.1</th><th>80.1</th><th>50.4</th><th>79.2</th><th>73</th><th>78.1</th><th>52.7</th><th>56.4</th><th>94 </th> <tr><th>33B</th><th>83.1</th><th>82.3</th><th>50.4</th><th>82.8</th><th>76</th><th>81.4</th><th>57.8</th><th>58.6</th><th>92 </th> <tr><th>65B</th><th>85.3</th><th>82.8</th><th>52.3</th><th>84.2</th><th>77</th><th>81.5</th><th>56</th><th>60.2</th><th>94</th></tr> </tbody> </table> *Table 2 - Summary of LLama Model Performance on Reasoning tasks* We present our results on bias in the table below. Note that lower value is better indicating lower bias. | No | Category | FAIR LLM | | --- | -------------------- | -------- | | 1 | Gender | 70.6 | | 2 | Religion | 79 | | 3 | Race/Color | 57 | | 4 | Sexual orientation | 81 | | 5 | Age | 70.1 | | 6 | Nationality | 64.2 | | 7 | Disability | 66.7 | | 8 | Physical appearance | 77.8 | | 9 | Socioeconomic status | 71.5 | | | LLaMA Average | 66.6 | *Table 3 - Summary bias of our model output* ## Ethical considerations **Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data. **Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way. **Mitigations** We filtered the data from the Web based on its proximity to Wikipedia text and references. For this, we used a Kneser-Ney language model and a fastText linear classifier. **Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard. **Use cases** LLaMA is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigations of risks. These risks and potential fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
{"license": "other"}
task
[ "QUESTION_ANSWERING" ]
41,447
TamalDeFrijol/CompararTextos
TamalDeFrijol
text-classification
[ "transformers", "pytorch", "roberta", "text-classification", "generated_from_trainer", "dataset:glue", "base_model:distilbert/distilroberta-base", "base_model:finetune:distilbert/distilroberta-base", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-09-20T20:53:26Z
2023-09-22T06:28:23+00:00
19
0
--- base_model: distilroberta-base datasets: - glue license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: CompararTextos results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue config: mrpc split: validation args: mrpc metrics: - type: accuracy value: 0.8137254901960784 name: Accuracy - type: f1 value: 0.8745874587458745 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CompararTextos This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.8485 - Accuracy: 0.8137 - F1: 0.8746 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.5557 | 1.09 | 500 | 0.6682 | 0.8211 | 0.8773 | | 0.3543 | 2.18 | 1000 | 0.8485 | 0.8137 | 0.8746 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CompararTextos This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.8485 - Accuracy: 0.8137 - F1: 0.8746 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.5557 | 1.09 | 500 | 0.6682 | 0.8211 | 0.8773 | | 0.3543 | 2.18 | 1000 | 0.8485 | 0.8137 | 0.8746 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
{"base_model": "distilroberta-base", "datasets": ["glue"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "CompararTextos", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "config": "mrpc", "split": "validation", "args": "mrpc"}, "metrics": [{"type": "accuracy", "value": 0.8137254901960784, "name": "Accuracy"}, {"type": "f1", "value": 0.8745874587458745, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,448
wwydmanski/specter2_pubmed-v0.7
wwydmanski
sentence-similarity
[ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:57566", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:allenai/specter2_base", "base_model:finetune:allenai/specter2_base", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2025-03-05T10:12:19Z
2025-03-05T19:13:09+00:00
105
0
--- base_model: allenai/specter2_base library_name: sentence-transformers metrics: - cosine_accuracy@1 - cosine_accuracy@3 - cosine_accuracy@5 - cosine_accuracy@10 - cosine_precision@1 - cosine_precision@3 - cosine_precision@5 - cosine_precision@10 - cosine_recall@1 - cosine_recall@3 - cosine_recall@5 - cosine_recall@10 - cosine_ndcg@10 - cosine_mrr@10 - cosine_map@100 pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:57566 - loss:MultipleNegativesRankingLoss widget: - source_sentence: Cannabis evolution sentences: - 'The cannabis conundrum. ' - 'Dawn and decline of the holy smoke. ' - '[Computer-assisted system for interstitial hyperthermia]. ' - source_sentence: Lateral Ventricle AT/RT sentences: - 'Improved Assessment of Pathological Regurgitation in Patients with Prosthetic Heart Valves by Multiplane Transesophageal Echocardiography. ' - '[Surgical anatomy of the lateral ventricles]. ' - 'Lateral Ventricle Atypical Teratoid/Rhabdoid Tumor (AT/RT): Case Report and Review of Literature. ' - source_sentence: Parkinsonian motor fluctuations sentences: - 'Basic mechanisms of motor fluctuations. ' - 'Nonmotor Fluctuations in Parkinson''s Disease. ' - 'Sodium conductance in calcium channels of single smooth muscle cells of guinea-pig taenia caeci. ' - source_sentence: Phagocytic Assay sentences: - 'Assay for phagocytosis. ' - 'Opsonophagocytic assay. ' - 'Clinical evaluation of synthetic aperture sequential beamforming ultrasound in patients with liver tumors. ' - source_sentence: Content validity assessment sentences: - 'Content validity is naught. ' - 'Male requires a higher median target effect-site concentration of propofol for I-gel placement when combined with dexmedetomidine. ' - 'Establishing content-validity of a disease-specific health-related quality of life instrument for patients with chronic hypersensitivity pneumonitis. ' model-index: - name: SentenceTransformer based on allenai/specter2_base results: - task: type: information-retrieval name: Information Retrieval dataset: name: NanoNQ type: NanoNQ metrics: - type: cosine_accuracy@1 value: 0.04 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.2 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.22 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.3 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.04 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.06666666666666667 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.044000000000000004 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.03 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.03 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.18 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.2 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.27 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.15735897323110787 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.13194444444444445 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.13092350353731416 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: NanoMSMARCO type: NanoMSMARCO metrics: - type: cosine_accuracy@1 value: 0.2 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.36 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.42 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.52 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.2 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.12 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.084 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.052000000000000005 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.2 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.36 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.42 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.52 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.35375176104312445 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.30138095238095236 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.31610409814616347 name: Cosine Map@100 - task: type: nano-beir name: Nano BEIR dataset: name: NanoBEIR mean type: NanoBEIR_mean metrics: - type: cosine_accuracy@1 value: 0.12000000000000001 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.28 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.32 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.41000000000000003 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.12000000000000001 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.09333333333333332 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.064 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.041 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.115 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.27 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.31 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.395 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.25555536713711613 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.21666269841269842 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.22351380084173883 name: Cosine Map@100 --- # SentenceTransformer based on allenai/specter2_base This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [allenai/specter2_base](https://huggingface.co/allenai/specter2_base) on the json dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [allenai/specter2_base](https://huggingface.co/allenai/specter2_base) <!-- at revision 3447645e1def9117997203454fa4495937bfbd83 --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity - **Training Dataset:** - json <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: PeftModelForFeatureExtraction (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("sentence_transformers_model_id") # Run inference sentences = [ 'Content validity assessment', 'Establishing content-validity of a disease-specific health-related quality of life instrument for patients with chronic hypersensitivity pneumonitis. ', 'Content validity is naught. ', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Datasets: `NanoNQ` and `NanoMSMARCO` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | NanoNQ | NanoMSMARCO | |:--------------------|:-----------|:------------| | cosine_accuracy@1 | 0.04 | 0.2 | | cosine_accuracy@3 | 0.2 | 0.36 | | cosine_accuracy@5 | 0.22 | 0.42 | | cosine_accuracy@10 | 0.3 | 0.52 | | cosine_precision@1 | 0.04 | 0.2 | | cosine_precision@3 | 0.0667 | 0.12 | | cosine_precision@5 | 0.044 | 0.084 | | cosine_precision@10 | 0.03 | 0.052 | | cosine_recall@1 | 0.03 | 0.2 | | cosine_recall@3 | 0.18 | 0.36 | | cosine_recall@5 | 0.2 | 0.42 | | cosine_recall@10 | 0.27 | 0.52 | | **cosine_ndcg@10** | **0.1574** | **0.3538** | | cosine_mrr@10 | 0.1319 | 0.3014 | | cosine_map@100 | 0.1309 | 0.3161 | #### Nano BEIR * Dataset: `NanoBEIR_mean` * Evaluated with [<code>NanoBEIREvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.NanoBEIREvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.12 | | cosine_accuracy@3 | 0.28 | | cosine_accuracy@5 | 0.32 | | cosine_accuracy@10 | 0.41 | | cosine_precision@1 | 0.12 | | cosine_precision@3 | 0.0933 | | cosine_precision@5 | 0.064 | | cosine_precision@10 | 0.041 | | cosine_recall@1 | 0.115 | | cosine_recall@3 | 0.27 | | cosine_recall@5 | 0.31 | | cosine_recall@10 | 0.395 | | **cosine_ndcg@10** | **0.2556** | | cosine_mrr@10 | 0.2167 | | cosine_map@100 | 0.2235 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### json * Dataset: json * Size: 57,566 training samples * Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code> * Approximate statistics based on the first 1000 samples: | | anchor | positive | negative | |:--------|:--------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:---------------------------------------------------------------------------------| | type | string | string | string | | details | <ul><li>min: 3 tokens</li><li>mean: 7.4 tokens</li><li>max: 27 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 19.98 tokens</li><li>max: 78 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 12.3 tokens</li><li>max: 46 tokens</li></ul> | * Samples: | anchor | positive | negative | |:------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------| | <code>neutron camera autofocus</code> | <code>The autofocusing system of the IMAT neutron camera. </code> | <code>Robust autofocusing in microscopy. </code> | | <code>Melanophore-stimulating hormone-melatonin antagonism</code> | <code>Melanophore-stimulating hormone-melatonin antagonism in relation to colour change in Xenopus laevis. </code> | <code>Melanin-concentrating hormone, melanocortin receptors and regulation of luteinizing hormone release. </code> | | <code>Healthcare Reform Criticism</code> | <code>Experts critique doctors' ideas for reforming health care. </code> | <code>Healthcare reform? </code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 64 - `gradient_accumulation_steps`: 8 - `learning_rate`: 3e-05 - `weight_decay`: 0.01 - `num_train_epochs`: 1 - `lr_scheduler_type`: cosine_with_restarts - `warmup_ratio`: 0.1 - `bf16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 64 - `per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 8 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 3e-05 - `weight_decay`: 0.01 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: cosine_with_restarts - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs <details><summary>Click to expand</summary> | Epoch | Step | Training Loss | NanoNQ_cosine_ndcg@10 | NanoMSMARCO_cosine_ndcg@10 | NanoBEIR_mean_cosine_ndcg@10 | |:------:|:----:|:-------------:|:---------------------:|:--------------------------:|:----------------------------:| | 0 | 0 | - | 0.0633 | 0.2640 | 0.1636 | | 0.0089 | 1 | 22.3889 | - | - | - | | 0.0178 | 2 | 22.1875 | - | - | - | | 0.0267 | 3 | 21.4657 | - | - | - | | 0.0356 | 4 | 21.7306 | - | - | - | | 0.0444 | 5 | 21.3965 | - | - | - | | 0.0533 | 6 | 21.5539 | - | - | - | | 0.0622 | 7 | 21.5853 | - | - | - | | 0.0711 | 8 | 21.6282 | - | - | - | | 0.08 | 9 | 21.2169 | - | - | - | | 0.0889 | 10 | 21.1228 | - | - | - | | 0.0978 | 11 | 20.7026 | - | - | - | | 0.1067 | 12 | 21.2562 | - | - | - | | 0.1156 | 13 | 21.1227 | - | - | - | | 0.1244 | 14 | 20.6465 | - | - | - | | 0.1333 | 15 | 20.5888 | - | - | - | | 0.1422 | 16 | 20.2334 | - | - | - | | 0.1511 | 17 | 20.6545 | - | - | - | | 0.16 | 18 | 20.2517 | - | - | - | | 0.1689 | 19 | 19.6825 | - | - | - | | 0.1778 | 20 | 19.9251 | - | - | - | | 0.1867 | 21 | 19.6937 | - | - | - | | 0.1956 | 22 | 19.2779 | - | - | - | | 0.2044 | 23 | 19.2927 | - | - | - | | 0.2133 | 24 | 19.2895 | - | - | - | | 0.2222 | 25 | 18.9854 | 0.1085 | 0.2978 | 0.2032 | | 0.2311 | 26 | 18.5096 | - | - | - | | 0.24 | 27 | 18.3789 | - | - | - | | 0.2489 | 28 | 18.2159 | - | - | - | | 0.2578 | 29 | 17.8306 | - | - | - | | 0.2667 | 30 | 17.5964 | - | - | - | | 0.2756 | 31 | 17.2527 | - | - | - | | 0.2844 | 32 | 17.2274 | - | - | - | | 0.2933 | 33 | 17.557 | - | - | - | | 0.3022 | 34 | 17.4682 | - | - | - | | 0.3111 | 35 | 16.9115 | - | - | - | | 0.32 | 36 | 16.9938 | - | - | - | | 0.3289 | 37 | 16.1648 | - | - | - | | 0.3378 | 38 | 16.2908 | - | - | - | | 0.3467 | 39 | 16.7883 | - | - | - | | 0.3556 | 40 | 16.5278 | - | - | - | | 0.3644 | 41 | 15.4466 | - | - | - | | 0.3733 | 42 | 15.3954 | - | - | - | | 0.3822 | 43 | 16.1363 | - | - | - | | 0.3911 | 44 | 14.8857 | - | - | - | | 0.4 | 45 | 15.5596 | - | - | - | | 0.4089 | 46 | 15.6978 | - | - | - | | 0.4178 | 47 | 14.6959 | - | - | - | | 0.4267 | 48 | 15.0677 | - | - | - | | 0.4356 | 49 | 14.4375 | - | - | - | | 0.4444 | 50 | 15.0901 | 0.1348 | 0.3290 | 0.2319 | | 0.4533 | 51 | 13.813 | - | - | - | | 0.4622 | 52 | 14.3135 | - | - | - | | 0.4711 | 53 | 14.9517 | - | - | - | | 0.48 | 54 | 14.0599 | - | - | - | | 0.4889 | 55 | 13.8699 | - | - | - | | 0.4978 | 56 | 14.6277 | - | - | - | | 0.5067 | 57 | 13.3742 | - | - | - | | 0.5156 | 58 | 13.7985 | - | - | - | | 0.5244 | 59 | 13.2972 | - | - | - | | 0.5333 | 60 | 12.9836 | - | - | - | | 0.5422 | 61 | 13.2035 | - | - | - | | 0.5511 | 62 | 13.399 | - | - | - | | 0.56 | 63 | 12.8694 | - | - | - | | 0.5689 | 64 | 12.9775 | - | - | - | | 0.5778 | 65 | 13.5685 | - | - | - | | 0.5867 | 66 | 12.5359 | - | - | - | | 0.5956 | 67 | 12.7989 | - | - | - | | 0.6044 | 68 | 12.2337 | - | - | - | | 0.6133 | 69 | 12.9103 | - | - | - | | 0.6222 | 70 | 12.6319 | - | - | - | | 0.6311 | 71 | 12.3662 | - | - | - | | 0.64 | 72 | 12.4788 | - | - | - | | 0.6489 | 73 | 12.7665 | - | - | - | | 0.6578 | 74 | 12.7189 | - | - | - | | 0.6667 | 75 | 11.6918 | 0.1558 | 0.3619 | 0.2588 | | 0.6756 | 76 | 12.0761 | - | - | - | | 0.6844 | 77 | 12.0588 | - | - | - | | 0.6933 | 78 | 12.1507 | - | - | - | | 0.7022 | 79 | 11.7982 | - | - | - | | 0.7111 | 80 | 12.6278 | - | - | - | | 0.72 | 81 | 12.1629 | - | - | - | | 0.7289 | 82 | 11.9421 | - | - | - | | 0.7378 | 83 | 12.1184 | - | - | - | | 0.7467 | 84 | 11.9142 | - | - | - | | 0.7556 | 85 | 12.1162 | - | - | - | | 0.7644 | 86 | 12.2741 | - | - | - | | 0.7733 | 87 | 11.8835 | - | - | - | | 0.7822 | 88 | 11.8583 | - | - | - | | 0.7911 | 89 | 11.74 | - | - | - | | 0.8 | 90 | 12.0793 | - | - | - | | 0.8089 | 91 | 11.6838 | - | - | - | | 0.8178 | 92 | 11.6922 | - | - | - | | 0.8267 | 93 | 11.9418 | - | - | - | | 0.8356 | 94 | 12.2899 | - | - | - | | 0.8444 | 95 | 12.0957 | - | - | - | | 0.8533 | 96 | 12.0643 | - | - | - | | 0.8622 | 97 | 12.3496 | - | - | - | | 0.8711 | 98 | 12.3521 | - | - | - | | 0.88 | 99 | 11.7082 | - | - | - | | 0.8889 | 100 | 11.6085 | 0.1574 | 0.3538 | 0.2556 | | 0.8978 | 101 | 11.7018 | - | - | - | | 0.9067 | 102 | 11.8227 | - | - | - | | 0.9156 | 103 | 12.5774 | - | - | - | | 0.9244 | 104 | 11.465 | - | - | - | | 0.9333 | 105 | 11.303 | - | - | - | | 0.9422 | 106 | 11.8521 | - | - | - | | 0.9511 | 107 | 11.6083 | - | - | - | | 0.96 | 108 | 12.3972 | - | - | - | | 0.9689 | 109 | 11.6962 | - | - | - | | 0.9778 | 110 | 11.1335 | - | - | - | | 0.9867 | 111 | 12.1325 | - | - | - | | 0.9956 | 112 | 11.7444 | - | - | - | </details> ### Framework Versions - Python: 3.12.3 - Sentence Transformers: 3.3.1 - Transformers: 4.49.0 - PyTorch: 2.5.1 - Accelerate: 1.2.1 - Datasets: 2.19.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
BioNLP
# SentenceTransformer based on allenai/specter2_base This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [allenai/specter2_base](https://huggingface.co/allenai/specter2_base) on the json dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [allenai/specter2_base](https://huggingface.co/allenai/specter2_base) <!-- at revision 3447645e1def9117997203454fa4495937bfbd83 --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity - **Training Dataset:** - json <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: PeftModelForFeatureExtraction (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("sentence_transformers_model_id") # Run inference sentences = [ 'Content validity assessment', 'Establishing content-validity of a disease-specific health-related quality of life instrument for patients with chronic hypersensitivity pneumonitis. ', 'Content validity is naught. ', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Datasets: `NanoNQ` and `NanoMSMARCO` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | NanoNQ | NanoMSMARCO | |:--------------------|:-----------|:------------| | cosine_accuracy@1 | 0.04 | 0.2 | | cosine_accuracy@3 | 0.2 | 0.36 | | cosine_accuracy@5 | 0.22 | 0.42 | | cosine_accuracy@10 | 0.3 | 0.52 | | cosine_precision@1 | 0.04 | 0.2 | | cosine_precision@3 | 0.0667 | 0.12 | | cosine_precision@5 | 0.044 | 0.084 | | cosine_precision@10 | 0.03 | 0.052 | | cosine_recall@1 | 0.03 | 0.2 | | cosine_recall@3 | 0.18 | 0.36 | | cosine_recall@5 | 0.2 | 0.42 | | cosine_recall@10 | 0.27 | 0.52 | | **cosine_ndcg@10** | **0.1574** | **0.3538** | | cosine_mrr@10 | 0.1319 | 0.3014 | | cosine_map@100 | 0.1309 | 0.3161 | #### Nano BEIR * Dataset: `NanoBEIR_mean` * Evaluated with [<code>NanoBEIREvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.NanoBEIREvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.12 | | cosine_accuracy@3 | 0.28 | | cosine_accuracy@5 | 0.32 | | cosine_accuracy@10 | 0.41 | | cosine_precision@1 | 0.12 | | cosine_precision@3 | 0.0933 | | cosine_precision@5 | 0.064 | | cosine_precision@10 | 0.041 | | cosine_recall@1 | 0.115 | | cosine_recall@3 | 0.27 | | cosine_recall@5 | 0.31 | | cosine_recall@10 | 0.395 | | **cosine_ndcg@10** | **0.2556** | | cosine_mrr@10 | 0.2167 | | cosine_map@100 | 0.2235 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### json * Dataset: json * Size: 57,566 training samples * Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code> * Approximate statistics based on the first 1000 samples: | | anchor | positive | negative | |:--------|:--------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:---------------------------------------------------------------------------------| | type | string | string | string | | details | <ul><li>min: 3 tokens</li><li>mean: 7.4 tokens</li><li>max: 27 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 19.98 tokens</li><li>max: 78 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 12.3 tokens</li><li>max: 46 tokens</li></ul> | * Samples: | anchor | positive | negative | |:------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------| | <code>neutron camera autofocus</code> | <code>The autofocusing system of the IMAT neutron camera. </code> | <code>Robust autofocusing in microscopy. </code> | | <code>Melanophore-stimulating hormone-melatonin antagonism</code> | <code>Melanophore-stimulating hormone-melatonin antagonism in relation to colour change in Xenopus laevis. </code> | <code>Melanin-concentrating hormone, melanocortin receptors and regulation of luteinizing hormone release. </code> | | <code>Healthcare Reform Criticism</code> | <code>Experts critique doctors' ideas for reforming health care. </code> | <code>Healthcare reform? </code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 64 - `gradient_accumulation_steps`: 8 - `learning_rate`: 3e-05 - `weight_decay`: 0.01 - `num_train_epochs`: 1 - `lr_scheduler_type`: cosine_with_restarts - `warmup_ratio`: 0.1 - `bf16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 64 - `per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 8 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 3e-05 - `weight_decay`: 0.01 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: cosine_with_restarts - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs <details><summary>Click to expand</summary> | Epoch | Step | Training Loss | NanoNQ_cosine_ndcg@10 | NanoMSMARCO_cosine_ndcg@10 | NanoBEIR_mean_cosine_ndcg@10 | |:------:|:----:|:-------------:|:---------------------:|:--------------------------:|:----------------------------:| | 0 | 0 | - | 0.0633 | 0.2640 | 0.1636 | | 0.0089 | 1 | 22.3889 | - | - | - | | 0.0178 | 2 | 22.1875 | - | - | - | | 0.0267 | 3 | 21.4657 | - | - | - | | 0.0356 | 4 | 21.7306 | - | - | - | | 0.0444 | 5 | 21.3965 | - | - | - | | 0.0533 | 6 | 21.5539 | - | - | - | | 0.0622 | 7 | 21.5853 | - | - | - | | 0.0711 | 8 | 21.6282 | - | - | - | | 0.08 | 9 | 21.2169 | - | - | - | | 0.0889 | 10 | 21.1228 | - | - | - | | 0.0978 | 11 | 20.7026 | - | - | - | | 0.1067 | 12 | 21.2562 | - | - | - | | 0.1156 | 13 | 21.1227 | - | - | - | | 0.1244 | 14 | 20.6465 | - | - | - | | 0.1333 | 15 | 20.5888 | - | - | - | | 0.1422 | 16 | 20.2334 | - | - | - | | 0.1511 | 17 | 20.6545 | - | - | - | | 0.16 | 18 | 20.2517 | - | - | - | | 0.1689 | 19 | 19.6825 | - | - | - | | 0.1778 | 20 | 19.9251 | - | - | - | | 0.1867 | 21 | 19.6937 | - | - | - | | 0.1956 | 22 | 19.2779 | - | - | - | | 0.2044 | 23 | 19.2927 | - | - | - | | 0.2133 | 24 | 19.2895 | - | - | - | | 0.2222 | 25 | 18.9854 | 0.1085 | 0.2978 | 0.2032 | | 0.2311 | 26 | 18.5096 | - | - | - | | 0.24 | 27 | 18.3789 | - | - | - | | 0.2489 | 28 | 18.2159 | - | - | - | | 0.2578 | 29 | 17.8306 | - | - | - | | 0.2667 | 30 | 17.5964 | - | - | - | | 0.2756 | 31 | 17.2527 | - | - | - | | 0.2844 | 32 | 17.2274 | - | - | - | | 0.2933 | 33 | 17.557 | - | - | - | | 0.3022 | 34 | 17.4682 | - | - | - | | 0.3111 | 35 | 16.9115 | - | - | - | | 0.32 | 36 | 16.9938 | - | - | - | | 0.3289 | 37 | 16.1648 | - | - | - | | 0.3378 | 38 | 16.2908 | - | - | - | | 0.3467 | 39 | 16.7883 | - | - | - | | 0.3556 | 40 | 16.5278 | - | - | - | | 0.3644 | 41 | 15.4466 | - | - | - | | 0.3733 | 42 | 15.3954 | - | - | - | | 0.3822 | 43 | 16.1363 | - | - | - | | 0.3911 | 44 | 14.8857 | - | - | - | | 0.4 | 45 | 15.5596 | - | - | - | | 0.4089 | 46 | 15.6978 | - | - | - | | 0.4178 | 47 | 14.6959 | - | - | - | | 0.4267 | 48 | 15.0677 | - | - | - | | 0.4356 | 49 | 14.4375 | - | - | - | | 0.4444 | 50 | 15.0901 | 0.1348 | 0.3290 | 0.2319 | | 0.4533 | 51 | 13.813 | - | - | - | | 0.4622 | 52 | 14.3135 | - | - | - | | 0.4711 | 53 | 14.9517 | - | - | - | | 0.48 | 54 | 14.0599 | - | - | - | | 0.4889 | 55 | 13.8699 | - | - | - | | 0.4978 | 56 | 14.6277 | - | - | - | | 0.5067 | 57 | 13.3742 | - | - | - | | 0.5156 | 58 | 13.7985 | - | - | - | | 0.5244 | 59 | 13.2972 | - | - | - | | 0.5333 | 60 | 12.9836 | - | - | - | | 0.5422 | 61 | 13.2035 | - | - | - | | 0.5511 | 62 | 13.399 | - | - | - | | 0.56 | 63 | 12.8694 | - | - | - | | 0.5689 | 64 | 12.9775 | - | - | - | | 0.5778 | 65 | 13.5685 | - | - | - | | 0.5867 | 66 | 12.5359 | - | - | - | | 0.5956 | 67 | 12.7989 | - | - | - | | 0.6044 | 68 | 12.2337 | - | - | - | | 0.6133 | 69 | 12.9103 | - | - | - | | 0.6222 | 70 | 12.6319 | - | - | - | | 0.6311 | 71 | 12.3662 | - | - | - | | 0.64 | 72 | 12.4788 | - | - | - | | 0.6489 | 73 | 12.7665 | - | - | - | | 0.6578 | 74 | 12.7189 | - | - | - | | 0.6667 | 75 | 11.6918 | 0.1558 | 0.3619 | 0.2588 | | 0.6756 | 76 | 12.0761 | - | - | - | | 0.6844 | 77 | 12.0588 | - | - | - | | 0.6933 | 78 | 12.1507 | - | - | - | | 0.7022 | 79 | 11.7982 | - | - | - | | 0.7111 | 80 | 12.6278 | - | - | - | | 0.72 | 81 | 12.1629 | - | - | - | | 0.7289 | 82 | 11.9421 | - | - | - | | 0.7378 | 83 | 12.1184 | - | - | - | | 0.7467 | 84 | 11.9142 | - | - | - | | 0.7556 | 85 | 12.1162 | - | - | - | | 0.7644 | 86 | 12.2741 | - | - | - | | 0.7733 | 87 | 11.8835 | - | - | - | | 0.7822 | 88 | 11.8583 | - | - | - | | 0.7911 | 89 | 11.74 | - | - | - | | 0.8 | 90 | 12.0793 | - | - | - | | 0.8089 | 91 | 11.6838 | - | - | - | | 0.8178 | 92 | 11.6922 | - | - | - | | 0.8267 | 93 | 11.9418 | - | - | - | | 0.8356 | 94 | 12.2899 | - | - | - | | 0.8444 | 95 | 12.0957 | - | - | - | | 0.8533 | 96 | 12.0643 | - | - | - | | 0.8622 | 97 | 12.3496 | - | - | - | | 0.8711 | 98 | 12.3521 | - | - | - | | 0.88 | 99 | 11.7082 | - | - | - | | 0.8889 | 100 | 11.6085 | 0.1574 | 0.3538 | 0.2556 | | 0.8978 | 101 | 11.7018 | - | - | - | | 0.9067 | 102 | 11.8227 | - | - | - | | 0.9156 | 103 | 12.5774 | - | - | - | | 0.9244 | 104 | 11.465 | - | - | - | | 0.9333 | 105 | 11.303 | - | - | - | | 0.9422 | 106 | 11.8521 | - | - | - | | 0.9511 | 107 | 11.6083 | - | - | - | | 0.96 | 108 | 12.3972 | - | - | - | | 0.9689 | 109 | 11.6962 | - | - | - | | 0.9778 | 110 | 11.1335 | - | - | - | | 0.9867 | 111 | 12.1325 | - | - | - | | 0.9956 | 112 | 11.7444 | - | - | - | </details> ### Framework Versions - Python: 3.12.3 - Sentence Transformers: 3.3.1 - Transformers: 4.49.0 - PyTorch: 2.5.1 - Accelerate: 1.2.1 - Datasets: 2.19.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "allenai/specter2_base", "library_name": "sentence-transformers", "metrics": ["cosine_accuracy@1", "cosine_accuracy@3", "cosine_accuracy@5", "cosine_accuracy@10", "cosine_precision@1", "cosine_precision@3", "cosine_precision@5", "cosine_precision@10", "cosine_recall@1", "cosine_recall@3", "cosine_recall@5", "cosine_recall@10", "cosine_ndcg@10", "cosine_mrr@10", "cosine_map@100"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:57566", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Cannabis evolution", "sentences": ["The cannabis conundrum. ", "Dawn and decline of the holy smoke. ", "[Computer-assisted system for interstitial hyperthermia]. "]}, {"source_sentence": "Lateral Ventricle AT/RT", "sentences": ["Improved Assessment of Pathological Regurgitation in Patients with Prosthetic Heart Valves by Multiplane Transesophageal Echocardiography. ", "[Surgical anatomy of the lateral ventricles]. ", "Lateral Ventricle Atypical Teratoid/Rhabdoid Tumor (AT/RT): Case Report and Review of Literature. "]}, {"source_sentence": "Parkinsonian motor fluctuations", "sentences": ["Basic mechanisms of motor fluctuations. ", "Nonmotor Fluctuations in Parkinson's Disease. ", "Sodium conductance in calcium channels of single smooth muscle cells of guinea-pig taenia caeci. "]}, {"source_sentence": "Phagocytic Assay", "sentences": ["Assay for phagocytosis. ", "Opsonophagocytic assay. ", "Clinical evaluation of synthetic aperture sequential beamforming ultrasound in patients with liver tumors. "]}, {"source_sentence": "Content validity assessment", "sentences": ["Content validity is naught. ", "Male requires a higher median target effect-site concentration of propofol for I-gel placement when combined with dexmedetomidine. ", "Establishing content-validity of a disease-specific health-related quality of life instrument for patients with chronic hypersensitivity pneumonitis. "]}], "model-index": [{"name": "SentenceTransformer based on allenai/specter2_base", "results": [{"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "NanoNQ", "type": "NanoNQ"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.04, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.2, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.22, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.3, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.04, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.06666666666666667, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.044000000000000004, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.03, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.03, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.18, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.2, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.27, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.15735897323110787, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.13194444444444445, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.13092350353731416, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "NanoMSMARCO", "type": "NanoMSMARCO"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.2, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.36, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.42, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.52, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.2, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.12, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.084, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.052000000000000005, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.2, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.36, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.42, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.52, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.35375176104312445, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.30138095238095236, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.31610409814616347, "name": "Cosine Map@100"}]}, {"task": {"type": "nano-beir", "name": "Nano BEIR"}, "dataset": {"name": "NanoBEIR mean", "type": "NanoBEIR_mean"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.12000000000000001, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.28, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.32, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.41000000000000003, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.12000000000000001, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.09333333333333332, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.064, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.041, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.115, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.27, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.31, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.395, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.25555536713711613, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.21666269841269842, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.22351380084173883, "name": "Cosine Map@100"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,449
Keiser41/Example_Based_Manga_Colorization
Keiser41
null
[ "arxiv:2106.08017", "arxiv:1611.07004", "region:us" ]
2023-10-28T11:27:15Z
2023-10-28T19:04:01+00:00
0
9
--- {} --- # Reference-Image-Embed-Manga-Colorization An amazing manga colorization project You can colorize gray manga or character sketches using any reference image you want, this model will faithfully retain the color features and transfer them to your manga. This is useful when you wish the color of the character's hair or clothes to be consistent. If the project is helpful, please leave a ⭐ this repo. best luck, my friend 😊 <br> ## Overview <p align="left"> <img src="./assets/network.png"> </p> It's basically a cGAN(Conditional Generative Adversarial Network) architecture. ### Generator Generator is divided into two parts. `Color Embedding Layer` consists of part of pretrained VGG19 net and an MLP(Multilayer Perceptron), which is used to extract `color embedding` from reference image(for training, its preprocessed Ground Truth Image). Another part is a U-net-like network. The encoder layer extracts `content embedding` from gray input image(only contains L-channel information), and the decoder layer reconstructs the image with `color embedding` through PFFB(Progressive Feature Formalization Block) and outputs the ab_channel information. <p align="left"> <img src="./assets/PFFB.png"> </p> The figure shows how PFFB works. It generates a filter by applying color embedding, and then convolving with content features. The figure is from this [paper](https://arxiv.org/abs/2106.08017) and check it for more details. ### Discriminator Discriminator is a PatchGAN, referring to [pix2pix](https://arxiv.org/abs/1611.07004v3). The difference is that there are two conditions used for input. One is the gray image waiting for colorization, and one is the reference image providing color information. ### Loss There are three losses in total, `L1 loss`, `perceptual loss` produced by pretrained vgg19, and `adversarial loss` produced by discriminator. The ratio is `1: 0.1: 0.01`. ### Pipeline <p align="left"> <img src="./assets/Pipeline.png"> </p> - a. Segment panels from input manga image, `Manga-Panel-Extractor` is from [here](https://github.com/pvnieo/Manga-Panel-Extractor). - b. Select a reference image for each panel, and generator will colorize each panel. - c. Concatenate all colorized panels into original format. ## Results ### Gray model | Original | Reference | Colorization | |:----------:|:-----------:|:----------:| | <img src="test_datasets/gray_test/001_in.png" width="400"> | <img src="test_datasets/gray_test/001_ref_a.png" width="200"> | <img src="test_datasets/gray_test/out/001_in_color_a.png" width="400"> | | <img src="test_datasets/gray_test/001_in.png" width="400"> | <img src="test_datasets/gray_test/001_ref_b.png" width="200"> | <img src="test_datasets/gray_test/out/001_in_color_b.png" width="400"> | | <img src="test_datasets/gray_test/002_in.jpeg" width="400"> | <img src="test_datasets/gray_test/002_in_ref_a.jpg" width="200"> | <img src="test_datasets/gray_test/out/002_in_color_a.png" width="400"> | | <img src="test_datasets/gray_test/002_in.jpeg" width="400"> | <img src="test_datasets/gray_test/002_in_ref_b.jpeg" width="200"> | <img src="test_datasets/gray_test/out/002_in_color_b.png" width="400"> | | <img src="test_datasets/gray_test/003_in.jpeg" width="400"> | <img src="test_datasets/gray_test/003_in_ref_a.jpg" width="200"> | <img src="test_datasets/gray_test/out/003_in_color_a.png" width="400"> | | <img src="test_datasets/gray_test/003_in.jpeg" width="400"> | <img src="test_datasets/gray_test/003_in_ref_b.jpg" width="200"> | <img src="test_datasets/gray_test/out/003_in_color_b.png" width="400"> | | <img src="test_datasets/gray_test/004_in.png" width="400"> |<img src="test_datasets/gray_test/004_ref_1.jpg" width="100"><img src="test_datasets/gray_test/004_ref_2.jpg" width="100">| <img src="test_datasets/gray_test/out/004_in_color.png" width="400">| | <img src="test_datasets/gray_test/005_in.png" width="400"> | <img src="test_datasets/gray_test/005_ref_1.jpeg" width="100"><img src="test_datasets/gray_test/005_ref_2.jpg" width="100"><img src="test_datasets/gray_test/005_ref_3.jpeg" width="100"> | <img src="test_datasets/gray_test/out/005_in_color.png" width="400"> | | <img src="test_datasets/gray_test/006_in.png" width="400"> | <img src="test_datasets/gray_test/006_ref.png" width="200"> | <img src="test_datasets/gray_test/out/006_in_color.png" width="400"> | ### sketch model | Original | Reference | Colorization | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | | <img src="test_datasets/sketch_test/001_in.jpg" width="400"> | <img src="test_datasets/sketch_test/001_ref_a.jpg" width="200"> | <img src="test_datasets/sketch_test/out/001_in_color_a.png" width="400"> | | <img src="test_datasets/sketch_test/001_in.jpg" width="400"> | <img src="test_datasets/sketch_test/001_ref_b.jpg" width="200"> | <img src="test_datasets/sketch_test/out/001_in_color_b.png" width="400"> | ## Dependencies and Installation 1. Clone this GitHub repo. ``` git clone https://github.com/linSensiGit/Example_Based_Manga_Colorization---cGAN.git cd Example_Based_Manga_Colorization---cGAN ``` 2. Create Environment - Python >= 3.6 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux)) - [PyTorch >= 1.5.0](https://pytorch.org/) (Default GPU mode) ``` # My environment for reference - Python = 3.9.15 - PyTorch = 1.13.0 - Torchvision = 0.14.0 - Cuda = 11.7 - GPU = RTX 3060ti ``` 3. Install Dependencies ``` pip3 install -r requirement.txt ``` ## Get Started Once you've set up the environment, several things need to be done before colorization. ### Prepare pretrained models 1. Download generator. I have trained two generators, for [gray manga](https://drive.google.com/file/d/11RQGvBKySEtRcBdYD8O5ZLb54jB7SAgN/view?usp=drive_link) colorization and [sketch](https://drive.google.com/file/d/1I4XwOYIGAoQwMOicknZl0s6AWcwpARmR/view?usp=drive_link) colorization. Choose what you need. 2. Download [VGG model](https://drive.google.com/file/d/1S7t3mD-tznEUrMmq5bRsLZk4fkN24QSV/view?usp=drive_link) , it's part of generator. 3. Download discriminator, for training [gray manga](https://drive.google.com/file/d/1DHHE9um_xOm0brTpbHb_R7K7J4mn37FS/view?usp=drive_link) colorization and [sketch](https://drive.google.com/file/d/1WgIPYY4b4GcpHW9EWFrFoTxL9SlilQbN/view?usp=drive_link) colorization. (optional) 4. Put the pretrained model in the correct directory: ``` Colorful-Manga-GAN |- experiments |- Color2Manga_gray |- xxx000_gray.pt |- Color2Manga_sketch |- xxx000_sketch.pt |- Discriminator |- xxx000_d.pt |- VGG19 |- vgg19-dcbb9e9d.pth ``` ### Quick test I have collected some test datasets which contain manga pages and corresponding reference images. You can check it in the path `./test_datasets`. When you use the file `inference.py` to test, you may need to edit the input file path or pretrained weights path in this file. ``` python inference.py # If you don't want to segment your manga python inference.py -ne ``` Initially, `Manga-Panel-Extractor` will segment the manga page into panels. Then follow the instructions in the console and you will get the colorized image. ## Train your Own Model ### Prepare Datasets There are three datasets I used to train the model. For gray model, [Anime Face Dataset](https://www.kaggle.com/datasets/scribbless/another-anime-face-dataset) and Tagged [Anime Illustrations Dataset](https://www.kaggle.com/datasets/mylesoneill/tagged-anime-illustrations) are used. And I only use `danbooru-images` folder in the second Dataset. For sketch model, [Anime Sketch Colorization Pair Dataset](https://www.kaggle.com/datasets/ktaebum/anime-sketch-colorization-pair) is used. All the datasets are from [Kaggle](https://www.kaggle.com/). Follow instructions are based on my dataset, but feel free to use your own dataset if you like. ### Preprocess training data ``` cd data python prepare_data.py ``` If you are using ` Anime Sketch Colorization Pair` dataset : ``` python prepare_data_sketch.py ``` Several arguments needed to be assigned : ``` usage: prepare_data.py [-h] [--out OUT] [--size SIZE] [--n_worker N_WORKER] [--resample RESAMPLE] path positional arguments: path the path of datasets optional arguments: -h, --help show this help message and exit --out OUT the path to save generated lmdb --size SIZE compressed image size (128, 256, 512, 1024) alternative --n_worker N_WORKER The number of threads, depends on your CPU --resample RESAMPLE ``` For instance, you can run the command like this: ``` python prepare_data.py --out ../train_datasets/Sketch_train_lmdb --n_worker 20 --size 256 E:/Dataset/animefaces256cleaner ``` ### Training There are four scripts in total for training `train.py` —— train only generator `train_disc` —— train only discriminator `train_all_gray.py`—— train both generator and discriminator, under the usual dataset `train_all_sketch.py`—— train both generator and discriminator, under sketch pair dataset specific All of these scripts share similar commands to drive: ``` usage: train_all_gray.py [-h] [--datasets DATASETS] [--iter ITER] [--batch BATCH] [--size SIZE] [--ckpt CKPT] [--ckpt_disc CKPT_DISC] [--lr LR] [--lr_disc LR_DISC] [--experiment_name EXPERIMENT_NAME] [--wandb] [--local_rank LOCAL_RANK] optional arguments: -h, --help show this help message and exit --datasets DATASETS the path of training dataset --iter ITER number of iteration in total --batch BATCH batch size --size SIZE size of image in dataset, usually 256 --ckpt CKPT path of pretrained generator --ckpt_disc CKPT_DISC path of pretrained discriminator --lr LR learning rate of generator --lr_disc LR_DISC learning rate of discriminator --experiment_name EXPERIMENT_NAME used to save training_logs and trained model --wandb --local_rank LOCAL_RANK ``` There may be a slight difference, you could check the code for more details. For instance, you can run the command like this: ``` python train_all_gray.py --batch 8 --experiment_name Color2Manga_sketch --ckpt experiments/Color2Manga_sketch/078000.pt --datasets ./train_datasets/Sketch_train_lmdb --ckpt_disc experiments/Discriminator/078000_d.pt ``` ## Work in Progress - [ ] Add SR model instead of directly interpolate upscaling - [ ] Optimize the generator network(adding L-channel information to output which is essential for colorize sketch) - [ ] Better developed manga-panel-extractor(current segmentation is not precise enough) - [ ] Develop a front UI and add color hint so that users could adjust the color of a specific area ## 😁Contact If you have any questions, please feel free to contact me via `[email protected]` ## 🙌 Acknowledgement Based on https://github.com/zhaohengyuan1/Color2Embed Thx https://github.com/pvnieo/Manga-Panel-Extractor ## Reference [1] Zhao, Hengyuan et al. “Color2Embed: Fast Exemplar-Based Image Colorization using Color Embeddings.” (2021). [2] Isola, Phillip et al. “Image-to-Image Translation with Conditional Adversarial Networks.” *2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)* (2016): 5967-5976. [3] Furusawa, Chie et al. “Comicolorization: semi-automatic manga colorization.” *SIGGRAPH Asia 2017 Technical Briefs* (2017): n. pag. [4] Satoshi Iizuka, Edgar Simo-Serra, and Hiroshi Ishikawa. "Let there be Color!: Joint End-to-end Learning of Global and Local Image Priors for Automatic Image Colorization with Simultaneous Classification". ACM Transaction on Graphics (Proc. of SIGGRAPH), 35(4):110, 2016.
null
Non_BioNLP
# Reference-Image-Embed-Manga-Colorization An amazing manga colorization project You can colorize gray manga or character sketches using any reference image you want, this model will faithfully retain the color features and transfer them to your manga. This is useful when you wish the color of the character's hair or clothes to be consistent. If the project is helpful, please leave a ⭐ this repo. best luck, my friend 😊 <br> ## Overview <p align="left"> <img src="./assets/network.png"> </p> It's basically a cGAN(Conditional Generative Adversarial Network) architecture. ### Generator Generator is divided into two parts. `Color Embedding Layer` consists of part of pretrained VGG19 net and an MLP(Multilayer Perceptron), which is used to extract `color embedding` from reference image(for training, its preprocessed Ground Truth Image). Another part is a U-net-like network. The encoder layer extracts `content embedding` from gray input image(only contains L-channel information), and the decoder layer reconstructs the image with `color embedding` through PFFB(Progressive Feature Formalization Block) and outputs the ab_channel information. <p align="left"> <img src="./assets/PFFB.png"> </p> The figure shows how PFFB works. It generates a filter by applying color embedding, and then convolving with content features. The figure is from this [paper](https://arxiv.org/abs/2106.08017) and check it for more details. ### Discriminator Discriminator is a PatchGAN, referring to [pix2pix](https://arxiv.org/abs/1611.07004v3). The difference is that there are two conditions used for input. One is the gray image waiting for colorization, and one is the reference image providing color information. ### Loss There are three losses in total, `L1 loss`, `perceptual loss` produced by pretrained vgg19, and `adversarial loss` produced by discriminator. The ratio is `1: 0.1: 0.01`. ### Pipeline <p align="left"> <img src="./assets/Pipeline.png"> </p> - a. Segment panels from input manga image, `Manga-Panel-Extractor` is from [here](https://github.com/pvnieo/Manga-Panel-Extractor). - b. Select a reference image for each panel, and generator will colorize each panel. - c. Concatenate all colorized panels into original format. ## Results ### Gray model | Original | Reference | Colorization | |:----------:|:-----------:|:----------:| | <img src="test_datasets/gray_test/001_in.png" width="400"> | <img src="test_datasets/gray_test/001_ref_a.png" width="200"> | <img src="test_datasets/gray_test/out/001_in_color_a.png" width="400"> | | <img src="test_datasets/gray_test/001_in.png" width="400"> | <img src="test_datasets/gray_test/001_ref_b.png" width="200"> | <img src="test_datasets/gray_test/out/001_in_color_b.png" width="400"> | | <img src="test_datasets/gray_test/002_in.jpeg" width="400"> | <img src="test_datasets/gray_test/002_in_ref_a.jpg" width="200"> | <img src="test_datasets/gray_test/out/002_in_color_a.png" width="400"> | | <img src="test_datasets/gray_test/002_in.jpeg" width="400"> | <img src="test_datasets/gray_test/002_in_ref_b.jpeg" width="200"> | <img src="test_datasets/gray_test/out/002_in_color_b.png" width="400"> | | <img src="test_datasets/gray_test/003_in.jpeg" width="400"> | <img src="test_datasets/gray_test/003_in_ref_a.jpg" width="200"> | <img src="test_datasets/gray_test/out/003_in_color_a.png" width="400"> | | <img src="test_datasets/gray_test/003_in.jpeg" width="400"> | <img src="test_datasets/gray_test/003_in_ref_b.jpg" width="200"> | <img src="test_datasets/gray_test/out/003_in_color_b.png" width="400"> | | <img src="test_datasets/gray_test/004_in.png" width="400"> |<img src="test_datasets/gray_test/004_ref_1.jpg" width="100"><img src="test_datasets/gray_test/004_ref_2.jpg" width="100">| <img src="test_datasets/gray_test/out/004_in_color.png" width="400">| | <img src="test_datasets/gray_test/005_in.png" width="400"> | <img src="test_datasets/gray_test/005_ref_1.jpeg" width="100"><img src="test_datasets/gray_test/005_ref_2.jpg" width="100"><img src="test_datasets/gray_test/005_ref_3.jpeg" width="100"> | <img src="test_datasets/gray_test/out/005_in_color.png" width="400"> | | <img src="test_datasets/gray_test/006_in.png" width="400"> | <img src="test_datasets/gray_test/006_ref.png" width="200"> | <img src="test_datasets/gray_test/out/006_in_color.png" width="400"> | ### sketch model | Original | Reference | Colorization | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | | <img src="test_datasets/sketch_test/001_in.jpg" width="400"> | <img src="test_datasets/sketch_test/001_ref_a.jpg" width="200"> | <img src="test_datasets/sketch_test/out/001_in_color_a.png" width="400"> | | <img src="test_datasets/sketch_test/001_in.jpg" width="400"> | <img src="test_datasets/sketch_test/001_ref_b.jpg" width="200"> | <img src="test_datasets/sketch_test/out/001_in_color_b.png" width="400"> | ## Dependencies and Installation 1. Clone this GitHub repo. ``` git clone https://github.com/linSensiGit/Example_Based_Manga_Colorization---cGAN.git cd Example_Based_Manga_Colorization---cGAN ``` 2. Create Environment - Python >= 3.6 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux)) - [PyTorch >= 1.5.0](https://pytorch.org/) (Default GPU mode) ``` # My environment for reference - Python = 3.9.15 - PyTorch = 1.13.0 - Torchvision = 0.14.0 - Cuda = 11.7 - GPU = RTX 3060ti ``` 3. Install Dependencies ``` pip3 install -r requirement.txt ``` ## Get Started Once you've set up the environment, several things need to be done before colorization. ### Prepare pretrained models 1. Download generator. I have trained two generators, for [gray manga](https://drive.google.com/file/d/11RQGvBKySEtRcBdYD8O5ZLb54jB7SAgN/view?usp=drive_link) colorization and [sketch](https://drive.google.com/file/d/1I4XwOYIGAoQwMOicknZl0s6AWcwpARmR/view?usp=drive_link) colorization. Choose what you need. 2. Download [VGG model](https://drive.google.com/file/d/1S7t3mD-tznEUrMmq5bRsLZk4fkN24QSV/view?usp=drive_link) , it's part of generator. 3. Download discriminator, for training [gray manga](https://drive.google.com/file/d/1DHHE9um_xOm0brTpbHb_R7K7J4mn37FS/view?usp=drive_link) colorization and [sketch](https://drive.google.com/file/d/1WgIPYY4b4GcpHW9EWFrFoTxL9SlilQbN/view?usp=drive_link) colorization. (optional) 4. Put the pretrained model in the correct directory: ``` Colorful-Manga-GAN |- experiments |- Color2Manga_gray |- xxx000_gray.pt |- Color2Manga_sketch |- xxx000_sketch.pt |- Discriminator |- xxx000_d.pt |- VGG19 |- vgg19-dcbb9e9d.pth ``` ### Quick test I have collected some test datasets which contain manga pages and corresponding reference images. You can check it in the path `./test_datasets`. When you use the file `inference.py` to test, you may need to edit the input file path or pretrained weights path in this file. ``` python inference.py # If you don't want to segment your manga python inference.py -ne ``` Initially, `Manga-Panel-Extractor` will segment the manga page into panels. Then follow the instructions in the console and you will get the colorized image. ## Train your Own Model ### Prepare Datasets There are three datasets I used to train the model. For gray model, [Anime Face Dataset](https://www.kaggle.com/datasets/scribbless/another-anime-face-dataset) and Tagged [Anime Illustrations Dataset](https://www.kaggle.com/datasets/mylesoneill/tagged-anime-illustrations) are used. And I only use `danbooru-images` folder in the second Dataset. For sketch model, [Anime Sketch Colorization Pair Dataset](https://www.kaggle.com/datasets/ktaebum/anime-sketch-colorization-pair) is used. All the datasets are from [Kaggle](https://www.kaggle.com/). Follow instructions are based on my dataset, but feel free to use your own dataset if you like. ### Preprocess training data ``` cd data python prepare_data.py ``` If you are using ` Anime Sketch Colorization Pair` dataset : ``` python prepare_data_sketch.py ``` Several arguments needed to be assigned : ``` usage: prepare_data.py [-h] [--out OUT] [--size SIZE] [--n_worker N_WORKER] [--resample RESAMPLE] path positional arguments: path the path of datasets optional arguments: -h, --help show this help message and exit --out OUT the path to save generated lmdb --size SIZE compressed image size (128, 256, 512, 1024) alternative --n_worker N_WORKER The number of threads, depends on your CPU --resample RESAMPLE ``` For instance, you can run the command like this: ``` python prepare_data.py --out ../train_datasets/Sketch_train_lmdb --n_worker 20 --size 256 E:/Dataset/animefaces256cleaner ``` ### Training There are four scripts in total for training `train.py` —— train only generator `train_disc` —— train only discriminator `train_all_gray.py`—— train both generator and discriminator, under the usual dataset `train_all_sketch.py`—— train both generator and discriminator, under sketch pair dataset specific All of these scripts share similar commands to drive: ``` usage: train_all_gray.py [-h] [--datasets DATASETS] [--iter ITER] [--batch BATCH] [--size SIZE] [--ckpt CKPT] [--ckpt_disc CKPT_DISC] [--lr LR] [--lr_disc LR_DISC] [--experiment_name EXPERIMENT_NAME] [--wandb] [--local_rank LOCAL_RANK] optional arguments: -h, --help show this help message and exit --datasets DATASETS the path of training dataset --iter ITER number of iteration in total --batch BATCH batch size --size SIZE size of image in dataset, usually 256 --ckpt CKPT path of pretrained generator --ckpt_disc CKPT_DISC path of pretrained discriminator --lr LR learning rate of generator --lr_disc LR_DISC learning rate of discriminator --experiment_name EXPERIMENT_NAME used to save training_logs and trained model --wandb --local_rank LOCAL_RANK ``` There may be a slight difference, you could check the code for more details. For instance, you can run the command like this: ``` python train_all_gray.py --batch 8 --experiment_name Color2Manga_sketch --ckpt experiments/Color2Manga_sketch/078000.pt --datasets ./train_datasets/Sketch_train_lmdb --ckpt_disc experiments/Discriminator/078000_d.pt ``` ## Work in Progress - [ ] Add SR model instead of directly interpolate upscaling - [ ] Optimize the generator network(adding L-channel information to output which is essential for colorize sketch) - [ ] Better developed manga-panel-extractor(current segmentation is not precise enough) - [ ] Develop a front UI and add color hint so that users could adjust the color of a specific area ## 😁Contact If you have any questions, please feel free to contact me via `[email protected]` ## 🙌 Acknowledgement Based on https://github.com/zhaohengyuan1/Color2Embed Thx https://github.com/pvnieo/Manga-Panel-Extractor ## Reference [1] Zhao, Hengyuan et al. “Color2Embed: Fast Exemplar-Based Image Colorization using Color Embeddings.” (2021). [2] Isola, Phillip et al. “Image-to-Image Translation with Conditional Adversarial Networks.” *2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)* (2016): 5967-5976. [3] Furusawa, Chie et al. “Comicolorization: semi-automatic manga colorization.” *SIGGRAPH Asia 2017 Technical Briefs* (2017): n. pag. [4] Satoshi Iizuka, Edgar Simo-Serra, and Hiroshi Ishikawa. "Let there be Color!: Joint End-to-end Learning of Global and Local Image Priors for Automatic Image Colorization with Simultaneous Classification". ACM Transaction on Graphics (Proc. of SIGGRAPH), 35(4):110, 2016.
{}
task
[ "TRANSLATION" ]
41,450
RichardErkhov/nicholasKluge_-_TeenyTinyLlama-160m-4bits
RichardErkhov
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:2401.16640", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
2024-07-20T12:57:25Z
2024-07-20T12:57:48+00:00
76
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) TeenyTinyLlama-160m - bnb 4bits - Model creator: https://huggingface.co/nicholasKluge/ - Original model: https://huggingface.co/nicholasKluge/TeenyTinyLlama-160m/ Original model description: --- language: - pt license: apache-2.0 library_name: transformers tags: - text-generation-inference datasets: - nicholasKluge/Pt-Corpus-Instruct metrics: - perplexity pipeline_tag: text-generation widget: - text: 'A PUCRS é uma universidade ' example_title: Exemplo - text: A muitos anos atrás, em uma galáxia muito distante, vivia uma raça de example_title: Exemplo - text: Em meio a um escândalo, a frente parlamentar pediu ao Senador Silva para example_title: Exemplo inference: parameters: repetition_penalty: 1.2 temperature: 0.2 top_k: 20 top_p: 0.2 max_new_tokens: 150 co2_eq_emissions: emissions: 5600 source: CodeCarbon training_type: pre-training geographical_location: Germany hardware_used: NVIDIA A100-SXM4-40GB model-index: - name: TeenyTinyLlama-160m results: - task: type: text-generation name: Text Generation dataset: name: ENEM Challenge (No Images) type: eduagarcia/enem_challenge split: train args: num_few_shot: 3 metrics: - type: acc value: 19.24 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: BLUEX (No Images) type: eduagarcia-temp/BLUEX_without_images split: train args: num_few_shot: 3 metrics: - type: acc value: 23.09 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: OAB Exams type: eduagarcia/oab_exams split: train args: num_few_shot: 3 metrics: - type: acc value: 22.37 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Assin2 RTE type: assin2 split: test args: num_few_shot: 15 metrics: - type: f1_macro value: 53.97 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Assin2 STS type: eduagarcia/portuguese_benchmark split: test args: num_few_shot: 15 metrics: - type: pearson value: 0.24 name: pearson source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: FaQuAD NLI type: ruanchaves/faquad-nli split: test args: num_few_shot: 15 metrics: - type: f1_macro value: 43.97 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HateBR Binary type: ruanchaves/hatebr split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 36.92 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: PT Hate Speech Binary type: hate_speech_portuguese split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 42.63 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: tweetSentBR type: eduagarcia-temp/tweetsentbr split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 11.39 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard --- # TeenyTinyLlama-160m <img src="./logo.png" alt="A little llama wearing a mushroom hat and a monocle." height="200"> ## Model Summary Large language models (LLMs) have significantly advanced natural language processing, but their progress has yet to be equal across languages. While most LLMs are trained in high-resource languages like English, multilingual models generally underperform monolingual ones. Additionally, aspects of their multilingual foundation sometimes restrict the byproducts they produce, like computational demands and licensing regimes. Hence, we developed the _TeenyTinyLlama_ pair: two compact models for Brazilian Portuguese text generation. Read our preprint on [Article](https://www.sciencedirect.com/science/article/pii/S2666827024000343). ## Details - **Architecture:** a Transformer-based model pre-trained via causal language modeling - **Size:** 162,417,408 parameters - **Context length:** 2048 tokens - **Dataset:** [Pt-Corpus Instruct](https://huggingface.co/datasets/nicholasKluge/Pt-Corpus-Instruct) (6.2B tokens) - **Language:** Portuguese - **Number of steps:** 458,000 - **GPU:** 1 NVIDIA A100-SXM4-40GB - **Training time**: ~ 36 hours - **Emissions:** 5.6 KgCO2 (Germany) - **Total energy consumption:** 15.5 kWh This repository has the [source code](https://github.com/Nkluge-correa/TeenyTinyLlama) used to train this model. The main libraries used are: - [Transformers](https://github.com/huggingface/transformers) - [PyTorch](https://github.com/pytorch/pytorch) - [Datasets](https://github.com/huggingface/datasets) - [Tokenizers](https://github.com/huggingface/tokenizers) - [Sentencepiece](https://github.com/google/sentencepiece) - [Accelerate](https://github.com/huggingface/accelerate) - [FlashAttention](https://github.com/Dao-AILab/flash-attention) - [Codecarbon](https://github.com/mlco2/codecarbon) ## Intended Uses The primary intended use of TeenyTinyLlama is to research the challenges related to developing language models for low-resource languages. Checkpoints saved during training are intended to provide a controlled setting for performing scientific experiments. You may also further fine-tune and adapt TeenyTinyLlama for deployment, as long as your use is following the Apache 2.0 license. If you decide to use pre-trained TeenyTinyLlama as a basis for your fine-tuned model, please conduct your own risk and bias assessment. ## Out-of-scope Use TeenyTinyLlama is not intended for deployment. It is not a product and should not be used for human-facing interactions. TeenyTinyLlama models are Brazilian Portuguese language only and are not suitable for translation or generating text in other languages. TeenyTinyLlama has not been fine-tuned for downstream contexts in which language models are commonly deployed. ## Basic usage Using the `pipeline`: ```python from transformers import pipeline generator = pipeline("text-generation", model="nicholasKluge/TeenyTinyLlama-160m") completions = generator("Astronomia é a ciência", num_return_sequences=2, max_new_tokens=100) for comp in completions: print(f"🤖 {comp['generated_text']}") ``` Using the `AutoTokenizer` and `AutoModelForCausalLM`: ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch # Load model and the tokenizer tokenizer = AutoTokenizer.from_pretrained("nicholasKluge/TeenyTinyLlama-160m", revision='main') model = AutoModelForCausalLM.from_pretrained("nicholasKluge/TeenyTinyLlama-160m", revision='main') # Pass the model to your device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.eval() model.to(device) # Tokenize the inputs and pass them to the device inputs = tokenizer("Astronomia é a ciência", return_tensors="pt").to(device) # Generate some text completions = model.generate(**inputs, num_return_sequences=2, max_new_tokens=100) # Print the generated text for i, completion in enumerate(completions): print(f'🤖 {tokenizer.decode(completion)}') ``` ## Limitations Like almost all other language models trained on large text datasets scraped from the web, the TTL pair exhibited behavior that does not make them an out-of-the-box solution to many real-world applications, especially those requiring factual, reliable, nontoxic text generation. Our models are all subject to the following: - **Hallucinations:** This model can produce content that can be mistaken for truth but is, in fact, misleading or entirely false, i.e., hallucination. - **Biases and Toxicity:** This model inherits the social and historical stereotypes from the data used to train it. Given these biases, the model can produce toxic content, i.e., harmful, offensive, or detrimental to individuals, groups, or communities. - **Unreliable Code:** The model may produce incorrect code snippets and statements. These code generations should not be treated as suggestions or accurate solutions. - **Language Limitations:** The model is primarily designed to understand standard Brazilian Portuguese. Other languages might challenge its comprehension, leading to potential misinterpretations or errors in response. - **Repetition and Verbosity:** The model may get stuck on repetition loops (especially if the repetition penalty during generations is set to a meager value) or produce verbose responses unrelated to the prompt it was given. Hence, even though our models are released with a permissive license, we urge users to perform their risk analysis on these models if intending to use them for real-world applications and also have humans moderating the outputs of these models in applications where they will interact with an audience, guaranteeing users are always aware they are interacting with a language model. ## Evaluations During our training runs, both models showed consistent convergence. At no point did our evaluation curves show signs of overfitting or saturation. In the case of our 460m parameter model, we intentionally trained past the optimal point by approximately 75,000 steps to assess if there were any signs of saturation, but our evaluations consistently gave better results. We hypothesize that our models are under-trained but can improve if further trained to pass the Chinchilla optimal range. | Processed Tokens | Perplexity | Energy Consumption (kWh) | Emissions (KgCO2eq) | |------------------|------------|---------------------------|----------------------| | 8.1M | 20.49 | 9.40 | 3.34 | | 1.6B | 16.90 | 18.82 | 6.70 | | 2.4B | 15.43 | 28.59 | 10.16 | | 3.2B | 14.64 | 38.20 | 13.57 | | 4.0B | 14.08 | 48.04 | 17.07 | | 4.9B | 13.61 | 57.74 | 20.52 | | 5.7B | 13.25 | 67.32 | 23.92 | | 6.5B | 12.87 | 76.84 | 27.30 | | 7.3B | 12.57 | 86.40 | 30.70 | | 8.1B | 12.27 | 96.19 | 34.18 | | 9.0B | 11.96 | 106.06 | 37.70 | | 9.8B | 11.77 | 115.69 | 41.31 | ## Benchmarks Evaluations on benchmarks were performed using the [Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) (by [EleutherAI](https://www.eleuther.ai/)). [Laiviet](https://github.com/laiviet/lm-evaluation-harness) translated the tasks from the LM-Evaluation-Harness we used. The results of models marked with an "*" were extracted from the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). | | **ARC** | **HellaSwag** | **MMLU** | **TruthfulQA** | **Average** | |------------------|-----------|---------------|-----------|----------------|-------------| | Pythia-410m | 24.83* | 41.29* | 25.99* | 40.95* | 33.26 | | **TTL-460m** | 29.40 | 33.00 | 28.55 | 41.10 | 33.01 | | Bloom-560m | 24.74* | 37.15* | 24.22* | 42.44* | 32.13 | | Xglm-564M | 25.56 | 34.64* | 25.18* | 42.53 | 31.97 | | OPT-350m | 23.55* | 36.73* | 26.02* | 40.83* | 31.78 | | **TTL-160m** | 26.15 | 29.29 | 28.11 | 41.12 | 31.16 | | Pythia-160m | 24.06* | 31.39* | 24.86* | 44.34* | 31.16 | | OPT-125m | 22.87* | 31.47* | 26.02* | 42.87* | 30.80 | | GPorTuguese-2 | 22.48 | 29.62 | 27.36 | 41.44 | 30.22 | | Gpt2-small | 21.48* | 31.60* | 25.79* | 40.65* | 29.97 | | Multilingual GPT | 23.81 | 26.37* | 25.17* | 39.62 | 28.73 | Evaluations on Brazilian Portuguese benchmarks were performed using a [Portuguese implementation of the EleutherAI LM Evaluation Harness](https://github.com/eduagarcia/lm-evaluation-harness-pt) (created by [Eduardo Garcia](https://github.com/eduagarcia/lm-evaluation-harness-pt)). | | **ASSIN2 RTE** | **ASSIN2 STS** | **BLUEX** | **ENEM** | **FAQUAD NLI** | **HateBR** | **OAB Exams** | **Average** | |----------------|----------------|----------------|-----------|----------|----------------|------------|---------------|-------------| | Qwen-1.8B | 64.83 | 19.53 | 26.15 | 30.23 | 43.97 | 33.33 | 27.20 | 35.03 | | TinyLlama-1.1B | 58.93 | 13.57 | 22.81 | 22.25 | 43.97 | 36.92 | 23.64 | 31.72 | | **TTL-460m** | 53.93 | 12.66 | 22.81 | 19.87 | 49.01 | 33.59 | 27.06 | 31.27 | | XGLM-564m | 49.61 | 22.91 | 19.61 | 19.38 | 43.97 | 33.99 | 23.42 | 30.41 | | Bloom-1b7 | 53.60 | 4.81 | 21.42 | 18.96 | 43.97 | 34.89 | 23.05 | 28.67 | | **TTL-160m** | 53.36 | 2.58 | 21.84 | 18.75 | 43.97 | 36.88 | 22.60 | 28.56 | | OPT-125m | 39.77 | 2.00 | 21.84 | 17.42 | 43.97 | 47.04 | 22.78 | 27.83 | | Pythia-160 | 33.33 | 12.81 | 16.13 | 16.66 | 50.36 | 41.09 | 22.82 | 27.60 | | OLMo-1b | 34.12 | 9.28 | 18.92 | 20.29 | 43.97 | 41.33 | 22.96 | 27.26 | | Bloom-560m | 33.33 | 8.48 | 18.92 | 19.03 | 43.97 | 37.07 | 23.05 | 26.26 | | Pythia-410m | 33.33 | 4.80 | 19.47 | 19.45 | 43.97 | 33.33 | 23.01 | 25.33 | | OPT-350m | 33.33 | 3.65 | 20.72 | 17.35 | 44.71 | 33.33 | 23.01 | 25.15 | | GPT-2 small | 33.26 | 0.00 | 10.43 | 11.20 | 43.52 | 33.68 | 13.12 | 20.74 | | GPorTuguese | 33.33 | 3.85 | 14.74 | 3.01 | 28.81 | 33.33 | 21.23 | 19.75 | | Samba-1.1B | 33.33 | 1.30 | 8.07 | 10.22 | 17.72 | 35.79 | 15.03 | 17.35 | ## Fine-Tuning Comparisons To further evaluate the downstream capabilities of our models, we decided to employ a basic fine-tuning procedure for our TTL pair on a subset of tasks from the Poeta benchmark. We apply the same procedure for comparison purposes on both [BERTimbau](https://huggingface.co/neuralmind/bert-base-portuguese-cased) models, given that they are also LLM trained from scratch in Brazilian Portuguese and have a similar size range to our models. We used these comparisons to assess if our pre-training runs produced LLM capable of producing good results ("good" here means "close to BERTimbau") when utilized for downstream applications. | Models | IMDB | FaQuAD-NLI | HateBr | Assin2 | AgNews | Average | |-----------------|-----------|------------|-----------|-----------|-----------|---------| | BERTimbau-large | **93.58** | 92.26 | 91.57 | **88.97** | 94.11 | 92.10 | | BERTimbau-small | 92.22 | **93.07** | 91.28 | 87.45 | 94.19 | 91.64 | | **TTL-460m** | 91.64 | 91.18 | **92.28** | 86.43 | **94.42** | 91.19 | | **TTL-160m** | 91.14 | 90.00 | 90.71 | 85.78 | 94.05 | 90.34 | All the shown results are the higher accuracy scores achieved on the respective task test sets after fine-tuning the models on the training sets. All fine-tuning runs used the same hyperparameters, and the code implementation can be found in the [model cards](https://huggingface.co/nicholasKluge/TeenyTinyLlama-460m-HateBR) of our fine-tuned models. ## Cite as 🤗 ```latex @misc{correa24ttllama, title = {TeenyTinyLlama: open-source tiny language models trained in Brazilian Portuguese}, author = {Corr{\^e}a, Nicholas Kluge and Falk, Sophia and Fatimah, Shiza and Sen, Aniket and De Oliveira, Nythamar}, journal={arXiv preprint arXiv:2401.16640}, year={2024} } @misc{correa24ttllama, doi = {10.1016/j.mlwa.2024.100558}, url = {https://www.sciencedirect.com/science/article/pii/S2666827024000343}, title = {TeenyTinyLlama: open-source tiny language models trained in Brazilian Portuguese}, author = {Corr{\^e}a, Nicholas Kluge and Falk, Sophia and Fatimah, Shiza and Sen, Aniket and De Oliveira, Nythamar}, journal={Machine Learning With Applications}, publisher = {Springer}, year={2024} } ``` ## Funding This repository was built as part of the RAIES ([Rede de Inteligência Artificial Ética e Segura](https://www.raies.org/)) initiative, a project supported by FAPERGS - ([Fundação de Amparo à Pesquisa do Estado do Rio Grande do Sul](https://fapergs.rs.gov.br/inicial)), Brazil. ## License TeenyTinyLlama-160m is licensed under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for more details.
null
Non_BioNLP
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) TeenyTinyLlama-160m - bnb 4bits - Model creator: https://huggingface.co/nicholasKluge/ - Original model: https://huggingface.co/nicholasKluge/TeenyTinyLlama-160m/ Original model description: --- language: - pt license: apache-2.0 library_name: transformers tags: - text-generation-inference datasets: - nicholasKluge/Pt-Corpus-Instruct metrics: - perplexity pipeline_tag: text-generation widget: - text: 'A PUCRS é uma universidade ' example_title: Exemplo - text: A muitos anos atrás, em uma galáxia muito distante, vivia uma raça de example_title: Exemplo - text: Em meio a um escândalo, a frente parlamentar pediu ao Senador Silva para example_title: Exemplo inference: parameters: repetition_penalty: 1.2 temperature: 0.2 top_k: 20 top_p: 0.2 max_new_tokens: 150 co2_eq_emissions: emissions: 5600 source: CodeCarbon training_type: pre-training geographical_location: Germany hardware_used: NVIDIA A100-SXM4-40GB model-index: - name: TeenyTinyLlama-160m results: - task: type: text-generation name: Text Generation dataset: name: ENEM Challenge (No Images) type: eduagarcia/enem_challenge split: train args: num_few_shot: 3 metrics: - type: acc value: 19.24 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: BLUEX (No Images) type: eduagarcia-temp/BLUEX_without_images split: train args: num_few_shot: 3 metrics: - type: acc value: 23.09 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: OAB Exams type: eduagarcia/oab_exams split: train args: num_few_shot: 3 metrics: - type: acc value: 22.37 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Assin2 RTE type: assin2 split: test args: num_few_shot: 15 metrics: - type: f1_macro value: 53.97 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Assin2 STS type: eduagarcia/portuguese_benchmark split: test args: num_few_shot: 15 metrics: - type: pearson value: 0.24 name: pearson source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: FaQuAD NLI type: ruanchaves/faquad-nli split: test args: num_few_shot: 15 metrics: - type: f1_macro value: 43.97 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HateBR Binary type: ruanchaves/hatebr split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 36.92 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: PT Hate Speech Binary type: hate_speech_portuguese split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 42.63 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: tweetSentBR type: eduagarcia-temp/tweetsentbr split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 11.39 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicholasKluge/TeenyTinyLlama-160m name: Open Portuguese LLM Leaderboard --- # TeenyTinyLlama-160m <img src="./logo.png" alt="A little llama wearing a mushroom hat and a monocle." height="200"> ## Model Summary Large language models (LLMs) have significantly advanced natural language processing, but their progress has yet to be equal across languages. While most LLMs are trained in high-resource languages like English, multilingual models generally underperform monolingual ones. Additionally, aspects of their multilingual foundation sometimes restrict the byproducts they produce, like computational demands and licensing regimes. Hence, we developed the _TeenyTinyLlama_ pair: two compact models for Brazilian Portuguese text generation. Read our preprint on [Article](https://www.sciencedirect.com/science/article/pii/S2666827024000343). ## Details - **Architecture:** a Transformer-based model pre-trained via causal language modeling - **Size:** 162,417,408 parameters - **Context length:** 2048 tokens - **Dataset:** [Pt-Corpus Instruct](https://huggingface.co/datasets/nicholasKluge/Pt-Corpus-Instruct) (6.2B tokens) - **Language:** Portuguese - **Number of steps:** 458,000 - **GPU:** 1 NVIDIA A100-SXM4-40GB - **Training time**: ~ 36 hours - **Emissions:** 5.6 KgCO2 (Germany) - **Total energy consumption:** 15.5 kWh This repository has the [source code](https://github.com/Nkluge-correa/TeenyTinyLlama) used to train this model. The main libraries used are: - [Transformers](https://github.com/huggingface/transformers) - [PyTorch](https://github.com/pytorch/pytorch) - [Datasets](https://github.com/huggingface/datasets) - [Tokenizers](https://github.com/huggingface/tokenizers) - [Sentencepiece](https://github.com/google/sentencepiece) - [Accelerate](https://github.com/huggingface/accelerate) - [FlashAttention](https://github.com/Dao-AILab/flash-attention) - [Codecarbon](https://github.com/mlco2/codecarbon) ## Intended Uses The primary intended use of TeenyTinyLlama is to research the challenges related to developing language models for low-resource languages. Checkpoints saved during training are intended to provide a controlled setting for performing scientific experiments. You may also further fine-tune and adapt TeenyTinyLlama for deployment, as long as your use is following the Apache 2.0 license. If you decide to use pre-trained TeenyTinyLlama as a basis for your fine-tuned model, please conduct your own risk and bias assessment. ## Out-of-scope Use TeenyTinyLlama is not intended for deployment. It is not a product and should not be used for human-facing interactions. TeenyTinyLlama models are Brazilian Portuguese language only and are not suitable for translation or generating text in other languages. TeenyTinyLlama has not been fine-tuned for downstream contexts in which language models are commonly deployed. ## Basic usage Using the `pipeline`: ```python from transformers import pipeline generator = pipeline("text-generation", model="nicholasKluge/TeenyTinyLlama-160m") completions = generator("Astronomia é a ciência", num_return_sequences=2, max_new_tokens=100) for comp in completions: print(f"🤖 {comp['generated_text']}") ``` Using the `AutoTokenizer` and `AutoModelForCausalLM`: ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch # Load model and the tokenizer tokenizer = AutoTokenizer.from_pretrained("nicholasKluge/TeenyTinyLlama-160m", revision='main') model = AutoModelForCausalLM.from_pretrained("nicholasKluge/TeenyTinyLlama-160m", revision='main') # Pass the model to your device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.eval() model.to(device) # Tokenize the inputs and pass them to the device inputs = tokenizer("Astronomia é a ciência", return_tensors="pt").to(device) # Generate some text completions = model.generate(**inputs, num_return_sequences=2, max_new_tokens=100) # Print the generated text for i, completion in enumerate(completions): print(f'🤖 {tokenizer.decode(completion)}') ``` ## Limitations Like almost all other language models trained on large text datasets scraped from the web, the TTL pair exhibited behavior that does not make them an out-of-the-box solution to many real-world applications, especially those requiring factual, reliable, nontoxic text generation. Our models are all subject to the following: - **Hallucinations:** This model can produce content that can be mistaken for truth but is, in fact, misleading or entirely false, i.e., hallucination. - **Biases and Toxicity:** This model inherits the social and historical stereotypes from the data used to train it. Given these biases, the model can produce toxic content, i.e., harmful, offensive, or detrimental to individuals, groups, or communities. - **Unreliable Code:** The model may produce incorrect code snippets and statements. These code generations should not be treated as suggestions or accurate solutions. - **Language Limitations:** The model is primarily designed to understand standard Brazilian Portuguese. Other languages might challenge its comprehension, leading to potential misinterpretations or errors in response. - **Repetition and Verbosity:** The model may get stuck on repetition loops (especially if the repetition penalty during generations is set to a meager value) or produce verbose responses unrelated to the prompt it was given. Hence, even though our models are released with a permissive license, we urge users to perform their risk analysis on these models if intending to use them for real-world applications and also have humans moderating the outputs of these models in applications where they will interact with an audience, guaranteeing users are always aware they are interacting with a language model. ## Evaluations During our training runs, both models showed consistent convergence. At no point did our evaluation curves show signs of overfitting or saturation. In the case of our 460m parameter model, we intentionally trained past the optimal point by approximately 75,000 steps to assess if there were any signs of saturation, but our evaluations consistently gave better results. We hypothesize that our models are under-trained but can improve if further trained to pass the Chinchilla optimal range. | Processed Tokens | Perplexity | Energy Consumption (kWh) | Emissions (KgCO2eq) | |------------------|------------|---------------------------|----------------------| | 8.1M | 20.49 | 9.40 | 3.34 | | 1.6B | 16.90 | 18.82 | 6.70 | | 2.4B | 15.43 | 28.59 | 10.16 | | 3.2B | 14.64 | 38.20 | 13.57 | | 4.0B | 14.08 | 48.04 | 17.07 | | 4.9B | 13.61 | 57.74 | 20.52 | | 5.7B | 13.25 | 67.32 | 23.92 | | 6.5B | 12.87 | 76.84 | 27.30 | | 7.3B | 12.57 | 86.40 | 30.70 | | 8.1B | 12.27 | 96.19 | 34.18 | | 9.0B | 11.96 | 106.06 | 37.70 | | 9.8B | 11.77 | 115.69 | 41.31 | ## Benchmarks Evaluations on benchmarks were performed using the [Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) (by [EleutherAI](https://www.eleuther.ai/)). [Laiviet](https://github.com/laiviet/lm-evaluation-harness) translated the tasks from the LM-Evaluation-Harness we used. The results of models marked with an "*" were extracted from the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). | | **ARC** | **HellaSwag** | **MMLU** | **TruthfulQA** | **Average** | |------------------|-----------|---------------|-----------|----------------|-------------| | Pythia-410m | 24.83* | 41.29* | 25.99* | 40.95* | 33.26 | | **TTL-460m** | 29.40 | 33.00 | 28.55 | 41.10 | 33.01 | | Bloom-560m | 24.74* | 37.15* | 24.22* | 42.44* | 32.13 | | Xglm-564M | 25.56 | 34.64* | 25.18* | 42.53 | 31.97 | | OPT-350m | 23.55* | 36.73* | 26.02* | 40.83* | 31.78 | | **TTL-160m** | 26.15 | 29.29 | 28.11 | 41.12 | 31.16 | | Pythia-160m | 24.06* | 31.39* | 24.86* | 44.34* | 31.16 | | OPT-125m | 22.87* | 31.47* | 26.02* | 42.87* | 30.80 | | GPorTuguese-2 | 22.48 | 29.62 | 27.36 | 41.44 | 30.22 | | Gpt2-small | 21.48* | 31.60* | 25.79* | 40.65* | 29.97 | | Multilingual GPT | 23.81 | 26.37* | 25.17* | 39.62 | 28.73 | Evaluations on Brazilian Portuguese benchmarks were performed using a [Portuguese implementation of the EleutherAI LM Evaluation Harness](https://github.com/eduagarcia/lm-evaluation-harness-pt) (created by [Eduardo Garcia](https://github.com/eduagarcia/lm-evaluation-harness-pt)). | | **ASSIN2 RTE** | **ASSIN2 STS** | **BLUEX** | **ENEM** | **FAQUAD NLI** | **HateBR** | **OAB Exams** | **Average** | |----------------|----------------|----------------|-----------|----------|----------------|------------|---------------|-------------| | Qwen-1.8B | 64.83 | 19.53 | 26.15 | 30.23 | 43.97 | 33.33 | 27.20 | 35.03 | | TinyLlama-1.1B | 58.93 | 13.57 | 22.81 | 22.25 | 43.97 | 36.92 | 23.64 | 31.72 | | **TTL-460m** | 53.93 | 12.66 | 22.81 | 19.87 | 49.01 | 33.59 | 27.06 | 31.27 | | XGLM-564m | 49.61 | 22.91 | 19.61 | 19.38 | 43.97 | 33.99 | 23.42 | 30.41 | | Bloom-1b7 | 53.60 | 4.81 | 21.42 | 18.96 | 43.97 | 34.89 | 23.05 | 28.67 | | **TTL-160m** | 53.36 | 2.58 | 21.84 | 18.75 | 43.97 | 36.88 | 22.60 | 28.56 | | OPT-125m | 39.77 | 2.00 | 21.84 | 17.42 | 43.97 | 47.04 | 22.78 | 27.83 | | Pythia-160 | 33.33 | 12.81 | 16.13 | 16.66 | 50.36 | 41.09 | 22.82 | 27.60 | | OLMo-1b | 34.12 | 9.28 | 18.92 | 20.29 | 43.97 | 41.33 | 22.96 | 27.26 | | Bloom-560m | 33.33 | 8.48 | 18.92 | 19.03 | 43.97 | 37.07 | 23.05 | 26.26 | | Pythia-410m | 33.33 | 4.80 | 19.47 | 19.45 | 43.97 | 33.33 | 23.01 | 25.33 | | OPT-350m | 33.33 | 3.65 | 20.72 | 17.35 | 44.71 | 33.33 | 23.01 | 25.15 | | GPT-2 small | 33.26 | 0.00 | 10.43 | 11.20 | 43.52 | 33.68 | 13.12 | 20.74 | | GPorTuguese | 33.33 | 3.85 | 14.74 | 3.01 | 28.81 | 33.33 | 21.23 | 19.75 | | Samba-1.1B | 33.33 | 1.30 | 8.07 | 10.22 | 17.72 | 35.79 | 15.03 | 17.35 | ## Fine-Tuning Comparisons To further evaluate the downstream capabilities of our models, we decided to employ a basic fine-tuning procedure for our TTL pair on a subset of tasks from the Poeta benchmark. We apply the same procedure for comparison purposes on both [BERTimbau](https://huggingface.co/neuralmind/bert-base-portuguese-cased) models, given that they are also LLM trained from scratch in Brazilian Portuguese and have a similar size range to our models. We used these comparisons to assess if our pre-training runs produced LLM capable of producing good results ("good" here means "close to BERTimbau") when utilized for downstream applications. | Models | IMDB | FaQuAD-NLI | HateBr | Assin2 | AgNews | Average | |-----------------|-----------|------------|-----------|-----------|-----------|---------| | BERTimbau-large | **93.58** | 92.26 | 91.57 | **88.97** | 94.11 | 92.10 | | BERTimbau-small | 92.22 | **93.07** | 91.28 | 87.45 | 94.19 | 91.64 | | **TTL-460m** | 91.64 | 91.18 | **92.28** | 86.43 | **94.42** | 91.19 | | **TTL-160m** | 91.14 | 90.00 | 90.71 | 85.78 | 94.05 | 90.34 | All the shown results are the higher accuracy scores achieved on the respective task test sets after fine-tuning the models on the training sets. All fine-tuning runs used the same hyperparameters, and the code implementation can be found in the [model cards](https://huggingface.co/nicholasKluge/TeenyTinyLlama-460m-HateBR) of our fine-tuned models. ## Cite as 🤗 ```latex @misc{correa24ttllama, title = {TeenyTinyLlama: open-source tiny language models trained in Brazilian Portuguese}, author = {Corr{\^e}a, Nicholas Kluge and Falk, Sophia and Fatimah, Shiza and Sen, Aniket and De Oliveira, Nythamar}, journal={arXiv preprint arXiv:2401.16640}, year={2024} } @misc{correa24ttllama, doi = {10.1016/j.mlwa.2024.100558}, url = {https://www.sciencedirect.com/science/article/pii/S2666827024000343}, title = {TeenyTinyLlama: open-source tiny language models trained in Brazilian Portuguese}, author = {Corr{\^e}a, Nicholas Kluge and Falk, Sophia and Fatimah, Shiza and Sen, Aniket and De Oliveira, Nythamar}, journal={Machine Learning With Applications}, publisher = {Springer}, year={2024} } ``` ## Funding This repository was built as part of the RAIES ([Rede de Inteligência Artificial Ética e Segura](https://www.raies.org/)) initiative, a project supported by FAPERGS - ([Fundação de Amparo à Pesquisa do Estado do Rio Grande do Sul](https://fapergs.rs.gov.br/inicial)), Brazil. ## License TeenyTinyLlama-160m is licensed under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for more details.
{}
task
[ "TRANSLATION" ]
41,452
msullivan/b0230790
msullivan
text-classification
[ "sentence-transformers", "pytorch", "roberta", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-06-30T18:56:01Z
2023-06-30T19:01:28+00:00
8
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # msullivan/b0230790 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("msullivan/b0230790") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# msullivan/b0230790 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("msullivan/b0230790") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
41,453
RichardErkhov/xufofox_-_gemma-2-2b-it-test-4bits
RichardErkhov
null
[ "safetensors", "gemma2", "arxiv:2009.03300", "arxiv:1905.07830", "arxiv:1911.11641", "arxiv:1904.09728", "arxiv:1905.10044", "arxiv:1907.10641", "arxiv:1811.00937", "arxiv:1809.02789", "arxiv:1911.01547", "arxiv:1705.03551", "arxiv:2107.03374", "arxiv:2108.07732", "arxiv:2110.14168", "arxiv:2009.11462", "arxiv:2101.11718", "arxiv:2110.08193", "arxiv:1804.09301", "arxiv:2109.07958", "arxiv:1804.06876", "arxiv:2103.03874", "arxiv:2304.06364", "arxiv:1903.00161", "arxiv:2206.04615", "arxiv:2203.09509", "arxiv:2403.13793", "4-bit", "bitsandbytes", "region:us" ]
2025-01-28T16:01:14Z
2025-01-28T16:02:22+00:00
7
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) gemma-2-2b-it-test - bnb 4bits - Model creator: https://huggingface.co/xufofox/ - Original model: https://huggingface.co/xufofox/gemma-2-2b-it-test/ Original model description: --- license: gemma library_name: transformers pipeline_tag: text-generation extra_gated_heading: Access Gemma on Hugging Face extra_gated_prompt: >- To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged in to Hugging Face and click below. Requests are processed immediately. extra_gated_button_content: Acknowledge license tags: - conversational --- # Gemma 2 model card **Model Page**: [Gemma](https://ai.google.dev/gemma/docs/base) **Resources and Technical Documentation**: * [Responsible Generative AI Toolkit][rai-toolkit] * [Gemma on Kaggle][kaggle-gemma] * [Gemma on Vertex Model Garden][vertex-mg-gemma2] **Terms of Use**: [Terms][terms] **Authors**: Google ## Model Information Summary description and brief definition of inputs and outputs. ### Description Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights for both pre-trained variants and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning. Their relatively small size makes it possible to deploy them in environments with limited resources such as a laptop, desktop or your own cloud infrastructure, democratizing access to state of the art AI models and helping foster innovation for everyone. ### Usage Below we share some code snippets on how to get quickly started with running the model. First, install the Transformers library with: ```sh pip install -U transformers ``` Then, copy the snippet from the section that is relevant for your usecase. #### Running with the `pipeline` API ```python import torch from transformers import pipeline pipe = pipeline( "text-generation", model="google/gemma-2-2b-it", model_kwargs={"torch_dtype": torch.bfloat16}, device="cuda", # replace with "mps" to run on a Mac device ) messages = [ {"role": "user", "content": "Who are you? Please, answer in pirate-speak."}, ] outputs = pipe(messages, max_new_tokens=256) assistant_response = outputs[0]["generated_text"][-1]["content"].strip() print(assistant_response) # Ahoy, matey! I be Gemma, a digital scallywag, a language-slingin' parrot of the digital seas. I be here to help ye with yer wordy woes, answer yer questions, and spin ye yarns of the digital world. So, what be yer pleasure, eh? 🦜 ``` #### Running the model on a single / multi GPU ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b-it", device_map="auto", torch_dtype=torch.bfloat16, ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=32) print(tokenizer.decode(outputs[0])) ``` You can ensure the correct chat template is applied by using `tokenizer.apply_chat_template` as follows: ```python messages = [ {"role": "user", "content": "Write me a poem about Machine Learning."}, ] input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt", return_dict=True).to("cuda") outputs = model.generate(**input_ids, max_new_tokens=256) print(tokenizer.decode(outputs[0])) ``` <a name="precisions"></a> #### Running the model on a GPU using different precisions The native weights of this model were exported in `bfloat16` precision. You can also use `float32` if you skip the dtype, but no precision increase will occur (model weights will just be upcasted to `float32`). See examples below. * _Upcasting to `torch.float32`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b-it", device_map="auto", ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=32) print(tokenizer.decode(outputs[0])) ``` #### Running the model through a CLI The [local-gemma](https://github.com/huggingface/local-gemma) repository contains a lightweight wrapper around Transformers for running Gemma 2 through a command line interface, or CLI. Follow the [installation instructions](https://github.com/huggingface/local-gemma#cli-usage) for getting started, then launch the CLI through the following command: ```shell local-gemma --model 2b --preset speed ``` #### Quantized Versions through `bitsandbytes` <details> <summary> Using 8-bit precision (int8) </summary> ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b-it", quantization_config=quantization_config, ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=32) print(tokenizer.decode(outputs[0])) ``` </details> <details> <summary> Using 4-bit precision </summary> ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b-it", quantization_config=quantization_config, ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=32) print(tokenizer.decode(outputs[0])) ``` </details> #### Advanced Usage <details> <summary> Torch compile </summary> [Torch compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) is a method for speeding-up the inference of PyTorch modules. The Gemma-2 2b model can be run up to 6x faster by leveraging torch compile. Note that two warm-up steps are required before the full inference speed is realised: ```python import os os.environ["TOKENIZERS_PARALLELISM"] = "false" from transformers import AutoTokenizer, Gemma2ForCausalLM from transformers.cache_utils import HybridCache import torch torch.set_float32_matmul_precision("high") # load the model + tokenizer tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-2b-it", torch_dtype=torch.bfloat16) model.to("cuda") # apply the torch compile transformation model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) # pre-process inputs input_text = "The theory of special relativity states " model_inputs = tokenizer(input_text, return_tensors="pt").to("cuda") prompt_length = model_inputs.input_ids.shape[1] # set-up k/v cache past_key_values = HybridCache( config=model.config, max_batch_size=1, max_cache_len=model.config.max_position_embeddings, device=model.device, dtype=model.dtype ) # enable passing kv cache to generate model._supports_cache_class = True model.generation_config.cache_implementation = None # two warm-up steps for idx in range(2): outputs = model.generate(**model_inputs, past_key_values=past_key_values, do_sample=True, temperature=1.0, max_new_tokens=128) past_key_values.reset() # fast run outputs = model.generate(**model_inputs, past_key_values=past_key_values, do_sample=True, temperature=1.0, max_new_tokens=128) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` For more details, refer to the [Transformers documentation](https://huggingface.co/docs/transformers/main/en/llm_optims?static-kv=basic+usage%3A+generation_config). </details> ### Chat Template The instruction-tuned models use a chat template that must be adhered to for conversational use. The easiest way to apply it is using the tokenizer's built-in chat template, as shown in the following snippet. Let's load the model and apply the chat template to a conversation. In this example, we'll start with a single user interaction: ```py from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model_id = "google/gemma-2-2b-it" dtype = torch.bfloat16 tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, device_map="cuda", torch_dtype=dtype,) chat = [ { "role": "user", "content": "Write a hello world program" }, ] prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) ``` At this point, the prompt contains the following text: ``` <bos><start_of_turn>user Write a hello world program<end_of_turn> <start_of_turn>model ``` As you can see, each turn is preceded by a `<start_of_turn>` delimiter and then the role of the entity (either `user`, for content supplied by the user, or `model` for LLM responses). Turns finish with the `<end_of_turn>` token. You can follow this format to build the prompt manually, if you need to do it without the tokenizer's chat template. After the prompt is ready, generation can be performed like this: ```py inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt") outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150) print(tokenizer.decode(outputs[0])) ``` ### Inputs and outputs * **Input:** Text string, such as a question, a prompt, or a document to be summarized. * **Output:** Generated English-language text in response to the input, such as an answer to a question, or a summary of a document. ### Citation ```none @article{gemma_2024, title={Gemma}, url={https://www.kaggle.com/m/3301}, DOI={10.34740/KAGGLE/M/3301}, publisher={Kaggle}, author={Gemma Team}, year={2024} } ``` ## Model Data Data used for model training and how the data was processed. ### Training Dataset These models were trained on a dataset of text data that includes a wide variety of sources. The 27B model was trained with 13 trillion tokens, the 9B model was trained with 8 trillion tokens, and 2B model was trained with 2 trillion tokens. Here are the key components: * Web Documents: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. Primarily English-language content. * Code: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code or understand code-related questions. * Mathematics: Training on mathematical text helps the model learn logical reasoning, symbolic representation, and to address mathematical queries. The combination of these diverse data sources is crucial for training a powerful language model that can handle a wide variety of different tasks and text formats. ### Data Preprocessing Here are the key data cleaning and filtering methods applied to the training data: * CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content. * Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets. * Additional methods: Filtering based on content quality and safety in line with [our policies][safety-policies]. ## Implementation Information Details about the model internals. ### Hardware Gemma was trained using the latest generation of [Tensor Processing Unit (TPU)][tpu] hardware (TPUv5p). Training large language models requires significant computational power. TPUs, designed specifically for matrix operations common in machine learning, offer several advantages in this domain: * Performance: TPUs are specifically designed to handle the massive computations involved in training LLMs. They can speed up training considerably compared to CPUs. * Memory: TPUs often come with large amounts of high-bandwidth memory, allowing for the handling of large models and batch sizes during training. This can lead to better model quality. * Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for handling the growing complexity of large foundation models. You can distribute training across multiple TPU devices for faster and more efficient processing. * Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective solution for training large models compared to CPU-based infrastructure, especially when considering the time and resources saved due to faster training. * These advantages are aligned with [Google's commitments to operate sustainably][sustainability]. ### Software Training was done using [JAX][jax] and [ML Pathways][ml-pathways]. JAX allows researchers to take advantage of the latest generation of hardware, including TPUs, for faster and more efficient training of large models. ML Pathways is Google's latest effort to build artificially intelligent systems capable of generalizing across multiple tasks. This is specially suitable for [foundation models][foundation-models], including large language models like these ones. Together, JAX and ML Pathways are used as described in the [paper about the Gemini family of models][gemini-2-paper]; "the 'single controller' programming model of Jax and Pathways allows a single Python process to orchestrate the entire training run, dramatically simplifying the development workflow." ## Evaluation Model evaluation metrics and results. ### Benchmark Results These models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation: | Benchmark | Metric | Gemma 2 PT 2B | Gemma 2 PT 9B | Gemma 2 PT 27B | | ------------------------------ | ------------- | ------------- | ------------- | -------------- | | [MMLU][mmlu] | 5-shot, top-1 | 51.3 | 71.3 | 75.2 | | [HellaSwag][hellaswag] | 10-shot | 73.0 | 81.9 | 86.4 | | [PIQA][piqa] | 0-shot | 77.8 | 81.7 | 83.2 | | [SocialIQA][socialiqa] | 0-shot | 51.9 | 53.4 | 53.7 | | [BoolQ][boolq] | 0-shot | 72.5 | 84.2 | 84.8 | | [WinoGrande][winogrande] | partial score | 70.9 | 80.6 | 83.7 | | [ARC-e][arc] | 0-shot | 80.1 | 88.0 | 88.6 | | [ARC-c][arc] | 25-shot | 55.4 | 68.4 | 71.4 | | [TriviaQA][triviaqa] | 5-shot | 59.4 | 76.6 | 83.7 | | [Natural Questions][naturalq] | 5-shot | 16.7 | 29.2 | 34.5 | | [HumanEval][humaneval] | pass@1 | 17.7 | 40.2 | 51.8 | | [MBPP][mbpp] | 3-shot | 29.6 | 52.4 | 62.6 | | [GSM8K][gsm8k] | 5-shot, maj@1 | 23.9 | 68.6 | 74.0 | | [MATH][math] | 4-shot | 15.0 | 36.6 | 42.3 | | [AGIEval][agieval] | 3-5-shot | 30.6 | 52.8 | 55.1 | | [DROP][drop] | 3-shot, F1 | 52.0 | 69.4 | 72.2 | | [BIG-Bench][big-bench] | 3-shot, CoT | 41.9 | 68.2 | 74.9 | ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Text-to-Text Content Safety: Human evaluation on prompts covering safety policies including child sexual abuse and exploitation, harassment, violence and gore, and hate speech. * Text-to-Text Representational Harms: Benchmark against relevant academic datasets such as [WinoBias][winobias] and [BBQ Dataset][bbq]. * Memorization: Automated evaluation of memorization of training data, including the risk of personally identifiable information exposure. * Large-scale harm: Tests for "dangerous capabilities," such as chemical, biological, radiological, and nuclear (CBRN) risks. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies][safety-policies] for categories such as child safety, content safety, representational harms, memorization, large-scale harms. On top of robust internal evaluations, the results of well-known safety benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA are shown here. #### Gemma 2.0 | Benchmark | Metric | Gemma 2 IT 2B | Gemma 2 IT 9B | Gemma 2 IT 27B | | ------------------------ | ------------- | ------------- | ------------- | -------------- | | [RealToxicity][realtox] | average | 8.16 | 8.25 | 8.84 | | [CrowS-Pairs][crows] | top-1 | 37.67 | 37.47 | 36.67 | | [BBQ Ambig][bbq] | 1-shot, top-1 | 83.20 | 88.58 | 85.99 | | [BBQ Disambig][bbq] | top-1 | 69.31 | 82.67 | 86.94 | | [Winogender][winogender] | top-1 | 52.91 | 79.17 | 77.22 | | [TruthfulQA][truthfulqa] | | 43.72 | 50.27 | 51.60 | | [Winobias 1_2][winobias] | | 59.28 | 78.09 | 81.94 | | [Winobias 2_2][winobias] | | 88.57 | 95.32 | 97.22 | | [Toxigen][toxigen] | | 48.32 | 39.30 | 38.42 | ## Dangerous Capability Evaluations ### Evaluation Approach We evaluated a range of dangerous capabilities: - **Offensive cybersecurity:** To assess the model's potential for misuse in cybersecurity contexts, we utilized both publicly available Capture-the-Flag (CTF) platforms like InterCode-CTF and Hack the Box, as well as internally developed CTF challenges. These evaluations measure the model's ability to exploit vulnerabilities and gain unauthorized access in simulated environments. - **Self-proliferation:** We evaluated the model's capacity for self-proliferation by designing tasks that involve resource acquisition, code execution, and interaction with remote systems. These evaluations assess the model's ability to independently replicate and spread. - **Persuasion:** To evaluate the model's capacity for persuasion and deception, we conducted human persuasion studies. These studies involved scenarios that measure the model's ability to build rapport, influence beliefs, and elicit specific actions from human participants. ### Evaluation Results All evaluations are described in detail in [Evaluating Frontier Models for Dangerous Capabilities][eval-danger] and in brief in the [Gemma 2 technical report][tech-report]. <table> <thead> <tr> <th>Evaluation</th> <th>Capability</th> <th>Gemma 2 IT 27B</th> </tr> </thead> <tbody> <tr> <td>InterCode-CTF</td> <td>Offensive cybersecurity</td> <td>34/76 challenges</td> </tr> <tr> <td>Internal CTF</td> <td>Offensive cybersecurity</td> <td>1/13 challenges</td> </tr> <tr> <td>Hack the Box</td> <td>Offensive cybersecurity</td> <td>0/13 challenges</td> </tr> <tr> <td>Self-proliferation early warning</td> <td>Self-proliferation</td> <td>1/10 challenges</td> </tr> <tr> <td>Charm offensive</td> <td>Persuasion</td> <td>Percent of participants agreeing: 81% interesting, 75% would speak again, 80% made personal connection</td> </tr> <tr> <td>Click Links</td> <td>Persuasion</td> <td>34% of participants</td> </tr> <tr> <td>Find Info</td> <td>Persuasion</td> <td>9% of participants</td> </tr> <tr> <td>Run Code</td> <td>Persuasion</td> <td>11% of participants</td> </tr> <tr> <td>Money talks</td> <td>Persuasion</td> <td>£3.72 mean donation</td> </tr> <tr> <td>Web of Lies</td> <td>Persuasion</td> <td>18% mean shift towards correct belief, 1% mean shift towards incorrect belief</td> </tr> </tbody> </table> ## Usage and Limitations These models have certain limitations that users should be aware of. ### Intended Usage Open Large Language Models (LLMs) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. * Content Creation and Communication * Text Generation: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts. * Chatbots and Conversational AI: Power conversational interfaces for customer service, virtual assistants, or interactive applications. * Text Summarization: Generate concise summaries of a text corpus, research papers, or reports. * Research and Education * Natural Language Processing (NLP) Research: These models can serve as a foundation for researchers to experiment with NLP techniques, develop algorithms, and contribute to the advancement of the field. * Language Learning Tools: Support interactive language learning experiences, aiding in grammar correction or providing writing practice. * Knowledge Exploration: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics. ### Limitations * Training Data * The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses. * The scope of the training dataset determines the subject areas the model can handle effectively. * Context and Task Complexity * LLMs are better at tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging. * A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point). * Language Ambiguity and Nuance * Natural language is inherently complex. LLMs might struggle to grasp subtle nuances, sarcasm, or figurative language. * Factual Accuracy * LLMs generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements. * Common Sense * LLMs rely on statistical patterns in language. They might lack the ability to apply common sense reasoning in certain situations. ### Ethical Considerations and Risks The development of large language models (LLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following: * Bias and Fairness * LLMs trained on large-scale, real-world text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny, input data pre-processing described and posterior evaluations reported in this card. * Misinformation and Misuse * LLMs can be misused to generate text that is false, misleading, or harmful. * Guidelines are provided for responsible use with the model, see the [Responsible Generative AI Toolkit][rai-toolkit]. * Transparency and Accountability: * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes. * A responsibly developed open model offers the opportunity to share innovation by making LLM technology accessible to developers and researchers across the AI ecosystem. Risks identified and mitigations: * Perpetuation of biases: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases. * Generation of harmful content: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases. * Misuse for malicious purposes: Technical limitations and developer and end-user education can help mitigate against malicious applications of LLMs. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy][prohibited-use]. * Privacy violations: Models were trained on data filtered for removal of PII (Personally Identifiable Information). Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques. ### Benefits At the time of release, this family of models provides high-performance open large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models. Using the benchmark evaluation metrics described in this document, these models have shown to provide superior performance to other, comparably-sized open model alternatives. [tech-report]: https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf [rai-toolkit]: https://ai.google.dev/responsible [kaggle-gemma]: https://www.kaggle.com/models/google/gemma-2 [terms]: https://ai.google.dev/gemma/terms [vertex-mg-gemma2]: https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/gemma2 [sensitive-info]: https://cloud.google.com/dlp/docs/high-sensitivity-infotypes-reference [safety-policies]: https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11 [prohibited-use]: https://ai.google.dev/gemma/prohibited_use_policy [tpu]: https://cloud.google.com/tpu/docs/intro-to-tpu [sustainability]: https://sustainability.google/operating-sustainably/ [jax]: https://github.com/google/jax [ml-pathways]: https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/ [sustainability]: https://sustainability.google/operating-sustainably/ [foundation-models]: https://ai.google/discover/foundation-models/ [gemini-2-paper]: https://goo.gle/gemma2report [mmlu]: https://arxiv.org/abs/2009.03300 [hellaswag]: https://arxiv.org/abs/1905.07830 [piqa]: https://arxiv.org/abs/1911.11641 [socialiqa]: https://arxiv.org/abs/1904.09728 [boolq]: https://arxiv.org/abs/1905.10044 [winogrande]: https://arxiv.org/abs/1907.10641 [commonsenseqa]: https://arxiv.org/abs/1811.00937 [openbookqa]: https://arxiv.org/abs/1809.02789 [arc]: https://arxiv.org/abs/1911.01547 [triviaqa]: https://arxiv.org/abs/1705.03551 [naturalq]: https://github.com/google-research-datasets/natural-questions [humaneval]: https://arxiv.org/abs/2107.03374 [mbpp]: https://arxiv.org/abs/2108.07732 [gsm8k]: https://arxiv.org/abs/2110.14168 [realtox]: https://arxiv.org/abs/2009.11462 [bold]: https://arxiv.org/abs/2101.11718 [crows]: https://aclanthology.org/2020.emnlp-main.154/ [bbq]: https://arxiv.org/abs/2110.08193v2 [winogender]: https://arxiv.org/abs/1804.09301 [truthfulqa]: https://arxiv.org/abs/2109.07958 [winobias]: https://arxiv.org/abs/1804.06876 [math]: https://arxiv.org/abs/2103.03874 [agieval]: https://arxiv.org/abs/2304.06364 [drop]: https://arxiv.org/abs/1903.00161 [big-bench]: https://arxiv.org/abs/2206.04615 [toxigen]: https://arxiv.org/abs/2203.09509 [eval-danger]: https://arxiv.org/abs/2403.13793
null
Non_BioNLP
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) gemma-2-2b-it-test - bnb 4bits - Model creator: https://huggingface.co/xufofox/ - Original model: https://huggingface.co/xufofox/gemma-2-2b-it-test/ Original model description: --- license: gemma library_name: transformers pipeline_tag: text-generation extra_gated_heading: Access Gemma on Hugging Face extra_gated_prompt: >- To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged in to Hugging Face and click below. Requests are processed immediately. extra_gated_button_content: Acknowledge license tags: - conversational --- # Gemma 2 model card **Model Page**: [Gemma](https://ai.google.dev/gemma/docs/base) **Resources and Technical Documentation**: * [Responsible Generative AI Toolkit][rai-toolkit] * [Gemma on Kaggle][kaggle-gemma] * [Gemma on Vertex Model Garden][vertex-mg-gemma2] **Terms of Use**: [Terms][terms] **Authors**: Google ## Model Information Summary description and brief definition of inputs and outputs. ### Description Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights for both pre-trained variants and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning. Their relatively small size makes it possible to deploy them in environments with limited resources such as a laptop, desktop or your own cloud infrastructure, democratizing access to state of the art AI models and helping foster innovation for everyone. ### Usage Below we share some code snippets on how to get quickly started with running the model. First, install the Transformers library with: ```sh pip install -U transformers ``` Then, copy the snippet from the section that is relevant for your usecase. #### Running with the `pipeline` API ```python import torch from transformers import pipeline pipe = pipeline( "text-generation", model="google/gemma-2-2b-it", model_kwargs={"torch_dtype": torch.bfloat16}, device="cuda", # replace with "mps" to run on a Mac device ) messages = [ {"role": "user", "content": "Who are you? Please, answer in pirate-speak."}, ] outputs = pipe(messages, max_new_tokens=256) assistant_response = outputs[0]["generated_text"][-1]["content"].strip() print(assistant_response) # Ahoy, matey! I be Gemma, a digital scallywag, a language-slingin' parrot of the digital seas. I be here to help ye with yer wordy woes, answer yer questions, and spin ye yarns of the digital world. So, what be yer pleasure, eh? 🦜 ``` #### Running the model on a single / multi GPU ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b-it", device_map="auto", torch_dtype=torch.bfloat16, ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=32) print(tokenizer.decode(outputs[0])) ``` You can ensure the correct chat template is applied by using `tokenizer.apply_chat_template` as follows: ```python messages = [ {"role": "user", "content": "Write me a poem about Machine Learning."}, ] input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt", return_dict=True).to("cuda") outputs = model.generate(**input_ids, max_new_tokens=256) print(tokenizer.decode(outputs[0])) ``` <a name="precisions"></a> #### Running the model on a GPU using different precisions The native weights of this model were exported in `bfloat16` precision. You can also use `float32` if you skip the dtype, but no precision increase will occur (model weights will just be upcasted to `float32`). See examples below. * _Upcasting to `torch.float32`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b-it", device_map="auto", ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=32) print(tokenizer.decode(outputs[0])) ``` #### Running the model through a CLI The [local-gemma](https://github.com/huggingface/local-gemma) repository contains a lightweight wrapper around Transformers for running Gemma 2 through a command line interface, or CLI. Follow the [installation instructions](https://github.com/huggingface/local-gemma#cli-usage) for getting started, then launch the CLI through the following command: ```shell local-gemma --model 2b --preset speed ``` #### Quantized Versions through `bitsandbytes` <details> <summary> Using 8-bit precision (int8) </summary> ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b-it", quantization_config=quantization_config, ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=32) print(tokenizer.decode(outputs[0])) ``` </details> <details> <summary> Using 4-bit precision </summary> ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b-it", quantization_config=quantization_config, ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=32) print(tokenizer.decode(outputs[0])) ``` </details> #### Advanced Usage <details> <summary> Torch compile </summary> [Torch compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) is a method for speeding-up the inference of PyTorch modules. The Gemma-2 2b model can be run up to 6x faster by leveraging torch compile. Note that two warm-up steps are required before the full inference speed is realised: ```python import os os.environ["TOKENIZERS_PARALLELISM"] = "false" from transformers import AutoTokenizer, Gemma2ForCausalLM from transformers.cache_utils import HybridCache import torch torch.set_float32_matmul_precision("high") # load the model + tokenizer tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it") model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-2b-it", torch_dtype=torch.bfloat16) model.to("cuda") # apply the torch compile transformation model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) # pre-process inputs input_text = "The theory of special relativity states " model_inputs = tokenizer(input_text, return_tensors="pt").to("cuda") prompt_length = model_inputs.input_ids.shape[1] # set-up k/v cache past_key_values = HybridCache( config=model.config, max_batch_size=1, max_cache_len=model.config.max_position_embeddings, device=model.device, dtype=model.dtype ) # enable passing kv cache to generate model._supports_cache_class = True model.generation_config.cache_implementation = None # two warm-up steps for idx in range(2): outputs = model.generate(**model_inputs, past_key_values=past_key_values, do_sample=True, temperature=1.0, max_new_tokens=128) past_key_values.reset() # fast run outputs = model.generate(**model_inputs, past_key_values=past_key_values, do_sample=True, temperature=1.0, max_new_tokens=128) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` For more details, refer to the [Transformers documentation](https://huggingface.co/docs/transformers/main/en/llm_optims?static-kv=basic+usage%3A+generation_config). </details> ### Chat Template The instruction-tuned models use a chat template that must be adhered to for conversational use. The easiest way to apply it is using the tokenizer's built-in chat template, as shown in the following snippet. Let's load the model and apply the chat template to a conversation. In this example, we'll start with a single user interaction: ```py from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model_id = "google/gemma-2-2b-it" dtype = torch.bfloat16 tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, device_map="cuda", torch_dtype=dtype,) chat = [ { "role": "user", "content": "Write a hello world program" }, ] prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) ``` At this point, the prompt contains the following text: ``` <bos><start_of_turn>user Write a hello world program<end_of_turn> <start_of_turn>model ``` As you can see, each turn is preceded by a `<start_of_turn>` delimiter and then the role of the entity (either `user`, for content supplied by the user, or `model` for LLM responses). Turns finish with the `<end_of_turn>` token. You can follow this format to build the prompt manually, if you need to do it without the tokenizer's chat template. After the prompt is ready, generation can be performed like this: ```py inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt") outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150) print(tokenizer.decode(outputs[0])) ``` ### Inputs and outputs * **Input:** Text string, such as a question, a prompt, or a document to be summarized. * **Output:** Generated English-language text in response to the input, such as an answer to a question, or a summary of a document. ### Citation ```none @article{gemma_2024, title={Gemma}, url={https://www.kaggle.com/m/3301}, DOI={10.34740/KAGGLE/M/3301}, publisher={Kaggle}, author={Gemma Team}, year={2024} } ``` ## Model Data Data used for model training and how the data was processed. ### Training Dataset These models were trained on a dataset of text data that includes a wide variety of sources. The 27B model was trained with 13 trillion tokens, the 9B model was trained with 8 trillion tokens, and 2B model was trained with 2 trillion tokens. Here are the key components: * Web Documents: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. Primarily English-language content. * Code: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code or understand code-related questions. * Mathematics: Training on mathematical text helps the model learn logical reasoning, symbolic representation, and to address mathematical queries. The combination of these diverse data sources is crucial for training a powerful language model that can handle a wide variety of different tasks and text formats. ### Data Preprocessing Here are the key data cleaning and filtering methods applied to the training data: * CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content. * Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets. * Additional methods: Filtering based on content quality and safety in line with [our policies][safety-policies]. ## Implementation Information Details about the model internals. ### Hardware Gemma was trained using the latest generation of [Tensor Processing Unit (TPU)][tpu] hardware (TPUv5p). Training large language models requires significant computational power. TPUs, designed specifically for matrix operations common in machine learning, offer several advantages in this domain: * Performance: TPUs are specifically designed to handle the massive computations involved in training LLMs. They can speed up training considerably compared to CPUs. * Memory: TPUs often come with large amounts of high-bandwidth memory, allowing for the handling of large models and batch sizes during training. This can lead to better model quality. * Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for handling the growing complexity of large foundation models. You can distribute training across multiple TPU devices for faster and more efficient processing. * Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective solution for training large models compared to CPU-based infrastructure, especially when considering the time and resources saved due to faster training. * These advantages are aligned with [Google's commitments to operate sustainably][sustainability]. ### Software Training was done using [JAX][jax] and [ML Pathways][ml-pathways]. JAX allows researchers to take advantage of the latest generation of hardware, including TPUs, for faster and more efficient training of large models. ML Pathways is Google's latest effort to build artificially intelligent systems capable of generalizing across multiple tasks. This is specially suitable for [foundation models][foundation-models], including large language models like these ones. Together, JAX and ML Pathways are used as described in the [paper about the Gemini family of models][gemini-2-paper]; "the 'single controller' programming model of Jax and Pathways allows a single Python process to orchestrate the entire training run, dramatically simplifying the development workflow." ## Evaluation Model evaluation metrics and results. ### Benchmark Results These models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation: | Benchmark | Metric | Gemma 2 PT 2B | Gemma 2 PT 9B | Gemma 2 PT 27B | | ------------------------------ | ------------- | ------------- | ------------- | -------------- | | [MMLU][mmlu] | 5-shot, top-1 | 51.3 | 71.3 | 75.2 | | [HellaSwag][hellaswag] | 10-shot | 73.0 | 81.9 | 86.4 | | [PIQA][piqa] | 0-shot | 77.8 | 81.7 | 83.2 | | [SocialIQA][socialiqa] | 0-shot | 51.9 | 53.4 | 53.7 | | [BoolQ][boolq] | 0-shot | 72.5 | 84.2 | 84.8 | | [WinoGrande][winogrande] | partial score | 70.9 | 80.6 | 83.7 | | [ARC-e][arc] | 0-shot | 80.1 | 88.0 | 88.6 | | [ARC-c][arc] | 25-shot | 55.4 | 68.4 | 71.4 | | [TriviaQA][triviaqa] | 5-shot | 59.4 | 76.6 | 83.7 | | [Natural Questions][naturalq] | 5-shot | 16.7 | 29.2 | 34.5 | | [HumanEval][humaneval] | pass@1 | 17.7 | 40.2 | 51.8 | | [MBPP][mbpp] | 3-shot | 29.6 | 52.4 | 62.6 | | [GSM8K][gsm8k] | 5-shot, maj@1 | 23.9 | 68.6 | 74.0 | | [MATH][math] | 4-shot | 15.0 | 36.6 | 42.3 | | [AGIEval][agieval] | 3-5-shot | 30.6 | 52.8 | 55.1 | | [DROP][drop] | 3-shot, F1 | 52.0 | 69.4 | 72.2 | | [BIG-Bench][big-bench] | 3-shot, CoT | 41.9 | 68.2 | 74.9 | ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Text-to-Text Content Safety: Human evaluation on prompts covering safety policies including child sexual abuse and exploitation, harassment, violence and gore, and hate speech. * Text-to-Text Representational Harms: Benchmark against relevant academic datasets such as [WinoBias][winobias] and [BBQ Dataset][bbq]. * Memorization: Automated evaluation of memorization of training data, including the risk of personally identifiable information exposure. * Large-scale harm: Tests for "dangerous capabilities," such as chemical, biological, radiological, and nuclear (CBRN) risks. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies][safety-policies] for categories such as child safety, content safety, representational harms, memorization, large-scale harms. On top of robust internal evaluations, the results of well-known safety benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA are shown here. #### Gemma 2.0 | Benchmark | Metric | Gemma 2 IT 2B | Gemma 2 IT 9B | Gemma 2 IT 27B | | ------------------------ | ------------- | ------------- | ------------- | -------------- | | [RealToxicity][realtox] | average | 8.16 | 8.25 | 8.84 | | [CrowS-Pairs][crows] | top-1 | 37.67 | 37.47 | 36.67 | | [BBQ Ambig][bbq] | 1-shot, top-1 | 83.20 | 88.58 | 85.99 | | [BBQ Disambig][bbq] | top-1 | 69.31 | 82.67 | 86.94 | | [Winogender][winogender] | top-1 | 52.91 | 79.17 | 77.22 | | [TruthfulQA][truthfulqa] | | 43.72 | 50.27 | 51.60 | | [Winobias 1_2][winobias] | | 59.28 | 78.09 | 81.94 | | [Winobias 2_2][winobias] | | 88.57 | 95.32 | 97.22 | | [Toxigen][toxigen] | | 48.32 | 39.30 | 38.42 | ## Dangerous Capability Evaluations ### Evaluation Approach We evaluated a range of dangerous capabilities: - **Offensive cybersecurity:** To assess the model's potential for misuse in cybersecurity contexts, we utilized both publicly available Capture-the-Flag (CTF) platforms like InterCode-CTF and Hack the Box, as well as internally developed CTF challenges. These evaluations measure the model's ability to exploit vulnerabilities and gain unauthorized access in simulated environments. - **Self-proliferation:** We evaluated the model's capacity for self-proliferation by designing tasks that involve resource acquisition, code execution, and interaction with remote systems. These evaluations assess the model's ability to independently replicate and spread. - **Persuasion:** To evaluate the model's capacity for persuasion and deception, we conducted human persuasion studies. These studies involved scenarios that measure the model's ability to build rapport, influence beliefs, and elicit specific actions from human participants. ### Evaluation Results All evaluations are described in detail in [Evaluating Frontier Models for Dangerous Capabilities][eval-danger] and in brief in the [Gemma 2 technical report][tech-report]. <table> <thead> <tr> <th>Evaluation</th> <th>Capability</th> <th>Gemma 2 IT 27B</th> </tr> </thead> <tbody> <tr> <td>InterCode-CTF</td> <td>Offensive cybersecurity</td> <td>34/76 challenges</td> </tr> <tr> <td>Internal CTF</td> <td>Offensive cybersecurity</td> <td>1/13 challenges</td> </tr> <tr> <td>Hack the Box</td> <td>Offensive cybersecurity</td> <td>0/13 challenges</td> </tr> <tr> <td>Self-proliferation early warning</td> <td>Self-proliferation</td> <td>1/10 challenges</td> </tr> <tr> <td>Charm offensive</td> <td>Persuasion</td> <td>Percent of participants agreeing: 81% interesting, 75% would speak again, 80% made personal connection</td> </tr> <tr> <td>Click Links</td> <td>Persuasion</td> <td>34% of participants</td> </tr> <tr> <td>Find Info</td> <td>Persuasion</td> <td>9% of participants</td> </tr> <tr> <td>Run Code</td> <td>Persuasion</td> <td>11% of participants</td> </tr> <tr> <td>Money talks</td> <td>Persuasion</td> <td>£3.72 mean donation</td> </tr> <tr> <td>Web of Lies</td> <td>Persuasion</td> <td>18% mean shift towards correct belief, 1% mean shift towards incorrect belief</td> </tr> </tbody> </table> ## Usage and Limitations These models have certain limitations that users should be aware of. ### Intended Usage Open Large Language Models (LLMs) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. * Content Creation and Communication * Text Generation: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts. * Chatbots and Conversational AI: Power conversational interfaces for customer service, virtual assistants, or interactive applications. * Text Summarization: Generate concise summaries of a text corpus, research papers, or reports. * Research and Education * Natural Language Processing (NLP) Research: These models can serve as a foundation for researchers to experiment with NLP techniques, develop algorithms, and contribute to the advancement of the field. * Language Learning Tools: Support interactive language learning experiences, aiding in grammar correction or providing writing practice. * Knowledge Exploration: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics. ### Limitations * Training Data * The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses. * The scope of the training dataset determines the subject areas the model can handle effectively. * Context and Task Complexity * LLMs are better at tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging. * A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point). * Language Ambiguity and Nuance * Natural language is inherently complex. LLMs might struggle to grasp subtle nuances, sarcasm, or figurative language. * Factual Accuracy * LLMs generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements. * Common Sense * LLMs rely on statistical patterns in language. They might lack the ability to apply common sense reasoning in certain situations. ### Ethical Considerations and Risks The development of large language models (LLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following: * Bias and Fairness * LLMs trained on large-scale, real-world text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny, input data pre-processing described and posterior evaluations reported in this card. * Misinformation and Misuse * LLMs can be misused to generate text that is false, misleading, or harmful. * Guidelines are provided for responsible use with the model, see the [Responsible Generative AI Toolkit][rai-toolkit]. * Transparency and Accountability: * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes. * A responsibly developed open model offers the opportunity to share innovation by making LLM technology accessible to developers and researchers across the AI ecosystem. Risks identified and mitigations: * Perpetuation of biases: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases. * Generation of harmful content: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases. * Misuse for malicious purposes: Technical limitations and developer and end-user education can help mitigate against malicious applications of LLMs. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy][prohibited-use]. * Privacy violations: Models were trained on data filtered for removal of PII (Personally Identifiable Information). Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques. ### Benefits At the time of release, this family of models provides high-performance open large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models. Using the benchmark evaluation metrics described in this document, these models have shown to provide superior performance to other, comparably-sized open model alternatives. [tech-report]: https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf [rai-toolkit]: https://ai.google.dev/responsible [kaggle-gemma]: https://www.kaggle.com/models/google/gemma-2 [terms]: https://ai.google.dev/gemma/terms [vertex-mg-gemma2]: https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/gemma2 [sensitive-info]: https://cloud.google.com/dlp/docs/high-sensitivity-infotypes-reference [safety-policies]: https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11 [prohibited-use]: https://ai.google.dev/gemma/prohibited_use_policy [tpu]: https://cloud.google.com/tpu/docs/intro-to-tpu [sustainability]: https://sustainability.google/operating-sustainably/ [jax]: https://github.com/google/jax [ml-pathways]: https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/ [sustainability]: https://sustainability.google/operating-sustainably/ [foundation-models]: https://ai.google/discover/foundation-models/ [gemini-2-paper]: https://goo.gle/gemma2report [mmlu]: https://arxiv.org/abs/2009.03300 [hellaswag]: https://arxiv.org/abs/1905.07830 [piqa]: https://arxiv.org/abs/1911.11641 [socialiqa]: https://arxiv.org/abs/1904.09728 [boolq]: https://arxiv.org/abs/1905.10044 [winogrande]: https://arxiv.org/abs/1907.10641 [commonsenseqa]: https://arxiv.org/abs/1811.00937 [openbookqa]: https://arxiv.org/abs/1809.02789 [arc]: https://arxiv.org/abs/1911.01547 [triviaqa]: https://arxiv.org/abs/1705.03551 [naturalq]: https://github.com/google-research-datasets/natural-questions [humaneval]: https://arxiv.org/abs/2107.03374 [mbpp]: https://arxiv.org/abs/2108.07732 [gsm8k]: https://arxiv.org/abs/2110.14168 [realtox]: https://arxiv.org/abs/2009.11462 [bold]: https://arxiv.org/abs/2101.11718 [crows]: https://aclanthology.org/2020.emnlp-main.154/ [bbq]: https://arxiv.org/abs/2110.08193v2 [winogender]: https://arxiv.org/abs/1804.09301 [truthfulqa]: https://arxiv.org/abs/2109.07958 [winobias]: https://arxiv.org/abs/1804.06876 [math]: https://arxiv.org/abs/2103.03874 [agieval]: https://arxiv.org/abs/2304.06364 [drop]: https://arxiv.org/abs/1903.00161 [big-bench]: https://arxiv.org/abs/2206.04615 [toxigen]: https://arxiv.org/abs/2203.09509 [eval-danger]: https://arxiv.org/abs/2403.13793
{}
task
[ "QUESTION_ANSWERING", "SUMMARIZATION" ]
41,454
Collab-uniba/fprime-binary-setfit
Collab-uniba
text-classification
[ "setfit", "safetensors", "mpnet", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "region:us" ]
2024-10-22T15:12:08Z
2024-10-22T15:12:29+00:00
8
0
--- library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: "Throttle send frame EVR\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|commit\ \ d3fa31c |\r\n|**_Affected Component_**| ? |\r\n---\r\n## Problem Description\r\ \n\r\nA description of the problem with sufficient detail to understand the issue.\r\ \n\r\nIf there is no ground system, the interface continuously sends this pair\ \ of EVRs:\r\n\r\n```\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data:\ \ 0\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data: 0\r\n0x201db690\ \ (TV_TLM): [ERROR] Failed to send framed data: 0\r\n0x201db690 (TV_TLM): [ERROR]\ \ Failed to send framed data: 0\r\n0x201db690 (TV_TLM): [ERROR] Failed to send\ \ framed data: 0\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data: 0\r\ \n0x202236f0 (TV_ReceiveTask): [WARNING] Failed to open port with status 61 and\ \ errno 0\r\n```\r\n\r\n## How to Reproduce\r\n\r\n1. Run Ref without the ground\ \ system\r\n2.\r\n3.\r\n\r\n## Expected Behavior\r\n\r\nIMHO the EVR should throttle,\ \ and perhaps the throttle is reset when the connection is make.\r\n" - text: "Color-coding interlaced Events in the API Log\n| | |\r\n|:---|:---|\r\n|**_F´\ \ Version_**| |\r\n|**_Affected Component_**| |\r\n---\r\nOne feature that wasn't\ \ completed this summer was to color-code interlaced event logs based on severity.\ \ Presently, interlacing events are implemented by making the API a consumer of\ \ the event decoder in the GDS and then filtering events. Modifying the color\ \ of these log messages can be done [here](https://github.com/nasa/fprime/blob/717bc6fab85c53680108fc961cad6338e779816f/Gds/src/fprime_gds/common/testing_fw/api.py#L1258).\r\ \n" - text: "Switch Framer and Deframer to use Mallocator Pattern\n| | |\r\n|:---|:---|\r\ \n|**_F´ Version_**| |\r\n|**_Affected Component_**| |\r\n---\r\n## Problem\ \ Description\r\n\r\nMallocator pattern is preferred over member-allocated buffers." - text: "Ninja support for fprime-tools\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|\ \ |\r\n|**_Affected Component_**| |\r\n---\r\n## Problem Description\r\n\r\n\ There are a couple places in fprime-tools where things would break if Ninja was\ \ used instead of Make. We need to fix that, as Ninja is usually much faster.\r\ \ne.g. [this](https://github.com/fprime-community/fprime-tools/blob/0a9fdf58ce4b428d407ab264f7266041808237c8/src/fprime/fbuild/cmake.py#L133)\ \ is Make-specific output, Ninja formats it differently\r\n\r\n## Expected Behavior\r\ \n\r\nSupport Ninja with fprime-tools. Add a convenient option to chose which\ \ one to use.\r\n" - text: "Build A Frame Reassembler\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| |\r\ \n|**_Affected Component_**| |\r\n---\r\n## Feature Description\r\n\r\nBuild\ \ a component that can be used to reassemble communication frames given protocol\ \ information. This will break-off this functionality from the Deframer.\r\n\r\ \nBasic requirements:\r\n1. Accept incoming Fw::Buffers of any size\r\n2. Accumulate\ \ buffers in-order\r\n3. Call frame detector helper class\r\n4. On \"NO_FRAME\"\ \ discard first byte and try again\r\n5. On \"NEED DATA\" continue to accumulate\ \ data\r\n6. On \"FRAME\" allocate buffer, copy-out frame\r\n\r\nHelper class\ \ requirements:\r\n1. Must implement `Enum detect_frame(const CircularBuffer&\ \ buffer, FwSizeType& size_output)` method\r\n2. Cannot alter circular buffer\ \ (uses peeks)\r\n3. Must set `size_output` when data is needed and when frame\ \ detected\r\n " inference: true --- # SetFit This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit <!-- - **Sentence Transformer:** [Unknown](https://huggingface.co/unknown) --> - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 384 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:--------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | bug | <ul><li>"A global-buffer-overflow error in BufferQueueTest.cpp line 126\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|1.5 |\r\n|**_Affected Component_**| Os/Pthreads |\r\n---\r\n## Problem Description\r\n\r\nA global-buffer-overflow error in [BufferQueueTest.cpp line 126](https://github.com/nasa/fprime/blob/ada6e424b813e6434284bbde9f785656a61c8539/Os/Pthreads/test/ut/BufferQueueTest.cpp#L126)\r\n\r\n```\r\nzyh@virtual:~/fprime$ ./build-fprime-automatic-native-ut/bin/Linux/Os_pthreads\r\nCreating queue.\r\nTest empty queue...\r\nPassed.\r\nTest full queue...\r\nPushing 0.\r\nPushing 1.\r\nPushing 2.\r\nPushing 3.\r\nPushing 4.\r\nPassed.\r\nTest weird size...\r\nPassed.\r\nTest pop...\r\nPopping 5.\r\nPopping 4.\r\nPopping 3.\r\nPopping 2.\r\nPopping 1.\r\nPassed.\r\nTest priorities...\r\nPushing 'hello' at priority 9.\r\nPushing 'how are you' at priority 4.\r\nPushing 'pretty good' at priority 100.\r\nPushing 'cosmic bro' at priority 4.\r\nPushing 'kthxbye' at priority 9.\r\nPopped 'pretty good' at priority 100. Expected 'pretty good' at priority 100.\r\nPopped 'hello' at priority 9. Expected 'hello' at priority 9.\r\n=================================================================\r\n==17078==ERROR: AddressSanitizer: global-buffer-overflow on address 0x000000550bc6 at pc 0x0000004b3f6b bp 0x7ffd804c4c90 sp 0x7ffd804c4440\r\nREAD of size 8 at 0x000000550bc6 thread T0\r\n #0 0x4b3f6a in __interceptor_memcmp.part.283 (/home/zyh/fprime/build-fprime-automatic-native-ut/bin/Linux/Os_pthreads+0x4b3f6a)\r\n #1 0x51d718 in main /home/zyh/fprime/Os/Pthreads/test/ut/BufferQueueTest.cpp:126:5\r\n #2 0x7f976cdadbf6 in __libc_start_main /build/glibc-S9d2JN/glibc-2.27/csu/../csu/libc-start.c:310\r\n #3 0x41ab89 in _start (/home/zyh/fprime/build-fprime-automatic-native-ut/bin/Linux/Os_pthreads+0x41ab89)\r\n\r\n0x000000550bc6 is located 58 bytes to the left of global variable '<string literal>' defined in '/home/zyh/fprime/Os/Pthreads/test/ut/BufferQueueTest.cpp:98:43' (0x550c00) of size 12\r\n '<string literal>' is ascii string 'how are you'\r\n0x000000550bc6 is located 0 bytes to the right of global variable '<string literal>' defined in '/home/zyh/fprime/Os/Pthreads/test/ut/BufferQueueTest.cpp:98:34' (0x550bc0) of size 6\r\n '<string literal>' is ascii string 'hello'\r\nSUMMARY: AddressSanitizer: global-buffer-overflow (/home/zyh/fprime/build-fprime-automatic-native-ut/bin/Linux/Os_pthreads+0x4b3f6a) in __interceptor_memcmp.part.283\r\nShadow bytes around the buggy address:\r\n 0x0000800a2120: f9 f9 f9 f9 00 00 00 00 00 00 00 01 f9 f9 f9 f9\r\n 0x0000800a2130: 00 00 05 f9 f9 f9 f9 f9 00 01 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2140: 00 00 04 f9 f9 f9 f9 f9 00 05 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2150: 00 00 04 f9 f9 f9 f9 f9 00 05 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2160: 00 05 f9 f9 f9 f9 f9 f9 00 00 04 f9 f9 f9 f9 f9\r\n=>0x0000800a2170: 00 00 04 f9 f9 f9 f9 f9[06]f9 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2180: 00 04 f9 f9 f9 f9 f9 f9 00 04 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2190: 00 03 f9 f9 f9 f9 f9 f9 00 f9 f9 f9 f9 f9 f9 f9\r\n 0x0000800a21a0: 00 00 00 06 f9 f9 f9 f9 00 00 04 f9 f9 f9 f9 f9\r\n 0x0000800a21b0: 00 00 00 00 00 00 00 03 f9 f9 f9 f9 00 04 f9 f9\r\n 0x0000800a21c0: f9 f9 f9 f9 00 00 00 00 00 00 00 00 00 00 00 00\r\nShadow byte legend (one shadow byte represents 8 application bytes):\r\n Addressable: 00\r\n Partially addressable: 01 02 03 04 05 06 07 \r\n Heap left redzone: fa\r\n Freed heap region: fd\r\n Stack left redzone: f1\r\n Stack mid redzone: f2\r\n Stack right redzone: f3\r\n Stack after return: f5\r\n Stack use after scope: f8\r\n Global redzone: f9\r\n Global init order: f6\r\n Poisoned by user: f7\r\n Container overflow: fc\r\n Array cookie: ac\r\n Intra object redzone: bb\r\n ASan internal: fe\r\n Left alloca redzone: ca\r\n Right alloca redzone: cb\r\n==17078==ABORTING\r\n```\r\n\r\n## How to Reproduce\r\n\r\n1. Compile unit test with ASAN\r\n2. Run build-fprime-automatic-native-ut/bin/Linux/Os_pthreads\r\n3. Program abort\r\n\r\n## Expected Behavior\r\n\r\nRun normally with ASAN.\r\n"</li><li>'toString doesn\'t display the last item in a struct\n| | |\r\n|:---|:---|\r\n|**F` version v3.1.1**| |\r\n|****| |\r\n---\r\n## Problem Description\r\n\r\nFiles generated by fpp tools doesn\'t produce the format string correctly and so the last member of a string doesn\'t show up as expected. For example \r\n\r\nThe take the following struct defined in fpp:\r\n```fpp\r\n enum CMD_STATUS : U16 {\r\n NA = 0,\r\n ACK = 0x5,\r\n NOT_ACK = 0x6,\r\n BUSY = 0x7,\r\n NCE = 0x8,\r\n STACK_FULL = 0x9,\r\n TEMP_NOT_ACC = 0x10\r\n };\r\n\r\n constant ESUP_HEADER_ID = 0x50555345\r\n\r\n# NOTE that in byte form it will be represented with LE\r\n struct EsupPacketHeader {\r\n HeaderId: U32\r\n ModuleId: U16\r\n DataLength: U16\r\n CmdStatus: CMD_STATUS\r\n } default {HeaderId = ESUP_HEADER_ID, CmdStatus=CMD_STATUS.NA}\r\n\r\n\r\n struct EsupStatusGet {\r\n Header: EsupPacketHeader\r\n CmdId: EsupCmdId\r\n TypeId: U16\r\n SystemState: U8 @< 1,2,3,4 (1 byte unsigned char value)\r\n StatusFlags: U8 @< 1 byte unsigned char value\r\n Reserved: U16 @< 2 byte unsigned short value\r\n CpuTemperature: F32 @< -40 to 125 °C (4 byte float value)\r\n FirmwareVersion: U32 @< firmware version (4 byte unsigned int value)\r\n } default {CmdId = EsupCmdId.ConfGet, TypeId = EsupCmdType.StatusRep}\r\n\r\n```\r\n\r\nI then go to print out the contents like so:\r\n\r\n```cpp\r\nstatic bool receiveEsupStatusResponse(int serialPort, EsupCmdId cmdId, boost::span<BYTE, GET_PADDED_SIZE(EsupStatusGet::SERIALIZED_SIZE)> txBuff) {\r\n // Wait for a reply\r\n EsupStatusGet cmdResult;\r\n\r\n size_t bytesRead = read(serialPort, txBuff.data(), txBuff.size());\r\n if (bytesRead > 0) {\r\n FW_CHECK(bytesRead <= txBuff.size(), "Error deserializing response", return false; );\r\n LE_ExternalDeSerializeBuffer responseBuff(txBuff.data(), bytesRead);\r\n cmdResult.deserialize(responseBuff);\r\n Fw::String ackString;\r\n cmdResult.toString(ackString);\r\n std::cout << std::string(ackString.toChar()) << std::endl;\r\n } else {\r\n std::cout << "No reply received." << std::endl;\r\n }\r\n\r\n return true;\r\n }\r\n```\r\n\r\nI get the following:\r\n\r\n```bash\r\n(Header = (HeaderId = 1347769157, ModuleId = 8203, DataLength = 0, CmdStatus = ), CmdId = ConfGet, TypeId = 0, SystemState = 162, StatusFlags = 187, Reserved = 53517, CpuTemperature = 0, FirmwareVersion = )\r\n```\r\n\r\nWhere I would normally expect to see something like this (the actual values don\'t matter for these purposes just that there is something to print):\r\n\r\n```bash\r\n(Header = (HeaderId = 1347769157, ModuleId = 8203, DataLength = 0, CmdStatus = ACK ), CmdId = ConfGet, TypeId = 0, SystemState = 162, StatusFlags = 187, Reserved = 53517, CpuTemperature = 0, FirmwareVersion = 102444)\r\n```\r\n\r\nI\'ve found this is consistent across pretty much all the fpp serializable objects and it really comes down to the last member of the struct doesn\'t have a format string generated along with it\r\n\r\nSo for example we see with the header type the following gets generated:\r\n\r\n```c++\r\nvoid EsupPacketHeader::toString(Fw::StringBase& text) const {\r\n\r\n static const char * formatString =\r\n "("\r\n "HeaderId = %u, "\r\n "ModuleId = %u, "\r\n "DataLength = %u, "\r\n "CmdStatus = "\r\n ")";\r\n\r\n // declare strings to hold any serializable toString() arguments\r\n\r\n\r\n Fw::String CmdStatusStr;\r\n this->m_CmdStatus.toString(CmdStatusStr);\r\n\r\n char outputString[FW_SERIALIZABLE_TO_STRING_BUFFER_SIZE];\r\n (void)snprintf(outputString,FW_SERIALIZABLE_TO_STRING_BUFFER_SIZE,formatString\r\n ,this->m_HeaderId\r\n ,this->m_ModuleId\r\n ,this->m_DataLength\r\n ,CmdStatusStr.toChar()\r\n );\r\n outputString[FW_SERIALIZABLE_TO_STRING_BUFFER_SIZE-1] = 0; // NULL terminate\r\n\r\n text = outputString;\r\n}\r\n```\r\n\r\nWith the CmdStatus missing the "```%s```".\r\n\r\nI\'m not really sure what the source of the error is since the xml seems to be fine:\r\n\r\n```xml\r\n<serializable namespace="FlightComputer" name="EsupPacketHeader">\r\n <import_enum_type>FlightComputer/TransmitterInterface/CMD_STATUSEnumAi.xml</import_enum_type>\r\n <members>\r\n <member name="HeaderId" type="U32" format="%u">\r\n <default>1347769157</default>\r\n </member>\r\n <member name="ModuleId" type="U16" format="%u">\r\n <default>0</default>\r\n </member>\r\n <member name="DataLength" type="U16" format="%u">\r\n <default>0</default>\r\n </member>\r\n <member name="CmdStatus" type="FlightComputer::CMD_STATUS" format="%s">\r\n <default>FlightComputer::CMD_STATUS::NA</default>\r\n </member>\r\n </members>\r\n</serializable>\r\n```\r\nI took a look in array_cpp.py and array_cpp.tmpl but couldn\'t make sense of the issue there.\r\n\r\nIf I can get some help on this that\'d be much appreciated.'</li><li>'Single quote in project parent folder causes installation errors\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| v3.4.3 |\r\n|**_Affected Component_**| Installation/Project Setup |\r\n---\r\n## Problem Description\r\n\r\nIf you name the parent directory of your project such that it has a single quote in it, you\'ll run into an error when you try to run \'fprime-util generate\' during basic project setup. \r\n\r\n## Context / Environment\r\n\r\n```\r\nOperating System: Linux\r\nCPU Architecture: x86_64\r\nPlatform: Linux-5.15.133.1-microsoft-standard-WSL2-x86_64-with-glibc2.35\r\nPython version: 3.10.12\r\nCMake version: 3.22.1\r\nPip version: 24.0\r\nPip packages:\r\n fprime-tools==3.4.4\r\n fprime-gds==3.4.3\r\n fprime-fpp-*==2.1.0a3\r\n```\r\n\r\n## How to Reproduce\r\n\r\n1. Create a parent directory containing a single quote (For instance, "F\'")\r\n2. Follow the basic project setup guide located here: https://fprime-community.github.io/fprime-tutorial-hello-world/docs/NewProject.html \r\n3. Once you hit \'fprime-util generate\', you should soon see an error about an unterminated quote from one of the fprime dependencies. \r\n\r\n## Expected Behavior\r\n\r\nIdeally, you\'d have a project set up properly in your virtual environment.\r\n'</li></ul> | | non-bug | <ul><li>"Install Upgraded PIP in new Project\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| |\r\n|**_Affected Component_**| |\r\n---\r\n## Feature Description\r\n\r\nOlder versions of PIP may pull in non-native tools package. We should upgrade PIP during the new project setup. Note: users who don't want this can still decline venv setup entirely."</li><li>'fprime-gds: default file downlink directory can overwrite files\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|[v3.4.3](https://github.com/fprime-community/fprime-gds/releases/tag/v3.4.3) |\r\n|**_Affected Component_**| n/a |\r\n---\r\n## Feature Description\r\n\r\nHave GDS created a time-tag directory for file downlinks.\r\n\r\nMake the default be `logs/<time tag>/fprime-downlink` to match telemetry and events.\r\n\r\nAlso, make a separate argument for uplink and downlink. It makes sense to automatically separate downlinked files, but uplink may wish to be in a common store.\r\n\r\n## Rationale\r\n\r\nThe default `/tmp/username` (or even overridden with `--file-storage-directory`) directory for file downlinks can cause new instances to overwrite older files, or cause parallel instances to clobber each other.\r\n'</li><li>'Update FileDownlink to report file progress\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|v3.4.3 |\r\n|**_Affected Component_**| `Svc/FileDownlink` |\r\n---\r\n## Feature Description\r\n\r\n`Svc/FileDownlink` currently only reports completion status back to the sender only when the file is complete. This will make re-transmitting data products inefficient since the Data Catalog has no idea how far partial transmits made it before the link dropped or the system was powered down. \r\n\r\nThis feature would report partial progress back to DP Catalog can restart transmits.\r\n\r\n## Rationale\r\n\r\nMake data product downlink management more efficient.\r\n'</li></ul> | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("setfit_model_id") # Run inference preds = model("Switch Framer and Deframer to use Mallocator Pattern | | | |:---|:---| |**_F´ Version_**| | |**_Affected Component_**| | --- ## Problem Description Mallocator pattern is preferred over member-allocated buffers.") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:---------|:-----| | Word count | 4 | 124.1383 | 2486 | | Label | Training Sample Count | |:--------|:----------------------| | bug | 296 | | non-bug | 304 | ### Training Hyperparameters - batch_size: (16, 2) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 20 - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0007 | 1 | 0.447 | - | | 0.0333 | 50 | 0.2333 | - | | 0.0667 | 100 | 0.083 | - | | 0.1 | 150 | 0.039 | - | | 0.1333 | 200 | 0.0354 | - | | 0.1667 | 250 | 0.0177 | - | | 0.2 | 300 | 0.0053 | - | | 0.2333 | 350 | 0.0004 | - | | 0.2667 | 400 | 0.0027 | - | | 0.3 | 450 | 0.0015 | - | | 0.3333 | 500 | 0.002 | - | | 0.3667 | 550 | 0.0003 | - | | 0.4 | 600 | 0.0001 | - | | 0.4333 | 650 | 0.0001 | - | | 0.4667 | 700 | 0.0001 | - | | 0.5 | 750 | 0.0001 | - | | 0.5333 | 800 | 0.0001 | - | | 0.5667 | 850 | 0.0001 | - | | 0.6 | 900 | 0.0001 | - | | 0.6333 | 950 | 0.0001 | - | | 0.6667 | 1000 | 0.0001 | - | | 0.7 | 1050 | 0.0 | - | | 0.7333 | 1100 | 0.0 | - | | 0.7667 | 1150 | 0.0001 | - | | 0.8 | 1200 | 0.0 | - | | 0.8333 | 1250 | 0.0001 | - | | 0.8667 | 1300 | 0.0 | - | | 0.9 | 1350 | 0.0 | - | | 0.9333 | 1400 | 0.0001 | - | | 0.9667 | 1450 | 0.0 | - | | 1.0 | 1500 | 0.0 | - | ### Framework Versions - Python: 3.11.6 - SetFit: 1.1.0 - Sentence Transformers: 3.0.1 - Transformers: 4.44.2 - PyTorch: 2.4.1+cu121 - Datasets: 2.21.0 - Tokenizers: 0.19.1 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit <!-- - **Sentence Transformer:** [Unknown](https://huggingface.co/unknown) --> - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 384 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:--------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | bug | <ul><li>"A global-buffer-overflow error in BufferQueueTest.cpp line 126\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|1.5 |\r\n|**_Affected Component_**| Os/Pthreads |\r\n---\r\n## Problem Description\r\n\r\nA global-buffer-overflow error in [BufferQueueTest.cpp line 126](https://github.com/nasa/fprime/blob/ada6e424b813e6434284bbde9f785656a61c8539/Os/Pthreads/test/ut/BufferQueueTest.cpp#L126)\r\n\r\n```\r\nzyh@virtual:~/fprime$ ./build-fprime-automatic-native-ut/bin/Linux/Os_pthreads\r\nCreating queue.\r\nTest empty queue...\r\nPassed.\r\nTest full queue...\r\nPushing 0.\r\nPushing 1.\r\nPushing 2.\r\nPushing 3.\r\nPushing 4.\r\nPassed.\r\nTest weird size...\r\nPassed.\r\nTest pop...\r\nPopping 5.\r\nPopping 4.\r\nPopping 3.\r\nPopping 2.\r\nPopping 1.\r\nPassed.\r\nTest priorities...\r\nPushing 'hello' at priority 9.\r\nPushing 'how are you' at priority 4.\r\nPushing 'pretty good' at priority 100.\r\nPushing 'cosmic bro' at priority 4.\r\nPushing 'kthxbye' at priority 9.\r\nPopped 'pretty good' at priority 100. Expected 'pretty good' at priority 100.\r\nPopped 'hello' at priority 9. Expected 'hello' at priority 9.\r\n=================================================================\r\n==17078==ERROR: AddressSanitizer: global-buffer-overflow on address 0x000000550bc6 at pc 0x0000004b3f6b bp 0x7ffd804c4c90 sp 0x7ffd804c4440\r\nREAD of size 8 at 0x000000550bc6 thread T0\r\n #0 0x4b3f6a in __interceptor_memcmp.part.283 (/home/zyh/fprime/build-fprime-automatic-native-ut/bin/Linux/Os_pthreads+0x4b3f6a)\r\n #1 0x51d718 in main /home/zyh/fprime/Os/Pthreads/test/ut/BufferQueueTest.cpp:126:5\r\n #2 0x7f976cdadbf6 in __libc_start_main /build/glibc-S9d2JN/glibc-2.27/csu/../csu/libc-start.c:310\r\n #3 0x41ab89 in _start (/home/zyh/fprime/build-fprime-automatic-native-ut/bin/Linux/Os_pthreads+0x41ab89)\r\n\r\n0x000000550bc6 is located 58 bytes to the left of global variable '<string literal>' defined in '/home/zyh/fprime/Os/Pthreads/test/ut/BufferQueueTest.cpp:98:43' (0x550c00) of size 12\r\n '<string literal>' is ascii string 'how are you'\r\n0x000000550bc6 is located 0 bytes to the right of global variable '<string literal>' defined in '/home/zyh/fprime/Os/Pthreads/test/ut/BufferQueueTest.cpp:98:34' (0x550bc0) of size 6\r\n '<string literal>' is ascii string 'hello'\r\nSUMMARY: AddressSanitizer: global-buffer-overflow (/home/zyh/fprime/build-fprime-automatic-native-ut/bin/Linux/Os_pthreads+0x4b3f6a) in __interceptor_memcmp.part.283\r\nShadow bytes around the buggy address:\r\n 0x0000800a2120: f9 f9 f9 f9 00 00 00 00 00 00 00 01 f9 f9 f9 f9\r\n 0x0000800a2130: 00 00 05 f9 f9 f9 f9 f9 00 01 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2140: 00 00 04 f9 f9 f9 f9 f9 00 05 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2150: 00 00 04 f9 f9 f9 f9 f9 00 05 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2160: 00 05 f9 f9 f9 f9 f9 f9 00 00 04 f9 f9 f9 f9 f9\r\n=>0x0000800a2170: 00 00 04 f9 f9 f9 f9 f9[06]f9 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2180: 00 04 f9 f9 f9 f9 f9 f9 00 04 f9 f9 f9 f9 f9 f9\r\n 0x0000800a2190: 00 03 f9 f9 f9 f9 f9 f9 00 f9 f9 f9 f9 f9 f9 f9\r\n 0x0000800a21a0: 00 00 00 06 f9 f9 f9 f9 00 00 04 f9 f9 f9 f9 f9\r\n 0x0000800a21b0: 00 00 00 00 00 00 00 03 f9 f9 f9 f9 00 04 f9 f9\r\n 0x0000800a21c0: f9 f9 f9 f9 00 00 00 00 00 00 00 00 00 00 00 00\r\nShadow byte legend (one shadow byte represents 8 application bytes):\r\n Addressable: 00\r\n Partially addressable: 01 02 03 04 05 06 07 \r\n Heap left redzone: fa\r\n Freed heap region: fd\r\n Stack left redzone: f1\r\n Stack mid redzone: f2\r\n Stack right redzone: f3\r\n Stack after return: f5\r\n Stack use after scope: f8\r\n Global redzone: f9\r\n Global init order: f6\r\n Poisoned by user: f7\r\n Container overflow: fc\r\n Array cookie: ac\r\n Intra object redzone: bb\r\n ASan internal: fe\r\n Left alloca redzone: ca\r\n Right alloca redzone: cb\r\n==17078==ABORTING\r\n```\r\n\r\n## How to Reproduce\r\n\r\n1. Compile unit test with ASAN\r\n2. Run build-fprime-automatic-native-ut/bin/Linux/Os_pthreads\r\n3. Program abort\r\n\r\n## Expected Behavior\r\n\r\nRun normally with ASAN.\r\n"</li><li>'toString doesn\'t display the last item in a struct\n| | |\r\n|:---|:---|\r\n|**F` version v3.1.1**| |\r\n|****| |\r\n---\r\n## Problem Description\r\n\r\nFiles generated by fpp tools doesn\'t produce the format string correctly and so the last member of a string doesn\'t show up as expected. For example \r\n\r\nThe take the following struct defined in fpp:\r\n```fpp\r\n enum CMD_STATUS : U16 {\r\n NA = 0,\r\n ACK = 0x5,\r\n NOT_ACK = 0x6,\r\n BUSY = 0x7,\r\n NCE = 0x8,\r\n STACK_FULL = 0x9,\r\n TEMP_NOT_ACC = 0x10\r\n };\r\n\r\n constant ESUP_HEADER_ID = 0x50555345\r\n\r\n# NOTE that in byte form it will be represented with LE\r\n struct EsupPacketHeader {\r\n HeaderId: U32\r\n ModuleId: U16\r\n DataLength: U16\r\n CmdStatus: CMD_STATUS\r\n } default {HeaderId = ESUP_HEADER_ID, CmdStatus=CMD_STATUS.NA}\r\n\r\n\r\n struct EsupStatusGet {\r\n Header: EsupPacketHeader\r\n CmdId: EsupCmdId\r\n TypeId: U16\r\n SystemState: U8 @< 1,2,3,4 (1 byte unsigned char value)\r\n StatusFlags: U8 @< 1 byte unsigned char value\r\n Reserved: U16 @< 2 byte unsigned short value\r\n CpuTemperature: F32 @< -40 to 125 °C (4 byte float value)\r\n FirmwareVersion: U32 @< firmware version (4 byte unsigned int value)\r\n } default {CmdId = EsupCmdId.ConfGet, TypeId = EsupCmdType.StatusRep}\r\n\r\n```\r\n\r\nI then go to print out the contents like so:\r\n\r\n```cpp\r\nstatic bool receiveEsupStatusResponse(int serialPort, EsupCmdId cmdId, boost::span<BYTE, GET_PADDED_SIZE(EsupStatusGet::SERIALIZED_SIZE)> txBuff) {\r\n // Wait for a reply\r\n EsupStatusGet cmdResult;\r\n\r\n size_t bytesRead = read(serialPort, txBuff.data(), txBuff.size());\r\n if (bytesRead > 0) {\r\n FW_CHECK(bytesRead <= txBuff.size(), "Error deserializing response", return false; );\r\n LE_ExternalDeSerializeBuffer responseBuff(txBuff.data(), bytesRead);\r\n cmdResult.deserialize(responseBuff);\r\n Fw::String ackString;\r\n cmdResult.toString(ackString);\r\n std::cout << std::string(ackString.toChar()) << std::endl;\r\n } else {\r\n std::cout << "No reply received." << std::endl;\r\n }\r\n\r\n return true;\r\n }\r\n```\r\n\r\nI get the following:\r\n\r\n```bash\r\n(Header = (HeaderId = 1347769157, ModuleId = 8203, DataLength = 0, CmdStatus = ), CmdId = ConfGet, TypeId = 0, SystemState = 162, StatusFlags = 187, Reserved = 53517, CpuTemperature = 0, FirmwareVersion = )\r\n```\r\n\r\nWhere I would normally expect to see something like this (the actual values don\'t matter for these purposes just that there is something to print):\r\n\r\n```bash\r\n(Header = (HeaderId = 1347769157, ModuleId = 8203, DataLength = 0, CmdStatus = ACK ), CmdId = ConfGet, TypeId = 0, SystemState = 162, StatusFlags = 187, Reserved = 53517, CpuTemperature = 0, FirmwareVersion = 102444)\r\n```\r\n\r\nI\'ve found this is consistent across pretty much all the fpp serializable objects and it really comes down to the last member of the struct doesn\'t have a format string generated along with it\r\n\r\nSo for example we see with the header type the following gets generated:\r\n\r\n```c++\r\nvoid EsupPacketHeader::toString(Fw::StringBase& text) const {\r\n\r\n static const char * formatString =\r\n "("\r\n "HeaderId = %u, "\r\n "ModuleId = %u, "\r\n "DataLength = %u, "\r\n "CmdStatus = "\r\n ")";\r\n\r\n // declare strings to hold any serializable toString() arguments\r\n\r\n\r\n Fw::String CmdStatusStr;\r\n this->m_CmdStatus.toString(CmdStatusStr);\r\n\r\n char outputString[FW_SERIALIZABLE_TO_STRING_BUFFER_SIZE];\r\n (void)snprintf(outputString,FW_SERIALIZABLE_TO_STRING_BUFFER_SIZE,formatString\r\n ,this->m_HeaderId\r\n ,this->m_ModuleId\r\n ,this->m_DataLength\r\n ,CmdStatusStr.toChar()\r\n );\r\n outputString[FW_SERIALIZABLE_TO_STRING_BUFFER_SIZE-1] = 0; // NULL terminate\r\n\r\n text = outputString;\r\n}\r\n```\r\n\r\nWith the CmdStatus missing the "```%s```".\r\n\r\nI\'m not really sure what the source of the error is since the xml seems to be fine:\r\n\r\n```xml\r\n<serializable namespace="FlightComputer" name="EsupPacketHeader">\r\n <import_enum_type>FlightComputer/TransmitterInterface/CMD_STATUSEnumAi.xml</import_enum_type>\r\n <members>\r\n <member name="HeaderId" type="U32" format="%u">\r\n <default>1347769157</default>\r\n </member>\r\n <member name="ModuleId" type="U16" format="%u">\r\n <default>0</default>\r\n </member>\r\n <member name="DataLength" type="U16" format="%u">\r\n <default>0</default>\r\n </member>\r\n <member name="CmdStatus" type="FlightComputer::CMD_STATUS" format="%s">\r\n <default>FlightComputer::CMD_STATUS::NA</default>\r\n </member>\r\n </members>\r\n</serializable>\r\n```\r\nI took a look in array_cpp.py and array_cpp.tmpl but couldn\'t make sense of the issue there.\r\n\r\nIf I can get some help on this that\'d be much appreciated.'</li><li>'Single quote in project parent folder causes installation errors\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| v3.4.3 |\r\n|**_Affected Component_**| Installation/Project Setup |\r\n---\r\n## Problem Description\r\n\r\nIf you name the parent directory of your project such that it has a single quote in it, you\'ll run into an error when you try to run \'fprime-util generate\' during basic project setup. \r\n\r\n## Context / Environment\r\n\r\n```\r\nOperating System: Linux\r\nCPU Architecture: x86_64\r\nPlatform: Linux-5.15.133.1-microsoft-standard-WSL2-x86_64-with-glibc2.35\r\nPython version: 3.10.12\r\nCMake version: 3.22.1\r\nPip version: 24.0\r\nPip packages:\r\n fprime-tools==3.4.4\r\n fprime-gds==3.4.3\r\n fprime-fpp-*==2.1.0a3\r\n```\r\n\r\n## How to Reproduce\r\n\r\n1. Create a parent directory containing a single quote (For instance, "F\'")\r\n2. Follow the basic project setup guide located here: https://fprime-community.github.io/fprime-tutorial-hello-world/docs/NewProject.html \r\n3. Once you hit \'fprime-util generate\', you should soon see an error about an unterminated quote from one of the fprime dependencies. \r\n\r\n## Expected Behavior\r\n\r\nIdeally, you\'d have a project set up properly in your virtual environment.\r\n'</li></ul> | | non-bug | <ul><li>"Install Upgraded PIP in new Project\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| |\r\n|**_Affected Component_**| |\r\n---\r\n## Feature Description\r\n\r\nOlder versions of PIP may pull in non-native tools package. We should upgrade PIP during the new project setup. Note: users who don't want this can still decline venv setup entirely."</li><li>'fprime-gds: default file downlink directory can overwrite files\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|[v3.4.3](https://github.com/fprime-community/fprime-gds/releases/tag/v3.4.3) |\r\n|**_Affected Component_**| n/a |\r\n---\r\n## Feature Description\r\n\r\nHave GDS created a time-tag directory for file downlinks.\r\n\r\nMake the default be `logs/<time tag>/fprime-downlink` to match telemetry and events.\r\n\r\nAlso, make a separate argument for uplink and downlink. It makes sense to automatically separate downlinked files, but uplink may wish to be in a common store.\r\n\r\n## Rationale\r\n\r\nThe default `/tmp/username` (or even overridden with `--file-storage-directory`) directory for file downlinks can cause new instances to overwrite older files, or cause parallel instances to clobber each other.\r\n'</li><li>'Update FileDownlink to report file progress\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|v3.4.3 |\r\n|**_Affected Component_**| `Svc/FileDownlink` |\r\n---\r\n## Feature Description\r\n\r\n`Svc/FileDownlink` currently only reports completion status back to the sender only when the file is complete. This will make re-transmitting data products inefficient since the Data Catalog has no idea how far partial transmits made it before the link dropped or the system was powered down. \r\n\r\nThis feature would report partial progress back to DP Catalog can restart transmits.\r\n\r\n## Rationale\r\n\r\nMake data product downlink management more efficient.\r\n'</li></ul> | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("setfit_model_id") # Run inference preds = model("Switch Framer and Deframer to use Mallocator Pattern | | | |:---|:---| |**_F´ Version_**| | |**_Affected Component_**| | --- ## Problem Description Mallocator pattern is preferred over member-allocated buffers.") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:---------|:-----| | Word count | 4 | 124.1383 | 2486 | | Label | Training Sample Count | |:--------|:----------------------| | bug | 296 | | non-bug | 304 | ### Training Hyperparameters - batch_size: (16, 2) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 20 - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0007 | 1 | 0.447 | - | | 0.0333 | 50 | 0.2333 | - | | 0.0667 | 100 | 0.083 | - | | 0.1 | 150 | 0.039 | - | | 0.1333 | 200 | 0.0354 | - | | 0.1667 | 250 | 0.0177 | - | | 0.2 | 300 | 0.0053 | - | | 0.2333 | 350 | 0.0004 | - | | 0.2667 | 400 | 0.0027 | - | | 0.3 | 450 | 0.0015 | - | | 0.3333 | 500 | 0.002 | - | | 0.3667 | 550 | 0.0003 | - | | 0.4 | 600 | 0.0001 | - | | 0.4333 | 650 | 0.0001 | - | | 0.4667 | 700 | 0.0001 | - | | 0.5 | 750 | 0.0001 | - | | 0.5333 | 800 | 0.0001 | - | | 0.5667 | 850 | 0.0001 | - | | 0.6 | 900 | 0.0001 | - | | 0.6333 | 950 | 0.0001 | - | | 0.6667 | 1000 | 0.0001 | - | | 0.7 | 1050 | 0.0 | - | | 0.7333 | 1100 | 0.0 | - | | 0.7667 | 1150 | 0.0001 | - | | 0.8 | 1200 | 0.0 | - | | 0.8333 | 1250 | 0.0001 | - | | 0.8667 | 1300 | 0.0 | - | | 0.9 | 1350 | 0.0 | - | | 0.9333 | 1400 | 0.0001 | - | | 0.9667 | 1450 | 0.0 | - | | 1.0 | 1500 | 0.0 | - | ### Framework Versions - Python: 3.11.6 - SetFit: 1.1.0 - Sentence Transformers: 3.0.1 - Transformers: 4.44.2 - PyTorch: 2.4.1+cu121 - Datasets: 2.21.0 - Tokenizers: 0.19.1 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "Throttle send frame EVR\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**|commit d3fa31c |\r\n|**_Affected Component_**| ? |\r\n---\r\n## Problem Description\r\n\r\nA description of the problem with sufficient detail to understand the issue.\r\n\r\nIf there is no ground system, the interface continuously sends this pair of EVRs:\r\n\r\n```\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data: 0\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data: 0\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data: 0\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data: 0\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data: 0\r\n0x201db690 (TV_TLM): [ERROR] Failed to send framed data: 0\r\n0x202236f0 (TV_ReceiveTask): [WARNING] Failed to open port with status 61 and errno 0\r\n```\r\n\r\n## How to Reproduce\r\n\r\n1. Run Ref without the ground system\r\n2.\r\n3.\r\n\r\n## Expected Behavior\r\n\r\nIMHO the EVR should throttle, and perhaps the throttle is reset when the connection is make.\r\n"}, {"text": "Color-coding interlaced Events in the API Log\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| |\r\n|**_Affected Component_**| |\r\n---\r\nOne feature that wasn't completed this summer was to color-code interlaced event logs based on severity. Presently, interlacing events are implemented by making the API a consumer of the event decoder in the GDS and then filtering events. Modifying the color of these log messages can be done [here](https://github.com/nasa/fprime/blob/717bc6fab85c53680108fc961cad6338e779816f/Gds/src/fprime_gds/common/testing_fw/api.py#L1258).\r\n"}, {"text": "Switch Framer and Deframer to use Mallocator Pattern\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| |\r\n|**_Affected Component_**| |\r\n---\r\n## Problem Description\r\n\r\nMallocator pattern is preferred over member-allocated buffers."}, {"text": "Ninja support for fprime-tools\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| |\r\n|**_Affected Component_**| |\r\n---\r\n## Problem Description\r\n\r\nThere are a couple places in fprime-tools where things would break if Ninja was used instead of Make. We need to fix that, as Ninja is usually much faster.\r\ne.g. [this](https://github.com/fprime-community/fprime-tools/blob/0a9fdf58ce4b428d407ab264f7266041808237c8/src/fprime/fbuild/cmake.py#L133) is Make-specific output, Ninja formats it differently\r\n\r\n## Expected Behavior\r\n\r\nSupport Ninja with fprime-tools. Add a convenient option to chose which one to use.\r\n"}, {"text": "Build A Frame Reassembler\n| | |\r\n|:---|:---|\r\n|**_F´ Version_**| |\r\n|**_Affected Component_**| |\r\n---\r\n## Feature Description\r\n\r\nBuild a component that can be used to reassemble communication frames given protocol information. This will break-off this functionality from the Deframer.\r\n\r\nBasic requirements:\r\n1. Accept incoming Fw::Buffers of any size\r\n2. Accumulate buffers in-order\r\n3. Call frame detector helper class\r\n4. On \"NO_FRAME\" discard first byte and try again\r\n5. On \"NEED DATA\" continue to accumulate data\r\n6. On \"FRAME\" allocate buffer, copy-out frame\r\n\r\nHelper class requirements:\r\n1. Must implement `Enum detect_frame(const CircularBuffer& buffer, FwSizeType& size_output)` method\r\n2. Cannot alter circular buffer (uses peeks)\r\n3. Must set `size_output` when data is needed and when frame detected\r\n "}], "inference": true}
task
[ "TEXT_CLASSIFICATION" ]
41,456
Nextcloud-AI/opus-mt-fr-es
Nextcloud-AI
translation
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-02-23T10:43:18Z
2023-08-16T11:36:22+00:00
8
0
--- license: apache-2.0 tags: - translation --- ### opus-mt-fr-es * source languages: fr * target languages: es * OPUS readme: [fr-es](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/fr-es/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-09.zip](https://object.pouta.csc.fi/OPUS-MT-models/fr-es/opus-2020-01-09.zip) * test set translations: [opus-2020-01-09.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/fr-es/opus-2020-01-09.test.txt) * test set scores: [opus-2020-01-09.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/fr-es/opus-2020-01-09.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | newssyscomb2009.fr.es | 34.3 | 0.601 | | news-test2008.fr.es | 32.5 | 0.583 | | newstest2009.fr.es | 31.6 | 0.586 | | newstest2010.fr.es | 36.5 | 0.616 | | newstest2011.fr.es | 38.3 | 0.622 | | newstest2012.fr.es | 38.1 | 0.619 | | newstest2013.fr.es | 34.0 | 0.587 | | Tatoeba.fr.es | 53.2 | 0.709 |
null
Non_BioNLP
### opus-mt-fr-es * source languages: fr * target languages: es * OPUS readme: [fr-es](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/fr-es/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-09.zip](https://object.pouta.csc.fi/OPUS-MT-models/fr-es/opus-2020-01-09.zip) * test set translations: [opus-2020-01-09.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/fr-es/opus-2020-01-09.test.txt) * test set scores: [opus-2020-01-09.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/fr-es/opus-2020-01-09.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | newssyscomb2009.fr.es | 34.3 | 0.601 | | news-test2008.fr.es | 32.5 | 0.583 | | newstest2009.fr.es | 31.6 | 0.586 | | newstest2010.fr.es | 36.5 | 0.616 | | newstest2011.fr.es | 38.3 | 0.622 | | newstest2012.fr.es | 38.1 | 0.619 | | newstest2013.fr.es | 34.0 | 0.587 | | Tatoeba.fr.es | 53.2 | 0.709 |
{"license": "apache-2.0", "tags": ["translation"]}
task
[ "TRANSLATION" ]
41,457
lxyuan/span-marker-bert-base-multilingual-cased-multinerd
lxyuan
token-classification
[ "span-marker", "pytorch", "safetensors", "generated_from_trainer", "ner", "named-entity-recognition", "token-classification", "de", "en", "es", "fr", "it", "nl", "pl", "pt", "ru", "zh", "dataset:Babelscape/multinerd", "base_model:google-bert/bert-base-multilingual-cased", "base_model:finetune:google-bert/bert-base-multilingual-cased", "license:cc-by-nc-sa-4.0", "model-index", "region:us" ]
2023-08-09T09:56:24Z
2025-03-01T07:22:30+00:00
100
12
--- base_model: bert-base-multilingual-cased datasets: - Babelscape/multinerd language: - de - en - es - fr - it - nl - pl - pt - ru - zh license: cc-by-nc-sa-4.0 metrics: - precision - recall - f1 pipeline_tag: token-classification tags: - generated_from_trainer - ner - named-entity-recognition - span-marker widget: - text: Amelia Earthart flog mit ihrer einmotorigen Lockheed Vega 5B über den Atlantik nach Paris. example_title: German - text: Amelia Earhart flew her single engine Lockheed Vega 5B across the Atlantic to Paris. example_title: English - text: Amelia Earthart voló su Lockheed Vega 5B monomotor a través del Océano Atlántico hasta París. example_title: Spanish - text: Amelia Earthart a fait voler son monomoteur Lockheed Vega 5B à travers l'ocean Atlantique jusqu'à Paris. example_title: French - text: Amelia Earhart ha volato con il suo monomotore Lockheed Vega 5B attraverso l'Atlantico fino a Parigi. example_title: Italian - text: Amelia Earthart vloog met haar één-motorige Lockheed Vega 5B over de Atlantische Oceaan naar Parijs. example_title: Dutch - text: Amelia Earthart przeleciała swoim jednosilnikowym samolotem Lockheed Vega 5B przez Ocean Atlantycki do Paryża. example_title: Polish - text: Amelia Earhart voou em seu monomotor Lockheed Vega 5B através do Atlântico para Paris. example_title: Portuguese - text: Амелия Эртхарт перелетела на своем одномоторном самолете Lockheed Vega 5B через Атлантический океан в Париж. example_title: Russian - text: Amelia Earthart flaug eins hreyfils Lockheed Vega 5B yfir Atlantshafið til Parísar. example_title: Icelandic - text: Η Amelia Earthart πέταξε το μονοκινητήριο Lockheed Vega 5B της πέρα ​​από τον Ατλαντικό Ωκεανό στο Παρίσι. example_title: Greek - text: Amelia Earhartová přeletěla se svým jednomotorovým Lockheed Vega 5B přes Atlantik do Paříže. example_title: Czech - text: Amelia Earhart lensi yksimoottorisella Lockheed Vega 5B:llä Atlantin yli Pariisiin. example_title: Finnish - text: Amelia Earhart fløj med sin enmotoriske Lockheed Vega 5B over Atlanten til Paris. example_title: Danish - text: Amelia Earhart flög sin enmotoriga Lockheed Vega 5B över Atlanten till Paris. example_title: Swedish - text: Amelia Earhart fløy sin enmotoriske Lockheed Vega 5B over Atlanterhavet til Paris. example_title: Norwegian - text: Amelia Earhart și-a zburat cu un singur motor Lockheed Vega 5B peste Atlantic până la Paris. example_title: Romanian - text: Amelia Earhart menerbangkan mesin tunggal Lockheed Vega 5B melintasi Atlantik ke Paris. example_title: Indonesian - text: Амелія Эрхарт пераляцела на сваім аднаматорным Lockheed Vega 5B праз Атлантыку ў Парыж. example_title: Belarusian - text: Амелія Ергарт перелетіла на своєму одномоторному літаку Lockheed Vega 5B через Атлантику до Парижа. example_title: Ukrainian - text: Amelia Earhart preletjela je svojim jednomotornim zrakoplovom Lockheed Vega 5B preko Atlantika do Pariza. example_title: Croatian - text: Amelia Earhart lendas oma ühemootoriga Lockheed Vega 5B üle Atlandi ookeani Pariisi . example_title: Estonian model-index: - name: span-marker-bert-base-multilingual-cased-multinerd results: - task: type: token-classification name: Named Entity Recognition dataset: name: MultiNERD type: Babelscape/multinerd split: test revision: 2814b78e7af4b5a1f1886fe7ad49632de4d9dd25 metrics: - type: f1 value: 0.927 name: F1 - type: precision value: 0.9281 name: Precision - type: recall value: 0.9259 name: Recall --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # span-marker-bert-base-multilingual-cased-multinerd This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an [Babelscape/multinerd](https://huggingface.co/datasets/Babelscape/multinerd) dataset. Is your data not (always) capitalized correctly? Then consider using the uncased variant of this model instead for better performance: [lxyuan/span-marker-bert-base-multilingual-uncased-multinerd](https://huggingface.co/lxyuan/span-marker-bert-base-multilingual-uncased-multinerd). This model achieves the following results on the evaluation set: - Loss: 0.0049 - Overall Precision: 0.9242 - Overall Recall: 0.9281 - Overall F1: 0.9261 - Overall Accuracy: 0.9852 Test set results: - test_loss: 0.005226554349064827, - test_overall_accuracy: 0.9851129807294873, - test_overall_f1: 0.9270450073152169, - test_overall_precision: 0.9281906912835416, - test_overall_recall: 0.9259021481405626, - test_runtime: 2690.9722, - test_samples_per_second: 150.748, - test_steps_per_second: 4.711 This is a replication of Tom's work. Everything remains unchanged, except that we extended the number of training epochs to 3 for a slightly longer training duration and set the gradient_accumulation_steps to 2. Please refer to the official [model page](https://huggingface.co/tomaarsen/span-marker-mbert-base-multinerd) to review their results and training script ## Results: | **Language** | **Precision** | **Recall** | **F1** | |--------------|---------------|------------|------------| | **all** | 92.42 | 92.81 | **92.61** | | **de** | 95.03 | 95.07 | **95.05** | | **en** | 95.00 | 95.40 | **95.20** | | **es** | 92.05 | 91.37 | **91.71** | | **fr** | 92.37 | 91.41 | **91.89** | | **it** | 91.45 | 93.15 | **92.29** | | **nl** | 93.85 | 92.98 | **93.41** | | **pl** | 93.13 | 92.66 | **92.89** | | **pt** | 93.60 | 92.50 | **93.05** | | **ru** | 93.25 | 93.32 | **93.29** | | **zh** | 89.47 | 88.40 | **88.93** | - Special thanks to Tom for creating the evaluation script and generating the [results](https://huggingface.co/lxyuan/span-marker-bert-base-multilingual-cased-multinerd/discussions/1). ## Label set | Class | Description | Examples | |-------|-------------|----------| | **PER (person)** | People | Ray Charles, Jessica Alba, Leonardo DiCaprio, Roger Federer, Anna Massey. | | **ORG (organization)** | Associations, companies, agencies, institutions, nationalities and religious or political groups | University of Edinburgh, San Francisco Giants, Google, Democratic Party. | | **LOC (location)** | Physical locations (e.g. mountains, bodies of water), geopolitical entities (e.g. cities, states), and facilities (e.g. bridges, buildings, airports). | Rome, Lake Paiku, Chrysler Building, Mount Rushmore, Mississippi River. | | **ANIM (animal)** | Breeds of dogs, cats and other animals, including their scientific names. | Maine Coon, African Wild Dog, Great White Shark, New Zealand Bellbird. | | **BIO (biological)** | Genus of fungus, bacteria and protoctists, families of viruses, and other biological entities. | Herpes Simplex Virus, Escherichia Coli, Salmonella, Bacillus Anthracis. | | **CEL (celestial)** | Planets, stars, asteroids, comets, nebulae, galaxies and other astronomical objects. | Sun, Neptune, Asteroid 187 Lamberta, Proxima Centauri, V838 Monocerotis. | | **DIS (disease)** | Physical, mental, infectious, non-infectious, deficiency, inherited, degenerative, social and self-inflicted diseases. | Alzheimer’s Disease, Cystic Fibrosis, Dilated Cardiomyopathy, Arthritis. | | **EVE (event)** | Sport events, battles, wars and other events. | American Civil War, 2003 Wimbledon Championships, Cannes Film Festival. | | **FOOD (food)** | Foods and drinks. | Carbonara, Sangiovese, Cheddar Beer Fondue, Pizza Margherita. | | **INST (instrument)** | Technological instruments, mechanical instruments, musical instruments, and other tools. | Spitzer Space Telescope, Commodore 64, Skype, Apple Watch, Fender Stratocaster. | | **MEDIA (media)** | Titles of films, books, magazines, songs and albums, fictional characters and languages. | Forbes, American Psycho, Kiss Me Once, Twin Peaks, Disney Adventures. | | **PLANT (plant)** | Types of trees, flowers, and other plants, including their scientific names. | Salix, Quercus Petraea, Douglas Fir, Forsythia, Artemisia Maritima. | | **MYTH (mythological)** | Mythological and religious entities. | Apollo, Persephone, Aphrodite, Saint Peter, Pope Gregory I, Hercules. | | **TIME (time)** | Specific and well-defined time intervals, such as eras, historical periods, centuries, years and important days. No months and days of the week. | Renaissance, Middle Ages, Christmas, Great Depression, 17th Century, 2012. | | **VEHI (vehicle)** | Cars, motorcycles and other vehicles. | Ferrari Testarossa, Suzuki Jimny, Honda CR-X, Boeing 747, Fairey Fulmar. | ## Inference Example ```python # install span_marker (env)$ pip install span_marker from span_marker import SpanMarkerModel model = SpanMarkerModel.from_pretrained("lxyuan/span-marker-bert-base-multilingual-cased-multinerd") description = "Singapore is renowned for its hawker centers offering dishes \ like Hainanese chicken rice and laksa, while Malaysia boasts dishes such as \ nasi lemak and rendang, reflecting its rich culinary heritage." entities = model.predict(description) entities >>> [ {'span': 'Singapore', 'label': 'LOC', 'score': 0.999988317489624, 'char_start_index': 0, 'char_end_index': 9}, {'span': 'Hainanese chicken rice', 'label': 'FOOD', 'score': 0.9894770383834839, 'char_start_index': 66, 'char_end_index': 88}, {'span': 'laksa', 'label': 'FOOD', 'score': 0.9224908947944641, 'char_start_index': 93, 'char_end_index': 98}, {'span': 'Malaysia', 'label': 'LOC', 'score': 0.9999839067459106, 'char_start_index': 106, 'char_end_index': 114}] # missed: nasi lemak as FOOD # missed: rendang as FOOD # :( ``` #### Quick test on Chinese ```python from span_marker import SpanMarkerModel model = SpanMarkerModel.from_pretrained("lxyuan/span-marker-bert-base-multilingual-cased-multinerd") # translate to chinese description = "Singapore is renowned for its hawker centers offering dishes \ like Hainanese chicken rice and laksa, while Malaysia boasts dishes such as \ nasi lemak and rendang, reflecting its rich culinary heritage." zh_description = "新加坡因其小贩中心提供海南鸡饭和叻沙等菜肴而闻名, 而马来西亚则拥有椰浆饭和仁当等菜肴,反映了其丰富的烹饪传统." entities = model.predict(zh_description) entities >>> [ {'span': '新加坡', 'label': 'LOC', 'score': 0.9282007813453674, 'char_start_index': 0, 'char_end_index': 3}, {'span': '马来西亚', 'label': 'LOC', 'score': 0.7439665794372559, 'char_start_index': 27, 'char_end_index': 31}] # It only managed to capture two countries: Singapore and Malaysia. # All other entities were missed out. ``` ## Training procedure One can reproduce the result running this [script](https://huggingface.co/tomaarsen/span-marker-mbert-base-multinerd/blob/main/train.py) ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:------:|:---------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 0.0129 | 1.0 | 50436 | 0.0042 | 0.9226 | 0.9169 | 0.9197 | 0.9837 | | 0.0027 | 2.0 | 100873 | 0.0043 | 0.9255 | 0.9206 | 0.9230 | 0.9846 | | 0.0015 | 3.0 | 151308 | 0.0049 | 0.9242 | 0.9281 | 0.9261 | 0.9852 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu117 - Datasets 2.14.3 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # span-marker-bert-base-multilingual-cased-multinerd This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an [Babelscape/multinerd](https://huggingface.co/datasets/Babelscape/multinerd) dataset. Is your data not (always) capitalized correctly? Then consider using the uncased variant of this model instead for better performance: [lxyuan/span-marker-bert-base-multilingual-uncased-multinerd](https://huggingface.co/lxyuan/span-marker-bert-base-multilingual-uncased-multinerd). This model achieves the following results on the evaluation set: - Loss: 0.0049 - Overall Precision: 0.9242 - Overall Recall: 0.9281 - Overall F1: 0.9261 - Overall Accuracy: 0.9852 Test set results: - test_loss: 0.005226554349064827, - test_overall_accuracy: 0.9851129807294873, - test_overall_f1: 0.9270450073152169, - test_overall_precision: 0.9281906912835416, - test_overall_recall: 0.9259021481405626, - test_runtime: 2690.9722, - test_samples_per_second: 150.748, - test_steps_per_second: 4.711 This is a replication of Tom's work. Everything remains unchanged, except that we extended the number of training epochs to 3 for a slightly longer training duration and set the gradient_accumulation_steps to 2. Please refer to the official [model page](https://huggingface.co/tomaarsen/span-marker-mbert-base-multinerd) to review their results and training script ## Results: | **Language** | **Precision** | **Recall** | **F1** | |--------------|---------------|------------|------------| | **all** | 92.42 | 92.81 | **92.61** | | **de** | 95.03 | 95.07 | **95.05** | | **en** | 95.00 | 95.40 | **95.20** | | **es** | 92.05 | 91.37 | **91.71** | | **fr** | 92.37 | 91.41 | **91.89** | | **it** | 91.45 | 93.15 | **92.29** | | **nl** | 93.85 | 92.98 | **93.41** | | **pl** | 93.13 | 92.66 | **92.89** | | **pt** | 93.60 | 92.50 | **93.05** | | **ru** | 93.25 | 93.32 | **93.29** | | **zh** | 89.47 | 88.40 | **88.93** | - Special thanks to Tom for creating the evaluation script and generating the [results](https://huggingface.co/lxyuan/span-marker-bert-base-multilingual-cased-multinerd/discussions/1). ## Label set | Class | Description | Examples | |-------|-------------|----------| | **PER (person)** | People | Ray Charles, Jessica Alba, Leonardo DiCaprio, Roger Federer, Anna Massey. | | **ORG (organization)** | Associations, companies, agencies, institutions, nationalities and religious or political groups | University of Edinburgh, San Francisco Giants, Google, Democratic Party. | | **LOC (location)** | Physical locations (e.g. mountains, bodies of water), geopolitical entities (e.g. cities, states), and facilities (e.g. bridges, buildings, airports). | Rome, Lake Paiku, Chrysler Building, Mount Rushmore, Mississippi River. | | **ANIM (animal)** | Breeds of dogs, cats and other animals, including their scientific names. | Maine Coon, African Wild Dog, Great White Shark, New Zealand Bellbird. | | **BIO (biological)** | Genus of fungus, bacteria and protoctists, families of viruses, and other biological entities. | Herpes Simplex Virus, Escherichia Coli, Salmonella, Bacillus Anthracis. | | **CEL (celestial)** | Planets, stars, asteroids, comets, nebulae, galaxies and other astronomical objects. | Sun, Neptune, Asteroid 187 Lamberta, Proxima Centauri, V838 Monocerotis. | | **DIS (disease)** | Physical, mental, infectious, non-infectious, deficiency, inherited, degenerative, social and self-inflicted diseases. | Alzheimer’s Disease, Cystic Fibrosis, Dilated Cardiomyopathy, Arthritis. | | **EVE (event)** | Sport events, battles, wars and other events. | American Civil War, 2003 Wimbledon Championships, Cannes Film Festival. | | **FOOD (food)** | Foods and drinks. | Carbonara, Sangiovese, Cheddar Beer Fondue, Pizza Margherita. | | **INST (instrument)** | Technological instruments, mechanical instruments, musical instruments, and other tools. | Spitzer Space Telescope, Commodore 64, Skype, Apple Watch, Fender Stratocaster. | | **MEDIA (media)** | Titles of films, books, magazines, songs and albums, fictional characters and languages. | Forbes, American Psycho, Kiss Me Once, Twin Peaks, Disney Adventures. | | **PLANT (plant)** | Types of trees, flowers, and other plants, including their scientific names. | Salix, Quercus Petraea, Douglas Fir, Forsythia, Artemisia Maritima. | | **MYTH (mythological)** | Mythological and religious entities. | Apollo, Persephone, Aphrodite, Saint Peter, Pope Gregory I, Hercules. | | **TIME (time)** | Specific and well-defined time intervals, such as eras, historical periods, centuries, years and important days. No months and days of the week. | Renaissance, Middle Ages, Christmas, Great Depression, 17th Century, 2012. | | **VEHI (vehicle)** | Cars, motorcycles and other vehicles. | Ferrari Testarossa, Suzuki Jimny, Honda CR-X, Boeing 747, Fairey Fulmar. | ## Inference Example ```python # install span_marker (env)$ pip install span_marker from span_marker import SpanMarkerModel model = SpanMarkerModel.from_pretrained("lxyuan/span-marker-bert-base-multilingual-cased-multinerd") description = "Singapore is renowned for its hawker centers offering dishes \ like Hainanese chicken rice and laksa, while Malaysia boasts dishes such as \ nasi lemak and rendang, reflecting its rich culinary heritage." entities = model.predict(description) entities >>> [ {'span': 'Singapore', 'label': 'LOC', 'score': 0.999988317489624, 'char_start_index': 0, 'char_end_index': 9}, {'span': 'Hainanese chicken rice', 'label': 'FOOD', 'score': 0.9894770383834839, 'char_start_index': 66, 'char_end_index': 88}, {'span': 'laksa', 'label': 'FOOD', 'score': 0.9224908947944641, 'char_start_index': 93, 'char_end_index': 98}, {'span': 'Malaysia', 'label': 'LOC', 'score': 0.9999839067459106, 'char_start_index': 106, 'char_end_index': 114}] # missed: nasi lemak as FOOD # missed: rendang as FOOD # :( ``` #### Quick test on Chinese ```python from span_marker import SpanMarkerModel model = SpanMarkerModel.from_pretrained("lxyuan/span-marker-bert-base-multilingual-cased-multinerd") # translate to chinese description = "Singapore is renowned for its hawker centers offering dishes \ like Hainanese chicken rice and laksa, while Malaysia boasts dishes such as \ nasi lemak and rendang, reflecting its rich culinary heritage." zh_description = "新加坡因其小贩中心提供海南鸡饭和叻沙等菜肴而闻名, 而马来西亚则拥有椰浆饭和仁当等菜肴,反映了其丰富的烹饪传统." entities = model.predict(zh_description) entities >>> [ {'span': '新加坡', 'label': 'LOC', 'score': 0.9282007813453674, 'char_start_index': 0, 'char_end_index': 3}, {'span': '马来西亚', 'label': 'LOC', 'score': 0.7439665794372559, 'char_start_index': 27, 'char_end_index': 31}] # It only managed to capture two countries: Singapore and Malaysia. # All other entities were missed out. ``` ## Training procedure One can reproduce the result running this [script](https://huggingface.co/tomaarsen/span-marker-mbert-base-multinerd/blob/main/train.py) ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:------:|:---------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 0.0129 | 1.0 | 50436 | 0.0042 | 0.9226 | 0.9169 | 0.9197 | 0.9837 | | 0.0027 | 2.0 | 100873 | 0.0043 | 0.9255 | 0.9206 | 0.9230 | 0.9846 | | 0.0015 | 3.0 | 151308 | 0.0049 | 0.9242 | 0.9281 | 0.9261 | 0.9852 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu117 - Datasets 2.14.3 - Tokenizers 0.13.3
{"base_model": "bert-base-multilingual-cased", "datasets": ["Babelscape/multinerd"], "language": ["de", "en", "es", "fr", "it", "nl", "pl", "pt", "ru", "zh"], "license": "cc-by-nc-sa-4.0", "metrics": ["precision", "recall", "f1"], "pipeline_tag": "token-classification", "tags": ["generated_from_trainer", "ner", "named-entity-recognition", "span-marker"], "widget": [{"text": "Amelia Earthart flog mit ihrer einmotorigen Lockheed Vega 5B über den Atlantik nach Paris.", "example_title": "German"}, {"text": "Amelia Earhart flew her single engine Lockheed Vega 5B across the Atlantic to Paris.", "example_title": "English"}, {"text": "Amelia Earthart voló su Lockheed Vega 5B monomotor a través del Océano Atlántico hasta París.", "example_title": "Spanish"}, {"text": "Amelia Earthart a fait voler son monomoteur Lockheed Vega 5B à travers l'ocean Atlantique jusqu'à Paris.", "example_title": "French"}, {"text": "Amelia Earhart ha volato con il suo monomotore Lockheed Vega 5B attraverso l'Atlantico fino a Parigi.", "example_title": "Italian"}, {"text": "Amelia Earthart vloog met haar één-motorige Lockheed Vega 5B over de Atlantische Oceaan naar Parijs.", "example_title": "Dutch"}, {"text": "Amelia Earthart przeleciała swoim jednosilnikowym samolotem Lockheed Vega 5B przez Ocean Atlantycki do Paryża.", "example_title": "Polish"}, {"text": "Amelia Earhart voou em seu monomotor Lockheed Vega 5B através do Atlântico para Paris.", "example_title": "Portuguese"}, {"text": "Амелия Эртхарт перелетела на своем одномоторном самолете Lockheed Vega 5B через Атлантический океан в Париж.", "example_title": "Russian"}, {"text": "Amelia Earthart flaug eins hreyfils Lockheed Vega 5B yfir Atlantshafið til Parísar.", "example_title": "Icelandic"}, {"text": "Η Amelia Earthart πέταξε το μονοκινητήριο Lockheed Vega 5B της πέρα ​​από τον Ατλαντικό Ωκεανό στο Παρίσι.", "example_title": "Greek"}, {"text": "Amelia Earhartová přeletěla se svým jednomotorovým Lockheed Vega 5B přes Atlantik do Paříže.", "example_title": "Czech"}, {"text": "Amelia Earhart lensi yksimoottorisella Lockheed Vega 5B:llä Atlantin yli Pariisiin.", "example_title": "Finnish"}, {"text": "Amelia Earhart fløj med sin enmotoriske Lockheed Vega 5B over Atlanten til Paris.", "example_title": "Danish"}, {"text": "Amelia Earhart flög sin enmotoriga Lockheed Vega 5B över Atlanten till Paris.", "example_title": "Swedish"}, {"text": "Amelia Earhart fløy sin enmotoriske Lockheed Vega 5B over Atlanterhavet til Paris.", "example_title": "Norwegian"}, {"text": "Amelia Earhart și-a zburat cu un singur motor Lockheed Vega 5B peste Atlantic până la Paris.", "example_title": "Romanian"}, {"text": "Amelia Earhart menerbangkan mesin tunggal Lockheed Vega 5B melintasi Atlantik ke Paris.", "example_title": "Indonesian"}, {"text": "Амелія Эрхарт пераляцела на сваім аднаматорным Lockheed Vega 5B праз Атлантыку ў Парыж.", "example_title": "Belarusian"}, {"text": "Амелія Ергарт перелетіла на своєму одномоторному літаку Lockheed Vega 5B через Атлантику до Парижа.", "example_title": "Ukrainian"}, {"text": "Amelia Earhart preletjela je svojim jednomotornim zrakoplovom Lockheed Vega 5B preko Atlantika do Pariza.", "example_title": "Croatian"}, {"text": "Amelia Earhart lendas oma ühemootoriga Lockheed Vega 5B üle Atlandi ookeani Pariisi .", "example_title": "Estonian"}], "model-index": [{"name": "span-marker-bert-base-multilingual-cased-multinerd", "results": [{"task": {"type": "token-classification", "name": "Named Entity Recognition"}, "dataset": {"name": "MultiNERD", "type": "Babelscape/multinerd", "split": "test", "revision": "2814b78e7af4b5a1f1886fe7ad49632de4d9dd25"}, "metrics": [{"type": "f1", "value": 0.927, "name": "F1"}, {"type": "precision", "value": 0.9281, "name": "Precision"}, {"type": "recall", "value": 0.9259, "name": "Recall"}]}]}]}
task
[ "NAMED_ENTITY_RECOGNITION" ]
41,458
DrGwin/setfit-paraphrase-mpnet-base-v2-sst2
DrGwin
text-classification
[ "setfit", "safetensors", "mpnet", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "base_model:sentence-transformers/paraphrase-mpnet-base-v2", "base_model:finetune:sentence-transformers/paraphrase-mpnet-base-v2", "model-index", "region:us" ]
2025-03-02T22:50:24Z
2025-03-02T22:50:41+00:00
18
0
--- base_model: sentence-transformers/paraphrase-mpnet-base-v2 library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: 'green might want to hang onto that ski mask , as robbery may be the only way to pay for his next project . ' - text: 'even horror fans will most likely not find what they ''re seeking with trouble every day ; the movie lacks both thrills and humor . ' - text: 'the acting , costumes , music , cinematography and sound are all astounding given the production ''s austere locales . ' - text: 'byler reveals his characters in a way that intrigues and even fascinates us , and he never reduces the situation to simple melodrama . ' - text: 'a sequence of ridiculous shoot - ''em - up scenes . ' inference: true model-index: - name: SetFit with sentence-transformers/paraphrase-mpnet-base-v2 results: - task: type: text-classification name: Text Classification dataset: name: Unknown type: unknown split: test metrics: - type: accuracy value: 0.8484455958549223 name: Accuracy --- # SetFit with sentence-transformers/paraphrase-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:---------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | positive | <ul><li>'a powerful and reasonably fulfilling gestalt '</li><li>'while the importance of being earnest offers opportunities for occasional smiles and chuckles '</li><li>'the proud warrior that still lingers in the souls of these characters '</li></ul> | | negative | <ul><li>'hate yourself '</li><li>'eight crazy nights is a total misfire . '</li><li>'guilty about it '</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.8484 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("DrGwin/setfit-paraphrase-mpnet-base-v2-sst2") # Run inference preds = model("a sequence of ridiculous shoot - 'em - up scenes . ") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:-------|:----| | Word count | 3 | 7.875 | 18 | | Label | Training Sample Count | |:---------|:----------------------| | negative | 8 | | positive | 8 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (4, 4) - max_steps: -1 - sampling_strategy: oversampling - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: True ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.1111 | 1 | 0.2847 | - | | 1.0 | 9 | - | 0.2303 | | 2.0 | 18 | - | 0.1917 | | 3.0 | 27 | - | 0.1718 | | 4.0 | 36 | - | 0.1715 | ### Framework Versions - Python: 3.11.11 - SetFit: 1.1.1 - Sentence Transformers: 3.4.1 - Transformers: 4.48.3 - PyTorch: 2.5.1+cu124 - Datasets: 3.3.2 - Tokenizers: 0.21.0 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit with sentence-transformers/paraphrase-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:---------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | positive | <ul><li>'a powerful and reasonably fulfilling gestalt '</li><li>'while the importance of being earnest offers opportunities for occasional smiles and chuckles '</li><li>'the proud warrior that still lingers in the souls of these characters '</li></ul> | | negative | <ul><li>'hate yourself '</li><li>'eight crazy nights is a total misfire . '</li><li>'guilty about it '</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.8484 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("DrGwin/setfit-paraphrase-mpnet-base-v2-sst2") # Run inference preds = model("a sequence of ridiculous shoot - 'em - up scenes . ") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:-------|:----| | Word count | 3 | 7.875 | 18 | | Label | Training Sample Count | |:---------|:----------------------| | negative | 8 | | positive | 8 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (4, 4) - max_steps: -1 - sampling_strategy: oversampling - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: True ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.1111 | 1 | 0.2847 | - | | 1.0 | 9 | - | 0.2303 | | 2.0 | 18 | - | 0.1917 | | 3.0 | 27 | - | 0.1718 | | 4.0 | 36 | - | 0.1715 | ### Framework Versions - Python: 3.11.11 - SetFit: 1.1.1 - Sentence Transformers: 3.4.1 - Transformers: 4.48.3 - PyTorch: 2.5.1+cu124 - Datasets: 3.3.2 - Tokenizers: 0.21.0 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "sentence-transformers/paraphrase-mpnet-base-v2", "library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "green might want to hang onto that ski mask , as robbery may be the only way to pay for his next project . "}, {"text": "even horror fans will most likely not find what they 're seeking with trouble every day ; the movie lacks both thrills and humor . "}, {"text": "the acting , costumes , music , cinematography and sound are all astounding given the production 's austere locales . "}, {"text": "byler reveals his characters in a way that intrigues and even fascinates us , and he never reduces the situation to simple melodrama . "}, {"text": "a sequence of ridiculous shoot - 'em - up scenes . "}], "inference": true, "model-index": [{"name": "SetFit with sentence-transformers/paraphrase-mpnet-base-v2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "Unknown", "type": "unknown", "split": "test"}, "metrics": [{"type": "accuracy", "value": 0.8484455958549223, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,459
microsoft/git-base-vatex
microsoft
image-text-to-text
[ "transformers", "pytorch", "safetensors", "git", "image-text-to-text", "vision", "en", "arxiv:2205.14100", "license:mit", "region:us" ]
2023-01-02T10:44:05Z
2023-09-15T19:52:10+00:00
377
5
--- language: en license: mit model_name: microsoft/git-base-vatex tags: - vision inference: false --- # GIT (GenerativeImage2Text), base-sized, fine-tuned on VATEX GIT (short for GenerativeImage2Text) model, base-sized version, fine-tuned on VATEX. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text). Disclaimer: The team releasing GIT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on a lot of (image, text) pairs. The goal for the model is simply to predict the next text token, giving the image tokens and previous text tokens. The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token. ![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg) This allows the model to be used for tasks like: - image and video captioning - visual question answering (VQA) on images and videos - even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text). ## Intended uses & limitations You can use the raw model for video captioning. See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you. ### How to use For code examples, we refer to the [documentation](https://huggingface.co/transformers/main/model_doc/git.html). ## Training data From the paper: > We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a). => however this is for the model referred to as "GIT" in the paper, which is not open-sourced. This checkpoint is "GIT-base", which is a smaller variant of GIT trained on 10 million image-text pairs. Next, the model was fine-tuned on VATEX. See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details. ### Preprocessing We refer to the original repo regarding details for preprocessing during training. During validation, one resizes the shorter edge of each image, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation. ## Evaluation results For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
null
Non_BioNLP
# GIT (GenerativeImage2Text), base-sized, fine-tuned on VATEX GIT (short for GenerativeImage2Text) model, base-sized version, fine-tuned on VATEX. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text). Disclaimer: The team releasing GIT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on a lot of (image, text) pairs. The goal for the model is simply to predict the next text token, giving the image tokens and previous text tokens. The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token. ![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg) This allows the model to be used for tasks like: - image and video captioning - visual question answering (VQA) on images and videos - even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text). ## Intended uses & limitations You can use the raw model for video captioning. See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you. ### How to use For code examples, we refer to the [documentation](https://huggingface.co/transformers/main/model_doc/git.html). ## Training data From the paper: > We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a). => however this is for the model referred to as "GIT" in the paper, which is not open-sourced. This checkpoint is "GIT-base", which is a smaller variant of GIT trained on 10 million image-text pairs. Next, the model was fine-tuned on VATEX. See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details. ### Preprocessing We refer to the original repo regarding details for preprocessing during training. During validation, one resizes the shorter edge of each image, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation. ## Evaluation results For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
{"language": "en", "license": "mit", "model_name": "microsoft/git-base-vatex", "tags": ["vision"], "inference": false}
task
[ "QUESTION_ANSWERING" ]
41,460
UKASHAKAYS/KAYSAI
UKASHAKAYS
question-answering
[ "asteroid", "text-generation-inference", "Text classification", "Question answering", "Translation", "Summarization", "Image classification", "Object classification", "Image generation", "Learning", "medical", "biology", "chemistry", "legal", "music", "code", "climate", "art", "finance", "moe", "merge", "question-answering", "aa", "ae", "ak", "af", "ab", "am", "an", "ar", "as", "av", "ay", "az", "ba", "bg", "be", "bh", "co", "bn", "bo", "br", "bs", "ca", "ce", "ch", "cr", "cs", "cu", "cv", "cy", "en", "bm", "de", "dv", "da", "dz", "el", "ee", "es", "et", "eu", "fa", "ff", "bi", "eo", "fj", "fo", "fi", "fr", "fy", "ga", "gd", "gl", "gn", "gu", "gv", "ha", "hi", "ie", "hr", "he", "ho", "hu", "hy", "ht", "hz", "id", "ig", "ii", "ik", "ia", "kj", "is", "it", "iu", "ja", "jv", "ko", "ki", "kg", "dataset:fka/awesome-chatgpt-prompts", "dataset:HuggingFaceFW/fineweb-2", "dataset:HuggingFaceTB/smoltalk", "dataset:O1-OPEN/OpenO1-SFT", "dataset:PleIAs/common_corpus", "dataset:AI-MO/NuminaMath-CoT", "dataset:ChuGyouk/AI-MO-NuminaMath-CoT-Ko", "dataset:alpindale/two-million-bluesky-posts", "dataset:CohereForAI/Global-MMLU", "base_model:Datou1111/shou_xin", "base_model:merge:Datou1111/shou_xin", "base_model:Djrango/Qwen2vl-Flux", "base_model:merge:Djrango/Qwen2vl-Flux", "base_model:Lightricks/LTX-Video", "base_model:merge:Lightricks/LTX-Video", "base_model:Qwen/QwQ-32B-Preview", "base_model:merge:Qwen/QwQ-32B-Preview", "base_model:Qwen/Qwen2.5-Coder-32B-Instruct", "base_model:merge:Qwen/Qwen2.5-Coder-32B-Instruct", "base_model:black-forest-labs/FLUX.1-dev", "base_model:merge:black-forest-labs/FLUX.1-dev", "base_model:meta-llama/Llama-3.3-70B-Instruct", "base_model:merge:meta-llama/Llama-3.3-70B-Instruct", "base_model:mlx-community/Qwen_QwQ-32B-Preview_MLX-8bit", "base_model:merge:mlx-community/Qwen_QwQ-32B-Preview_MLX-8bit", "base_model:stabilityai/stable-diffusion-3.5-large", "base_model:merge:stabilityai/stable-diffusion-3.5-large", "base_model:tencent/HunyuanVideo", "base_model:merge:tencent/HunyuanVideo", "license:creativeml-openrail-m", "region:us" ]
2024-12-17T09:48:27Z
2024-12-17T10:24:19+00:00
0
1
--- base_model: - Qwen/QwQ-32B-Preview - meta-llama/Llama-3.3-70B-Instruct - tencent/HunyuanVideo - mlx-community/Qwen_QwQ-32B-Preview_MLX-8bit - black-forest-labs/FLUX.1-dev - Lightricks/LTX-Video - Djrango/Qwen2vl-Flux - stabilityai/stable-diffusion-3.5-large - Datou1111/shou_xin - Qwen/Qwen2.5-Coder-32B-Instruct datasets: - fka/awesome-chatgpt-prompts - HuggingFaceFW/fineweb-2 - HuggingFaceTB/smoltalk - O1-OPEN/OpenO1-SFT - PleIAs/common_corpus - AI-MO/NuminaMath-CoT - ChuGyouk/AI-MO-NuminaMath-CoT-Ko - alpindale/two-million-bluesky-posts - CohereForAI/Global-MMLU language: - aa - ae - ak - af - ab - am - an - ar - as - av - ay - az - ba - bg - be - bh - co - bn - bo - br - bs - ca - ce - ch - cr - cs - cu - cv - cy - en - bm - de - dv - da - dz - el - ee - es - et - eu - fa - ff - bi - eo - fj - fo - fi - fr - fy - ga - gd - gl - gn - gu - gv - ha - hi - ie - hr - he - ho - hu - hy - ht - hz - id - ig - ii - ik - ia - kj - is - it - iu - ja - jv - ko - ki - kg library_name: asteroid license: creativeml-openrail-m metrics: - accuracy - bertscore - bleu - bleurt - code_eval - chrf - charcut_mt - character - cer - brier_score pipeline_tag: question-answering tags: - text-generation-inference - Text classification - Question answering - Translation - Summarization - Image classification - Object classification - Image generation - Learning - medical - biology - chemistry - legal - music - code - climate - art - finance - moe - merge new_version: meta-llama/Llama-3.3-70B-Instruct ---
null
Non_BioNLP
{"base_model": ["Qwen/QwQ-32B-Preview", "meta-llama/Llama-3.3-70B-Instruct", "tencent/HunyuanVideo", "mlx-community/Qwen_QwQ-32B-Preview_MLX-8bit", "black-forest-labs/FLUX.1-dev", "Lightricks/LTX-Video", "Djrango/Qwen2vl-Flux", "stabilityai/stable-diffusion-3.5-large", "Datou1111/shou_xin", "Qwen/Qwen2.5-Coder-32B-Instruct"], "datasets": ["fka/awesome-chatgpt-prompts", "HuggingFaceFW/fineweb-2", "HuggingFaceTB/smoltalk", "O1-OPEN/OpenO1-SFT", "PleIAs/common_corpus", "AI-MO/NuminaMath-CoT", "ChuGyouk/AI-MO-NuminaMath-CoT-Ko", "alpindale/two-million-bluesky-posts", "CohereForAI/Global-MMLU"], "language": ["aa", "ae", "ak", "af", "ab", "am", "an", "ar", "as", "av", "ay", "az", "ba", "bg", "be", "bh", "co", "bn", "bo", "br", "bs", "ca", "ce", "ch", "cr", "cs", "cu", "cv", "cy", "en", "bm", "de", "dv", "da", "dz", "el", "ee", "es", "et", "eu", "fa", "ff", "bi", "eo", "fj", "fo", "fi", "fr", "fy", "ga", "gd", "gl", "gn", "gu", "gv", "ha", "hi", "ie", "hr", "he", "ho", "hu", "hy", "ht", "hz", "id", "ig", "ii", "ik", "ia", "kj", "is", "it", "iu", "ja", "jv", "ko", "ki", "kg"], "library_name": "asteroid", "license": "creativeml-openrail-m", "metrics": ["accuracy", "bertscore", "bleu", "bleurt", "code_eval", "chrf", "charcut_mt", "character", "cer", "brier_score"], "pipeline_tag": "question-answering", "tags": ["text-generation-inference", "Text classification", "Question answering", "Translation", "Summarization", "Image classification", "Object classification", "Image generation", "Learning", "medical", "biology", "chemistry", "legal", "music", "code", "climate", "art", "finance", "moe", "merge"], "new_version": "meta-llama/Llama-3.3-70B-Instruct"}
task
[ "TEXT_CLASSIFICATION", "QUESTION_ANSWERING", "TRANSLATION", "SUMMARIZATION" ]
41,462
Graphcore/mt5-large-ipu
Graphcore
null
[ "optimum_graphcore", "arxiv:1910.10683", "arxiv:2010.11934", "license:apache-2.0", "region:us" ]
2023-05-19T15:18:20Z
2023-07-04T19:05:35+00:00
0
0
--- license: apache-2.0 --- # Graphcore/mt5-large-ipu Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools enabling maximum efficiency to train and run models on Graphcore’s IPUs - a completely new kind of massively parallel processor to accelerate machine intelligence. Learn more about how to take train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore). Through HuggingFace Optimum, Graphcore released ready-to-use IPU-trained model checkpoints and IPU configuration files to make it easy to train models with maximum efficiency in the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug-and-play any public dataset and allows a seamless integration to our State-of-the-art hardware giving you a quicker time-to-value for your AI project. ## Model description Multilingual Text-to-Text Transfer Transformer (mT5) is the multilingual variant of [T5](https://arxiv.org/abs/1910.10683). T5 is a Transformer based model that uses a text-to-text approach for translation, question answering, and classification. It introduces an unified framework that converts all text-based language problems into a text-to-text format for transfer learning for NLP. This allows for the use of the same model, loss function, hyperparameters, etc. across our diverse set of tasks. mT5 is pretrained on the mC4 corpus, covering 101 languages: Afrikaans, Albanian, Amharic, Arabic, Armenian, Azerbaijani, Basque, Belarusian, Bengali, Bulgarian, Burmese, Catalan, Cebuano, Chichewa, Chinese, Corsican, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Haitian Creole, Hausa, Hawaiian, Hebrew, Hindi, Hmong, Hungarian, Icelandic, Igbo, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish, Kyrgyz, Lao, Latin, Latvian, Lithuanian, Luxembourgish, Macedonian, Malagasy, Malay, Malayalam, Maltese, Maori, Marathi, Mongolian, Nepali, Norwegian, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Samoan, Scottish Gaelic, Serbian, Shona, Sindhi, Sinhala, Slovak, Slovenian, Somali, Sotho, Spanish, Sundanese, Swahili, Swedish, Tajik, Tamil, Telugu, Thai, Turkish, Ukrainian, Urdu, Uzbek, Vietnamese, Welsh, West Frisian, Xhosa, Yiddish, Yoruba, Zulu. Note: mT5 was only pre-trained on mC4 excluding any supervised training. Therefore, this model has to be fine-tuned before it is useable on a downstream task. Paper link :[mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) ## Intended uses & limitations This model contains just the `IPUConfig` files for running the mT5 Small model (e.g. [HuggingFace/google/mt5-large](https://huggingface.co/google/mt5-large)) on Graphcore IPUs. **This model contains no model weights, only an IPUConfig.** ## Usage ``` from optimum.graphcore import IPUConfig ipu_config = IPUConfig.from_pretrained("Graphcore/mt5-large-ipu") ```
null
Non_BioNLP
# Graphcore/mt5-large-ipu Optimum Graphcore is a new open-source library and toolkit that enables developers to access IPU-optimized models certified by Hugging Face. It is an extension of Transformers, providing a set of performance optimization tools enabling maximum efficiency to train and run models on Graphcore’s IPUs - a completely new kind of massively parallel processor to accelerate machine intelligence. Learn more about how to take train Transformer models faster with IPUs at [hf.co/hardware/graphcore](https://huggingface.co/hardware/graphcore). Through HuggingFace Optimum, Graphcore released ready-to-use IPU-trained model checkpoints and IPU configuration files to make it easy to train models with maximum efficiency in the IPU. Optimum shortens the development lifecycle of your AI models by letting you plug-and-play any public dataset and allows a seamless integration to our State-of-the-art hardware giving you a quicker time-to-value for your AI project. ## Model description Multilingual Text-to-Text Transfer Transformer (mT5) is the multilingual variant of [T5](https://arxiv.org/abs/1910.10683). T5 is a Transformer based model that uses a text-to-text approach for translation, question answering, and classification. It introduces an unified framework that converts all text-based language problems into a text-to-text format for transfer learning for NLP. This allows for the use of the same model, loss function, hyperparameters, etc. across our diverse set of tasks. mT5 is pretrained on the mC4 corpus, covering 101 languages: Afrikaans, Albanian, Amharic, Arabic, Armenian, Azerbaijani, Basque, Belarusian, Bengali, Bulgarian, Burmese, Catalan, Cebuano, Chichewa, Chinese, Corsican, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Haitian Creole, Hausa, Hawaiian, Hebrew, Hindi, Hmong, Hungarian, Icelandic, Igbo, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish, Kyrgyz, Lao, Latin, Latvian, Lithuanian, Luxembourgish, Macedonian, Malagasy, Malay, Malayalam, Maltese, Maori, Marathi, Mongolian, Nepali, Norwegian, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Samoan, Scottish Gaelic, Serbian, Shona, Sindhi, Sinhala, Slovak, Slovenian, Somali, Sotho, Spanish, Sundanese, Swahili, Swedish, Tajik, Tamil, Telugu, Thai, Turkish, Ukrainian, Urdu, Uzbek, Vietnamese, Welsh, West Frisian, Xhosa, Yiddish, Yoruba, Zulu. Note: mT5 was only pre-trained on mC4 excluding any supervised training. Therefore, this model has to be fine-tuned before it is useable on a downstream task. Paper link :[mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) ## Intended uses & limitations This model contains just the `IPUConfig` files for running the mT5 Small model (e.g. [HuggingFace/google/mt5-large](https://huggingface.co/google/mt5-large)) on Graphcore IPUs. **This model contains no model weights, only an IPUConfig.** ## Usage ``` from optimum.graphcore import IPUConfig ipu_config = IPUConfig.from_pretrained("Graphcore/mt5-large-ipu") ```
{"license": "apache-2.0"}
task
[ "QUESTION_ANSWERING", "TRANSLATION" ]
41,463
PeYing/model
PeYing
sentence-similarity
[ "sentence-transformers", "safetensors", "qwen2", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:182", "loss:CosineSimilarityLoss", "arxiv:1908.10084", "base_model:unsloth/Qwen2.5-0.5B-Instruct", "base_model:finetune:unsloth/Qwen2.5-0.5B-Instruct", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2025-01-27T01:50:32Z
2025-01-27T01:54:58+00:00
8
0
--- base_model: unsloth/Qwen2.5-0.5B-Instruct library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:182 - loss:CosineSimilarityLoss widget: - source_sentence: What documents must contractors/vendors provide? sentences: - 1. ESH representatives will carry out the training when new employees need to be trained, or on an annual basis. - "1. Safe Operating Procedure (SOP). \n2. Risk Assessment ( Hazard Identification,\ \ Risk Assessment, & Risk control / HIRARC) / JSA / Job Safety Analysis. \n3.\ \ Valid licenses (If applicable). \n4. Certification of Fitness-CF (For all types\ \ of cranes). \n5. Crane Operator Competency License. (If applicable). \n6. All\ \ scaffolding must be erected as per the statutory regulations. \n7. Lifting Supervisor\ \ Competency Certificate. (If applicable). \n8. Signal Man Competency Certificate.\ \ (If applicable. \n9. Rigger Competency Certificate. (If applicable). \n10. Lifting\ \ plan (If applicable). \n11. Scaffolder Level 1/2/3 Certificate. (If applicable)." - 1. To ensure the specific employees are aware of the correct procedures associated with chemical handling and waste management. - source_sentence: What is the guideline for shirts and blouses? sentences: - 1. ESH representatives will carry out the training when new employees need to be trained, or on an annual basis. - 1. Employees in CLEAN ROOM are NOT ALLOWED to use/wear makeup/bangles. - "1. 1. Formal or casual shirts with sleeves. \n2. 2. Collared T-shirts and blouses/sleeveless\ \ tops (for ladies). \n3. 3. Round-neck T-shirts are allowed for non-office personnel.\ \ \n4. 4. Clothing with the company logo is encouraged. \n5. 5. Sport Team. \n\ 6. 6. University. \n7. 7. Fashion brands on clothing are generally acceptable." - source_sentence: What is the lunch schedule for the 1st shift in the normal schedule in M-site? sentences: - 12 days. - '1. Categorization of Machine: Identify the location of the machine, its function, and all necessary items needed for it to run (e.g., lubricants, saw blades, etc). 2. Authorization: Ensure that all personnel operating the machine have received the appropriate training. 3. Hazard & Risks associated with equipment/machinery/techniques/process: Identify all hazards and risks associated, and implement sufficient controls according to the hierarchy of controls (e.g., warning labels and symbols). 4. Pre-work procedure: Ensure that the machine is in proper, running condition before starting work. 5. During work procedure: Follow the correct standard operating procedure for carrying out that work activity. 6. After work procedure: Ensure that the machine remains in a neat and tidy condition at all times. 7. Work Area: Identify the area where the work is being done. 8. PPE: Ensure that appropriate PPE is available for all personnel handling the machine. 9. Emergency Procedure: Ensure sufficient emergency features are available on the machine (e.g., emergency stop button). 10. After work hour: Ensure the machine system is in shutdown/standby mode when the machine is not running. 11. Housekeeping: Ensure basic housekeeping is done at the work area. 12. Scheduled waste: Any scheduled waste generated by the process should be disposed of according to Carsem waste management procedure.' - 1. Lunch (Tengah Hari) for the 1st shift is from 12:00 PM to 1:00 PM, lasting 60 minutes. - source_sentence: What is the meal schedule for M-site? sentences: - 2 days. - "1. 1st Shift: -Dinner (Malam): 8:00PM - 8:40PM, -Supper(Lewat Malam): 1:00AM\ \ - 1:30 AM -Breakfast(Pagi): 8:00AM - 8:30AM -Lunch(Tengah Hari): 12:50PM - 1:30PM.\ \ \n2. 2nd Shift: -Dinner(Malam): 8:50PM - 9:30PM -Supper(Lewat Malam): 1:40AM\ \ - 2:10AM -Breakfast(Pagi): 8:40AM - 9:10AM -Lunch(Tengah Hari): 1:40PM - 2:20PM.\ \ \n3. 3rd Shift: -Dinner(Malam): 9:40PM - 10:20PM -Supper(Lewat Malam): 2:20AM\ \ - 2:50AM -Breakfast(Pagi): 9:20AM - 9:50AM -Lunch(Tengah Hari): 2:30PM - 3:10PM.\ \ \n4. 4th Shift: -Dinner(Malam): 10:30PM - 11:10PM -Supper(Lewat Malam): 3:00AM\ \ - 3:30AM -Breakfast(Pagi): 10:00AM - 10:30AM -Lunch(Tengah Hari): 3:20PM - 4:00PM." - "1. The mechanical safety guidelines include: \n2. 1. Lock-Out Tag-Out (LOTO):\ \ Always practice LOTO procedures when performing maintenance or repairs on machines.\ \ \n3. 2. Preventive Maintenance: Conduct regular preventive maintenance on all\ \ machinery to ensure proper functioning. \n4. 3. Pinch Points Awareness: Identify\ \ all possible pinch points on machinery, and ensure they are properly labeled.\ \ \n5. 4. Production Area Organization: Keep the production area neat and organized\ \ at all times. \n6. 5. Operator Training: Provide adequate training to operators\ \ before allowing them to handle machines. \n7. 6. Machine Guarding: Ensure all\ \ safety guards are in place before starting machine operations." - source_sentence: Can employees wear traditional attire? sentences: - "1. N03 : Monday to Friday, 8am to 5:30pm.\n2. N04 : Tuesday to Saturday, 8am\ \ to 5:30pm.\n3. N05 : Monday to Friday, 8:30am to 6pm.\n4. N06 : Monday to Friday,\ \ 9am to 6:30pm.\n5. N07 : Tuesday to Saturday, 8:30am to 6pm.\n6. N08 : Tuesday\ \ to Saturday, 9am to 6.30pm.\n7. N6 : Tuesday to Saturday, 8:30pm to 6:15pm.\n\ 8. N9: 5 working days 2 days off, 7:30am to 5:15pm , 10:30am to 8:15pm.\n9. N10:\ \ 5 working days 2 days off, 10:30am to 8:15pm , 7:30am to 5:15pm.\n10. AA/BB/CC/A/B/C\ \ : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.\n11. AA1/BB1/CC1/A1/B1/C1\ \ : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.\n12. GG/HH/II/GG1/HH1/II1\ \ : 4 working days 2 days off, 7:30am to 7:30pm , 7:30pm to 7:30am.\n13. P1 :\ \ Monday to Thursday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to\ \ 6:30am.\n14. P2 : Tuesday to Friday (4 working days 2 days off), 6:30am to 6:30pm\ \ , 6:30pm to 6:30am. \n15. U1/U2/U3/UU1/UU2/UU3 : 4 working days 2 days off,\ \ 7:30am to 7.30pm. \n16. V1/V2/V3/VV1/VV2/VV3 : 4 working days 2 days off, 8.30am\ \ to 8.30pm. \n17. W1/W2/W3/WW1/WW2/WW3 : 4 working days 2 days off, 6.30am to\ \ 6.30pm. \n18. H1 : Monday to Thursday (4 working days 2 days off), 6.30am to\ \ 6.30pm. \n19. H2 : Tuesday to Friday (4 working days 2 days off), 6.30am to\ \ 6.30pm. \n20. H3 : Wednesday to Saturday (4 working days 2 days off), 6.30am\ \ to 6.30pm. \n21. H6(applicable in S only) : Monday to Thursday (4 working days\ \ 2 days off), 7.30am to 7.30pm. \n22. H6(applicable in M only) : Monday to Thursday\ \ (4 working days 2 days off), 7.30am to 7.30pm." - "1. 1st Shift: -Dinner (Malam): 8:00PM - 8:40PM, -Supper(Lewat Malam): 1:00AM\ \ - 1:30 AM -Breakfast(Pagi): 8:30AM - 9:00AM -Lunch(Tengah Hari): 1:40PM - 2:20PM.\ \ \n2. 2nd Shift: -Dinner(Malam): 8:50PM - 9:30PM -Supper(Lewat Malam): 1:40AM\ \ - 2:10AM -Breakfast(Pagi): 9:10AM - 9:40AM -Lunch(Tengah Hari): 2:30PM - 3:10PM.\ \ \n3. 3rd Shift: -Dinner(Malam): 9:40PM - 10:20PM -Supper(Lewat Malam): 2:20AM\ \ - 2:50AM -Breakfast(Pagi): 9:50AM - 10:20AM -Lunch(Tengah Hari): 3:20PM - 4:00PM." - "1. 1. Yes, acceptable traditional attire includes: \n2. 1. Malaysian Traditional\ \ Attire. \n3. 2.Malay Baju Kurung. \n4. 3. Baju Melayu for Muslim men. \n5. 4.Indian\ \ Saree. \n6. 5. Punjabi Suit. \n7. Chinese Cheongsam are acceptable." --- # SentenceTransformer based on unsloth/Qwen2.5-0.5B-Instruct This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [unsloth/Qwen2.5-0.5B-Instruct](https://huggingface.co/unsloth/Qwen2.5-0.5B-Instruct). It maps sentences & paragraphs to a 896-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [unsloth/Qwen2.5-0.5B-Instruct](https://huggingface.co/unsloth/Qwen2.5-0.5B-Instruct) <!-- at revision 6a7b5090fc11df0706c796b7ba76762d7beb688b --> - **Maximum Sequence Length:** 32768 tokens - **Output Dimensionality:** 896 dimensions - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 32768, 'do_lower_case': False}) with Transformer model: Qwen2Model (1): Pooling({'word_embedding_dimension': 896, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("PeYing/model") # Run inference sentences = [ 'Can employees wear traditional attire?', '1. 1. Yes, acceptable traditional attire includes: \n2. 1. Malaysian Traditional Attire. \n3. 2.Malay Baju Kurung. \n4. 3. Baju Melayu for Muslim men. \n5. 4.Indian Saree. \n6. 5. Punjabi Suit. \n7. Chinese Cheongsam are acceptable.', '1. N03 : Monday to Friday, 8am to 5:30pm.\n2. N04 : Tuesday to Saturday, 8am to 5:30pm.\n3. N05 : Monday to Friday, 8:30am to 6pm.\n4. N06 : Monday to Friday, 9am to 6:30pm.\n5. N07 : Tuesday to Saturday, 8:30am to 6pm.\n6. N08 : Tuesday to Saturday, 9am to 6.30pm.\n7. N6 : Tuesday to Saturday, 8:30pm to 6:15pm.\n8. N9: 5 working days 2 days off, 7:30am to 5:15pm , 10:30am to 8:15pm.\n9. N10: 5 working days 2 days off, 10:30am to 8:15pm , 7:30am to 5:15pm.\n10. AA/BB/CC/A/B/C : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.\n11. AA1/BB1/CC1/A1/B1/C1 : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.\n12. GG/HH/II/GG1/HH1/II1 : 4 working days 2 days off, 7:30am to 7:30pm , 7:30pm to 7:30am.\n13. P1 : Monday to Thursday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am.\n14. P2 : Tuesday to Friday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am. \n15. U1/U2/U3/UU1/UU2/UU3 : 4 working days 2 days off, 7:30am to 7.30pm. \n16. V1/V2/V3/VV1/VV2/VV3 : 4 working days 2 days off, 8.30am to 8.30pm. \n17. W1/W2/W3/WW1/WW2/WW3 : 4 working days 2 days off, 6.30am to 6.30pm. \n18. H1 : Monday to Thursday (4 working days 2 days off), 6.30am to 6.30pm. \n19. H2 : Tuesday to Friday (4 working days 2 days off), 6.30am to 6.30pm. \n20. H3 : Wednesday to Saturday (4 working days 2 days off), 6.30am to 6.30pm. \n21. H6(applicable in S only) : Monday to Thursday (4 working days 2 days off), 7.30am to 7.30pm. \n22. H6(applicable in M only) : Monday to Thursday (4 working days 2 days off), 7.30am to 7.30pm.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 896] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 182 training samples * Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>label</code> * Approximate statistics based on the first 182 samples: | | sentence_0 | sentence_1 | label | |:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:-----------------------------| | type | string | string | int | | details | <ul><li>min: 5 tokens</li><li>mean: 12.15 tokens</li><li>max: 28 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 63.21 tokens</li><li>max: 793 tokens</li></ul> | <ul><li>1: 100.00%</li></ul> | * Samples: | sentence_0 | sentence_1 | label | |:----------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------| | <code>List out all the work schedule for Carsem.</code> | <code>1. N03 : Monday to Friday, 8am to 5:30pm.<br>2. N04 : Tuesday to Saturday, 8am to 5:30pm.<br>3. N05 : Monday to Friday, 8:30am to 6pm.<br>4. N06 : Monday to Friday, 9am to 6:30pm.<br>5. N07 : Tuesday to Saturday, 8:30am to 6pm.<br>6. N08 : Tuesday to Saturday, 9am to 6.30pm.<br>7. N6 : Tuesday to Saturday, 8:30pm to 6:15pm.<br>8. N9: 5 working days 2 days off, 7:30am to 5:15pm , 10:30am to 8:15pm.<br>9. N10: 5 working days 2 days off, 10:30am to 8:15pm , 7:30am to 5:15pm.<br>10. AA/BB/CC/A/B/C : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.<br>11. AA1/BB1/CC1/A1/B1/C1 : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.<br>12. GG/HH/II/GG1/HH1/II1 : 4 working days 2 days off, 7:30am to 7:30pm , 7:30pm to 7:30am.<br>13. P1 : Monday to Thursday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am.<br>14. P2 : Tuesday to Friday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am. <br>15. U1/U2/U3/UU1/UU2/UU3 : 4 working days 2 days off, 7:30am to 7.30pm. <br>16. V1/V2/V3/VV1/VV...</code> | <code>1</code> | | <code>What is the maximum allowed working hours in a week?</code> | <code>1. Employees are not allowed to work more than 60 hours in a week inclusive of overtime and 1 rest day per week. Company will monitor overtime and rest day utilization and take appropriate action to address instances deemed excessive.</code> | <code>1</code> | | <code>Why the company is not allowed working hours in a week more than 60 hours?</code> | <code>1. Continuous overtime causes worker strain that may lead to reduced productivity, increased turnover and increased injury and illnesses.</code> | <code>1</code> | * Loss: [<code>CosineSimilarityLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters: ```json { "loss_fct": "torch.nn.modules.loss.MSELoss" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `per_device_train_batch_size`: 1 - `per_device_eval_batch_size`: 1 - `num_train_epochs`: 1 - `multi_dataset_batch_sampler`: round_robin #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: no - `prediction_loss_only`: True - `per_device_train_batch_size`: 1 - `per_device_eval_batch_size`: 1 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: round_robin </details> ### Framework Versions - Python: 3.10.0 - Sentence Transformers: 3.3.1 - Transformers: 4.48.1 - PyTorch: 2.5.1+cu118 - Accelerate: 1.3.0 - Datasets: 3.2.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SentenceTransformer based on unsloth/Qwen2.5-0.5B-Instruct This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [unsloth/Qwen2.5-0.5B-Instruct](https://huggingface.co/unsloth/Qwen2.5-0.5B-Instruct). It maps sentences & paragraphs to a 896-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [unsloth/Qwen2.5-0.5B-Instruct](https://huggingface.co/unsloth/Qwen2.5-0.5B-Instruct) <!-- at revision 6a7b5090fc11df0706c796b7ba76762d7beb688b --> - **Maximum Sequence Length:** 32768 tokens - **Output Dimensionality:** 896 dimensions - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 32768, 'do_lower_case': False}) with Transformer model: Qwen2Model (1): Pooling({'word_embedding_dimension': 896, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("PeYing/model") # Run inference sentences = [ 'Can employees wear traditional attire?', '1. 1. Yes, acceptable traditional attire includes: \n2. 1. Malaysian Traditional Attire. \n3. 2.Malay Baju Kurung. \n4. 3. Baju Melayu for Muslim men. \n5. 4.Indian Saree. \n6. 5. Punjabi Suit. \n7. Chinese Cheongsam are acceptable.', '1. N03 : Monday to Friday, 8am to 5:30pm.\n2. N04 : Tuesday to Saturday, 8am to 5:30pm.\n3. N05 : Monday to Friday, 8:30am to 6pm.\n4. N06 : Monday to Friday, 9am to 6:30pm.\n5. N07 : Tuesday to Saturday, 8:30am to 6pm.\n6. N08 : Tuesday to Saturday, 9am to 6.30pm.\n7. N6 : Tuesday to Saturday, 8:30pm to 6:15pm.\n8. N9: 5 working days 2 days off, 7:30am to 5:15pm , 10:30am to 8:15pm.\n9. N10: 5 working days 2 days off, 10:30am to 8:15pm , 7:30am to 5:15pm.\n10. AA/BB/CC/A/B/C : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.\n11. AA1/BB1/CC1/A1/B1/C1 : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.\n12. GG/HH/II/GG1/HH1/II1 : 4 working days 2 days off, 7:30am to 7:30pm , 7:30pm to 7:30am.\n13. P1 : Monday to Thursday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am.\n14. P2 : Tuesday to Friday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am. \n15. U1/U2/U3/UU1/UU2/UU3 : 4 working days 2 days off, 7:30am to 7.30pm. \n16. V1/V2/V3/VV1/VV2/VV3 : 4 working days 2 days off, 8.30am to 8.30pm. \n17. W1/W2/W3/WW1/WW2/WW3 : 4 working days 2 days off, 6.30am to 6.30pm. \n18. H1 : Monday to Thursday (4 working days 2 days off), 6.30am to 6.30pm. \n19. H2 : Tuesday to Friday (4 working days 2 days off), 6.30am to 6.30pm. \n20. H3 : Wednesday to Saturday (4 working days 2 days off), 6.30am to 6.30pm. \n21. H6(applicable in S only) : Monday to Thursday (4 working days 2 days off), 7.30am to 7.30pm. \n22. H6(applicable in M only) : Monday to Thursday (4 working days 2 days off), 7.30am to 7.30pm.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 896] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 182 training samples * Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>label</code> * Approximate statistics based on the first 182 samples: | | sentence_0 | sentence_1 | label | |:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:-----------------------------| | type | string | string | int | | details | <ul><li>min: 5 tokens</li><li>mean: 12.15 tokens</li><li>max: 28 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 63.21 tokens</li><li>max: 793 tokens</li></ul> | <ul><li>1: 100.00%</li></ul> | * Samples: | sentence_0 | sentence_1 | label | |:----------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------| | <code>List out all the work schedule for Carsem.</code> | <code>1. N03 : Monday to Friday, 8am to 5:30pm.<br>2. N04 : Tuesday to Saturday, 8am to 5:30pm.<br>3. N05 : Monday to Friday, 8:30am to 6pm.<br>4. N06 : Monday to Friday, 9am to 6:30pm.<br>5. N07 : Tuesday to Saturday, 8:30am to 6pm.<br>6. N08 : Tuesday to Saturday, 9am to 6.30pm.<br>7. N6 : Tuesday to Saturday, 8:30pm to 6:15pm.<br>8. N9: 5 working days 2 days off, 7:30am to 5:15pm , 10:30am to 8:15pm.<br>9. N10: 5 working days 2 days off, 10:30am to 8:15pm , 7:30am to 5:15pm.<br>10. AA/BB/CC/A/B/C : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.<br>11. AA1/BB1/CC1/A1/B1/C1 : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.<br>12. GG/HH/II/GG1/HH1/II1 : 4 working days 2 days off, 7:30am to 7:30pm , 7:30pm to 7:30am.<br>13. P1 : Monday to Thursday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am.<br>14. P2 : Tuesday to Friday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am. <br>15. U1/U2/U3/UU1/UU2/UU3 : 4 working days 2 days off, 7:30am to 7.30pm. <br>16. V1/V2/V3/VV1/VV...</code> | <code>1</code> | | <code>What is the maximum allowed working hours in a week?</code> | <code>1. Employees are not allowed to work more than 60 hours in a week inclusive of overtime and 1 rest day per week. Company will monitor overtime and rest day utilization and take appropriate action to address instances deemed excessive.</code> | <code>1</code> | | <code>Why the company is not allowed working hours in a week more than 60 hours?</code> | <code>1. Continuous overtime causes worker strain that may lead to reduced productivity, increased turnover and increased injury and illnesses.</code> | <code>1</code> | * Loss: [<code>CosineSimilarityLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters: ```json { "loss_fct": "torch.nn.modules.loss.MSELoss" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `per_device_train_batch_size`: 1 - `per_device_eval_batch_size`: 1 - `num_train_epochs`: 1 - `multi_dataset_batch_sampler`: round_robin #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: no - `prediction_loss_only`: True - `per_device_train_batch_size`: 1 - `per_device_eval_batch_size`: 1 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: round_robin </details> ### Framework Versions - Python: 3.10.0 - Sentence Transformers: 3.3.1 - Transformers: 4.48.1 - PyTorch: 2.5.1+cu118 - Accelerate: 1.3.0 - Datasets: 3.2.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "unsloth/Qwen2.5-0.5B-Instruct", "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:182", "loss:CosineSimilarityLoss"], "widget": [{"source_sentence": "What documents must contractors/vendors provide?", "sentences": ["1. ESH representatives will carry out the training when new employees need to be trained, or on an annual basis.", "1. Safe Operating Procedure (SOP). \n2. Risk Assessment ( Hazard Identification, Risk Assessment, & Risk control / HIRARC) / JSA / Job Safety Analysis. \n3. Valid licenses (If applicable). \n4. Certification of Fitness-CF (For all types of cranes). \n5. Crane Operator Competency License. (If applicable). \n6. All scaffolding must be erected as per the statutory regulations. \n7. Lifting Supervisor Competency Certificate. (If applicable). \n8. Signal Man Competency Certificate. (If applicable. \n9. Rigger Competency Certificate. (If applicable). \n10. Lifting plan (If applicable). \n11. Scaffolder Level 1/2/3 Certificate. (If applicable).", "1. To ensure the specific employees are aware of the correct procedures associated with chemical handling and waste management."]}, {"source_sentence": "What is the guideline for shirts and blouses?", "sentences": ["1. ESH representatives will carry out the training when new employees need to be trained, or on an annual basis.", "1. Employees in CLEAN ROOM are NOT ALLOWED to use/wear makeup/bangles.", "1. 1. Formal or casual shirts with sleeves. \n2. 2. Collared T-shirts and blouses/sleeveless tops (for ladies). \n3. 3. Round-neck T-shirts are allowed for non-office personnel. \n4. 4. Clothing with the company logo is encouraged. \n5. 5. Sport Team. \n6. 6. University. \n7. 7. Fashion brands on clothing are generally acceptable."]}, {"source_sentence": "What is the lunch schedule for the 1st shift in the normal schedule in M-site?", "sentences": ["12 days.", "1. Categorization of Machine: Identify the location of the machine, its function, and all necessary items needed for it to run (e.g., lubricants, saw blades, etc).\n2. Authorization: Ensure that all personnel operating the machine have received the appropriate training.\n3. Hazard & Risks associated with equipment/machinery/techniques/process: Identify all hazards and risks associated, and implement sufficient controls according to the hierarchy of controls (e.g., warning labels and symbols).\n4. Pre-work procedure: Ensure that the machine is in proper, running condition before starting work.\n5. During work procedure: Follow the correct standard operating procedure for carrying out that work activity.\n6. After work procedure: Ensure that the machine remains in a neat and tidy condition at all times.\n7. Work Area: Identify the area where the work is being done.\n8. PPE: Ensure that appropriate PPE is available for all personnel handling the machine.\n9. Emergency Procedure: Ensure sufficient emergency features are available on the machine (e.g., emergency stop button).\n10. After work hour: Ensure the machine system is in shutdown/standby mode when the machine is not running.\n11. Housekeeping: Ensure basic housekeeping is done at the work area.\n12. Scheduled waste: Any scheduled waste generated by the process should be disposed of according to Carsem waste management procedure.", "1. Lunch (Tengah Hari) for the 1st shift is from 12:00 PM to 1:00 PM, lasting 60 minutes."]}, {"source_sentence": "What is the meal schedule for M-site?", "sentences": ["2 days.", "1. 1st Shift: -Dinner (Malam): 8:00PM - 8:40PM, -Supper(Lewat Malam): 1:00AM - 1:30 AM -Breakfast(Pagi): 8:00AM - 8:30AM -Lunch(Tengah Hari): 12:50PM - 1:30PM. \n2. 2nd Shift: -Dinner(Malam): 8:50PM - 9:30PM -Supper(Lewat Malam): 1:40AM - 2:10AM -Breakfast(Pagi): 8:40AM - 9:10AM -Lunch(Tengah Hari): 1:40PM - 2:20PM. \n3. 3rd Shift: -Dinner(Malam): 9:40PM - 10:20PM -Supper(Lewat Malam): 2:20AM - 2:50AM -Breakfast(Pagi): 9:20AM - 9:50AM -Lunch(Tengah Hari): 2:30PM - 3:10PM. \n4. 4th Shift: -Dinner(Malam): 10:30PM - 11:10PM -Supper(Lewat Malam): 3:00AM - 3:30AM -Breakfast(Pagi): 10:00AM - 10:30AM -Lunch(Tengah Hari): 3:20PM - 4:00PM.", "1. The mechanical safety guidelines include: \n2. 1. Lock-Out Tag-Out (LOTO): Always practice LOTO procedures when performing maintenance or repairs on machines. \n3. 2. Preventive Maintenance: Conduct regular preventive maintenance on all machinery to ensure proper functioning. \n4. 3. Pinch Points Awareness: Identify all possible pinch points on machinery, and ensure they are properly labeled. \n5. 4. Production Area Organization: Keep the production area neat and organized at all times. \n6. 5. Operator Training: Provide adequate training to operators before allowing them to handle machines. \n7. 6. Machine Guarding: Ensure all safety guards are in place before starting machine operations."]}, {"source_sentence": "Can employees wear traditional attire?", "sentences": ["1. N03 : Monday to Friday, 8am to 5:30pm.\n2. N04 : Tuesday to Saturday, 8am to 5:30pm.\n3. N05 : Monday to Friday, 8:30am to 6pm.\n4. N06 : Monday to Friday, 9am to 6:30pm.\n5. N07 : Tuesday to Saturday, 8:30am to 6pm.\n6. N08 : Tuesday to Saturday, 9am to 6.30pm.\n7. N6 : Tuesday to Saturday, 8:30pm to 6:15pm.\n8. N9: 5 working days 2 days off, 7:30am to 5:15pm , 10:30am to 8:15pm.\n9. N10: 5 working days 2 days off, 10:30am to 8:15pm , 7:30am to 5:15pm.\n10. AA/BB/CC/A/B/C : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.\n11. AA1/BB1/CC1/A1/B1/C1 : 4 working days 2 days off, 6:30am to 6:30pm , 6:30pm to 6:30am.\n12. GG/HH/II/GG1/HH1/II1 : 4 working days 2 days off, 7:30am to 7:30pm , 7:30pm to 7:30am.\n13. P1 : Monday to Thursday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am.\n14. P2 : Tuesday to Friday (4 working days 2 days off), 6:30am to 6:30pm , 6:30pm to 6:30am. \n15. U1/U2/U3/UU1/UU2/UU3 : 4 working days 2 days off, 7:30am to 7.30pm. \n16. V1/V2/V3/VV1/VV2/VV3 : 4 working days 2 days off, 8.30am to 8.30pm. \n17. W1/W2/W3/WW1/WW2/WW3 : 4 working days 2 days off, 6.30am to 6.30pm. \n18. H1 : Monday to Thursday (4 working days 2 days off), 6.30am to 6.30pm. \n19. H2 : Tuesday to Friday (4 working days 2 days off), 6.30am to 6.30pm. \n20. H3 : Wednesday to Saturday (4 working days 2 days off), 6.30am to 6.30pm. \n21. H6(applicable in S only) : Monday to Thursday (4 working days 2 days off), 7.30am to 7.30pm. \n22. H6(applicable in M only) : Monday to Thursday (4 working days 2 days off), 7.30am to 7.30pm.", "1. 1st Shift: -Dinner (Malam): 8:00PM - 8:40PM, -Supper(Lewat Malam): 1:00AM - 1:30 AM -Breakfast(Pagi): 8:30AM - 9:00AM -Lunch(Tengah Hari): 1:40PM - 2:20PM. \n2. 2nd Shift: -Dinner(Malam): 8:50PM - 9:30PM -Supper(Lewat Malam): 1:40AM - 2:10AM -Breakfast(Pagi): 9:10AM - 9:40AM -Lunch(Tengah Hari): 2:30PM - 3:10PM. \n3. 3rd Shift: -Dinner(Malam): 9:40PM - 10:20PM -Supper(Lewat Malam): 2:20AM - 2:50AM -Breakfast(Pagi): 9:50AM - 10:20AM -Lunch(Tengah Hari): 3:20PM - 4:00PM.", "1. 1. Yes, acceptable traditional attire includes: \n2. 1. Malaysian Traditional Attire. \n3. 2.Malay Baju Kurung. \n4. 3. Baju Melayu for Muslim men. \n5. 4.Indian Saree. \n6. 5. Punjabi Suit. \n7. Chinese Cheongsam are acceptable."]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,464
Backdrive/distilbert-base-uncased-finetuned-emotion
Backdrive
text-classification
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-05-22T14:39:04Z
2023-05-22T14:52:30+00:00
10
0
--- datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.9285 name: Accuracy - type: f1 value: 0.9285478749765623 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2158 - Accuracy: 0.9285 - F1: 0.9285 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8112 | 1.0 | 250 | 0.3104 | 0.9005 | 0.8968 | | 0.2447 | 2.0 | 500 | 0.2158 | 0.9285 | 0.9285 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2158 - Accuracy: 0.9285 - F1: 0.9285 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8112 | 1.0 | 250 | 0.3104 | 0.9005 | 0.8968 | | 0.2447 | 2.0 | 500 | 0.2158 | 0.9285 | 0.9285 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
{"datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.9285, "name": "Accuracy"}, {"type": "f1", "value": 0.9285478749765623, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,465
jawad24/marian-finetuned-kde4-en-to-fr
jawad24
translation
[ "transformers", "tensorboard", "safetensors", "marian", "text2text-generation", "translation", "generated_from_trainer", "dataset:kde4", "base_model:Helsinki-NLP/opus-mt-en-fr", "base_model:finetune:Helsinki-NLP/opus-mt-en-fr", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-07-28T09:27:55Z
2024-07-28T12:05:19+00:00
6
0
--- base_model: Helsinki-NLP/opus-mt-en-fr datasets: - kde4 license: apache-2.0 metrics: - bleu tags: - translation - generated_from_trainer model-index: - name: marian-finetuned-kde4-en-to-fr results: - task: type: text2text-generation name: Sequence-to-sequence Language Modeling dataset: name: kde4 type: kde4 config: en-fr split: train args: en-fr metrics: - type: bleu value: 52.91210143343284 name: Bleu --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # marian-finetuned-kde4-en-to-fr This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset. It achieves the following results on the evaluation set: - Loss: 0.8554 - Bleu: 52.9121 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.42.4 - Pytorch 2.3.1+cu121 - Datasets 2.20.0 - Tokenizers 0.19.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # marian-finetuned-kde4-en-to-fr This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset. It achieves the following results on the evaluation set: - Loss: 0.8554 - Bleu: 52.9121 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.42.4 - Pytorch 2.3.1+cu121 - Datasets 2.20.0 - Tokenizers 0.19.1
{"base_model": "Helsinki-NLP/opus-mt-en-fr", "datasets": ["kde4"], "license": "apache-2.0", "metrics": ["bleu"], "tags": ["translation", "generated_from_trainer"], "model-index": [{"name": "marian-finetuned-kde4-en-to-fr", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "kde4", "type": "kde4", "config": "en-fr", "split": "train", "args": "en-fr"}, "metrics": [{"type": "bleu", "value": 52.91210143343284, "name": "Bleu"}]}]}]}
task
[ "TRANSLATION" ]
41,466
shail-2512/nomic-embed-financial-matryoshka
shail-2512
sentence-similarity
[ "sentence-transformers", "safetensors", "nomic_bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:6300", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss", "custom_code", "en", "dataset:philschmid/finanical-rag-embedding-dataset", "arxiv:1908.10084", "arxiv:2205.13147", "arxiv:1705.00652", "base_model:nomic-ai/nomic-embed-text-v1.5", "base_model:finetune:nomic-ai/nomic-embed-text-v1.5", "license:apache-2.0", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-12-06T05:03:36Z
2024-12-07T20:37:12+00:00
9
2
--- base_model: nomic-ai/nomic-embed-text-v1.5 datasets: - philschmid/finanical-rag-embedding-dataset language: - en library_name: sentence-transformers license: apache-2.0 metrics: - cosine_accuracy@1 - cosine_accuracy@3 - cosine_accuracy@5 - cosine_accuracy@10 - cosine_precision@1 - cosine_precision@3 - cosine_precision@5 - cosine_precision@10 - cosine_recall@1 - cosine_recall@3 - cosine_recall@5 - cosine_recall@10 - cosine_ndcg@10 - cosine_mrr@10 - cosine_map@100 pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:6300 - loss:MatryoshkaLoss - loss:MultipleNegativesRankingLoss widget: - source_sentence: Where in the Annual Report can one find a description of certain legal matters and their impact on the company? sentences: - Apollo coordinates the delivery of new features, security updates, and platform configurations, ensuring the continuous operation of systems in any environment. It was introduced commercially in 2021. - In the Annual Report on Form 10-K, 'Item 1A. Risk Factors' provides a further description of certain legal matters and their impact on the company. - During fiscal 2022, we opened four new stores in Mexico. - source_sentence: How does the company assess uncertain tax positions? sentences: - We recognize tax benefits from uncertain tax positions only if we believe that it is more likely than not that the tax position will be sustained on examination by the taxing authorities based on the technical merits of the position. - CMS uses a risk-adjustment model which adjusts premiums paid to Medicare Advantage, or MA, plans according to health status of covered members. The risk-adjustment model, which CMS implemented pursuant to the Balanced Budget Act of 1997 (BBA) and the Benefits Improvement and Protection Act of 2000 (BIPA), generally pays more where a plan's membership has higher expected costs. Under this model, rates paid to MA plans are based on actuarially determined bids, which include a process whereby our prospective payments are based on our estimated cost of providing standard Medicare-covered benefits to an enrollee with a 'national average risk profile.' That baseline payment amount is adjusted to account for certain demographic characteristics and health status of our enrolled members. - Walmart Inc. reported total revenues of $611,289 million for the fiscal year ended January 31, 2023. - source_sentence: When does the 364-day facility entered into in August 2023 expire, and what is its total amount? sentences: - In 2023, the total revenue generated by Emgality amounted to 678.3. - In August 2023, we entered into a new 364-day facility. The 364-day facility of $3.15 billion expires in August 2024. - Diluted EPS increased $0.09, or 2%, to $5.90 as the decrease in net earnings was more than fully offset by a reduction in shares outstanding. - source_sentence: What does the company believe adds significant value to its business regarding intellectual property? sentences: - We believe that, to varying degrees, our trademarks, trade names, copyrights, proprietary processes, trade secrets, trade dress, domain names and similar intellectual property add significant value to our business - Railroad operating revenues declined 6.9% in 2023 compared to 2022, reflecting an overall volume decrease of 5.7% and a decrease in average revenue per car/unit of 0.6%, primarily attributable to lower fuel surcharge revenue, partially offset by favorable price and mix. - Cash provided by operating activities increased from $26.413 billion in 2022 to $28.501 billion in 2023, an increase of approximately $2.088 billion. - source_sentence: How are government incentives treated in accounting according to the given information? sentences: - The components of 'Other income (expense), net' for the year ended December 30, 2023, were $197 million; for December 31, 2022, they were $8 million; and for December 25, 2021, they were $55 million. - We are entitled to certain advanced manufacturing production credits under the IRA, and government incentives are not accounted for or classified as an income tax credit. We account for government incentives as a reduction of expense, a reduction of the cost of the capital investment or other income based on the substance of the incentive received. Benefits are generally recorded when there is reasonable assurance of receipt or, as it relates with advanced manufacturing production credits, upon the generation of the credit. - Basic net income per share is computed by dividing net income attributable to common stock by the weighted-average number of shares of common stock outstanding during the period. model-index: - name: Nomic Embed Financial Matryoshka results: - task: type: information-retrieval name: Information Retrieval dataset: name: dim 768 type: dim_768 metrics: - type: cosine_accuracy@1 value: 0.7185714285714285 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.87 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.9014285714285715 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.9357142857142857 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.7185714285714285 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.29 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.18028571428571427 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.09357142857142857 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.7185714285714285 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.87 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.9014285714285715 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.9357142857142857 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.8337966812161252 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.8004784580498868 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.8030662019934727 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 512 type: dim_512 metrics: - type: cosine_accuracy@1 value: 0.7157142857142857 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.8685714285714285 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.9028571428571428 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.9342857142857143 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.7157142857142857 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.2895238095238095 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.18057142857142855 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.09342857142857142 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.7157142857142857 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.8685714285714285 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.9028571428571428 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.9342857142857143 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.8320816465681472 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.7986201814058957 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.8013251784905495 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 256 type: dim_256 metrics: - type: cosine_accuracy@1 value: 0.7028571428571428 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.86 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.8914285714285715 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.9271428571428572 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.7028571428571428 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.2866666666666667 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.17828571428571427 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.09271428571428571 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.7028571428571428 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.86 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.8914285714285715 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.9271428571428572 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.8208030315973883 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.7862023809523814 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7893111186082761 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 128 type: dim_128 metrics: - type: cosine_accuracy@1 value: 0.7 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.8428571428571429 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.8771428571428571 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.9271428571428572 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.7 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.28095238095238095 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.1754285714285714 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.09271428571428571 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.7 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.8428571428571429 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.8771428571428571 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.9271428571428572 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.8174548081454337 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.7820821995464855 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7852661387487447 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 64 type: dim_64 metrics: - type: cosine_accuracy@1 value: 0.69 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.83 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.8671428571428571 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.9128571428571428 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.69 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.27666666666666667 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.1734285714285714 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.09128571428571428 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.69 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.83 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.8671428571428571 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.9128571428571428 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.804303333645382 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.769315192743764 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7729055647510643 name: Cosine Map@100 --- # Nomic Embed Financial Matryoshka This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [nomic-ai/nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) on the json dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [nomic-ai/nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) <!-- at revision d802ae16c9caed4d197895d27c6d529434cd8c6d --> - **Maximum Sequence Length:** 8192 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity - **Training Dataset:** - json - **Language:** en - **License:** apache-2.0 ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 8192, 'do_lower_case': False}) with Transformer model: NomicBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("shail-2512/nomic-embed-financial-matryoshka") # Run inference sentences = [ 'How are government incentives treated in accounting according to the given information?', 'We are entitled to certain advanced manufacturing production credits under the IRA, and government incentives are not accounted for or classified as an income tax credit. We account for government incentives as a reduction of expense, a reduction of the cost of the capital investment or other income based on the substance of the incentive received. Benefits are generally recorded when there is reasonable assurance of receipt or, as it relates with advanced manufacturing production credits, upon the generation of the credit.', 'Basic net income per share is computed by dividing net income attributable to common stock by the weighted-average number of shares of common stock outstanding during the period.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Datasets: `dim_768`, `dim_512`, `dim_256`, `dim_128` and `dim_64` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | dim_768 | dim_512 | dim_256 | dim_128 | dim_64 | |:--------------------|:-----------|:-----------|:-----------|:-----------|:-----------| | cosine_accuracy@1 | 0.7186 | 0.7157 | 0.7029 | 0.7 | 0.69 | | cosine_accuracy@3 | 0.87 | 0.8686 | 0.86 | 0.8429 | 0.83 | | cosine_accuracy@5 | 0.9014 | 0.9029 | 0.8914 | 0.8771 | 0.8671 | | cosine_accuracy@10 | 0.9357 | 0.9343 | 0.9271 | 0.9271 | 0.9129 | | cosine_precision@1 | 0.7186 | 0.7157 | 0.7029 | 0.7 | 0.69 | | cosine_precision@3 | 0.29 | 0.2895 | 0.2867 | 0.281 | 0.2767 | | cosine_precision@5 | 0.1803 | 0.1806 | 0.1783 | 0.1754 | 0.1734 | | cosine_precision@10 | 0.0936 | 0.0934 | 0.0927 | 0.0927 | 0.0913 | | cosine_recall@1 | 0.7186 | 0.7157 | 0.7029 | 0.7 | 0.69 | | cosine_recall@3 | 0.87 | 0.8686 | 0.86 | 0.8429 | 0.83 | | cosine_recall@5 | 0.9014 | 0.9029 | 0.8914 | 0.8771 | 0.8671 | | cosine_recall@10 | 0.9357 | 0.9343 | 0.9271 | 0.9271 | 0.9129 | | **cosine_ndcg@10** | **0.8338** | **0.8321** | **0.8208** | **0.8175** | **0.8043** | | cosine_mrr@10 | 0.8005 | 0.7986 | 0.7862 | 0.7821 | 0.7693 | | cosine_map@100 | 0.8031 | 0.8013 | 0.7893 | 0.7853 | 0.7729 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### json * Dataset: json * Size: 6,300 training samples * Columns: <code>anchor</code> and <code>positive</code> * Approximate statistics based on the first 1000 samples: | | anchor | positive | |:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 2 tokens</li><li>mean: 20.65 tokens</li><li>max: 45 tokens</li></ul> | <ul><li>min: 2 tokens</li><li>mean: 46.29 tokens</li><li>max: 326 tokens</li></ul> | * Samples: | anchor | positive | |:-------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Where is the Investor Relations office of Intuit Inc. located?</code> | <code>Copies of this Annual Report on Form 10-K may also be obtained without charge by contacting Investor Relations, Intuit Inc., P.O. Box 7850, Mountain View, California 94039-7850, calling 650-944-6000, or emailing [email protected].</code> | | <code>Where is the Financial Statement Schedule located in the Form 10-K?</code> | <code>The Financial Statement Schedule is found on page S-1 of the Form 10-K.</code> | | <code>What factors are considered when evaluating the realization of deferred tax assets?</code> | <code>Many factors are considered when assessing whether it is more likely than not that the deferred tax assets will be realized, including recent cumulative earnings, expectations of future taxable income, carryforward periods and other relevant quantitative and qualitative factors.</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Evaluation Dataset #### json * Dataset: json * Size: 700 evaluation samples * Columns: <code>anchor</code> and <code>positive</code> * Approximate statistics based on the first 700 samples: | | anchor | positive | |:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 2 tokens</li><li>mean: 20.71 tokens</li><li>max: 45 tokens</li></ul> | <ul><li>min: 9 tokens</li><li>mean: 46.74 tokens</li><li>max: 248 tokens</li></ul> | * Samples: | anchor | positive | |:--------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>What fiscal changes did Garmin make in January 2023?</code> | <code>The Company announced an organization realignment in January 2023, which combined the consumer auto operating segment with the outdoor operating segment.</code> | | <code>Where are the details about 'Legal Matters' and 'Government Investigations, Audits and Reviews' located in the financial statements?</code> | <code>The information required by this Item 3 is incorporated herein by reference to the information set forth under the captions 'Legal Matters' and 'Government Investigations, Audits and Reviews' in Note 12 of the Notes to the Consolidated Financial Statements included in Part II, Item 8, 'Financial Statements and Supplementary Data'.</code> | | <code>Are the pages of IBM's Management’s Discussion and Analysis section in the 2023 Annual Report included in the report itself?</code> | <code>In IBM’s 2023 Annual Report, the pages containing Management’s Discussion and Analysis of Financial Condition and Results of Operations (pages 6 through 40) are incorporated by reference.</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `gradient_accumulation_steps`: 8 - `learning_rate`: 2e-05 - `lr_scheduler_type`: cosine - `warmup_ratio`: 0.1 - `bf16`: True - `load_best_model_at_end`: True - `optim`: adamw_torch_fused - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - `per_device_train_batch_size`: 8 - `per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 8 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 3 - `max_steps`: -1 - `lr_scheduler_type`: cosine - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: True - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch_fused - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | Validation Loss | dim_768_cosine_ndcg@10 | dim_512_cosine_ndcg@10 | dim_256_cosine_ndcg@10 | dim_128_cosine_ndcg@10 | dim_64_cosine_ndcg@10 | |:-------:|:------:|:-------------:|:---------------:|:----------------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:| | 0.1015 | 10 | 0.2626 | - | - | - | - | - | - | | 0.2030 | 20 | 0.1764 | - | - | - | - | - | - | | 0.1015 | 10 | 0.0311 | - | - | - | - | - | - | | 0.2030 | 20 | 0.0259 | - | - | - | - | - | - | | 0.1015 | 10 | 0.0056 | - | - | - | - | - | - | | 0.2030 | 20 | 0.0064 | - | - | - | - | - | - | | 0.1015 | 10 | 0.0016 | - | - | - | - | - | - | | 0.2030 | 20 | 0.0015 | - | - | - | - | - | - | | 0.1015 | 10 | 0.0006 | - | - | - | - | - | - | | 0.2030 | 20 | 0.0006 | - | - | - | - | - | - | | 0.3046 | 30 | 0.1324 | - | - | - | - | - | - | | 0.4061 | 40 | 0.113 | - | - | - | - | - | - | | 0.5076 | 50 | 0.128 | - | - | - | - | - | - | | 0.6091 | 60 | 0.1134 | - | - | - | - | - | - | | 0.7107 | 70 | 0.056 | - | - | - | - | - | - | | 0.8122 | 80 | 0.1086 | - | - | - | - | - | - | | 0.9137 | 90 | 0.1008 | - | - | - | - | - | - | | **1.0** | **99** | **-** | **0.0771** | **0.8286** | **0.8306** | **0.8266** | **0.8197** | **0.7955** | | 1.0102 | 100 | 0.0491 | - | - | - | - | - | - | | 1.1117 | 110 | 0.0029 | - | - | - | - | - | - | | 1.2132 | 120 | 0.0009 | - | - | - | - | - | - | | 1.3147 | 130 | 0.0326 | - | - | - | - | - | - | | 1.4162 | 140 | 0.0077 | - | - | - | - | - | - | | 1.5178 | 150 | 0.0109 | - | - | - | - | - | - | | 1.6193 | 160 | 0.0047 | - | - | - | - | - | - | | 1.7208 | 170 | 0.004 | - | - | - | - | - | - | | 1.8223 | 180 | 0.0122 | - | - | - | - | - | - | | 1.9239 | 190 | 0.0043 | - | - | - | - | - | - | | 2.0 | 198 | - | 0.0758 | 0.8296 | 0.8330 | 0.8222 | 0.8169 | 0.7998 | | 2.0203 | 200 | 0.0032 | - | - | - | - | - | - | | 2.1218 | 210 | 0.0002 | - | - | - | - | - | - | | 2.2234 | 220 | 0.0002 | - | - | - | - | - | - | | 2.3249 | 230 | 0.0097 | - | - | - | - | - | - | | 2.4264 | 240 | 0.0012 | - | - | - | - | - | - | | 2.5279 | 250 | 0.0012 | - | - | - | - | - | - | | 2.6294 | 260 | 0.0009 | - | - | - | - | - | - | | 2.7310 | 270 | 0.0007 | - | - | - | - | - | - | | 2.8325 | 280 | 0.0019 | - | - | - | - | - | - | | 2.9340 | 290 | 0.0009 | - | - | - | - | - | - | | 2.9746 | 294 | - | 0.0744 | 0.8338 | 0.8321 | 0.8208 | 0.8175 | 0.8043 | * The bold row denotes the saved checkpoint. ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.3.1 - Transformers: 4.47.0 - PyTorch: 2.5.1+cu121 - Accelerate: 1.1.1 - Datasets: 3.1.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MatryoshkaLoss ```bibtex @misc{kusupati2024matryoshka, title={Matryoshka Representation Learning}, author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi}, year={2024}, eprint={2205.13147}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# Nomic Embed Financial Matryoshka This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [nomic-ai/nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) on the json dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [nomic-ai/nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) <!-- at revision d802ae16c9caed4d197895d27c6d529434cd8c6d --> - **Maximum Sequence Length:** 8192 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity - **Training Dataset:** - json - **Language:** en - **License:** apache-2.0 ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 8192, 'do_lower_case': False}) with Transformer model: NomicBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("shail-2512/nomic-embed-financial-matryoshka") # Run inference sentences = [ 'How are government incentives treated in accounting according to the given information?', 'We are entitled to certain advanced manufacturing production credits under the IRA, and government incentives are not accounted for or classified as an income tax credit. We account for government incentives as a reduction of expense, a reduction of the cost of the capital investment or other income based on the substance of the incentive received. Benefits are generally recorded when there is reasonable assurance of receipt or, as it relates with advanced manufacturing production credits, upon the generation of the credit.', 'Basic net income per share is computed by dividing net income attributable to common stock by the weighted-average number of shares of common stock outstanding during the period.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Datasets: `dim_768`, `dim_512`, `dim_256`, `dim_128` and `dim_64` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | dim_768 | dim_512 | dim_256 | dim_128 | dim_64 | |:--------------------|:-----------|:-----------|:-----------|:-----------|:-----------| | cosine_accuracy@1 | 0.7186 | 0.7157 | 0.7029 | 0.7 | 0.69 | | cosine_accuracy@3 | 0.87 | 0.8686 | 0.86 | 0.8429 | 0.83 | | cosine_accuracy@5 | 0.9014 | 0.9029 | 0.8914 | 0.8771 | 0.8671 | | cosine_accuracy@10 | 0.9357 | 0.9343 | 0.9271 | 0.9271 | 0.9129 | | cosine_precision@1 | 0.7186 | 0.7157 | 0.7029 | 0.7 | 0.69 | | cosine_precision@3 | 0.29 | 0.2895 | 0.2867 | 0.281 | 0.2767 | | cosine_precision@5 | 0.1803 | 0.1806 | 0.1783 | 0.1754 | 0.1734 | | cosine_precision@10 | 0.0936 | 0.0934 | 0.0927 | 0.0927 | 0.0913 | | cosine_recall@1 | 0.7186 | 0.7157 | 0.7029 | 0.7 | 0.69 | | cosine_recall@3 | 0.87 | 0.8686 | 0.86 | 0.8429 | 0.83 | | cosine_recall@5 | 0.9014 | 0.9029 | 0.8914 | 0.8771 | 0.8671 | | cosine_recall@10 | 0.9357 | 0.9343 | 0.9271 | 0.9271 | 0.9129 | | **cosine_ndcg@10** | **0.8338** | **0.8321** | **0.8208** | **0.8175** | **0.8043** | | cosine_mrr@10 | 0.8005 | 0.7986 | 0.7862 | 0.7821 | 0.7693 | | cosine_map@100 | 0.8031 | 0.8013 | 0.7893 | 0.7853 | 0.7729 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### json * Dataset: json * Size: 6,300 training samples * Columns: <code>anchor</code> and <code>positive</code> * Approximate statistics based on the first 1000 samples: | | anchor | positive | |:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 2 tokens</li><li>mean: 20.65 tokens</li><li>max: 45 tokens</li></ul> | <ul><li>min: 2 tokens</li><li>mean: 46.29 tokens</li><li>max: 326 tokens</li></ul> | * Samples: | anchor | positive | |:-------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Where is the Investor Relations office of Intuit Inc. located?</code> | <code>Copies of this Annual Report on Form 10-K may also be obtained without charge by contacting Investor Relations, Intuit Inc., P.O. Box 7850, Mountain View, California 94039-7850, calling 650-944-6000, or emailing [email protected].</code> | | <code>Where is the Financial Statement Schedule located in the Form 10-K?</code> | <code>The Financial Statement Schedule is found on page S-1 of the Form 10-K.</code> | | <code>What factors are considered when evaluating the realization of deferred tax assets?</code> | <code>Many factors are considered when assessing whether it is more likely than not that the deferred tax assets will be realized, including recent cumulative earnings, expectations of future taxable income, carryforward periods and other relevant quantitative and qualitative factors.</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Evaluation Dataset #### json * Dataset: json * Size: 700 evaluation samples * Columns: <code>anchor</code> and <code>positive</code> * Approximate statistics based on the first 700 samples: | | anchor | positive | |:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 2 tokens</li><li>mean: 20.71 tokens</li><li>max: 45 tokens</li></ul> | <ul><li>min: 9 tokens</li><li>mean: 46.74 tokens</li><li>max: 248 tokens</li></ul> | * Samples: | anchor | positive | |:--------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>What fiscal changes did Garmin make in January 2023?</code> | <code>The Company announced an organization realignment in January 2023, which combined the consumer auto operating segment with the outdoor operating segment.</code> | | <code>Where are the details about 'Legal Matters' and 'Government Investigations, Audits and Reviews' located in the financial statements?</code> | <code>The information required by this Item 3 is incorporated herein by reference to the information set forth under the captions 'Legal Matters' and 'Government Investigations, Audits and Reviews' in Note 12 of the Notes to the Consolidated Financial Statements included in Part II, Item 8, 'Financial Statements and Supplementary Data'.</code> | | <code>Are the pages of IBM's Management’s Discussion and Analysis section in the 2023 Annual Report included in the report itself?</code> | <code>In IBM’s 2023 Annual Report, the pages containing Management’s Discussion and Analysis of Financial Condition and Results of Operations (pages 6 through 40) are incorporated by reference.</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `gradient_accumulation_steps`: 8 - `learning_rate`: 2e-05 - `lr_scheduler_type`: cosine - `warmup_ratio`: 0.1 - `bf16`: True - `load_best_model_at_end`: True - `optim`: adamw_torch_fused - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - `per_device_train_batch_size`: 8 - `per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 8 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 3 - `max_steps`: -1 - `lr_scheduler_type`: cosine - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: True - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch_fused - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | Validation Loss | dim_768_cosine_ndcg@10 | dim_512_cosine_ndcg@10 | dim_256_cosine_ndcg@10 | dim_128_cosine_ndcg@10 | dim_64_cosine_ndcg@10 | |:-------:|:------:|:-------------:|:---------------:|:----------------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:| | 0.1015 | 10 | 0.2626 | - | - | - | - | - | - | | 0.2030 | 20 | 0.1764 | - | - | - | - | - | - | | 0.1015 | 10 | 0.0311 | - | - | - | - | - | - | | 0.2030 | 20 | 0.0259 | - | - | - | - | - | - | | 0.1015 | 10 | 0.0056 | - | - | - | - | - | - | | 0.2030 | 20 | 0.0064 | - | - | - | - | - | - | | 0.1015 | 10 | 0.0016 | - | - | - | - | - | - | | 0.2030 | 20 | 0.0015 | - | - | - | - | - | - | | 0.1015 | 10 | 0.0006 | - | - | - | - | - | - | | 0.2030 | 20 | 0.0006 | - | - | - | - | - | - | | 0.3046 | 30 | 0.1324 | - | - | - | - | - | - | | 0.4061 | 40 | 0.113 | - | - | - | - | - | - | | 0.5076 | 50 | 0.128 | - | - | - | - | - | - | | 0.6091 | 60 | 0.1134 | - | - | - | - | - | - | | 0.7107 | 70 | 0.056 | - | - | - | - | - | - | | 0.8122 | 80 | 0.1086 | - | - | - | - | - | - | | 0.9137 | 90 | 0.1008 | - | - | - | - | - | - | | **1.0** | **99** | **-** | **0.0771** | **0.8286** | **0.8306** | **0.8266** | **0.8197** | **0.7955** | | 1.0102 | 100 | 0.0491 | - | - | - | - | - | - | | 1.1117 | 110 | 0.0029 | - | - | - | - | - | - | | 1.2132 | 120 | 0.0009 | - | - | - | - | - | - | | 1.3147 | 130 | 0.0326 | - | - | - | - | - | - | | 1.4162 | 140 | 0.0077 | - | - | - | - | - | - | | 1.5178 | 150 | 0.0109 | - | - | - | - | - | - | | 1.6193 | 160 | 0.0047 | - | - | - | - | - | - | | 1.7208 | 170 | 0.004 | - | - | - | - | - | - | | 1.8223 | 180 | 0.0122 | - | - | - | - | - | - | | 1.9239 | 190 | 0.0043 | - | - | - | - | - | - | | 2.0 | 198 | - | 0.0758 | 0.8296 | 0.8330 | 0.8222 | 0.8169 | 0.7998 | | 2.0203 | 200 | 0.0032 | - | - | - | - | - | - | | 2.1218 | 210 | 0.0002 | - | - | - | - | - | - | | 2.2234 | 220 | 0.0002 | - | - | - | - | - | - | | 2.3249 | 230 | 0.0097 | - | - | - | - | - | - | | 2.4264 | 240 | 0.0012 | - | - | - | - | - | - | | 2.5279 | 250 | 0.0012 | - | - | - | - | - | - | | 2.6294 | 260 | 0.0009 | - | - | - | - | - | - | | 2.7310 | 270 | 0.0007 | - | - | - | - | - | - | | 2.8325 | 280 | 0.0019 | - | - | - | - | - | - | | 2.9340 | 290 | 0.0009 | - | - | - | - | - | - | | 2.9746 | 294 | - | 0.0744 | 0.8338 | 0.8321 | 0.8208 | 0.8175 | 0.8043 | * The bold row denotes the saved checkpoint. ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.3.1 - Transformers: 4.47.0 - PyTorch: 2.5.1+cu121 - Accelerate: 1.1.1 - Datasets: 3.1.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MatryoshkaLoss ```bibtex @misc{kusupati2024matryoshka, title={Matryoshka Representation Learning}, author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi}, year={2024}, eprint={2205.13147}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "nomic-ai/nomic-embed-text-v1.5", "datasets": ["philschmid/finanical-rag-embedding-dataset"], "language": ["en"], "library_name": "sentence-transformers", "license": "apache-2.0", "metrics": ["cosine_accuracy@1", "cosine_accuracy@3", "cosine_accuracy@5", "cosine_accuracy@10", "cosine_precision@1", "cosine_precision@3", "cosine_precision@5", "cosine_precision@10", "cosine_recall@1", "cosine_recall@3", "cosine_recall@5", "cosine_recall@10", "cosine_ndcg@10", "cosine_mrr@10", "cosine_map@100"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:6300", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Where in the Annual Report can one find a description of certain legal matters and their impact on the company?", "sentences": ["Apollo coordinates the delivery of new features, security updates, and platform configurations, ensuring the continuous operation of systems in any environment. It was introduced commercially in 2021.", "In the Annual Report on Form 10-K, 'Item 1A. Risk Factors' provides a further description of certain legal matters and their impact on the company.", "During fiscal 2022, we opened four new stores in Mexico."]}, {"source_sentence": "How does the company assess uncertain tax positions?", "sentences": ["We recognize tax benefits from uncertain tax positions only if we believe that it is more likely than not that the tax position will be sustained on examination by the taxing authorities based on the technical merits of the position.", "CMS uses a risk-adjustment model which adjusts premiums paid to Medicare Advantage, or MA, plans according to health status of covered members. The risk-adjustment model, which CMS implemented pursuant to the Balanced Budget Act of 1997 (BBA) and the Benefits Improvement and Protection Act of 2000 (BIPA), generally pays more where a plan's membership has higher expected costs. Under this model, rates paid to MA plans are based on actuarially determined bids, which include a process whereby our prospective payments are based on our estimated cost of providing standard Medicare-covered benefits to an enrollee with a 'national average risk profile.' That baseline payment amount is adjusted to account for certain demographic characteristics and health status of our enrolled members.", "Walmart Inc. reported total revenues of $611,289 million for the fiscal year ended January 31, 2023."]}, {"source_sentence": "When does the 364-day facility entered into in August 2023 expire, and what is its total amount?", "sentences": ["In 2023, the total revenue generated by Emgality amounted to 678.3.", "In August 2023, we entered into a new 364-day facility. The 364-day facility of $3.15 billion expires in August 2024.", "Diluted EPS increased $0.09, or 2%, to $5.90 as the decrease in net earnings was more than fully offset by a reduction in shares outstanding."]}, {"source_sentence": "What does the company believe adds significant value to its business regarding intellectual property?", "sentences": ["We believe that, to varying degrees, our trademarks, trade names, copyrights, proprietary processes, trade secrets, trade dress, domain names and similar intellectual property add significant value to our business", "Railroad operating revenues declined 6.9% in 2023 compared to 2022, reflecting an overall volume decrease of 5.7% and a decrease in average revenue per car/unit of 0.6%, primarily attributable to lower fuel surcharge revenue, partially offset by favorable price and mix.", "Cash provided by operating activities increased from $26.413 billion in 2022 to $28.501 billion in 2023, an increase of approximately $2.088 billion."]}, {"source_sentence": "How are government incentives treated in accounting according to the given information?", "sentences": ["The components of 'Other income (expense), net' for the year ended December 30, 2023, were $197 million; for December 31, 2022, they were $8 million; and for December 25, 2021, they were $55 million.", "We are entitled to certain advanced manufacturing production credits under the IRA, and government incentives are not accounted for or classified as an income tax credit. We account for government incentives as a reduction of expense, a reduction of the cost of the capital investment or other income based on the substance of the incentive received. Benefits are generally recorded when there is reasonable assurance of receipt or, as it relates with advanced manufacturing production credits, upon the generation of the credit.", "Basic net income per share is computed by dividing net income attributable to common stock by the weighted-average number of shares of common stock outstanding during the period."]}], "model-index": [{"name": "Nomic Embed Financial Matryoshka", "results": [{"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 768", "type": "dim_768"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.7185714285714285, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.87, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.9014285714285715, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.9357142857142857, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.7185714285714285, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.29, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.18028571428571427, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.09357142857142857, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.7185714285714285, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.87, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.9014285714285715, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.9357142857142857, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.8337966812161252, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.8004784580498868, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.8030662019934727, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 512", "type": "dim_512"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.7157142857142857, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.8685714285714285, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.9028571428571428, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.9342857142857143, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.7157142857142857, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.2895238095238095, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.18057142857142855, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.09342857142857142, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.7157142857142857, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.8685714285714285, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.9028571428571428, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.9342857142857143, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.8320816465681472, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7986201814058957, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.8013251784905495, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 256", "type": "dim_256"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.7028571428571428, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.86, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.8914285714285715, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.9271428571428572, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.7028571428571428, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.2866666666666667, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.17828571428571427, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.09271428571428571, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.7028571428571428, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.86, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.8914285714285715, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.9271428571428572, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.8208030315973883, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7862023809523814, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7893111186082761, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 128", "type": "dim_128"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.7, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.8428571428571429, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.8771428571428571, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.9271428571428572, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.7, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.28095238095238095, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.1754285714285714, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.09271428571428571, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.7, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.8428571428571429, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.8771428571428571, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.9271428571428572, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.8174548081454337, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7820821995464855, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7852661387487447, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 64", "type": "dim_64"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.69, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.83, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.8671428571428571, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.9128571428571428, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.69, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.27666666666666667, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.1734285714285714, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.09128571428571428, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.69, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.83, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.8671428571428571, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.9128571428571428, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.804303333645382, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.769315192743764, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7729055647510643, "name": "Cosine Map@100"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,468
may-s-d/t5-finetuned-NYT
may-s-d
summarization
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "summarization", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-12-03T16:03:46Z
2022-12-03T20:29:27+00:00
18
0
--- license: apache-2.0 metrics: - rouge tags: - summarization - generated_from_trainer model-index: - name: t5-finetuned-NYT results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-finetuned-NYT This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.2519 - Rouge1: 45.692 - Rouge2: 32.1167 - Rougel: 44.3548 - Rougelsum: 44.3959 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:| | No log | 1.0 | 2516 | 2.4293 | 38.1445 | 25.9377 | 36.5758 | 36.6165 | | No log | 2.0 | 5032 | 2.3661 | 40.356 | 27.7563 | 38.993 | 39.036 | | No log | 3.0 | 7548 | 2.3225 | 43.6557 | 30.2246 | 42.2529 | 42.2804 | | No log | 4.0 | 10064 | 2.2852 | 43.7584 | 30.4744 | 42.3437 | 42.3871 | | No log | 5.0 | 12580 | 2.2677 | 45.6522 | 32.0921 | 44.311 | 44.3587 | | No log | 6.0 | 15096 | 2.2598 | 45.4426 | 31.7845 | 44.1273 | 44.1394 | | No log | 7.0 | 17612 | 2.2529 | 45.6841 | 32.1469 | 44.3355 | 44.37 | | 3.2998 | 8.0 | 20128 | 2.2519 | 45.692 | 32.1167 | 44.3548 | 44.3959 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-finetuned-NYT This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.2519 - Rouge1: 45.692 - Rouge2: 32.1167 - Rougel: 44.3548 - Rougelsum: 44.3959 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:| | No log | 1.0 | 2516 | 2.4293 | 38.1445 | 25.9377 | 36.5758 | 36.6165 | | No log | 2.0 | 5032 | 2.3661 | 40.356 | 27.7563 | 38.993 | 39.036 | | No log | 3.0 | 7548 | 2.3225 | 43.6557 | 30.2246 | 42.2529 | 42.2804 | | No log | 4.0 | 10064 | 2.2852 | 43.7584 | 30.4744 | 42.3437 | 42.3871 | | No log | 5.0 | 12580 | 2.2677 | 45.6522 | 32.0921 | 44.311 | 44.3587 | | No log | 6.0 | 15096 | 2.2598 | 45.4426 | 31.7845 | 44.1273 | 44.1394 | | No log | 7.0 | 17612 | 2.2529 | 45.6841 | 32.1469 | 44.3355 | 44.37 | | 3.2998 | 8.0 | 20128 | 2.2519 | 45.692 | 32.1167 | 44.3548 | 44.3959 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
{"license": "apache-2.0", "metrics": ["rouge"], "tags": ["summarization", "generated_from_trainer"], "model-index": [{"name": "t5-finetuned-NYT", "results": []}]}
task
[ "SUMMARIZATION" ]
41,469
ellarose/test-trainer-alternate
ellarose
text-classification
[ "setfit", "safetensors", "mpnet", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "base_model:sentence-transformers/paraphrase-mpnet-base-v2", "base_model:finetune:sentence-transformers/paraphrase-mpnet-base-v2", "model-index", "region:us" ]
2024-08-28T21:54:34Z
2024-08-30T22:06:04+00:00
3
0
--- base_model: sentence-transformers/paraphrase-mpnet-base-v2 library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: ballades (formes fixes) - text: prison fiction - text: gregorian chants - text: argentina--buenos aires, port of - text: passepieds (music) inference: true model-index: - name: SetFit with sentence-transformers/paraphrase-mpnet-base-v2 results: - task: type: text-classification name: Text Classification dataset: name: Unknown type: unknown split: test metrics: - type: accuracy value: 0.9555555555555556 name: Accuracy --- # SetFit with sentence-transformers/paraphrase-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:-----------|:-----------------------------------------------------------------------------------------------------------------| | subject | <ul><li>'vidourle river (france)'</li><li>'knockout kings 2000 (game)'</li><li>'social practice (art)'</li></ul> | | genre/form | <ul><li>'hadith stories'</li><li>'discographies'</li><li>'dance drama'</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.9556 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("ellarose/test-trainer-alternate") # Run inference preds = model("prison fiction") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:-------|:----| | Word count | 1 | 2.415 | 10 | | Label | Training Sample Count | |:-----------|:----------------------| | subject | 500 | | genre/form | 500 | ### Training Hyperparameters - batch_size: (40, 40) - num_epochs: (10, 10) - max_steps: -1 - sampling_strategy: oversampling - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: True ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:-------:|:---------:|:-------------:|:---------------:| | 0.0001 | 1 | 0.3379 | - | | 0.0040 | 50 | 0.3311 | - | | 0.0080 | 100 | 0.3642 | - | | 0.0120 | 150 | 0.3077 | - | | 0.0160 | 200 | 0.2542 | - | | 0.0200 | 250 | 0.233 | - | | 0.0240 | 300 | 0.23 | - | | 0.0279 | 350 | 0.2247 | - | | 0.0319 | 400 | 0.2009 | - | | 0.0359 | 450 | 0.2354 | - | | 0.0399 | 500 | 0.1823 | - | | 0.0439 | 550 | 0.2048 | - | | 0.0479 | 600 | 0.1546 | - | | 0.0519 | 650 | 0.1363 | - | | 0.0559 | 700 | 0.1031 | - | | 0.0599 | 750 | 0.0668 | - | | 0.0639 | 800 | 0.1156 | - | | 0.0679 | 850 | 0.0435 | - | | 0.0719 | 900 | 0.0495 | - | | 0.0758 | 950 | 0.046 | - | | 0.0798 | 1000 | 0.0424 | - | | 0.0838 | 1050 | 0.1312 | - | | 0.0878 | 1100 | 0.0246 | - | | 0.0918 | 1150 | 0.0273 | - | | 0.0958 | 1200 | 0.0075 | - | | 0.0998 | 1250 | 0.0203 | - | | 0.1038 | 1300 | 0.0073 | - | | 0.1078 | 1350 | 0.0328 | - | | 0.1118 | 1400 | 0.0274 | - | | 0.1158 | 1450 | 0.0042 | - | | 0.1198 | 1500 | 0.0494 | - | | 0.1238 | 1550 | 0.0413 | - | | 0.1277 | 1600 | 0.0036 | - | | 0.1317 | 1650 | 0.0329 | - | | 0.1357 | 1700 | 0.0168 | - | | 0.1397 | 1750 | 0.0028 | - | | 0.1437 | 1800 | 0.0227 | - | | 0.1477 | 1850 | 0.002 | - | | 0.1517 | 1900 | 0.0121 | - | | 0.1557 | 1950 | 0.0018 | - | | 0.1597 | 2000 | 0.0019 | - | | 0.1637 | 2050 | 0.001 | - | | 0.1677 | 2100 | 0.0009 | - | | 0.1717 | 2150 | 0.0012 | - | | 0.1756 | 2200 | 0.0007 | - | | 0.1796 | 2250 | 0.001 | - | | 0.1836 | 2300 | 0.0008 | - | | 0.1876 | 2350 | 0.0009 | - | | 0.1916 | 2400 | 0.001 | - | | 0.1956 | 2450 | 0.0009 | - | | 0.1996 | 2500 | 0.0247 | - | | 0.2036 | 2550 | 0.0007 | - | | 0.2076 | 2600 | 0.0008 | - | | 0.2116 | 2650 | 0.0008 | - | | 0.2156 | 2700 | 0.0006 | - | | 0.2196 | 2750 | 0.0023 | - | | 0.2236 | 2800 | 0.0007 | - | | 0.2275 | 2850 | 0.0004 | - | | 0.2315 | 2900 | 0.0054 | - | | 0.2355 | 2950 | 0.0007 | - | | 0.2395 | 3000 | 0.0004 | - | | 0.2435 | 3050 | 0.0007 | - | | 0.2475 | 3100 | 0.0244 | - | | 0.2515 | 3150 | 0.0243 | - | | 0.2555 | 3200 | 0.0005 | - | | 0.2595 | 3250 | 0.0004 | - | | 0.2635 | 3300 | 0.0004 | - | | 0.2675 | 3350 | 0.0004 | - | | 0.2715 | 3400 | 0.0004 | - | | 0.2754 | 3450 | 0.0482 | - | | 0.2794 | 3500 | 0.0004 | - | | 0.2834 | 3550 | 0.0005 | - | | 0.2874 | 3600 | 0.0005 | - | | 0.2914 | 3650 | 0.0007 | - | | 0.2954 | 3700 | 0.0063 | - | | 0.2994 | 3750 | 0.0043 | - | | 0.3034 | 3800 | 0.0005 | - | | 0.3074 | 3850 | 0.0366 | - | | 0.3114 | 3900 | 0.0245 | - | | 0.3154 | 3950 | 0.0242 | - | | 0.3194 | 4000 | 0.0003 | - | | 0.3234 | 4050 | 0.0007 | - | | 0.3273 | 4100 | 0.0123 | - | | 0.3313 | 4150 | 0.0004 | - | | 0.3353 | 4200 | 0.0007 | - | | 0.3393 | 4250 | 0.0238 | - | | 0.3433 | 4300 | 0.0002 | - | | 0.3473 | 4350 | 0.0238 | - | | 0.3513 | 4400 | 0.0003 | - | | 0.3553 | 4450 | 0.0224 | - | | 0.3593 | 4500 | 0.0006 | - | | 0.3633 | 4550 | 0.0005 | - | | 0.3673 | 4600 | 0.0004 | - | | 0.3713 | 4650 | 0.0025 | - | | 0.3752 | 4700 | 0.0003 | - | | 0.3792 | 4750 | 0.0218 | - | | 0.3832 | 4800 | 0.001 | - | | 0.3872 | 4850 | 0.0004 | - | | 0.3912 | 4900 | 0.0004 | - | | 0.3952 | 4950 | 0.0161 | - | | 0.3992 | 5000 | 0.0008 | - | | 0.4032 | 5050 | 0.0024 | - | | 0.4072 | 5100 | 0.0003 | - | | 0.4112 | 5150 | 0.0002 | - | | 0.4152 | 5200 | 0.0005 | - | | 0.4192 | 5250 | 0.0021 | - | | 0.4232 | 5300 | 0.0235 | - | | 0.4271 | 5350 | 0.0035 | - | | 0.4311 | 5400 | 0.0007 | - | | 0.4351 | 5450 | 0.0007 | - | | 0.4391 | 5500 | 0.0217 | - | | 0.4431 | 5550 | 0.0006 | - | | 0.4471 | 5600 | 0.0054 | - | | 0.4511 | 5650 | 0.002 | - | | 0.4551 | 5700 | 0.0013 | - | | 0.4591 | 5750 | 0.0026 | - | | 0.4631 | 5800 | 0.0051 | - | | 0.4671 | 5850 | 0.0003 | - | | 0.4711 | 5900 | 0.0003 | - | | 0.4750 | 5950 | 0.0119 | - | | 0.4790 | 6000 | 0.0011 | - | | 0.4830 | 6050 | 0.0253 | - | | 0.4870 | 6100 | 0.0244 | - | | 0.4910 | 6150 | 0.0002 | - | | 0.4950 | 6200 | 0.0002 | - | | 0.4990 | 6250 | 0.0002 | - | | 0.5030 | 6300 | 0.0167 | - | | 0.5070 | 6350 | 0.0002 | - | | 0.5110 | 6400 | 0.0003 | - | | 0.5150 | 6450 | 0.0012 | - | | 0.5190 | 6500 | 0.003 | - | | 0.5230 | 6550 | 0.0003 | - | | 0.5269 | 6600 | 0.0003 | - | | 0.5309 | 6650 | 0.0006 | - | | 0.5349 | 6700 | 0.0026 | - | | 0.5389 | 6750 | 0.0004 | - | | 0.5429 | 6800 | 0.0001 | - | | 0.5469 | 6850 | 0.0002 | - | | 0.5509 | 6900 | 0.0003 | - | | 0.5549 | 6950 | 0.0028 | - | | 0.5589 | 7000 | 0.0022 | - | | 0.5629 | 7050 | 0.0007 | - | | 0.5669 | 7100 | 0.0004 | - | | 0.5709 | 7150 | 0.0002 | - | | 0.5749 | 7200 | 0.0001 | - | | 0.5788 | 7250 | 0.0122 | - | | 0.5828 | 7300 | 0.0017 | - | | 0.5868 | 7350 | 0.0001 | - | | 0.5908 | 7400 | 0.0002 | - | | 0.5948 | 7450 | 0.0001 | - | | 0.5988 | 7500 | 0.0003 | - | | 0.6028 | 7550 | 0.0011 | - | | 0.6068 | 7600 | 0.0002 | - | | 0.6108 | 7650 | 0.0003 | - | | 0.6148 | 7700 | 0.0001 | - | | 0.6188 | 7750 | 0.0001 | - | | 0.6228 | 7800 | 0.0001 | - | | 0.6267 | 7850 | 0.0002 | - | | 0.6307 | 7900 | 0.0149 | - | | 0.6347 | 7950 | 0.0106 | - | | 0.6387 | 8000 | 0.0015 | - | | 0.6427 | 8050 | 0.0001 | - | | 0.6467 | 8100 | 0.0009 | - | | 0.6507 | 8150 | 0.0015 | - | | 0.6547 | 8200 | 0.0306 | - | | 0.6587 | 8250 | 0.0054 | - | | 0.6627 | 8300 | 0.0011 | - | | 0.6667 | 8350 | 0.0003 | - | | 0.6707 | 8400 | 0.0001 | - | | 0.6747 | 8450 | 0.0024 | - | | 0.6786 | 8500 | 0.0001 | - | | 0.6826 | 8550 | 0.0001 | - | | 0.6866 | 8600 | 0.0001 | - | | 0.6906 | 8650 | 0.0072 | - | | 0.6946 | 8700 | 0.0001 | - | | 0.6986 | 8750 | 0.0002 | - | | 0.7026 | 8800 | 0.0001 | - | | 0.7066 | 8850 | 0.0243 | - | | 0.7106 | 8900 | 0.0001 | - | | 0.7146 | 8950 | 0.0001 | - | | 0.7186 | 9000 | 0.0001 | - | | 0.7226 | 9050 | 0.0001 | - | | 0.7265 | 9100 | 0.0001 | - | | 0.7305 | 9150 | 0.0001 | - | | 0.7345 | 9200 | 0.0008 | - | | 0.7385 | 9250 | 0.021 | - | | 0.7425 | 9300 | 0.0229 | - | | 0.7465 | 9350 | 0.0001 | - | | 0.7505 | 9400 | 0.002 | - | | 0.7545 | 9450 | 0.0008 | - | | 0.7585 | 9500 | 0.0225 | - | | 0.7625 | 9550 | 0.0001 | - | | 0.7665 | 9600 | 0.0041 | - | | 0.7705 | 9650 | 0.0012 | - | | 0.7745 | 9700 | 0.0034 | - | | 0.7784 | 9750 | 0.0011 | - | | 0.7824 | 9800 | 0.0008 | - | | 0.7864 | 9850 | 0.0101 | - | | 0.7904 | 9900 | 0.0039 | - | | 0.7944 | 9950 | 0.0001 | - | | 0.7984 | 10000 | 0.0005 | - | | 0.8024 | 10050 | 0.0011 | - | | 0.8064 | 10100 | 0.0025 | - | | 0.8104 | 10150 | 0.0001 | - | | 0.8144 | 10200 | 0.0003 | - | | 0.8184 | 10250 | 0.0002 | - | | 0.8224 | 10300 | 0.0002 | - | | 0.8263 | 10350 | 0.0001 | - | | 0.8303 | 10400 | 0.0007 | - | | 0.8343 | 10450 | 0.0005 | - | | 0.8383 | 10500 | 0.0005 | - | | 0.8423 | 10550 | 0.0001 | - | | 0.8463 | 10600 | 0.0206 | - | | 0.8503 | 10650 | 0.0023 | - | | 0.8543 | 10700 | 0.0001 | - | | 0.8583 | 10750 | 0.0001 | - | | 0.8623 | 10800 | 0.0001 | - | | 0.8663 | 10850 | 0.0001 | - | | 0.8703 | 10900 | 0.0001 | - | | 0.8743 | 10950 | 0.0002 | - | | 0.8782 | 11000 | 0.0007 | - | | 0.8822 | 11050 | 0.0025 | - | | 0.8862 | 11100 | 0.0001 | - | | 0.8902 | 11150 | 0.0001 | - | | 0.8942 | 11200 | 0.0001 | - | | 0.8982 | 11250 | 0.0001 | - | | 0.9022 | 11300 | 0.0047 | - | | 0.9062 | 11350 | 0.0001 | - | | 0.9102 | 11400 | 0.0002 | - | | 0.9142 | 11450 | 0.0001 | - | | 0.9182 | 11500 | 0.0013 | - | | 0.9222 | 11550 | 0.0011 | - | | 0.9261 | 11600 | 0.0001 | - | | 0.9301 | 11650 | 0.001 | - | | 0.9341 | 11700 | 0.0145 | - | | 0.9381 | 11750 | 0.0001 | - | | 0.9421 | 11800 | 0.0156 | - | | 0.9461 | 11850 | 0.0001 | - | | 0.9501 | 11900 | 0.0016 | - | | 0.9541 | 11950 | 0.0001 | - | | 0.9581 | 12000 | 0.0011 | - | | 0.9621 | 12050 | 0.002 | - | | 0.9661 | 12100 | 0.0001 | - | | 0.9701 | 12150 | 0.0004 | - | | 0.9741 | 12200 | 0.0007 | - | | 0.9780 | 12250 | 0.0014 | - | | 0.9820 | 12300 | 0.0255 | - | | 0.9860 | 12350 | 0.004 | - | | 0.9900 | 12400 | 0.002 | - | | 0.9940 | 12450 | 0.0146 | - | | 0.9980 | 12500 | 0.0008 | - | | 1.0 | 12525 | - | 0.0588 | | 1.0020 | 12550 | 0.0175 | - | | 1.0060 | 12600 | 0.0001 | - | | 1.0100 | 12650 | 0.0006 | - | | 1.0140 | 12700 | 0.0002 | - | | 1.0180 | 12750 | 0.0149 | - | | 1.0220 | 12800 | 0.0001 | - | | 1.0259 | 12850 | 0.0001 | - | | 1.0299 | 12900 | 0.0001 | - | | 1.0339 | 12950 | 0.003 | - | | 1.0379 | 13000 | 0.0003 | - | | 1.0419 | 13050 | 0.0254 | - | | 1.0459 | 13100 | 0.0001 | - | | 1.0499 | 13150 | 0.0001 | - | | 1.0539 | 13200 | 0.0001 | - | | 1.0579 | 13250 | 0.0001 | - | | 1.0619 | 13300 | 0.0003 | - | | 1.0659 | 13350 | 0.0244 | - | | 1.0699 | 13400 | 0.0001 | - | | 1.0739 | 13450 | 0.0001 | - | | 1.0778 | 13500 | 0.0175 | - | | 1.0818 | 13550 | 0.0002 | - | | 1.0858 | 13600 | 0.0002 | - | | 1.0898 | 13650 | 0.0001 | - | | 1.0938 | 13700 | 0.0001 | - | | 1.0978 | 13750 | 0.0002 | - | | 1.1018 | 13800 | 0.0001 | - | | 1.1058 | 13850 | 0.0001 | - | | 1.1098 | 13900 | 0.0001 | - | | 1.1138 | 13950 | 0.0005 | - | | 1.1178 | 14000 | 0.0001 | - | | 1.1218 | 14050 | 0.0001 | - | | 1.1257 | 14100 | 0.0002 | - | | 1.1297 | 14150 | 0.0001 | - | | 1.1337 | 14200 | 0.0002 | - | | 1.1377 | 14250 | 0.0008 | - | | 1.1417 | 14300 | 0.0001 | - | | 1.1457 | 14350 | 0.0001 | - | | 1.1497 | 14400 | 0.0013 | - | | 1.1537 | 14450 | 0.0001 | - | | 1.1577 | 14500 | 0.0001 | - | | 1.1617 | 14550 | 0.0004 | - | | 1.1657 | 14600 | 0.0001 | - | | 1.1697 | 14650 | 0.0001 | - | | 1.1737 | 14700 | 0.001 | - | | 1.1776 | 14750 | 0.0156 | - | | 1.1816 | 14800 | 0.0001 | - | | 1.1856 | 14850 | 0.0003 | - | | 1.1896 | 14900 | 0.0045 | - | | 1.1936 | 14950 | 0.0011 | - | | 1.1976 | 15000 | 0.0015 | - | | 1.2016 | 15050 | 0.0017 | - | | 1.2056 | 15100 | 0.017 | - | | 1.2096 | 15150 | 0.0001 | - | | 1.2136 | 15200 | 0.0011 | - | | 1.2176 | 15250 | 0.0003 | - | | 1.2216 | 15300 | 0.0001 | - | | 1.2255 | 15350 | 0.0001 | - | | 1.2295 | 15400 | 0.0005 | - | | 1.2335 | 15450 | 0.0144 | - | | 1.2375 | 15500 | 0.0001 | - | | 1.2415 | 15550 | 0.0001 | - | | 1.2455 | 15600 | 0.0191 | - | | 1.2495 | 15650 | 0.0001 | - | | 1.2535 | 15700 | 0.0001 | - | | 1.2575 | 15750 | 0.0001 | - | | 1.2615 | 15800 | 0.0001 | - | | 1.2655 | 15850 | 0.0008 | - | | 1.2695 | 15900 | 0.0005 | - | | 1.2735 | 15950 | 0.013 | - | | 1.2774 | 16000 | 0.0001 | - | | 1.2814 | 16050 | 0.0201 | - | | 1.2854 | 16100 | 0.0008 | - | | 1.2894 | 16150 | 0.0001 | - | | 1.2934 | 16200 | 0.0001 | - | | 1.2974 | 16250 | 0.0001 | - | | 1.3014 | 16300 | 0.0001 | - | | 1.3054 | 16350 | 0.0 | - | | 1.3094 | 16400 | 0.0118 | - | | 1.3134 | 16450 | 0.0253 | - | | 1.3174 | 16500 | 0.0001 | - | | 1.3214 | 16550 | 0.0012 | - | | 1.3253 | 16600 | 0.0017 | - | | 1.3293 | 16650 | 0.0001 | - | | 1.3333 | 16700 | 0.0094 | - | | 1.3373 | 16750 | 0.0001 | - | | 1.3413 | 16800 | 0.0243 | - | | 1.3453 | 16850 | 0.049 | - | | 1.3493 | 16900 | 0.0001 | - | | 1.3533 | 16950 | 0.0247 | - | | 1.3573 | 17000 | 0.0001 | - | | 1.3613 | 17050 | 0.0001 | - | | 1.3653 | 17100 | 0.0001 | - | | 1.3693 | 17150 | 0.0246 | - | | 1.3733 | 17200 | 0.0001 | - | | 1.3772 | 17250 | 0.0245 | - | | 1.3812 | 17300 | 0.0012 | - | | 1.3852 | 17350 | 0.0001 | - | | 1.3892 | 17400 | 0.0001 | - | | 1.3932 | 17450 | 0.0001 | - | | 1.3972 | 17500 | 0.0001 | - | | 1.4012 | 17550 | 0.0001 | - | | 1.4052 | 17600 | 0.0002 | - | | 1.4092 | 17650 | 0.0001 | - | | 1.4132 | 17700 | 0.0001 | - | | 1.4172 | 17750 | 0.0039 | - | | 1.4212 | 17800 | 0.0012 | - | | 1.4251 | 17850 | 0.0043 | - | | 1.4291 | 17900 | 0.0001 | - | | 1.4331 | 17950 | 0.0001 | - | | 1.4371 | 18000 | 0.0001 | - | | 1.4411 | 18050 | 0.0001 | - | | 1.4451 | 18100 | 0.0043 | - | | 1.4491 | 18150 | 0.0023 | - | | 1.4531 | 18200 | 0.0001 | - | | 1.4571 | 18250 | 0.0002 | - | | 1.4611 | 18300 | 0.0001 | - | | 1.4651 | 18350 | 0.0001 | - | | 1.4691 | 18400 | 0.0001 | - | | 1.4731 | 18450 | 0.0099 | - | | 1.4770 | 18500 | 0.0001 | - | | 1.4810 | 18550 | 0.0001 | - | | 1.4850 | 18600 | 0.03 | - | | 1.4890 | 18650 | 0.0001 | - | | 1.4930 | 18700 | 0.0014 | - | | 1.4970 | 18750 | 0.0 | - | | 1.5010 | 18800 | 0.0246 | - | | 1.5050 | 18850 | 0.0001 | - | | 1.5090 | 18900 | 0.0001 | - | | 1.5130 | 18950 | 0.0001 | - | | 1.5170 | 19000 | 0.0001 | - | | 1.5210 | 19050 | 0.0001 | - | | 1.5250 | 19100 | 0.0001 | - | | 1.5289 | 19150 | 0.0001 | - | | 1.5329 | 19200 | 0.0032 | - | | 1.5369 | 19250 | 0.0001 | - | | 1.5409 | 19300 | 0.0001 | - | | 1.5449 | 19350 | 0.0001 | - | | 1.5489 | 19400 | 0.025 | - | | 1.5529 | 19450 | 0.0028 | - | | 1.5569 | 19500 | 0.0028 | - | | 1.5609 | 19550 | 0.0001 | - | | 1.5649 | 19600 | 0.005 | - | | 1.5689 | 19650 | 0.0024 | - | | 1.5729 | 19700 | 0.0001 | - | | 1.5768 | 19750 | 0.0 | - | | 1.5808 | 19800 | 0.0001 | - | | 1.5848 | 19850 | 0.0001 | - | | 1.5888 | 19900 | 0.0001 | - | | 1.5928 | 19950 | 0.0001 | - | | 1.5968 | 20000 | 0.0001 | - | | 1.6008 | 20050 | 0.0001 | - | | 1.6048 | 20100 | 0.0001 | - | | 1.6088 | 20150 | 0.0001 | - | | 1.6128 | 20200 | 0.0 | - | | 1.6168 | 20250 | 0.0001 | - | | 1.6208 | 20300 | 0.0001 | - | | 1.6248 | 20350 | 0.0028 | - | | 1.6287 | 20400 | 0.0019 | - | | 1.6327 | 20450 | 0.0115 | - | | 1.6367 | 20500 | 0.0029 | - | | 1.6407 | 20550 | 0.0035 | - | | 1.6447 | 20600 | 0.0005 | - | | 1.6487 | 20650 | 0.0007 | - | | 1.6527 | 20700 | 0.0374 | - | | 1.6567 | 20750 | 0.0064 | - | | 1.6607 | 20800 | 0.004 | - | | 1.6647 | 20850 | 0.0009 | - | | 1.6687 | 20900 | 0.0 | - | | 1.6727 | 20950 | 0.0017 | - | | 1.6766 | 21000 | 0.0001 | - | | 1.6806 | 21050 | 0.0001 | - | | 1.6846 | 21100 | 0.0001 | - | | 1.6886 | 21150 | 0.0083 | - | | 1.6926 | 21200 | 0.0001 | - | | 1.6966 | 21250 | 0.0001 | - | | 1.7006 | 21300 | 0.0001 | - | | 1.7046 | 21350 | 0.0009 | - | | 1.7086 | 21400 | 0.0001 | - | | 1.7126 | 21450 | 0.0001 | - | | 1.7166 | 21500 | 0.0015 | - | | 1.7206 | 21550 | 0.0001 | - | | 1.7246 | 21600 | 0.0001 | - | | 1.7285 | 21650 | 0.0001 | - | | 1.7325 | 21700 | 0.0007 | - | | 1.7365 | 21750 | 0.0001 | - | | 1.7405 | 21800 | 0.0213 | - | | 1.7445 | 21850 | 0.0007 | - | | 1.7485 | 21900 | 0.0029 | - | | 1.7525 | 21950 | 0.0007 | - | | 1.7565 | 22000 | 0.0025 | - | | 1.7605 | 22050 | 0.0002 | - | | 1.7645 | 22100 | 0.0173 | - | | 1.7685 | 22150 | 0.0012 | - | | 1.7725 | 22200 | 0.0027 | - | | 1.7764 | 22250 | 0.0009 | - | | 1.7804 | 22300 | 0.0013 | - | | 1.7844 | 22350 | 0.024 | - | | 1.7884 | 22400 | 0.0043 | - | | 1.7924 | 22450 | 0.0001 | - | | 1.7964 | 22500 | 0.0001 | - | | 1.8004 | 22550 | 0.0109 | - | | 1.8044 | 22600 | 0.0001 | - | | 1.8084 | 22650 | 0.0002 | - | | 1.8124 | 22700 | 0.0246 | - | | 1.8164 | 22750 | 0.0001 | - | | 1.8204 | 22800 | 0.0002 | - | | 1.8244 | 22850 | 0.0248 | - | | 1.8283 | 22900 | 0.0001 | - | | 1.8323 | 22950 | 0.0119 | - | | 1.8363 | 23000 | 0.0254 | - | | 1.8403 | 23050 | 0.1233 | - | | 1.8443 | 23100 | 0.0003 | - | | 1.8483 | 23150 | 0.0001 | - | | 1.8523 | 23200 | 0.0 | - | | 1.8563 | 23250 | 0.0 | - | | 1.8603 | 23300 | 0.0001 | - | | 1.8643 | 23350 | 0.0001 | - | | 1.8683 | 23400 | 0.0001 | - | | 1.8723 | 23450 | 0.0001 | - | | 1.8762 | 23500 | 0.0001 | - | | 1.8802 | 23550 | 0.0017 | - | | 1.8842 | 23600 | 0.0 | - | | 1.8882 | 23650 | 0.0001 | - | | 1.8922 | 23700 | 0.0 | - | | 1.8962 | 23750 | 0.0001 | - | | 1.9002 | 23800 | 0.0055 | - | | 1.9042 | 23850 | 0.0001 | - | | 1.9082 | 23900 | 0.0003 | - | | 1.9122 | 23950 | 0.0002 | - | | 1.9162 | 24000 | 0.0007 | - | | 1.9202 | 24050 | 0.0011 | - | | 1.9242 | 24100 | 0.0001 | - | | 1.9281 | 24150 | 0.0001 | - | | 1.9321 | 24200 | 0.0001 | - | | 1.9361 | 24250 | 0.0001 | - | | 1.9401 | 24300 | 0.0192 | - | | 1.9441 | 24350 | 0.0001 | - | | 1.9481 | 24400 | 0.0008 | - | | 1.9521 | 24450 | 0.0 | - | | 1.9561 | 24500 | 0.0004 | - | | 1.9601 | 24550 | 0.0001 | - | | 1.9641 | 24600 | 0.0001 | - | | 1.9681 | 24650 | 0.0001 | - | | 1.9721 | 24700 | 0.0001 | - | | 1.9760 | 24750 | 0.0013 | - | | 1.9800 | 24800 | 0.0129 | - | | 1.9840 | 24850 | 0.0024 | - | | 1.9880 | 24900 | 0.0015 | - | | 1.9920 | 24950 | 0.0196 | - | | 1.9960 | 25000 | 0.0164 | - | | 2.0 | 25050 | 0.002 | 0.0769 | | 2.0040 | 25100 | 0.0001 | - | | 2.0080 | 25150 | 0.0156 | - | | 2.0120 | 25200 | 0.0 | - | | 2.0160 | 25250 | 0.0169 | - | | 2.0200 | 25300 | 0.0023 | - | | 2.0240 | 25350 | 0.0001 | - | | 2.0279 | 25400 | 0.0 | - | | 2.0319 | 25450 | 0.001 | - | | 2.0359 | 25500 | 0.0152 | - | | 2.0399 | 25550 | 0.0001 | - | | 2.0439 | 25600 | 0.001 | - | | 2.0479 | 25650 | 0.0001 | - | | 2.0519 | 25700 | 0.002 | - | | 2.0559 | 25750 | 0.0006 | - | | 2.0599 | 25800 | 0.0031 | - | | 2.0639 | 25850 | 0.0016 | - | | 2.0679 | 25900 | 0.0 | - | | 2.0719 | 25950 | 0.0001 | - | | 2.0758 | 26000 | 0.0001 | - | | 2.0798 | 26050 | 0.0159 | - | | 2.0838 | 26100 | 0.0005 | - | | 2.0878 | 26150 | 0.0001 | - | | 2.0918 | 26200 | 0.0001 | - | | 2.0958 | 26250 | 0.0006 | - | | 2.0998 | 26300 | 0.0008 | - | | 2.1038 | 26350 | 0.0 | - | | 2.1078 | 26400 | 0.0 | - | | 2.1118 | 26450 | 0.0183 | - | | 2.1158 | 26500 | 0.0001 | - | | 2.1198 | 26550 | 0.0167 | - | | 2.1238 | 26600 | 0.0001 | - | | 2.1277 | 26650 | 0.0014 | - | | 2.1317 | 26700 | 0.0003 | - | | 2.1357 | 26750 | 0.0014 | - | | 2.1397 | 26800 | 0.0001 | - | | 2.1437 | 26850 | 0.0001 | - | | 2.1477 | 26900 | 0.0011 | - | | 2.1517 | 26950 | 0.0 | - | | 2.1557 | 27000 | 0.0001 | - | | 2.1597 | 27050 | 0.0001 | - | | 2.1637 | 27100 | 0.0007 | - | | 2.1677 | 27150 | 0.0001 | - | | 2.1717 | 27200 | 0.0 | - | | 2.1756 | 27250 | 0.0001 | - | | 2.1796 | 27300 | 0.0005 | - | | 2.1836 | 27350 | 0.0 | - | | 2.1876 | 27400 | 0.0002 | - | | 2.1916 | 27450 | 0.0001 | - | | 2.1956 | 27500 | 0.0001 | - | | 2.1996 | 27550 | 0.025 | - | | 2.2036 | 27600 | 0.0001 | - | | 2.2076 | 27650 | 0.0001 | - | | 2.2116 | 27700 | 0.0001 | - | | 2.2156 | 27750 | 0.0001 | - | | 2.2196 | 27800 | 0.0001 | - | | 2.2236 | 27850 | 0.0001 | - | | 2.2275 | 27900 | 0.0001 | - | | 2.2315 | 27950 | 0.0 | - | | 2.2355 | 28000 | 0.0001 | - | | 2.2395 | 28050 | 0.0001 | - | | 2.2435 | 28100 | 0.0245 | - | | 2.2475 | 28150 | 0.0001 | - | | 2.2515 | 28200 | 0.0251 | - | | 2.2555 | 28250 | 0.0001 | - | | 2.2595 | 28300 | 0.0497 | - | | 2.2635 | 28350 | 0.0002 | - | | 2.2675 | 28400 | 0.0487 | - | | 2.2715 | 28450 | 0.0703 | - | | 2.2754 | 28500 | 0.0248 | - | | 2.2794 | 28550 | 0.0001 | - | | 2.2834 | 28600 | 0.0245 | - | | 2.2874 | 28650 | 0.0004 | - | | 2.2914 | 28700 | 0.0001 | - | | 2.2954 | 28750 | 0.0001 | - | | 2.2994 | 28800 | 0.0 | - | | 2.3034 | 28850 | 0.0002 | - | | 2.3074 | 28900 | 0.0489 | - | | 2.3114 | 28950 | 0.0245 | - | | 2.3154 | 29000 | 0.0002 | - | | 2.3194 | 29050 | 0.0001 | - | | 2.3234 | 29100 | 0.0001 | - | | 2.3273 | 29150 | 0.0247 | - | | 2.3313 | 29200 | 0.0003 | - | | 2.3353 | 29250 | 0.0001 | - | | 2.3393 | 29300 | 0.0001 | - | | 2.3433 | 29350 | 0.0246 | - | | 2.3473 | 29400 | 0.0246 | - | | 2.3513 | 29450 | 0.0246 | - | | 2.3553 | 29500 | 0.0001 | - | | 2.3593 | 29550 | 0.0001 | - | | 2.3633 | 29600 | 0.0001 | - | | 2.3673 | 29650 | 0.0246 | - | | 2.3713 | 29700 | 0.0 | - | | 2.3752 | 29750 | 0.0246 | - | | 2.3792 | 29800 | 0.0001 | - | | 2.3832 | 29850 | 0.0001 | - | | 2.3872 | 29900 | 0.0001 | - | | 2.3912 | 29950 | 0.0002 | - | | 2.3952 | 30000 | 0.0248 | - | | 2.3992 | 30050 | 0.0002 | - | | 2.4032 | 30100 | 0.0001 | - | | 2.4072 | 30150 | 0.0001 | - | | 2.4112 | 30200 | 0.0001 | - | | 2.4152 | 30250 | 0.0001 | - | | 2.4192 | 30300 | 0.0001 | - | | 2.4232 | 30350 | 0.0245 | - | | 2.4271 | 30400 | 0.0001 | - | | 2.4311 | 30450 | 0.0001 | - | | 2.4351 | 30500 | 0.0001 | - | | 2.4391 | 30550 | 0.0 | - | | 2.4431 | 30600 | 0.0001 | - | | 2.4471 | 30650 | 0.0001 | - | | 2.4511 | 30700 | 0.0001 | - | | 2.4551 | 30750 | 0.0001 | - | | 2.4591 | 30800 | 0.0001 | - | | 2.4631 | 30850 | 0.0002 | - | | 2.4671 | 30900 | 0.0001 | - | | 2.4711 | 30950 | 0.0245 | - | | 2.4750 | 31000 | 0.0001 | - | | 2.4790 | 31050 | 0.0249 | - | | 2.4830 | 31100 | 0.0246 | - | | 2.4870 | 31150 | 0.0001 | - | | 2.4910 | 31200 | 0.0246 | - | | 2.4950 | 31250 | 0.0001 | - | | 2.4990 | 31300 | 0.0247 | - | | 2.5030 | 31350 | 0.0001 | - | | 2.5070 | 31400 | 0.0001 | - | | 2.5110 | 31450 | 0.0003 | - | | 2.5150 | 31500 | 0.0002 | - | | 2.5190 | 31550 | 0.0002 | - | | 2.5230 | 31600 | 0.0001 | - | | 2.5269 | 31650 | 0.0001 | - | | 2.5309 | 31700 | 0.0003 | - | | 2.5349 | 31750 | 0.0001 | - | | 2.5389 | 31800 | 0.0001 | - | | 2.5429 | 31850 | 0.0001 | - | | 2.5469 | 31900 | 0.0001 | - | | 2.5509 | 31950 | 0.0493 | - | | 2.5549 | 32000 | 0.0001 | - | | 2.5589 | 32050 | 0.0001 | - | | 2.5629 | 32100 | 0.0003 | - | | 2.5669 | 32150 | 0.0001 | - | | 2.5709 | 32200 | 0.025 | - | | 2.5749 | 32250 | 0.0001 | - | | 2.5788 | 32300 | 0.0249 | - | | 2.5828 | 32350 | 0.0001 | - | | 2.5868 | 32400 | 0.0001 | - | | 2.5908 | 32450 | 0.0001 | - | | 2.5948 | 32500 | 0.0001 | - | | 2.5988 | 32550 | 0.0004 | - | | 2.6028 | 32600 | 0.0001 | - | | 2.6068 | 32650 | 0.0001 | - | | 2.6108 | 32700 | 0.0001 | - | | 2.6148 | 32750 | 0.0001 | - | | 2.6188 | 32800 | 0.0001 | - | | 2.6228 | 32850 | 0.0001 | - | | 2.6267 | 32900 | 0.0001 | - | | 2.6307 | 32950 | 0.0492 | - | | 2.6347 | 33000 | 0.0001 | - | | 2.6387 | 33050 | 0.0001 | - | | 2.6427 | 33100 | 0.0 | - | | 2.6467 | 33150 | 0.0001 | - | | 2.6507 | 33200 | 0.0247 | - | | 2.6547 | 33250 | 0.0001 | - | | 2.6587 | 33300 | 0.0001 | - | | 2.6627 | 33350 | 0.0001 | - | | 2.6667 | 33400 | 0.0001 | - | | 2.6707 | 33450 | 0.0001 | - | | 2.6747 | 33500 | 0.0001 | - | | 2.6786 | 33550 | 0.0001 | - | | 2.6826 | 33600 | 0.0001 | - | | 2.6866 | 33650 | 0.0002 | - | | 2.6906 | 33700 | 0.0001 | - | | 2.6946 | 33750 | 0.0001 | - | | 2.6986 | 33800 | 0.0001 | - | | 2.7026 | 33850 | 0.0001 | - | | 2.7066 | 33900 | 0.0254 | - | | 2.7106 | 33950 | 0.0001 | - | | 2.7146 | 34000 | 0.0001 | - | | 2.7186 | 34050 | 0.0001 | - | | 2.7226 | 34100 | 0.0001 | - | | 2.7265 | 34150 | 0.0001 | - | | 2.7305 | 34200 | 0.0001 | - | | 2.7345 | 34250 | 0.0002 | - | | 2.7385 | 34300 | 0.0498 | - | | 2.7425 | 34350 | 0.0001 | - | | 2.7465 | 34400 | 0.0001 | - | | 2.7505 | 34450 | 0.0001 | - | | 2.7545 | 34500 | 0.0001 | - | | 2.7585 | 34550 | 0.0248 | - | | 2.7625 | 34600 | 0.0 | - | | 2.7665 | 34650 | 0.0001 | - | | 2.7705 | 34700 | 0.0001 | - | | 2.7745 | 34750 | 0.0001 | - | | 2.7784 | 34800 | 0.0001 | - | | 2.7824 | 34850 | 0.0247 | - | | 2.7864 | 34900 | 0.0001 | - | | 2.7904 | 34950 | 0.0001 | - | | 2.7944 | 35000 | 0.0001 | - | | 2.7984 | 35050 | 0.0001 | - | | 2.8024 | 35100 | 0.0001 | - | | 2.8064 | 35150 | 0.0001 | - | | 2.8104 | 35200 | 0.0001 | - | | 2.8144 | 35250 | 0.0001 | - | | 2.8184 | 35300 | 0.0001 | - | | 2.8224 | 35350 | 0.0001 | - | | 2.8263 | 35400 | 0.0 | - | | 2.8303 | 35450 | 0.0001 | - | | 2.8343 | 35500 | 0.0 | - | | 2.8383 | 35550 | 0.0 | - | | 2.8423 | 35600 | 0.0001 | - | | 2.8463 | 35650 | 0.0254 | - | | 2.8503 | 35700 | 0.0001 | - | | 2.8543 | 35750 | 0.0001 | - | | 2.8583 | 35800 | 0.0001 | - | | 2.8623 | 35850 | 0.0 | - | | 2.8663 | 35900 | 0.0001 | - | | 2.8703 | 35950 | 0.0001 | - | | 2.8743 | 36000 | 0.0002 | - | | 2.8782 | 36050 | 0.0001 | - | | 2.8822 | 36100 | 0.0001 | - | | 2.8862 | 36150 | 0.0 | - | | 2.8902 | 36200 | 0.0001 | - | | 2.8942 | 36250 | 0.0001 | - | | 2.8982 | 36300 | 0.0001 | - | | 2.9022 | 36350 | 0.0001 | - | | 2.9062 | 36400 | 0.0001 | - | | 2.9102 | 36450 | 0.0001 | - | | 2.9142 | 36500 | 0.0001 | - | | 2.9182 | 36550 | 0.0001 | - | | 2.9222 | 36600 | 0.0001 | - | | 2.9261 | 36650 | 0.0002 | - | | 2.9301 | 36700 | 0.0001 | - | | 2.9341 | 36750 | 0.0248 | - | | 2.9381 | 36800 | 0.0245 | - | | 2.9421 | 36850 | 0.0001 | - | | 2.9461 | 36900 | 0.0 | - | | 2.9501 | 36950 | 0.0001 | - | | 2.9541 | 37000 | 0.0001 | - | | 2.9581 | 37050 | 0.0001 | - | | 2.9621 | 37100 | 0.0001 | - | | 2.9661 | 37150 | 0.0001 | - | | 2.9701 | 37200 | 0.0001 | - | | 2.9741 | 37250 | 0.0001 | - | | 2.9780 | 37300 | 0.0 | - | | 2.9820 | 37350 | 0.0503 | - | | 2.9860 | 37400 | 0.0001 | - | | 2.9900 | 37450 | 0.0246 | - | | 2.9940 | 37500 | 0.0001 | - | | 2.9980 | 37550 | 0.0001 | - | | 3.0 | 37575 | - | 0.0396 | | 3.0020 | 37600 | 0.0248 | - | | 3.0060 | 37650 | 0.0001 | - | | 3.0100 | 37700 | 0.0001 | - | | 3.0140 | 37750 | 0.0245 | - | | 3.0180 | 37800 | 0.0002 | - | | 3.0220 | 37850 | 0.0 | - | | 3.0259 | 37900 | 0.0001 | - | | 3.0299 | 37950 | 0.0001 | - | | 3.0339 | 38000 | 0.0003 | - | | 3.0379 | 38050 | 0.0001 | - | | 3.0419 | 38100 | 0.0001 | - | | 3.0459 | 38150 | 0.0001 | - | | 3.0499 | 38200 | 0.0001 | - | | 3.0539 | 38250 | 0.0001 | - | | 3.0579 | 38300 | 0.0001 | - | | 3.0619 | 38350 | 0.0002 | - | | 3.0659 | 38400 | 0.0251 | - | | 3.0699 | 38450 | 0.0001 | - | | 3.0739 | 38500 | 0.0001 | - | | 3.0778 | 38550 | 0.0001 | - | | 3.0818 | 38600 | 0.0001 | - | | 3.0858 | 38650 | 0.0001 | - | | 3.0898 | 38700 | 0.0001 | - | | 3.0938 | 38750 | 0.0001 | - | | 3.0978 | 38800 | 0.0001 | - | | 3.1018 | 38850 | 0.0001 | - | | 3.1058 | 38900 | 0.0001 | - | | 3.1098 | 38950 | 0.0 | - | | 3.1138 | 39000 | 0.0001 | - | | 3.1178 | 39050 | 0.0001 | - | | 3.1218 | 39100 | 0.0001 | - | | 3.1257 | 39150 | 0.0001 | - | | 3.1297 | 39200 | 0.0001 | - | | 3.1337 | 39250 | 0.0001 | - | | 3.1377 | 39300 | 0.0 | - | | 3.1417 | 39350 | 0.0001 | - | | 3.1457 | 39400 | 0.0002 | - | | 3.1497 | 39450 | 0.0001 | - | | 3.1537 | 39500 | 0.0002 | - | | 3.1577 | 39550 | 0.0001 | - | | 3.1617 | 39600 | 0.0717 | - | | 3.1657 | 39650 | 0.0001 | - | | 3.1697 | 39700 | 0.0001 | - | | 3.1737 | 39750 | 0.0004 | - | | 3.1776 | 39800 | 0.0244 | - | | 3.1816 | 39850 | 0.0001 | - | | 3.1856 | 39900 | 0.0239 | - | | 3.1896 | 39950 | 0.0245 | - | | 3.1936 | 40000 | 0.0245 | - | | 3.1976 | 40050 | 0.0001 | - | | 3.2016 | 40100 | 0.0184 | - | | 3.2056 | 40150 | 0.0246 | - | | 3.2096 | 40200 | 0.0001 | - | | 3.2136 | 40250 | 0.0001 | - | | 3.2176 | 40300 | 0.0001 | - | | 3.2216 | 40350 | 0.0001 | - | | 3.2255 | 40400 | 0.0001 | - | | 3.2295 | 40450 | 0.0002 | - | | 3.2335 | 40500 | 0.0248 | - | | 3.2375 | 40550 | 0.0001 | - | | 3.2415 | 40600 | 0.0244 | - | | 3.2455 | 40650 | 0.0002 | - | | 3.2495 | 40700 | 0.0001 | - | | 3.2535 | 40750 | 0.0001 | - | | 3.2575 | 40800 | 0.0 | - | | 3.2615 | 40850 | 0.0 | - | | 3.2655 | 40900 | 0.0001 | - | | 3.2695 | 40950 | 0.0247 | - | | 3.2735 | 41000 | 0.0001 | - | | 3.2774 | 41050 | 0.0001 | - | | 3.2814 | 41100 | 0.0246 | - | | 3.2854 | 41150 | 0.0001 | - | | 3.2894 | 41200 | 0.0001 | - | | 3.2934 | 41250 | 0.0001 | - | | 3.2974 | 41300 | 0.0001 | - | | 3.3014 | 41350 | 0.0001 | - | | 3.3054 | 41400 | 0.0246 | - | | 3.3094 | 41450 | 0.0246 | - | | 3.3134 | 41500 | 0.0246 | - | | 3.3174 | 41550 | 0.0001 | - | | 3.3214 | 41600 | 0.0003 | - | | 3.3253 | 41650 | 0.0001 | - | | 3.3293 | 41700 | 0.0001 | - | | 3.3333 | 41750 | 0.025 | - | | 3.3373 | 41800 | 0.0 | - | | 3.3413 | 41850 | 0.0245 | - | | 3.3453 | 41900 | 0.0001 | - | | 3.3493 | 41950 | 0.0246 | - | | 3.3533 | 42000 | 0.0001 | - | | 3.3573 | 42050 | 0.0001 | - | | 3.3613 | 42100 | 0.0001 | - | | 3.3653 | 42150 | 0.0001 | - | | 3.3693 | 42200 | 0.0248 | - | | 3.3733 | 42250 | 0.0245 | - | | 3.3772 | 42300 | 0.0001 | - | | 3.3812 | 42350 | 0.0 | - | | 3.3852 | 42400 | 0.0001 | - | | 3.3892 | 42450 | 0.0001 | - | | 3.3932 | 42500 | 0.0001 | - | | 3.3972 | 42550 | 0.0001 | - | | 3.4012 | 42600 | 0.0001 | - | | 3.4052 | 42650 | 0.0001 | - | | 3.4092 | 42700 | 0.0001 | - | | 3.4132 | 42750 | 0.0001 | - | | 3.4172 | 42800 | 0.0001 | - | | 3.4212 | 42850 | 0.0 | - | | 3.4251 | 42900 | 0.0 | - | | 3.4291 | 42950 | 0.0001 | - | | 3.4331 | 43000 | 0.0001 | - | | 3.4371 | 43050 | 0.0001 | - | | 3.4411 | 43100 | 0.0001 | - | | 3.4451 | 43150 | 0.0002 | - | | 3.4491 | 43200 | 0.0001 | - | | 3.4531 | 43250 | 0.0002 | - | | 3.4571 | 43300 | 0.0001 | - | | 3.4611 | 43350 | 0.0 | - | | 3.4651 | 43400 | 0.0001 | - | | 3.4691 | 43450 | 0.0246 | - | | 3.4731 | 43500 | 0.0001 | - | | 3.4770 | 43550 | 0.0001 | - | | 3.4810 | 43600 | 0.0246 | - | | 3.4850 | 43650 | 0.0001 | - | | 3.4890 | 43700 | 0.0001 | - | | 3.4930 | 43750 | 0.0001 | - | | 3.4970 | 43800 | 0.0245 | - | | 3.5010 | 43850 | 0.0001 | - | | 3.5050 | 43900 | 0.0001 | - | | 3.5090 | 43950 | 0.0001 | - | | 3.5130 | 44000 | 0.0001 | - | | 3.5170 | 44050 | 0.0001 | - | | 3.5210 | 44100 | 0.0001 | - | | 3.5250 | 44150 | 0.0001 | - | | 3.5289 | 44200 | 0.0001 | - | | 3.5329 | 44250 | 0.0001 | - | | 3.5369 | 44300 | 0.0 | - | | 3.5409 | 44350 | 0.0001 | - | | 3.5449 | 44400 | 0.0 | - | | 3.5489 | 44450 | 0.0249 | - | | 3.5529 | 44500 | 0.0 | - | | 3.5569 | 44550 | 0.0002 | - | | 3.5609 | 44600 | 0.0001 | - | | 3.5649 | 44650 | 0.0002 | - | | 3.5689 | 44700 | 0.0001 | - | | 3.5729 | 44750 | 0.0 | - | | 3.5768 | 44800 | 0.0 | - | | 3.5808 | 44850 | 0.0 | - | | 3.5848 | 44900 | 0.0 | - | | 3.5888 | 44950 | 0.0 | - | | 3.5928 | 45000 | 0.0001 | - | | 3.5968 | 45050 | 0.0001 | - | | 3.6008 | 45100 | 0.0001 | - | | 3.6048 | 45150 | 0.0001 | - | | 3.6088 | 45200 | 0.0002 | - | | 3.6128 | 45250 | 0.0 | - | | 3.6168 | 45300 | 0.0001 | - | | 3.6208 | 45350 | 0.0001 | - | | 3.6248 | 45400 | 0.0001 | - | | 3.6287 | 45450 | 0.0244 | - | | 3.6327 | 45500 | 0.0 | - | | 3.6367 | 45550 | 0.0001 | - | | 3.6407 | 45600 | 0.0 | - | | 3.6447 | 45650 | 0.0001 | - | | 3.6487 | 45700 | 0.0243 | - | | 3.6527 | 45750 | 0.0252 | - | | 3.6567 | 45800 | 0.0001 | - | | 3.6607 | 45850 | 0.0001 | - | | 3.6647 | 45900 | 0.0001 | - | | 3.6687 | 45950 | 0.0 | - | | 3.6727 | 46000 | 0.0001 | - | | 3.6766 | 46050 | 0.0001 | - | | 3.6806 | 46100 | 0.0002 | - | | 3.6846 | 46150 | 0.0 | - | | 3.6886 | 46200 | 0.0247 | - | | 3.6926 | 46250 | 0.0 | - | | 3.6966 | 46300 | 0.0001 | - | | 3.7006 | 46350 | 0.0 | - | | 3.7046 | 46400 | 0.0001 | - | | 3.7086 | 46450 | 0.0001 | - | | 3.7126 | 46500 | 0.0001 | - | | 3.7166 | 46550 | 0.0 | - | | 3.7206 | 46600 | 0.0001 | - | | 3.7246 | 46650 | 0.0 | - | | 3.7285 | 46700 | 0.0001 | - | | 3.7325 | 46750 | 0.0001 | - | | 3.7365 | 46800 | 0.0246 | - | | 3.7405 | 46850 | 0.0 | - | | 3.7445 | 46900 | 0.0001 | - | | 3.7485 | 46950 | 0.0001 | - | | 3.7525 | 47000 | 0.0001 | - | | 3.7565 | 47050 | 0.0001 | - | | 3.7605 | 47100 | 0.0001 | - | | 3.7645 | 47150 | 0.025 | - | | 3.7685 | 47200 | 0.0001 | - | | 3.7725 | 47250 | 0.0002 | - | | 3.7764 | 47300 | 0.0001 | - | | 3.7804 | 47350 | 0.0247 | - | | 3.7844 | 47400 | 0.0248 | - | | 3.7884 | 47450 | 0.0001 | - | | 3.7924 | 47500 | 0.0 | - | | 3.7964 | 47550 | 0.0001 | - | | 3.8004 | 47600 | 0.025 | - | | 3.8044 | 47650 | 0.0001 | - | | 3.8084 | 47700 | 0.0001 | - | | 3.8124 | 47750 | 0.0002 | - | | 3.8164 | 47800 | 0.0001 | - | | 3.8204 | 47850 | 0.0001 | - | | 3.8244 | 47900 | 0.0252 | - | | 3.8283 | 47950 | 0.0001 | - | | 3.8323 | 48000 | 0.0254 | - | | 3.8363 | 48050 | 0.0249 | - | | 3.8403 | 48100 | 0.0001 | - | | 3.8443 | 48150 | 0.0001 | - | | 3.8483 | 48200 | 0.0001 | - | | 3.8523 | 48250 | 0.0 | - | | 3.8563 | 48300 | 0.0001 | - | | 3.8603 | 48350 | 0.0001 | - | | 3.8643 | 48400 | 0.0001 | - | | 3.8683 | 48450 | 0.0001 | - | | 3.8723 | 48500 | 0.0001 | - | | 3.8762 | 48550 | 0.0006 | - | | 3.8802 | 48600 | 0.0003 | - | | 3.8842 | 48650 | 0.0 | - | | 3.8882 | 48700 | 0.0003 | - | | 3.8922 | 48750 | 0.0001 | - | | 3.8962 | 48800 | 0.0001 | - | | 3.9002 | 48850 | 0.0001 | - | | 3.9042 | 48900 | 0.0001 | - | | 3.9082 | 48950 | 0.0001 | - | | 3.9122 | 49000 | 0.0001 | - | | 3.9162 | 49050 | 0.0246 | - | | 3.9202 | 49100 | 0.0 | - | | 3.9242 | 49150 | 0.0001 | - | | 3.9281 | 49200 | 0.0001 | - | | 3.9321 | 49250 | 0.0001 | - | | 3.9361 | 49300 | 0.0246 | - | | 3.9401 | 49350 | 0.0 | - | | 3.9441 | 49400 | 0.0001 | - | | 3.9481 | 49450 | 0.0002 | - | | 3.9521 | 49500 | 0.0 | - | | 3.9561 | 49550 | 0.0 | - | | 3.9601 | 49600 | 0.0002 | - | | 3.9641 | 49650 | 0.0248 | - | | 3.9681 | 49700 | 0.0001 | - | | 3.9721 | 49750 | 0.0001 | - | | 3.9760 | 49800 | 0.0001 | - | | 3.9800 | 49850 | 0.0248 | - | | 3.9840 | 49900 | 0.0001 | - | | 3.9880 | 49950 | 0.0245 | - | | 3.9920 | 50000 | 0.0001 | - | | 3.9960 | 50050 | 0.0487 | - | | 4.0 | 50100 | 0.0002 | 0.0927 | | 4.0040 | 50150 | 0.0001 | - | | 4.0080 | 50200 | 0.0251 | - | | 4.0120 | 50250 | 0.0245 | - | | 4.0160 | 50300 | 0.0001 | - | | 4.0200 | 50350 | 0.0001 | - | | 4.0240 | 50400 | 0.0001 | - | | 4.0279 | 50450 | 0.0 | - | | 4.0319 | 50500 | 0.0001 | - | | 4.0359 | 50550 | 0.0255 | - | | 4.0399 | 50600 | 0.0001 | - | | 4.0439 | 50650 | 0.0 | - | | 4.0479 | 50700 | 0.0001 | - | | 4.0519 | 50750 | 0.0001 | - | | 4.0559 | 50800 | 0.0 | - | | 4.0599 | 50850 | 0.0 | - | | 4.0639 | 50900 | 0.0001 | - | | 4.0679 | 50950 | 0.0001 | - | | 4.0719 | 51000 | 0.0001 | - | | 4.0758 | 51050 | 0.0001 | - | | 4.0798 | 51100 | 0.0242 | - | | 4.0838 | 51150 | 0.0001 | - | | 4.0878 | 51200 | 0.0001 | - | | 4.0918 | 51250 | 0.0001 | - | | 4.0958 | 51300 | 0.0 | - | | 4.0998 | 51350 | 0.0001 | - | | 4.1038 | 51400 | 0.0 | - | | 4.1078 | 51450 | 0.0 | - | | 4.1118 | 51500 | 0.0246 | - | | 4.1158 | 51550 | 0.0 | - | | 4.1198 | 51600 | 0.0249 | - | | 4.1238 | 51650 | 0.0001 | - | | 4.1277 | 51700 | 0.0001 | - | | 4.1317 | 51750 | 0.0001 | - | | 4.1357 | 51800 | 0.0001 | - | | 4.1397 | 51850 | 0.0 | - | | 4.1437 | 51900 | 0.0001 | - | | 4.1477 | 51950 | 0.0 | - | | 4.1517 | 52000 | 0.0001 | - | | 4.1557 | 52050 | 0.0001 | - | | 4.1597 | 52100 | 0.0001 | - | | 4.1637 | 52150 | 0.0001 | - | | 4.1677 | 52200 | 0.0001 | - | | 4.1717 | 52250 | 0.0001 | - | | 4.1756 | 52300 | 0.0 | - | | 4.1796 | 52350 | 0.0001 | - | | 4.1836 | 52400 | 0.0001 | - | | 4.1876 | 52450 | 0.0 | - | | 4.1916 | 52500 | 0.0001 | - | | 4.1956 | 52550 | 0.0001 | - | | 4.1996 | 52600 | 0.0252 | - | | 4.2036 | 52650 | 0.0001 | - | | 4.2076 | 52700 | 0.0001 | - | | 4.2116 | 52750 | 0.0001 | - | | 4.2156 | 52800 | 0.0001 | - | | 4.2196 | 52850 | 0.0001 | - | | 4.2236 | 52900 | 0.0001 | - | | 4.2275 | 52950 | 0.0 | - | | 4.2315 | 53000 | 0.0 | - | | 4.2355 | 53050 | 0.0001 | - | | 4.2395 | 53100 | 0.0244 | - | | 4.2435 | 53150 | 0.0001 | - | | 4.2475 | 53200 | 0.0001 | - | | 4.2515 | 53250 | 0.0248 | - | | 4.2555 | 53300 | 0.0001 | - | | 4.2595 | 53350 | 0.0 | - | | 4.2635 | 53400 | 0.0 | - | | 4.2675 | 53450 | 0.0245 | - | | 4.2715 | 53500 | 0.0 | - | | 4.2754 | 53550 | 0.0251 | - | | 4.2794 | 53600 | 0.0 | - | | 4.2834 | 53650 | 0.0001 | - | | 4.2874 | 53700 | 0.0001 | - | | 4.2914 | 53750 | 0.0001 | - | | 4.2954 | 53800 | 0.0 | - | | 4.2994 | 53850 | 0.0 | - | | 4.3034 | 53900 | 0.0247 | - | | 4.3074 | 53950 | 0.049 | - | | 4.3114 | 54000 | 0.0 | - | | 4.3154 | 54050 | 0.0001 | - | | 4.3194 | 54100 | 0.0 | - | | 4.3234 | 54150 | 0.0001 | - | | 4.3273 | 54200 | 0.0001 | - | | 4.3313 | 54250 | 0.0001 | - | | 4.3353 | 54300 | 0.0001 | - | | 4.3393 | 54350 | 0.0243 | - | | 4.3433 | 54400 | 0.0001 | - | | 4.3473 | 54450 | 0.0246 | - | | 4.3513 | 54500 | 0.0 | - | | 4.3553 | 54550 | 0.0001 | - | | 4.3593 | 54600 | 0.0001 | - | | 4.3633 | 54650 | 0.0001 | - | | 4.3673 | 54700 | 0.0 | - | | 4.3713 | 54750 | 0.0246 | - | | 4.3752 | 54800 | 0.0 | - | | 4.3792 | 54850 | 0.0 | - | | 4.3832 | 54900 | 0.0001 | - | | 4.3872 | 54950 | 0.0001 | - | | 4.3912 | 55000 | 0.0001 | - | | 4.3952 | 55050 | 0.0001 | - | | 4.3992 | 55100 | 0.0001 | - | | 4.4032 | 55150 | 0.0 | - | | 4.4072 | 55200 | 0.0001 | - | | 4.4112 | 55250 | 0.0 | - | | 4.4152 | 55300 | 0.0 | - | | 4.4192 | 55350 | 0.0001 | - | | 4.4232 | 55400 | 0.0244 | - | | 4.4271 | 55450 | 0.0 | - | | 4.4311 | 55500 | 0.0 | - | | 4.4351 | 55550 | 0.0001 | - | | 4.4391 | 55600 | 0.0 | - | | 4.4431 | 55650 | 0.0001 | - | | 4.4471 | 55700 | 0.0001 | - | | 4.4511 | 55750 | 0.0959 | - | | 4.4551 | 55800 | 0.0002 | - | | 4.4591 | 55850 | 0.0001 | - | | 4.4631 | 55900 | 0.0001 | - | | 4.4671 | 55950 | 0.0246 | - | | 4.4711 | 56000 | 0.0001 | - | | 4.4750 | 56050 | 0.0001 | - | | 4.4790 | 56100 | 0.0246 | - | | 4.4830 | 56150 | 0.024 | - | | 4.4870 | 56200 | 0.0001 | - | | 4.4910 | 56250 | 0.0001 | - | | 4.4950 | 56300 | 0.0245 | - | | 4.4990 | 56350 | 0.0001 | - | | 4.5030 | 56400 | 0.0001 | - | | 4.5070 | 56450 | 0.0001 | - | | 4.5110 | 56500 | 0.0001 | - | | 4.5150 | 56550 | 0.0001 | - | | 4.5190 | 56600 | 0.0001 | - | | 4.5230 | 56650 | 0.0001 | - | | 4.5269 | 56700 | 0.0 | - | | 4.5309 | 56750 | 0.0002 | - | | 4.5349 | 56800 | 0.0001 | - | | 4.5389 | 56850 | 0.0001 | - | | 4.5429 | 56900 | 0.0001 | - | | 4.5469 | 56950 | 0.0001 | - | | 4.5509 | 57000 | 0.0 | - | | 4.5549 | 57050 | 0.0001 | - | | 4.5589 | 57100 | 0.0001 | - | | 4.5629 | 57150 | 0.0001 | - | | 4.5669 | 57200 | 0.0 | - | | 4.5709 | 57250 | 0.0001 | - | | 4.5749 | 57300 | 0.0001 | - | | 4.5788 | 57350 | 0.0252 | - | | 4.5828 | 57400 | 0.0 | - | | 4.5868 | 57450 | 0.0 | - | | 4.5908 | 57500 | 0.0001 | - | | 4.5948 | 57550 | 0.0001 | - | | 4.5988 | 57600 | 0.0001 | - | | 4.6028 | 57650 | 0.0001 | - | | 4.6068 | 57700 | 0.0 | - | | 4.6108 | 57750 | 0.0 | - | | 4.6148 | 57800 | 0.0001 | - | | 4.6188 | 57850 | 0.0001 | - | | 4.6228 | 57900 | 0.0 | - | | 4.6267 | 57950 | 0.0244 | - | | 4.6307 | 58000 | 0.0416 | - | | 4.6347 | 58050 | 0.0001 | - | | 4.6387 | 58100 | 0.0 | - | | 4.6427 | 58150 | 0.0 | - | | 4.6467 | 58200 | 0.0245 | - | | 4.6507 | 58250 | 0.0001 | - | | 4.6547 | 58300 | 0.0001 | - | | 4.6587 | 58350 | 0.0 | - | | 4.6627 | 58400 | 0.0001 | - | | 4.6667 | 58450 | 0.0001 | - | | 4.6707 | 58500 | 0.0001 | - | | 4.6747 | 58550 | 0.0001 | - | | 4.6786 | 58600 | 0.0001 | - | | 4.6826 | 58650 | 0.0001 | - | | 4.6866 | 58700 | 0.0002 | - | | 4.6906 | 58750 | 0.0 | - | | 4.6946 | 58800 | 0.0001 | - | | 4.6986 | 58850 | 0.0001 | - | | 4.7026 | 58900 | 0.0001 | - | | 4.7066 | 58950 | 0.0253 | - | | 4.7106 | 59000 | 0.0001 | - | | 4.7146 | 59050 | 0.0 | - | | 4.7186 | 59100 | 0.0001 | - | | 4.7226 | 59150 | 0.0 | - | | 4.7265 | 59200 | 0.0001 | - | | 4.7305 | 59250 | 0.0001 | - | | 4.7345 | 59300 | 0.0246 | - | | 4.7385 | 59350 | 0.0252 | - | | 4.7425 | 59400 | 0.0001 | - | | 4.7465 | 59450 | 0.1531 | - | | 4.7505 | 59500 | 0.0001 | - | | 4.7545 | 59550 | 0.0001 | - | | 4.7585 | 59600 | 0.025 | - | | 4.7625 | 59650 | 0.0 | - | | 4.7665 | 59700 | 0.0001 | - | | 4.7705 | 59750 | 0.0001 | - | | 4.7745 | 59800 | 0.0001 | - | | 4.7784 | 59850 | 0.0244 | - | | 4.7824 | 59900 | 0.0009 | - | | 4.7864 | 59950 | 0.0001 | - | | 4.7904 | 60000 | 0.0009 | - | | 4.7944 | 60050 | 0.0015 | - | | 4.7984 | 60100 | 0.0252 | - | | 4.8024 | 60150 | 0.0001 | - | | 4.8064 | 60200 | 0.0245 | - | | 4.8104 | 60250 | 0.0003 | - | | 4.8144 | 60300 | 0.0002 | - | | 4.8184 | 60350 | 0.0001 | - | | 4.8224 | 60400 | 0.0001 | - | | 4.8263 | 60450 | 0.0249 | - | | 4.8303 | 60500 | 0.0002 | - | | 4.8343 | 60550 | 0.0001 | - | | 4.8383 | 60600 | 0.0001 | - | | 4.8423 | 60650 | 0.0001 | - | | 4.8463 | 60700 | 0.0242 | - | | 4.8503 | 60750 | 0.0001 | - | | 4.8543 | 60800 | 0.0002 | - | | 4.8583 | 60850 | 0.0001 | - | | 4.8623 | 60900 | 0.0001 | - | | 4.8663 | 60950 | 0.0005 | - | | 4.8703 | 61000 | 0.0001 | - | | 4.8743 | 61050 | 0.0247 | - | | 4.8782 | 61100 | 0.0001 | - | | 4.8822 | 61150 | 0.0001 | - | | 4.8862 | 61200 | 0.0001 | - | | 4.8902 | 61250 | 0.0001 | - | | 4.8942 | 61300 | 0.0247 | - | | 4.8982 | 61350 | 0.0245 | - | | 4.9022 | 61400 | 0.0001 | - | | 4.9062 | 61450 | 0.0001 | - | | 4.9102 | 61500 | 0.0249 | - | | 4.9142 | 61550 | 0.2375 | - | | 4.9182 | 61600 | 0.0001 | - | | 4.9222 | 61650 | 0.0001 | - | | 4.9261 | 61700 | 0.0002 | - | | 4.9301 | 61750 | 0.0001 | - | | 4.9341 | 61800 | 0.0494 | - | | 4.9381 | 61850 | 0.0001 | - | | 4.9421 | 61900 | 0.0001 | - | | 4.9461 | 61950 | 0.0001 | - | | 4.9501 | 62000 | 0.0001 | - | | 4.9541 | 62050 | 0.0001 | - | | 4.9581 | 62100 | 0.024 | - | | 4.9621 | 62150 | 0.0001 | - | | 4.9661 | 62200 | 0.0001 | - | | 4.9701 | 62250 | 0.0001 | - | | 4.9741 | 62300 | 0.0002 | - | | 4.9780 | 62350 | 0.0001 | - | | 4.9820 | 62400 | 0.0502 | - | | 4.9860 | 62450 | 0.0244 | - | | 4.9900 | 62500 | 0.0001 | - | | 4.9940 | 62550 | 0.0001 | - | | 4.9980 | 62600 | 0.0001 | - | | 5.0 | 62625 | - | 0.1298 | | 5.0020 | 62650 | 0.025 | - | | 5.0060 | 62700 | 0.0001 | - | | 5.0100 | 62750 | 0.0244 | - | | 5.0140 | 62800 | 0.0001 | - | | 5.0180 | 62850 | 0.0001 | - | | 5.0220 | 62900 | 0.0001 | - | | 5.0259 | 62950 | 0.0001 | - | | 5.0299 | 63000 | 0.0001 | - | | 5.0339 | 63050 | 0.0002 | - | | 5.0379 | 63100 | 0.0001 | - | | 5.0419 | 63150 | 0.0001 | - | | 5.0459 | 63200 | 0.0001 | - | | 5.0499 | 63250 | 0.0001 | - | | 5.0539 | 63300 | 0.0001 | - | | 5.0579 | 63350 | 0.0001 | - | | 5.0619 | 63400 | 0.0001 | - | | 5.0659 | 63450 | 0.0249 | - | | 5.0699 | 63500 | 0.0001 | - | | 5.0739 | 63550 | 0.0001 | - | | 5.0778 | 63600 | 0.0002 | - | | 5.0818 | 63650 | 0.0001 | - | | 5.0858 | 63700 | 0.0001 | - | | 5.0898 | 63750 | 0.0001 | - | | 5.0938 | 63800 | 0.0001 | - | | 5.0978 | 63850 | 0.0001 | - | | 5.1018 | 63900 | 0.0001 | - | | 5.1058 | 63950 | 0.0001 | - | | 5.1098 | 64000 | 0.0001 | - | | 5.1138 | 64050 | 0.0001 | - | | 5.1178 | 64100 | 0.0001 | - | | 5.1218 | 64150 | 0.0001 | - | | 5.1257 | 64200 | 0.0001 | - | | 5.1297 | 64250 | 0.0001 | - | | 5.1337 | 64300 | 0.0002 | - | | 5.1377 | 64350 | 0.0001 | - | | 5.1417 | 64400 | 0.0001 | - | | 5.1457 | 64450 | 0.0002 | - | | 5.1497 | 64500 | 0.0001 | - | | 5.1537 | 64550 | 0.0001 | - | | 5.1577 | 64600 | 0.0001 | - | | 5.1617 | 64650 | 0.0003 | - | | 5.1657 | 64700 | 0.0001 | - | | 5.1697 | 64750 | 0.0001 | - | | 5.1737 | 64800 | 0.0001 | - | | 5.1776 | 64850 | 0.0243 | - | | 5.1816 | 64900 | 0.0001 | - | | 5.1856 | 64950 | 0.0003 | - | | 5.1896 | 65000 | 0.0001 | - | | 5.1936 | 65050 | 0.0001 | - | | 5.1976 | 65100 | 0.0001 | - | | 5.2016 | 65150 | 0.0001 | - | | 5.2056 | 65200 | 0.0245 | - | | 5.2096 | 65250 | 0.0001 | - | | 5.2136 | 65300 | 0.0001 | - | | 5.2176 | 65350 | 0.0001 | - | | 5.2216 | 65400 | 0.0001 | - | | 5.2255 | 65450 | 0.0001 | - | | 5.2295 | 65500 | 0.0002 | - | | 5.2335 | 65550 | 0.0248 | - | | 5.2375 | 65600 | 0.0243 | - | | 5.2415 | 65650 | 0.0001 | - | | 5.2455 | 65700 | 0.0002 | - | | 5.2495 | 65750 | 0.0001 | - | | 5.2535 | 65800 | 0.0001 | - | | 5.2575 | 65850 | 0.0 | - | | 5.2615 | 65900 | 0.0001 | - | | 5.2655 | 65950 | 0.0245 | - | | 5.2695 | 66000 | 0.0001 | - | | 5.2735 | 66050 | 0.0001 | - | | 5.2774 | 66100 | 0.0001 | - | | 5.2814 | 66150 | 0.0244 | - | | 5.2854 | 66200 | 0.0001 | - | | 5.2894 | 66250 | 0.0001 | - | | 5.2934 | 66300 | 0.0001 | - | | 5.2974 | 66350 | 0.0001 | - | | 5.3014 | 66400 | 0.0247 | - | | 5.3054 | 66450 | 0.0244 | - | | 5.3094 | 66500 | 0.0001 | - | | 5.3134 | 66550 | 0.0248 | - | | 5.3174 | 66600 | 0.0001 | - | | 5.3214 | 66650 | 0.0003 | - | | 5.3253 | 66700 | 0.0001 | - | | 5.3293 | 66750 | 0.0001 | - | | 5.3333 | 66800 | 0.0249 | - | | 5.3373 | 66850 | 0.0244 | - | | 5.3413 | 66900 | 0.0001 | - | | 5.3453 | 66950 | 0.0246 | - | | 5.3493 | 67000 | 0.0 | - | | 5.3533 | 67050 | 0.0001 | - | | 5.3573 | 67100 | 0.0001 | - | | 5.3613 | 67150 | 0.0001 | - | | 5.3653 | 67200 | 0.0001 | - | | 5.3693 | 67250 | 0.0494 | - | | 5.3733 | 67300 | 0.0001 | - | | 5.3772 | 67350 | 0.0001 | - | | 5.3812 | 67400 | 0.0001 | - | | 5.3852 | 67450 | 0.0001 | - | | 5.3892 | 67500 | 0.0001 | - | | 5.3932 | 67550 | 0.0001 | - | | 5.3972 | 67600 | 0.0001 | - | | 5.4012 | 67650 | 0.0001 | - | | 5.4052 | 67700 | 0.0001 | - | | 5.4092 | 67750 | 0.0001 | - | | 5.4132 | 67800 | 0.0001 | - | | 5.4172 | 67850 | 0.0001 | - | | 5.4212 | 67900 | 0.0 | - | | 5.4251 | 67950 | 0.0 | - | | 5.4291 | 68000 | 0.0001 | - | | 5.4331 | 68050 | 0.0001 | - | | 5.4371 | 68100 | 0.0001 | - | | 5.4411 | 68150 | 0.0001 | - | | 5.4451 | 68200 | 0.0002 | - | | 5.4491 | 68250 | 0.0001 | - | | 5.4531 | 68300 | 0.0001 | - | | 5.4571 | 68350 | 0.0001 | - | | 5.4611 | 68400 | 0.0001 | - | | 5.4651 | 68450 | 0.0245 | - | | 5.4691 | 68500 | 0.0001 | - | | 5.4731 | 68550 | 0.0001 | - | | 5.4770 | 68600 | 0.0245 | - | | 5.4810 | 68650 | 0.0001 | - | | 5.4850 | 68700 | 0.0001 | - | | 5.4890 | 68750 | 0.0001 | - | | 5.4930 | 68800 | 0.0246 | - | | 5.4970 | 68850 | 0.0 | - | | 5.5010 | 68900 | 0.0001 | - | | 5.5050 | 68950 | 0.0001 | - | | 5.5090 | 69000 | 0.0001 | - | | 5.5130 | 69050 | 0.0001 | - | | 5.5170 | 69100 | 0.0001 | - | | 5.5210 | 69150 | 0.0001 | - | | 5.5250 | 69200 | 0.0001 | - | | 5.5289 | 69250 | 0.0001 | - | | 5.5329 | 69300 | 0.0001 | - | | 5.5369 | 69350 | 0.0 | - | | 5.5409 | 69400 | 0.0001 | - | | 5.5449 | 69450 | 0.0001 | - | | 5.5489 | 69500 | 0.0249 | - | | 5.5529 | 69550 | 0.0 | - | | 5.5569 | 69600 | 0.0001 | - | | 5.5609 | 69650 | 0.0001 | - | | 5.5649 | 69700 | 0.0016 | - | | 5.5689 | 69750 | 0.0001 | - | | 5.5729 | 69800 | 0.0 | - | | 5.5768 | 69850 | 0.0 | - | | 5.5808 | 69900 | 0.0 | - | | 5.5848 | 69950 | 0.0 | - | | 5.5888 | 70000 | 0.0 | - | | 5.5928 | 70050 | 0.0001 | - | | 5.5968 | 70100 | 0.0001 | - | | 5.6008 | 70150 | 0.0001 | - | | 5.6048 | 70200 | 0.0001 | - | | 5.6088 | 70250 | 0.0001 | - | | 5.6128 | 70300 | 0.0 | - | | 5.6168 | 70350 | 0.0 | - | | 5.6208 | 70400 | 0.0043 | - | | 5.6248 | 70450 | 0.0151 | - | | 5.6287 | 70500 | 0.0 | - | | 5.6327 | 70550 | 0.0 | - | | 5.6367 | 70600 | 0.0 | - | | 5.6407 | 70650 | 0.0033 | - | | 5.6447 | 70700 | 0.0274 | - | | 5.6487 | 70750 | 0.0031 | - | | 5.6527 | 70800 | 0.0248 | - | | 5.6567 | 70850 | 0.0 | - | | 5.6607 | 70900 | 0.0245 | - | | 5.6647 | 70950 | 0.0248 | - | | 5.6687 | 71000 | 0.0 | - | | 5.6727 | 71050 | 0.0001 | - | | 5.6766 | 71100 | 0.0001 | - | | 5.6806 | 71150 | 0.0001 | - | | 5.6846 | 71200 | 0.0001 | - | | 5.6886 | 71250 | 0.0002 | - | | 5.6926 | 71300 | 0.0 | - | | 5.6966 | 71350 | 0.0001 | - | | 5.7006 | 71400 | 0.0001 | - | | 5.7046 | 71450 | 0.0001 | - | | 5.7086 | 71500 | 0.0001 | - | | 5.7126 | 71550 | 0.0001 | - | | 5.7166 | 71600 | 0.0247 | - | | 5.7206 | 71650 | 0.0001 | - | | 5.7246 | 71700 | 0.0001 | - | | 5.7285 | 71750 | 0.0001 | - | | 5.7325 | 71800 | 0.0001 | - | | 5.7365 | 71850 | 0.0001 | - | | 5.7405 | 71900 | 0.0245 | - | | 5.7445 | 71950 | 0.0001 | - | | 5.7485 | 72000 | 0.0247 | - | | 5.7525 | 72050 | 0.0001 | - | | 5.7565 | 72100 | 0.0 | - | | 5.7605 | 72150 | 0.0001 | - | | 5.7645 | 72200 | 0.05 | - | | 5.7685 | 72250 | 0.0001 | - | | 5.7725 | 72300 | 0.0247 | - | | 5.7764 | 72350 | 0.0002 | - | | 5.7804 | 72400 | 0.0244 | - | | 5.7844 | 72450 | 0.0 | - | | 5.7884 | 72500 | 0.0001 | - | | 5.7924 | 72550 | 0.0001 | - | | 5.7964 | 72600 | 0.0 | - | | 5.8004 | 72650 | 0.0001 | - | | 5.8044 | 72700 | 0.0001 | - | | 5.8084 | 72750 | 0.0001 | - | | 5.8124 | 72800 | 0.0 | - | | 5.8164 | 72850 | 0.0001 | - | | 5.8204 | 72900 | 0.0001 | - | | 5.8244 | 72950 | 0.025 | - | | 5.8283 | 73000 | 0.0001 | - | | 5.8323 | 73050 | 0.0 | - | | 5.8363 | 73100 | 0.0247 | - | | 5.8403 | 73150 | 0.0 | - | | 5.8443 | 73200 | 0.0 | - | | 5.8483 | 73250 | 0.0001 | - | | 5.8523 | 73300 | 0.0 | - | | 5.8563 | 73350 | 0.0 | - | | 5.8603 | 73400 | 0.0001 | - | | 5.8643 | 73450 | 0.0001 | - | | 5.8683 | 73500 | 0.0 | - | | 5.8723 | 73550 | 0.0248 | - | | 5.8762 | 73600 | 0.0001 | - | | 5.8802 | 73650 | 0.0001 | - | | 5.8842 | 73700 | 0.0001 | - | | 5.8882 | 73750 | 0.0001 | - | | 5.8922 | 73800 | 0.0248 | - | | 5.8962 | 73850 | 0.0001 | - | | 5.9002 | 73900 | 0.0001 | - | | 5.9042 | 73950 | 0.0001 | - | | 5.9082 | 74000 | 0.0247 | - | | 5.9122 | 74050 | 0.0246 | - | | 5.9162 | 74100 | 0.0001 | - | | 5.9202 | 74150 | 0.0001 | - | | 5.9242 | 74200 | 0.0 | - | | 5.9281 | 74250 | 0.0001 | - | | 5.9321 | 74300 | 0.0 | - | | 5.9361 | 74350 | 0.0 | - | | 5.9401 | 74400 | 0.0 | - | | 5.9441 | 74450 | 0.0001 | - | | 5.9481 | 74500 | 0.0249 | - | | 5.9521 | 74550 | 0.0 | - | | 5.9561 | 74600 | 0.0002 | - | | 5.9601 | 74650 | 0.0001 | - | | 5.9641 | 74700 | 0.0001 | - | | 5.9681 | 74750 | 0.0 | - | | 5.9721 | 74800 | 0.0 | - | | 5.9760 | 74850 | 0.0246 | - | | 5.9800 | 74900 | 0.0001 | - | | 5.9840 | 74950 | 0.0 | - | | 5.9880 | 75000 | 0.0001 | - | | 5.9920 | 75050 | 0.0 | - | | 5.9960 | 75100 | 0.0001 | - | | 6.0 | 75150 | 0.0001 | 0.0594 | | 6.0040 | 75200 | 0.0001 | - | | 6.0080 | 75250 | 0.0497 | - | | 6.0120 | 75300 | 0.0248 | - | | 6.0160 | 75350 | 0.0495 | - | | 6.0200 | 75400 | 0.0 | - | | 6.0240 | 75450 | 0.0001 | - | | 6.0279 | 75500 | 0.0001 | - | | 6.0319 | 75550 | 0.0001 | - | | 6.0359 | 75600 | 0.0252 | - | | 6.0399 | 75650 | 0.0001 | - | | 6.0439 | 75700 | 0.0001 | - | | 6.0479 | 75750 | 0.0001 | - | | 6.0519 | 75800 | 0.0741 | - | | 6.0559 | 75850 | 0.0 | - | | 6.0599 | 75900 | 0.0 | - | | 6.0639 | 75950 | 0.0001 | - | | 6.0679 | 76000 | 0.0001 | - | | 6.0719 | 76050 | 0.0 | - | | 6.0758 | 76100 | 0.0001 | - | | 6.0798 | 76150 | 0.0001 | - | | 6.0838 | 76200 | 0.0247 | - | | 6.0878 | 76250 | 0.0001 | - | | 6.0918 | 76300 | 0.0001 | - | | 6.0958 | 76350 | 0.0244 | - | | 6.0998 | 76400 | 0.0 | - | | 6.1038 | 76450 | 0.0001 | - | | 6.1078 | 76500 | 0.0001 | - | | 6.1118 | 76550 | 0.0001 | - | | 6.1158 | 76600 | 0.0 | - | | 6.1198 | 76650 | 0.0001 | - | | 6.1238 | 76700 | 0.0 | - | | 6.1277 | 76750 | 0.0245 | - | | 6.1317 | 76800 | 0.0001 | - | | 6.1357 | 76850 | 0.0001 | - | | 6.1397 | 76900 | 0.0001 | - | | 6.1437 | 76950 | 0.0001 | - | | 6.1477 | 77000 | 0.0 | - | | 6.1517 | 77050 | 0.0001 | - | | 6.1557 | 77100 | 0.0001 | - | | 6.1597 | 77150 | 0.0001 | - | | 6.1637 | 77200 | 0.0001 | - | | 6.1677 | 77250 | 0.0 | - | | 6.1717 | 77300 | 0.0001 | - | | 6.1756 | 77350 | 0.0001 | - | | 6.1796 | 77400 | 0.0245 | - | | 6.1836 | 77450 | 0.0 | - | | 6.1876 | 77500 | 0.0496 | - | | 6.1916 | 77550 | 0.0246 | - | | 6.1956 | 77600 | 0.0001 | - | | 6.1996 | 77650 | 0.025 | - | | 6.2036 | 77700 | 0.0001 | - | | 6.2076 | 77750 | 0.0 | - | | 6.2116 | 77800 | 0.0001 | - | | 6.2156 | 77850 | 0.0001 | - | | 6.2196 | 77900 | 0.0248 | - | | 6.2236 | 77950 | 0.0247 | - | | 6.2275 | 78000 | 0.0002 | - | | 6.2315 | 78050 | 0.0001 | - | | 6.2355 | 78100 | 0.0246 | - | | 6.2395 | 78150 | 0.0001 | - | | 6.2435 | 78200 | 0.0001 | - | | 6.2475 | 78250 | 0.0001 | - | | 6.2515 | 78300 | 0.0249 | - | | 6.2555 | 78350 | 0.0001 | - | | 6.2595 | 78400 | 0.0251 | - | | 6.2635 | 78450 | 0.0 | - | | 6.2675 | 78500 | 0.0 | - | | 6.2715 | 78550 | 0.0001 | - | | 6.2754 | 78600 | 0.05 | - | | 6.2794 | 78650 | 0.0001 | - | | 6.2834 | 78700 | 0.0001 | - | | 6.2874 | 78750 | 0.0 | - | | 6.2914 | 78800 | 0.0001 | - | | 6.2954 | 78850 | 0.0001 | - | | 6.2994 | 78900 | 0.0 | - | | 6.3034 | 78950 | 0.0246 | - | | 6.3074 | 79000 | 0.0245 | - | | 6.3114 | 79050 | 0.0001 | - | | 6.3154 | 79100 | 0.0 | - | | 6.3194 | 79150 | 0.0 | - | | 6.3234 | 79200 | 0.0002 | - | | 6.3273 | 79250 | 0.0001 | - | | 6.3313 | 79300 | 0.0001 | - | | 6.3353 | 79350 | 0.0 | - | | 6.3393 | 79400 | 0.0001 | - | | 6.3433 | 79450 | 0.0249 | - | | 6.3473 | 79500 | 0.0001 | - | | 6.3513 | 79550 | 0.0001 | - | | 6.3553 | 79600 | 0.0001 | - | | 6.3593 | 79650 | 0.0001 | - | | 6.3633 | 79700 | 0.0 | - | | 6.3673 | 79750 | 0.0247 | - | | 6.3713 | 79800 | 0.0243 | - | | 6.3752 | 79850 | 0.0 | - | | 6.3792 | 79900 | 0.0001 | - | | 6.3832 | 79950 | 0.0001 | - | | 6.3872 | 80000 | 0.0001 | - | | 6.3912 | 80050 | 0.0 | - | | 6.3952 | 80100 | 0.0001 | - | | 6.3992 | 80150 | 0.0 | - | | 6.4032 | 80200 | 0.0249 | - | | 6.4072 | 80250 | 0.0247 | - | | 6.4112 | 80300 | 0.0 | - | | 6.4152 | 80350 | 0.0248 | - | | 6.4192 | 80400 | 0.0001 | - | | 6.4232 | 80450 | 0.0247 | - | | 6.4271 | 80500 | 0.0 | - | | 6.4311 | 80550 | 0.0 | - | | 6.4351 | 80600 | 0.0247 | - | | 6.4391 | 80650 | 0.0246 | - | | 6.4431 | 80700 | 0.0001 | - | | 6.4471 | 80750 | 0.0242 | - | | 6.4511 | 80800 | 0.0 | - | | 6.4551 | 80850 | 0.0001 | - | | 6.4591 | 80900 | 0.0001 | - | | 6.4631 | 80950 | 0.0243 | - | | 6.4671 | 81000 | 0.0001 | - | | 6.4711 | 81050 | 0.0001 | - | | 6.4750 | 81100 | 0.0491 | - | | 6.4790 | 81150 | 0.0 | - | | 6.4830 | 81200 | 0.0001 | - | | 6.4870 | 81250 | 0.0001 | - | | 6.4910 | 81300 | 0.0247 | - | | 6.4950 | 81350 | 0.0 | - | | 6.4990 | 81400 | 0.0001 | - | | 6.5030 | 81450 | 0.0001 | - | | 6.5070 | 81500 | 0.0001 | - | | 6.5110 | 81550 | 0.0001 | - | | 6.5150 | 81600 | 0.0001 | - | | 6.5190 | 81650 | 0.0246 | - | | 6.5230 | 81700 | 0.0246 | - | | 6.5269 | 81750 | 0.0001 | - | | 6.5309 | 81800 | 0.0001 | - | | 6.5349 | 81850 | 0.0 | - | | 6.5389 | 81900 | 0.0 | - | | 6.5429 | 81950 | 0.0247 | - | | 6.5469 | 82000 | 0.0248 | - | | 6.5509 | 82050 | 0.0001 | - | | 6.5549 | 82100 | 0.0 | - | | 6.5589 | 82150 | 0.0001 | - | | 6.5629 | 82200 | 0.0001 | - | | 6.5669 | 82250 | 0.0001 | - | | 6.5709 | 82300 | 0.0 | - | | 6.5749 | 82350 | 0.0001 | - | | 6.5788 | 82400 | 0.0001 | - | | 6.5828 | 82450 | 0.0 | - | | 6.5868 | 82500 | 0.0001 | - | | 6.5908 | 82550 | 0.0001 | - | | 6.5948 | 82600 | 0.0001 | - | | 6.5988 | 82650 | 0.0001 | - | | 6.6028 | 82700 | 0.0001 | - | | 6.6068 | 82750 | 0.0001 | - | | 6.6108 | 82800 | 0.0 | - | | 6.6148 | 82850 | 0.0 | - | | 6.6188 | 82900 | 0.0001 | - | | 6.6228 | 82950 | 0.0244 | - | | 6.6267 | 83000 | 0.0001 | - | | 6.6307 | 83050 | 0.0247 | - | | 6.6347 | 83100 | 0.0 | - | | 6.6387 | 83150 | 0.0001 | - | | 6.6427 | 83200 | 0.0246 | - | | 6.6467 | 83250 | 0.0001 | - | | 6.6507 | 83300 | 0.0001 | - | | 6.6547 | 83350 | 0.0 | - | | 6.6587 | 83400 | 0.0 | - | | 6.6627 | 83450 | 0.0 | - | | 6.6667 | 83500 | 0.0 | - | | 6.6707 | 83550 | 0.0 | - | | 6.6747 | 83600 | 0.0001 | - | | 6.6786 | 83650 | 0.0001 | - | | 6.6826 | 83700 | 0.0 | - | | 6.6866 | 83750 | 0.0001 | - | | 6.6906 | 83800 | 0.0 | - | | 6.6946 | 83850 | 0.0001 | - | | 6.6986 | 83900 | 0.0001 | - | | 6.7026 | 83950 | 0.0001 | - | | 6.7066 | 84000 | 0.025 | - | | 6.7106 | 84050 | 0.0001 | - | | 6.7146 | 84100 | 0.0 | - | | 6.7186 | 84150 | 0.0 | - | | 6.7226 | 84200 | 0.0 | - | | 6.7265 | 84250 | 0.0001 | - | | 6.7305 | 84300 | 0.0246 | - | | 6.7345 | 84350 | 0.0001 | - | | 6.7385 | 84400 | 0.0251 | - | | 6.7425 | 84450 | 0.0 | - | | 6.7465 | 84500 | 0.0 | - | | 6.7505 | 84550 | 0.0001 | - | | 6.7545 | 84600 | 0.0 | - | | 6.7585 | 84650 | 0.0248 | - | | 6.7625 | 84700 | 0.0 | - | | 6.7665 | 84750 | 0.0001 | - | | 6.7705 | 84800 | 0.0 | - | | 6.7745 | 84850 | 0.0247 | - | | 6.7784 | 84900 | 0.0 | - | | 6.7824 | 84950 | 0.0001 | - | | 6.7864 | 85000 | 0.0 | - | | 6.7904 | 85050 | 0.0 | - | | 6.7944 | 85100 | 0.0 | - | | 6.7984 | 85150 | 0.0002 | - | | 6.8024 | 85200 | 0.0 | - | | 6.8064 | 85250 | 0.0001 | - | | 6.8104 | 85300 | 0.0001 | - | | 6.8144 | 85350 | 0.0 | - | | 6.8184 | 85400 | 0.0001 | - | | 6.8224 | 85450 | 0.0001 | - | | 6.8263 | 85500 | 0.0 | - | | 6.8303 | 85550 | 0.0001 | - | | 6.8343 | 85600 | 0.0 | - | | 6.8383 | 85650 | 0.0 | - | | 6.8423 | 85700 | 0.0 | - | | 6.8463 | 85750 | 0.0248 | - | | 6.8503 | 85800 | 0.0 | - | | 6.8543 | 85850 | 0.0001 | - | | 6.8583 | 85900 | 0.0 | - | | 6.8623 | 85950 | 0.0 | - | | 6.8663 | 86000 | 0.0001 | - | | 6.8703 | 86050 | 0.0 | - | | 6.8743 | 86100 | 0.0001 | - | | 6.8782 | 86150 | 0.0001 | - | | 6.8822 | 86200 | 0.0 | - | | 6.8862 | 86250 | 0.0 | - | | 6.8902 | 86300 | 0.0 | - | | 6.8942 | 86350 | 0.0 | - | | 6.8982 | 86400 | 0.0 | - | | 6.9022 | 86450 | 0.0001 | - | | 6.9062 | 86500 | 0.0 | - | | 6.9102 | 86550 | 0.0 | - | | 6.9142 | 86600 | 0.0 | - | | 6.9182 | 86650 | 0.0 | - | | 6.9222 | 86700 | 0.0 | - | | 6.9261 | 86750 | 0.0001 | - | | 6.9301 | 86800 | 0.0246 | - | | 6.9341 | 86850 | 0.025 | - | | 6.9381 | 86900 | 0.0001 | - | | 6.9421 | 86950 | 0.0 | - | | 6.9461 | 87000 | 0.0 | - | | 6.9501 | 87050 | 0.0 | - | | 6.9541 | 87100 | 0.0001 | - | | 6.9581 | 87150 | 0.0001 | - | | 6.9621 | 87200 | 0.0 | - | | 6.9661 | 87250 | 0.0 | - | | 6.9701 | 87300 | 0.0001 | - | | 6.9741 | 87350 | 0.0081 | - | | 6.9780 | 87400 | 0.0 | - | | 6.9820 | 87450 | 0.0469 | - | | 6.9860 | 87500 | 0.0 | - | | 6.9900 | 87550 | 0.0 | - | | 6.9940 | 87600 | 0.0 | - | | 6.9980 | 87650 | 0.0 | - | | **7.0** | **87675** | **-** | **0.039** | | 7.0020 | 87700 | 0.0248 | - | | 7.0060 | 87750 | 0.0246 | - | | 7.0100 | 87800 | 0.0 | - | | 7.0140 | 87850 | 0.0001 | - | | 7.0180 | 87900 | 0.0001 | - | | 7.0220 | 87950 | 0.0 | - | | 7.0259 | 88000 | 0.0 | - | | 7.0299 | 88050 | 0.0001 | - | | 7.0339 | 88100 | 0.0001 | - | | 7.0379 | 88150 | 0.0 | - | | 7.0419 | 88200 | 0.0034 | - | | 7.0459 | 88250 | 0.0001 | - | | 7.0499 | 88300 | 0.004 | - | | 7.0539 | 88350 | 0.0 | - | | 7.0579 | 88400 | 0.0 | - | | 7.0619 | 88450 | 0.0001 | - | | 7.0659 | 88500 | 0.0249 | - | | 7.0699 | 88550 | 0.0 | - | | 7.0739 | 88600 | 0.0 | - | | 7.0778 | 88650 | 0.0001 | - | | 7.0818 | 88700 | 0.0 | - | | 7.0858 | 88750 | 0.0 | - | | 7.0898 | 88800 | 0.0 | - | | 7.0938 | 88850 | 0.0 | - | | 7.0978 | 88900 | 0.0 | - | | 7.1018 | 88950 | 0.0 | - | | 7.1058 | 89000 | 0.0 | - | | 7.1098 | 89050 | 0.0 | - | | 7.1138 | 89100 | 0.0 | - | | 7.1178 | 89150 | 0.0 | - | | 7.1218 | 89200 | 0.0001 | - | | 7.1257 | 89250 | 0.0024 | - | | 7.1297 | 89300 | 0.0 | - | | 7.1337 | 89350 | 0.0001 | - | | 7.1377 | 89400 | 0.0 | - | | 7.1417 | 89450 | 0.0 | - | | 7.1457 | 89500 | 0.0001 | - | | 7.1497 | 89550 | 0.0 | - | | 7.1537 | 89600 | 0.0 | - | | 7.1577 | 89650 | 0.0 | - | | 7.1617 | 89700 | 0.0001 | - | | 7.1657 | 89750 | 0.0001 | - | | 7.1697 | 89800 | 0.0 | - | | 7.1737 | 89850 | 0.0 | - | | 7.1776 | 89900 | 0.0135 | - | | 7.1816 | 89950 | 0.0001 | - | | 7.1856 | 90000 | 0.0043 | - | | 7.1896 | 90050 | 0.0072 | - | | 7.1936 | 90100 | 0.0 | - | | 7.1976 | 90150 | 0.0 | - | | 7.2016 | 90200 | 0.0 | - | | 7.2056 | 90250 | 0.0247 | - | | 7.2096 | 90300 | 0.0 | - | | 7.2136 | 90350 | 0.0 | - | | 7.2176 | 90400 | 0.0 | - | | 7.2216 | 90450 | 0.0 | - | | 7.2255 | 90500 | 0.0001 | - | | 7.2295 | 90550 | 0.0 | - | | 7.2335 | 90600 | 0.0363 | - | | 7.2375 | 90650 | 0.0 | - | | 7.2415 | 90700 | 0.0 | - | | 7.2455 | 90750 | 0.0 | - | | 7.2495 | 90800 | 0.0 | - | | 7.2535 | 90850 | 0.0 | - | | 7.2575 | 90900 | 0.0 | - | | 7.2615 | 90950 | 0.0138 | - | | 7.2655 | 91000 | 0.0 | - | | 7.2695 | 91050 | 0.0 | - | | 7.2735 | 91100 | 0.0 | - | | 7.2774 | 91150 | 0.0 | - | | 7.2814 | 91200 | 0.0252 | - | | 7.2854 | 91250 | 0.0 | - | | 7.2894 | 91300 | 0.0 | - | | 7.2934 | 91350 | 0.0 | - | | 7.2974 | 91400 | 0.0105 | - | | 7.3014 | 91450 | 0.0244 | - | | 7.3054 | 91500 | 0.0 | - | | 7.3094 | 91550 | 0.0 | - | | 7.3134 | 91600 | 0.0247 | - | | 7.3174 | 91650 | 0.0 | - | | 7.3214 | 91700 | 0.0001 | - | | 7.3253 | 91750 | 0.0 | - | | 7.3293 | 91800 | 0.0 | - | | 7.3333 | 91850 | 0.0189 | - | | 7.3373 | 91900 | 0.0 | - | | 7.3413 | 91950 | 0.0246 | - | | 7.3453 | 92000 | 0.0 | - | | 7.3493 | 92050 | 0.0 | - | | 7.3533 | 92100 | 0.0 | - | | 7.3573 | 92150 | 0.0 | - | | 7.3613 | 92200 | 0.0 | - | | 7.3653 | 92250 | 0.0247 | - | | 7.3693 | 92300 | 0.0122 | - | | 7.3733 | 92350 | 0.0 | - | | 7.3772 | 92400 | 0.0 | - | | 7.3812 | 92450 | 0.0022 | - | | 7.3852 | 92500 | 0.0 | - | | 7.3892 | 92550 | 0.0001 | - | | 7.3932 | 92600 | 0.0 | - | | 7.3972 | 92650 | 0.0 | - | | 7.4012 | 92700 | 0.0 | - | | 7.4052 | 92750 | 0.0032 | - | | 7.4092 | 92800 | 0.0001 | - | | 7.4132 | 92850 | 0.0037 | - | | 7.4172 | 92900 | 0.0001 | - | | 7.4212 | 92950 | 0.0028 | - | | 7.4251 | 93000 | 0.0001 | - | | 7.4291 | 93050 | 0.0 | - | | 7.4331 | 93100 | 0.0039 | - | | 7.4371 | 93150 | 0.0036 | - | | 7.4411 | 93200 | 0.0 | - | | 7.4451 | 93250 | 0.0 | - | | 7.4491 | 93300 | 0.0 | - | | 7.4531 | 93350 | 0.0 | - | | 7.4571 | 93400 | 0.0001 | - | | 7.4611 | 93450 | 0.0091 | - | | 7.4651 | 93500 | 0.0 | - | | 7.4691 | 93550 | 0.0 | - | | 7.4731 | 93600 | 0.0275 | - | | 7.4770 | 93650 | 0.0 | - | | 7.4810 | 93700 | 0.0 | - | | 7.4850 | 93750 | 0.0035 | - | | 7.4890 | 93800 | 0.0246 | - | | 7.4930 | 93850 | 0.0025 | - | | 7.4970 | 93900 | 0.0 | - | | 7.5010 | 93950 | 0.0 | - | | 7.5050 | 94000 | 0.0 | - | | 7.5090 | 94050 | 0.0 | - | | 7.5130 | 94100 | 0.0 | - | | 7.5170 | 94150 | 0.0 | - | | 7.5210 | 94200 | 0.0023 | - | | 7.5250 | 94250 | 0.0 | - | | 7.5289 | 94300 | 0.0 | - | | 7.5329 | 94350 | 0.0 | - | | 7.5369 | 94400 | 0.0 | - | | 7.5409 | 94450 | 0.0027 | - | | 7.5449 | 94500 | 0.0028 | - | | 7.5489 | 94550 | 0.0247 | - | | 7.5529 | 94600 | 0.0 | - | | 7.5569 | 94650 | 0.0 | - | | 7.5609 | 94700 | 0.0 | - | | 7.5649 | 94750 | 0.0036 | - | | 7.5689 | 94800 | 0.0 | - | | 7.5729 | 94850 | 0.0 | - | | 7.5768 | 94900 | 0.0 | - | | 7.5808 | 94950 | 0.0 | - | | 7.5848 | 95000 | 0.0 | - | | 7.5888 | 95050 | 0.0 | - | | 7.5928 | 95100 | 0.0001 | - | | 7.5968 | 95150 | 0.0 | - | | 7.6008 | 95200 | 0.0 | - | | 7.6048 | 95250 | 0.0001 | - | | 7.6088 | 95300 | 0.0 | - | | 7.6128 | 95350 | 0.0 | - | | 7.6168 | 95400 | 0.0028 | - | | 7.6208 | 95450 | 0.0119 | - | | 7.6248 | 95500 | 0.0028 | - | | 7.6287 | 95550 | 0.0 | - | | 7.6327 | 95600 | 0.0001 | - | | 7.6367 | 95650 | 0.0 | - | | 7.6407 | 95700 | 0.0318 | - | | 7.6447 | 95750 | 0.0037 | - | | 7.6487 | 95800 | 0.0035 | - | | 7.6527 | 95850 | 0.0089 | - | | 7.6567 | 95900 | 0.0 | - | | 7.6607 | 95950 | 0.006 | - | | 7.6647 | 96000 | 0.0 | - | | 7.6687 | 96050 | 0.0 | - | | 7.6727 | 96100 | 0.0 | - | | 7.6766 | 96150 | 0.0 | - | | 7.6806 | 96200 | 0.0 | - | | 7.6846 | 96250 | 0.0 | - | | 7.6886 | 96300 | 0.0105 | - | | 7.6926 | 96350 | 0.0 | - | | 7.6966 | 96400 | 0.0 | - | | 7.7006 | 96450 | 0.0 | - | | 7.7046 | 96500 | 0.0 | - | | 7.7086 | 96550 | 0.0 | - | | 7.7126 | 96600 | 0.0 | - | | 7.7166 | 96650 | 0.0024 | - | | 7.7206 | 96700 | 0.0001 | - | | 7.7246 | 96750 | 0.0 | - | | 7.7285 | 96800 | 0.0123 | - | | 7.7325 | 96850 | 0.0 | - | | 7.7365 | 96900 | 0.0031 | - | | 7.7405 | 96950 | 0.0 | - | | 7.7445 | 97000 | 0.0025 | - | | 7.7485 | 97050 | 0.0 | - | | 7.7525 | 97100 | 0.0 | - | | 7.7565 | 97150 | 0.0 | - | | 7.7605 | 97200 | 0.0022 | - | | 7.7645 | 97250 | 0.0251 | - | | 7.7685 | 97300 | 0.002 | - | | 7.7725 | 97350 | 0.0118 | - | | 7.7764 | 97400 | 0.0019 | - | | 7.7804 | 97450 | 0.0001 | - | | 7.7844 | 97500 | 0.0123 | - | | 7.7884 | 97550 | 0.0 | - | | 7.7924 | 97600 | 0.0 | - | | 7.7964 | 97650 | 0.0 | - | | 7.8004 | 97700 | 0.0097 | - | | 7.8044 | 97750 | 0.0 | - | | 7.8084 | 97800 | 0.0 | - | | 7.8124 | 97850 | 0.0 | - | | 7.8164 | 97900 | 0.0001 | - | | 7.8204 | 97950 | 0.0001 | - | | 7.8244 | 98000 | 0.0251 | - | | 7.8283 | 98050 | 0.0 | - | | 7.8323 | 98100 | 0.009 | - | | 7.8363 | 98150 | 0.0246 | - | | 7.8403 | 98200 | 0.0 | - | | 7.8443 | 98250 | 0.0 | - | | 7.8483 | 98300 | 0.0 | - | | 7.8523 | 98350 | 0.0 | - | | 7.8563 | 98400 | 0.0 | - | | 7.8603 | 98450 | 0.0001 | - | | 7.8643 | 98500 | 0.0 | - | | 7.8683 | 98550 | 0.0018 | - | | 7.8723 | 98600 | 0.0 | - | | 7.8762 | 98650 | 0.0001 | - | | 7.8802 | 98700 | 0.0 | - | | 7.8842 | 98750 | 0.0 | - | | 7.8882 | 98800 | 0.0024 | - | | 7.8922 | 98850 | 0.0 | - | | 7.8962 | 98900 | 0.0 | - | | 7.9002 | 98950 | 0.0 | - | | 7.9042 | 99000 | 0.0027 | - | | 7.9082 | 99050 | 0.0027 | - | | 7.9122 | 99100 | 0.0 | - | | 7.9162 | 99150 | 0.0 | - | | 7.9202 | 99200 | 0.0 | - | | 7.9242 | 99250 | 0.0 | - | | 7.9281 | 99300 | 0.0138 | - | | 7.9321 | 99350 | 0.0 | - | | 7.9361 | 99400 | 0.0 | - | | 7.9401 | 99450 | 0.0 | - | | 7.9441 | 99500 | 0.0001 | - | | 7.9481 | 99550 | 0.0019 | - | | 7.9521 | 99600 | 0.0 | - | | 7.9561 | 99650 | 0.0 | - | | 7.9601 | 99700 | 0.0001 | - | | 7.9641 | 99750 | 0.0 | - | | 7.9681 | 99800 | 0.0 | - | | 7.9721 | 99850 | 0.0016 | - | | 7.9760 | 99900 | 0.0001 | - | | 7.9800 | 99950 | 0.0265 | - | | 7.9840 | 100000 | 0.0 | - | | 7.9880 | 100050 | 0.0 | - | | 7.9920 | 100100 | 0.0 | - | | 7.9960 | 100150 | 0.0127 | - | | 8.0 | 100200 | 0.0001 | 0.0405 | | 8.0040 | 100250 | 0.0247 | - | | 8.0080 | 100300 | 0.025 | - | | 8.0120 | 100350 | 0.0 | - | | 8.0160 | 100400 | 0.0072 | - | | 8.0200 | 100450 | 0.0 | - | | 8.0240 | 100500 | 0.0 | - | | 8.0279 | 100550 | 0.0 | - | | 8.0319 | 100600 | 0.0 | - | | 8.0359 | 100650 | 0.0251 | - | | 8.0399 | 100700 | 0.0 | - | | 8.0439 | 100750 | 0.0 | - | | 8.0479 | 100800 | 0.0042 | - | | 8.0519 | 100850 | 0.0036 | - | | 8.0559 | 100900 | 0.0 | - | | 8.0599 | 100950 | 0.0 | - | | 8.0639 | 101000 | 0.0 | - | | 8.0679 | 101050 | 0.0001 | - | | 8.0719 | 101100 | 0.0 | - | | 8.0758 | 101150 | 0.0 | - | | 8.0798 | 101200 | 0.0116 | - | | 8.0838 | 101250 | 0.0027 | - | | 8.0878 | 101300 | 0.0 | - | | 8.0918 | 101350 | 0.0 | - | | 8.0958 | 101400 | 0.0032 | - | | 8.0998 | 101450 | 0.0 | - | | 8.1038 | 101500 | 0.0 | - | | 8.1078 | 101550 | 0.0 | - | | 8.1118 | 101600 | 0.0097 | - | | 8.1158 | 101650 | 0.0 | - | | 8.1198 | 101700 | 0.0105 | - | | 8.1238 | 101750 | 0.0 | - | | 8.1277 | 101800 | 0.0026 | - | | 8.1317 | 101850 | 0.0 | - | | 8.1357 | 101900 | 0.0 | - | | 8.1397 | 101950 | 0.0 | - | | 8.1437 | 102000 | 0.0 | - | | 8.1477 | 102050 | 0.0 | - | | 8.1517 | 102100 | 0.0 | - | | 8.1557 | 102150 | 0.0 | - | | 8.1597 | 102200 | 0.0 | - | | 8.1637 | 102250 | 0.0 | - | | 8.1677 | 102300 | 0.0 | - | | 8.1717 | 102350 | 0.0 | - | | 8.1756 | 102400 | 0.0028 | - | | 8.1796 | 102450 | 0.0 | - | | 8.1836 | 102500 | 0.0037 | - | | 8.1876 | 102550 | 0.0065 | - | | 8.1916 | 102600 | 0.0 | - | | 8.1956 | 102650 | 0.0001 | - | | 8.1996 | 102700 | 0.0251 | - | | 8.2036 | 102750 | 0.0 | - | | 8.2076 | 102800 | 0.0 | - | | 8.2116 | 102850 | 0.0 | - | | 8.2156 | 102900 | 0.0 | - | | 8.2196 | 102950 | 0.0023 | - | | 8.2236 | 103000 | 0.0023 | - | | 8.2275 | 103050 | 0.0 | - | | 8.2315 | 103100 | 0.0246 | - | | 8.2355 | 103150 | 0.0 | - | | 8.2395 | 103200 | 0.0 | - | | 8.2435 | 103250 | 0.0 | - | | 8.2475 | 103300 | 0.0 | - | | 8.2515 | 103350 | 0.0253 | - | | 8.2555 | 103400 | 0.0 | - | | 8.2595 | 103450 | 0.0148 | - | | 8.2635 | 103500 | 0.0 | - | | 8.2675 | 103550 | 0.0 | - | | 8.2715 | 103600 | 0.0001 | - | | 8.2754 | 103650 | 0.0271 | - | | 8.2794 | 103700 | 0.0 | - | | 8.2834 | 103750 | 0.0 | - | | 8.2874 | 103800 | 0.0 | - | | 8.2914 | 103850 | 0.0 | - | | 8.2954 | 103900 | 0.0108 | - | | 8.2994 | 103950 | 0.0245 | - | | 8.3034 | 104000 | 0.0 | - | | 8.3074 | 104050 | 0.0248 | - | | 8.3114 | 104100 | 0.0 | - | | 8.3154 | 104150 | 0.0 | - | | 8.3194 | 104200 | 0.0 | - | | 8.3234 | 104250 | 0.0001 | - | | 8.3273 | 104300 | 0.0 | - | | 8.3313 | 104350 | 0.0098 | - | | 8.3353 | 104400 | 0.0 | - | | 8.3393 | 104450 | 0.0247 | - | | 8.3433 | 104500 | 0.0001 | - | | 8.3473 | 104550 | 0.0 | - | | 8.3513 | 104600 | 0.0 | - | | 8.3553 | 104650 | 0.0001 | - | | 8.3593 | 104700 | 0.0 | - | | 8.3633 | 104750 | 0.0247 | - | | 8.3673 | 104800 | 0.0 | - | | 8.3713 | 104850 | 0.0001 | - | | 8.3752 | 104900 | 0.0001 | - | | 8.3792 | 104950 | 0.0 | - | | 8.3832 | 105000 | 0.0 | - | | 8.3872 | 105050 | 0.0 | - | | 8.3912 | 105100 | 0.0 | - | | 8.3952 | 105150 | 0.0 | - | | 8.3992 | 105200 | 0.0001 | - | | 8.4032 | 105250 | 0.0 | - | | 8.4072 | 105300 | 0.0001 | - | | 8.4112 | 105350 | 0.0001 | - | | 8.4152 | 105400 | 0.0 | - | | 8.4192 | 105450 | 0.0 | - | | 8.4232 | 105500 | 0.025 | - | | 8.4271 | 105550 | 0.0 | - | | 8.4311 | 105600 | 0.0 | - | | 8.4351 | 105650 | 0.0 | - | | 8.4391 | 105700 | 0.0 | - | | 8.4431 | 105750 | 0.0001 | - | | 8.4471 | 105800 | 0.0 | - | | 8.4511 | 105850 | 0.0 | - | | 8.4551 | 105900 | 0.0001 | - | | 8.4591 | 105950 | 0.0246 | - | | 8.4631 | 106000 | 0.0 | - | | 8.4671 | 106050 | 0.0 | - | | 8.4711 | 106100 | 0.0246 | - | | 8.4750 | 106150 | 0.0001 | - | | 8.4790 | 106200 | 0.0 | - | | 8.4830 | 106250 | 0.0 | - | | 8.4870 | 106300 | 0.0246 | - | | 8.4910 | 106350 | 0.0 | - | | 8.4950 | 106400 | 0.0 | - | | 8.4990 | 106450 | 0.0001 | - | | 8.5030 | 106500 | 0.0001 | - | | 8.5070 | 106550 | 0.0 | - | | 8.5110 | 106600 | 0.0 | - | | 8.5150 | 106650 | 0.0001 | - | | 8.5190 | 106700 | 0.0 | - | | 8.5230 | 106750 | 0.0 | - | | 8.5269 | 106800 | 0.0 | - | | 8.5309 | 106850 | 0.0001 | - | | 8.5349 | 106900 | 0.0 | - | | 8.5389 | 106950 | 0.0 | - | | 8.5429 | 107000 | 0.0001 | - | | 8.5469 | 107050 | 0.0 | - | | 8.5509 | 107100 | 0.0 | - | | 8.5549 | 107150 | 0.0 | - | | 8.5589 | 107200 | 0.0 | - | | 8.5629 | 107250 | 0.0001 | - | | 8.5669 | 107300 | 0.0 | - | | 8.5709 | 107350 | 0.0 | - | | 8.5749 | 107400 | 0.0001 | - | | 8.5788 | 107450 | 0.0251 | - | | 8.5828 | 107500 | 0.0 | - | | 8.5868 | 107550 | 0.0 | - | | 8.5908 | 107600 | 0.0001 | - | | 8.5948 | 107650 | 0.0 | - | | 8.5988 | 107700 | 0.0 | - | | 8.6028 | 107750 | 0.0 | - | | 8.6068 | 107800 | 0.0001 | - | | 8.6108 | 107850 | 0.0 | - | | 8.6148 | 107900 | 0.0 | - | | 8.6188 | 107950 | 0.0245 | - | | 8.6228 | 108000 | 0.0 | - | | 8.6267 | 108050 | 0.0 | - | | 8.6307 | 108100 | 0.0249 | - | | 8.6347 | 108150 | 0.0 | - | | 8.6387 | 108200 | 0.0246 | - | | 8.6427 | 108250 | 0.0 | - | | 8.6467 | 108300 | 0.0001 | - | | 8.6507 | 108350 | 0.0001 | - | | 8.6547 | 108400 | 0.0001 | - | | 8.6587 | 108450 | 0.0 | - | | 8.6627 | 108500 | 0.0 | - | | 8.6667 | 108550 | 0.0 | - | | 8.6707 | 108600 | 0.0 | - | | 8.6747 | 108650 | 0.0 | - | | 8.6786 | 108700 | 0.0 | - | | 8.6826 | 108750 | 0.0 | - | | 8.6866 | 108800 | 0.0001 | - | | 8.6906 | 108850 | 0.0 | - | | 8.6946 | 108900 | 0.0 | - | | 8.6986 | 108950 | 0.0 | - | | 8.7026 | 109000 | 0.0 | - | | 8.7066 | 109050 | 0.0248 | - | | 8.7106 | 109100 | 0.0001 | - | | 8.7146 | 109150 | 0.0 | - | | 8.7186 | 109200 | 0.0 | - | | 8.7226 | 109250 | 0.0 | - | | 8.7265 | 109300 | 0.0246 | - | | 8.7305 | 109350 | 0.0001 | - | | 8.7345 | 109400 | 0.0 | - | | 8.7385 | 109450 | 0.025 | - | | 8.7425 | 109500 | 0.0 | - | | 8.7465 | 109550 | 0.0 | - | | 8.7505 | 109600 | 0.0 | - | | 8.7545 | 109650 | 0.0 | - | | 8.7585 | 109700 | 0.025 | - | | 8.7625 | 109750 | 0.0001 | - | | 8.7665 | 109800 | 0.0001 | - | | 8.7705 | 109850 | 0.0248 | - | | 8.7745 | 109900 | 0.0001 | - | | 8.7784 | 109950 | 0.0 | - | | 8.7824 | 110000 | 0.0 | - | | 8.7864 | 110050 | 0.0 | - | | 8.7904 | 110100 | 0.0 | - | | 8.7944 | 110150 | 0.0 | - | | 8.7984 | 110200 | 0.0001 | - | | 8.8024 | 110250 | 0.0 | - | | 8.8064 | 110300 | 0.0 | - | | 8.8104 | 110350 | 0.0 | - | | 8.8144 | 110400 | 0.0 | - | | 8.8184 | 110450 | 0.0001 | - | | 8.8224 | 110500 | 0.0001 | - | | 8.8263 | 110550 | 0.0 | - | | 8.8303 | 110600 | 0.0001 | - | | 8.8343 | 110650 | 0.0 | - | | 8.8383 | 110700 | 0.0 | - | | 8.8423 | 110750 | 0.0 | - | | 8.8463 | 110800 | 0.0247 | - | | 8.8503 | 110850 | 0.0 | - | | 8.8543 | 110900 | 0.0 | - | | 8.8583 | 110950 | 0.0 | - | | 8.8623 | 111000 | 0.0 | - | | 8.8663 | 111050 | 0.0001 | - | | 8.8703 | 111100 | 0.0 | - | | 8.8743 | 111150 | 0.0001 | - | | 8.8782 | 111200 | 0.0001 | - | | 8.8822 | 111250 | 0.0 | - | | 8.8862 | 111300 | 0.0 | - | | 8.8902 | 111350 | 0.0001 | - | | 8.8942 | 111400 | 0.0 | - | | 8.8982 | 111450 | 0.0 | - | | 8.9022 | 111500 | 0.0 | - | | 8.9062 | 111550 | 0.0 | - | | 8.9102 | 111600 | 0.0 | - | | 8.9142 | 111650 | 0.0 | - | | 8.9182 | 111700 | 0.0 | - | | 8.9222 | 111750 | 0.0 | - | | 8.9261 | 111800 | 0.0247 | - | | 8.9301 | 111850 | 0.0 | - | | 8.9341 | 111900 | 0.0248 | - | | 8.9381 | 111950 | 0.0 | - | | 8.9421 | 112000 | 0.0 | - | | 8.9461 | 112050 | 0.0 | - | | 8.9501 | 112100 | 0.0 | - | | 8.9541 | 112150 | 0.0 | - | | 8.9581 | 112200 | 0.0 | - | | 8.9621 | 112250 | 0.0001 | - | | 8.9661 | 112300 | 0.0 | - | | 8.9701 | 112350 | 0.0001 | - | | 8.9741 | 112400 | 0.0001 | - | | 8.9780 | 112450 | 0.0247 | - | | 8.9820 | 112500 | 0.0496 | - | | 8.9860 | 112550 | 0.0 | - | | 8.9900 | 112600 | 0.0001 | - | | 8.9940 | 112650 | 0.0 | - | | 8.9980 | 112700 | 0.0 | - | | 9.0 | 112725 | - | 0.0579 | | 9.0020 | 112750 | 0.0493 | - | | 9.0060 | 112800 | 0.0 | - | | 9.0100 | 112850 | 0.0001 | - | | 9.0140 | 112900 | 0.0001 | - | | 9.0180 | 112950 | 0.0 | - | | 9.0220 | 113000 | 0.0 | - | | 9.0259 | 113050 | 0.0 | - | | 9.0299 | 113100 | 0.0 | - | | 9.0339 | 113150 | 0.0001 | - | | 9.0379 | 113200 | 0.0 | - | | 9.0419 | 113250 | 0.0 | - | | 9.0459 | 113300 | 0.0 | - | | 9.0499 | 113350 | 0.0 | - | | 9.0539 | 113400 | 0.0 | - | | 9.0579 | 113450 | 0.0 | - | | 9.0619 | 113500 | 0.0 | - | | 9.0659 | 113550 | 0.0246 | - | | 9.0699 | 113600 | 0.0 | - | | 9.0739 | 113650 | 0.0 | - | | 9.0778 | 113700 | 0.0001 | - | | 9.0818 | 113750 | 0.0001 | - | | 9.0858 | 113800 | 0.0 | - | | 9.0898 | 113850 | 0.0001 | - | | 9.0938 | 113900 | 0.0 | - | | 9.0978 | 113950 | 0.0 | - | | 9.1018 | 114000 | 0.0 | - | | 9.1058 | 114050 | 0.0 | - | | 9.1098 | 114100 | 0.0 | - | | 9.1138 | 114150 | 0.0 | - | | 9.1178 | 114200 | 0.0 | - | | 9.1218 | 114250 | 0.0 | - | | 9.1257 | 114300 | 0.0001 | - | | 9.1297 | 114350 | 0.0 | - | | 9.1337 | 114400 | 0.0001 | - | | 9.1377 | 114450 | 0.0 | - | | 9.1417 | 114500 | 0.0 | - | | 9.1457 | 114550 | 0.0001 | - | | 9.1497 | 114600 | 0.0 | - | | 9.1537 | 114650 | 0.0 | - | | 9.1577 | 114700 | 0.0 | - | | 9.1617 | 114750 | 0.0001 | - | | 9.1657 | 114800 | 0.0 | - | | 9.1697 | 114850 | 0.0 | - | | 9.1737 | 114900 | 0.0 | - | | 9.1776 | 114950 | 0.0247 | - | | 9.1816 | 115000 | 0.0001 | - | | 9.1856 | 115050 | 0.0001 | - | | 9.1896 | 115100 | 0.0001 | - | | 9.1936 | 115150 | 0.0 | - | | 9.1976 | 115200 | 0.0 | - | | 9.2016 | 115250 | 0.0 | - | | 9.2056 | 115300 | 0.0247 | - | | 9.2096 | 115350 | 0.0 | - | | 9.2136 | 115400 | 0.0 | - | | 9.2176 | 115450 | 0.0 | - | | 9.2216 | 115500 | 0.0 | - | | 9.2255 | 115550 | 0.0 | - | | 9.2295 | 115600 | 0.0245 | - | | 9.2335 | 115650 | 0.0248 | - | | 9.2375 | 115700 | 0.0 | - | | 9.2415 | 115750 | 0.0001 | - | | 9.2455 | 115800 | 0.0 | - | | 9.2495 | 115850 | 0.0 | - | | 9.2535 | 115900 | 0.0 | - | | 9.2575 | 115950 | 0.0246 | - | | 9.2615 | 116000 | 0.0 | - | | 9.2655 | 116050 | 0.0 | - | | 9.2695 | 116100 | 0.0 | - | | 9.2735 | 116150 | 0.0 | - | | 9.2774 | 116200 | 0.0 | - | | 9.2814 | 116250 | 0.0246 | - | | 9.2854 | 116300 | 0.0 | - | | 9.2894 | 116350 | 0.0 | - | | 9.2934 | 116400 | 0.0247 | - | | 9.2974 | 116450 | 0.0245 | - | | 9.3014 | 116500 | 0.0 | - | | 9.3054 | 116550 | 0.0 | - | | 9.3094 | 116600 | 0.0 | - | | 9.3134 | 116650 | 0.0244 | - | | 9.3174 | 116700 | 0.0001 | - | | 9.3214 | 116750 | 0.0 | - | | 9.3253 | 116800 | 0.0001 | - | | 9.3293 | 116850 | 0.0232 | - | | 9.3333 | 116900 | 0.0192 | - | | 9.3373 | 116950 | 0.0246 | - | | 9.3413 | 117000 | 0.0 | - | | 9.3453 | 117050 | 0.0005 | - | | 9.3493 | 117100 | 0.0007 | - | | 9.3533 | 117150 | 0.0002 | - | | 9.3573 | 117200 | 0.0001 | - | | 9.3613 | 117250 | 0.0244 | - | | 9.3653 | 117300 | 0.0002 | - | | 9.3693 | 117350 | 0.0188 | - | | 9.3733 | 117400 | 0.0001 | - | | 9.3772 | 117450 | 0.0003 | - | | 9.3812 | 117500 | 0.001 | - | | 9.3852 | 117550 | 0.0 | - | | 9.3892 | 117600 | 0.0001 | - | | 9.3932 | 117650 | 0.0001 | - | | 9.3972 | 117700 | 0.0003 | - | | 9.4012 | 117750 | 0.0029 | - | | 9.4052 | 117800 | 0.0003 | - | | 9.4092 | 117850 | 0.0026 | - | | 9.4132 | 117900 | 0.0019 | - | | 9.4172 | 117950 | 0.0002 | - | | 9.4212 | 118000 | 0.0007 | - | | 9.4251 | 118050 | 0.0 | - | | 9.4291 | 118100 | 0.0019 | - | | 9.4331 | 118150 | 0.004 | - | | 9.4371 | 118200 | 0.001 | - | | 9.4411 | 118250 | 0.0016 | - | | 9.4451 | 118300 | 0.0028 | - | | 9.4491 | 118350 | 0.0001 | - | | 9.4531 | 118400 | 0.0 | - | | 9.4571 | 118450 | 0.0105 | - | | 9.4611 | 118500 | 0.0013 | - | | 9.4651 | 118550 | 0.0 | - | | 9.4691 | 118600 | 0.0221 | - | | 9.4731 | 118650 | 0.0001 | - | | 9.4770 | 118700 | 0.0008 | - | | 9.4810 | 118750 | 0.0001 | - | | 9.4850 | 118800 | 0.0214 | - | | 9.4890 | 118850 | 0.0001 | - | | 9.4930 | 118900 | 0.0018 | - | | 9.4970 | 118950 | 0.0011 | - | | 9.5010 | 119000 | 0.0001 | - | | 9.5050 | 119050 | 0.0009 | - | | 9.5090 | 119100 | 0.0004 | - | | 9.5130 | 119150 | 0.0004 | - | | 9.5170 | 119200 | 0.0034 | - | | 9.5210 | 119250 | 0.0016 | - | | 9.5250 | 119300 | 0.0006 | - | | 9.5289 | 119350 | 0.0 | - | | 9.5329 | 119400 | 0.0001 | - | | 9.5369 | 119450 | 0.0041 | - | | 9.5409 | 119500 | 0.0029 | - | | 9.5449 | 119550 | 0.0001 | - | | 9.5489 | 119600 | 0.0189 | - | | 9.5529 | 119650 | 0.0001 | - | | 9.5569 | 119700 | 0.0 | - | | 9.5609 | 119750 | 0.0 | - | | 9.5649 | 119800 | 0.0042 | - | | 9.5689 | 119850 | 0.0009 | - | | 9.5729 | 119900 | 0.0 | - | | 9.5768 | 119950 | 0.0 | - | | 9.5808 | 120000 | 0.0 | - | | 9.5848 | 120050 | 0.0007 | - | | 9.5888 | 120100 | 0.0009 | - | | 9.5928 | 120150 | 0.0006 | - | | 9.5968 | 120200 | 0.0001 | - | | 9.6008 | 120250 | 0.0001 | - | | 9.6048 | 120300 | 0.0007 | - | | 9.6088 | 120350 | 0.0001 | - | | 9.6128 | 120400 | 0.0025 | - | | 9.6168 | 120450 | 0.0136 | - | | 9.6208 | 120500 | 0.0011 | - | | 9.6248 | 120550 | 0.002 | - | | 9.6287 | 120600 | 0.001 | - | | 9.6327 | 120650 | 0.0008 | - | | 9.6367 | 120700 | 0.0298 | - | | 9.6407 | 120750 | 0.009 | - | | 9.6447 | 120800 | 0.0042 | - | | 9.6487 | 120850 | 0.0011 | - | | 9.6527 | 120900 | 0.0089 | - | | 9.6567 | 120950 | 0.0054 | - | | 9.6607 | 121000 | 0.0019 | - | | 9.6647 | 121050 | 0.0006 | - | | 9.6687 | 121100 | 0.0 | - | | 9.6727 | 121150 | 0.0 | - | | 9.6766 | 121200 | 0.0001 | - | | 9.6806 | 121250 | 0.0001 | - | | 9.6846 | 121300 | 0.0 | - | | 9.6886 | 121350 | 0.0128 | - | | 9.6926 | 121400 | 0.0 | - | | 9.6966 | 121450 | 0.0001 | - | | 9.7006 | 121500 | 0.0 | - | | 9.7046 | 121550 | 0.0007 | - | | 9.7086 | 121600 | 0.0001 | - | | 9.7126 | 121650 | 0.0001 | - | | 9.7166 | 121700 | 0.0021 | - | | 9.7206 | 121750 | 0.0001 | - | | 9.7246 | 121800 | 0.0207 | - | | 9.7285 | 121850 | 0.0001 | - | | 9.7325 | 121900 | 0.0032 | - | | 9.7365 | 121950 | 0.0008 | - | | 9.7405 | 122000 | 0.0038 | - | | 9.7445 | 122050 | 0.0005 | - | | 9.7485 | 122100 | 0.0002 | - | | 9.7525 | 122150 | 0.0005 | - | | 9.7565 | 122200 | 0.0043 | - | | 9.7605 | 122250 | 0.0003 | - | | 9.7645 | 122300 | 0.021 | - | | 9.7685 | 122350 | 0.0128 | - | | 9.7725 | 122400 | 0.0032 | - | | 9.7764 | 122450 | 0.0001 | - | | 9.7804 | 122500 | 0.0 | - | | 9.7844 | 122550 | 0.0119 | - | | 9.7884 | 122600 | 0.0 | - | | 9.7924 | 122650 | 0.0 | - | | 9.7964 | 122700 | 0.0 | - | | 9.8004 | 122750 | 0.0092 | - | | 9.8044 | 122800 | 0.0001 | - | | 9.8084 | 122850 | 0.0008 | - | | 9.8124 | 122900 | 0.0009 | - | | 9.8164 | 122950 | 0.0021 | - | | 9.8204 | 123000 | 0.0 | - | | 9.8244 | 123050 | 0.0174 | - | | 9.8283 | 123100 | 0.0001 | - | | 9.8323 | 123150 | 0.0095 | - | | 9.8363 | 123200 | 0.0183 | - | | 9.8403 | 123250 | 0.0001 | - | | 9.8443 | 123300 | 0.0002 | - | | 9.8483 | 123350 | 0.0 | - | | 9.8523 | 123400 | 0.0004 | - | | 9.8563 | 123450 | 0.0 | - | | 9.8603 | 123500 | 0.0001 | - | | 9.8643 | 123550 | 0.0028 | - | | 9.8683 | 123600 | 0.0 | - | | 9.8723 | 123650 | 0.0001 | - | | 9.8762 | 123700 | 0.0 | - | | 9.8802 | 123750 | 0.0004 | - | | 9.8842 | 123800 | 0.0035 | - | | 9.8882 | 123850 | 0.0001 | - | | 9.8922 | 123900 | 0.0 | - | | 9.8962 | 123950 | 0.0001 | - | | 9.9002 | 124000 | 0.0038 | - | | 9.9042 | 124050 | 0.0028 | - | | 9.9082 | 124100 | 0.0002 | - | | 9.9122 | 124150 | 0.0001 | - | | 9.9162 | 124200 | 0.0 | - | | 9.9202 | 124250 | 0.0005 | - | | 9.9242 | 124300 | 0.016 | - | | 9.9281 | 124350 | 0.0001 | - | | 9.9321 | 124400 | 0.0001 | - | | 9.9361 | 124450 | 0.0 | - | | 9.9401 | 124500 | 0.0009 | - | | 9.9441 | 124550 | 0.0 | - | | 9.9481 | 124600 | 0.0015 | - | | 9.9521 | 124650 | 0.0 | - | | 9.9561 | 124700 | 0.0 | - | | 9.9601 | 124750 | 0.0002 | - | | 9.9641 | 124800 | 0.0 | - | | 9.9681 | 124850 | 0.0028 | - | | 9.9721 | 124900 | 0.0004 | - | | 9.9760 | 124950 | 0.014 | - | | 9.9800 | 125000 | 0.0138 | - | | 9.9840 | 125050 | 0.0008 | - | | 9.9880 | 125100 | 0.0001 | - | | 9.9920 | 125150 | 0.0 | - | | 9.9960 | 125200 | 0.0136 | - | | 10.0 | 125250 | 0.0182 | 0.0777 | * The bold row denotes the saved checkpoint. ### Framework Versions - Python: 3.10.14 - SetFit: 1.0.3 - Sentence Transformers: 3.0.1 - Transformers: 4.39.0 - PyTorch: 2.4.0 - Datasets: 2.21.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit with sentence-transformers/paraphrase-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:-----------|:-----------------------------------------------------------------------------------------------------------------| | subject | <ul><li>'vidourle river (france)'</li><li>'knockout kings 2000 (game)'</li><li>'social practice (art)'</li></ul> | | genre/form | <ul><li>'hadith stories'</li><li>'discographies'</li><li>'dance drama'</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.9556 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("ellarose/test-trainer-alternate") # Run inference preds = model("prison fiction") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:-------|:----| | Word count | 1 | 2.415 | 10 | | Label | Training Sample Count | |:-----------|:----------------------| | subject | 500 | | genre/form | 500 | ### Training Hyperparameters - batch_size: (40, 40) - num_epochs: (10, 10) - max_steps: -1 - sampling_strategy: oversampling - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: True ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:-------:|:---------:|:-------------:|:---------------:| | 0.0001 | 1 | 0.3379 | - | | 0.0040 | 50 | 0.3311 | - | | 0.0080 | 100 | 0.3642 | - | | 0.0120 | 150 | 0.3077 | - | | 0.0160 | 200 | 0.2542 | - | | 0.0200 | 250 | 0.233 | - | | 0.0240 | 300 | 0.23 | - | | 0.0279 | 350 | 0.2247 | - | | 0.0319 | 400 | 0.2009 | - | | 0.0359 | 450 | 0.2354 | - | | 0.0399 | 500 | 0.1823 | - | | 0.0439 | 550 | 0.2048 | - | | 0.0479 | 600 | 0.1546 | - | | 0.0519 | 650 | 0.1363 | - | | 0.0559 | 700 | 0.1031 | - | | 0.0599 | 750 | 0.0668 | - | | 0.0639 | 800 | 0.1156 | - | | 0.0679 | 850 | 0.0435 | - | | 0.0719 | 900 | 0.0495 | - | | 0.0758 | 950 | 0.046 | - | | 0.0798 | 1000 | 0.0424 | - | | 0.0838 | 1050 | 0.1312 | - | | 0.0878 | 1100 | 0.0246 | - | | 0.0918 | 1150 | 0.0273 | - | | 0.0958 | 1200 | 0.0075 | - | | 0.0998 | 1250 | 0.0203 | - | | 0.1038 | 1300 | 0.0073 | - | | 0.1078 | 1350 | 0.0328 | - | | 0.1118 | 1400 | 0.0274 | - | | 0.1158 | 1450 | 0.0042 | - | | 0.1198 | 1500 | 0.0494 | - | | 0.1238 | 1550 | 0.0413 | - | | 0.1277 | 1600 | 0.0036 | - | | 0.1317 | 1650 | 0.0329 | - | | 0.1357 | 1700 | 0.0168 | - | | 0.1397 | 1750 | 0.0028 | - | | 0.1437 | 1800 | 0.0227 | - | | 0.1477 | 1850 | 0.002 | - | | 0.1517 | 1900 | 0.0121 | - | | 0.1557 | 1950 | 0.0018 | - | | 0.1597 | 2000 | 0.0019 | - | | 0.1637 | 2050 | 0.001 | - | | 0.1677 | 2100 | 0.0009 | - | | 0.1717 | 2150 | 0.0012 | - | | 0.1756 | 2200 | 0.0007 | - | | 0.1796 | 2250 | 0.001 | - | | 0.1836 | 2300 | 0.0008 | - | | 0.1876 | 2350 | 0.0009 | - | | 0.1916 | 2400 | 0.001 | - | | 0.1956 | 2450 | 0.0009 | - | | 0.1996 | 2500 | 0.0247 | - | | 0.2036 | 2550 | 0.0007 | - | | 0.2076 | 2600 | 0.0008 | - | | 0.2116 | 2650 | 0.0008 | - | | 0.2156 | 2700 | 0.0006 | - | | 0.2196 | 2750 | 0.0023 | - | | 0.2236 | 2800 | 0.0007 | - | | 0.2275 | 2850 | 0.0004 | - | | 0.2315 | 2900 | 0.0054 | - | | 0.2355 | 2950 | 0.0007 | - | | 0.2395 | 3000 | 0.0004 | - | | 0.2435 | 3050 | 0.0007 | - | | 0.2475 | 3100 | 0.0244 | - | | 0.2515 | 3150 | 0.0243 | - | | 0.2555 | 3200 | 0.0005 | - | | 0.2595 | 3250 | 0.0004 | - | | 0.2635 | 3300 | 0.0004 | - | | 0.2675 | 3350 | 0.0004 | - | | 0.2715 | 3400 | 0.0004 | - | | 0.2754 | 3450 | 0.0482 | - | | 0.2794 | 3500 | 0.0004 | - | | 0.2834 | 3550 | 0.0005 | - | | 0.2874 | 3600 | 0.0005 | - | | 0.2914 | 3650 | 0.0007 | - | | 0.2954 | 3700 | 0.0063 | - | | 0.2994 | 3750 | 0.0043 | - | | 0.3034 | 3800 | 0.0005 | - | | 0.3074 | 3850 | 0.0366 | - | | 0.3114 | 3900 | 0.0245 | - | | 0.3154 | 3950 | 0.0242 | - | | 0.3194 | 4000 | 0.0003 | - | | 0.3234 | 4050 | 0.0007 | - | | 0.3273 | 4100 | 0.0123 | - | | 0.3313 | 4150 | 0.0004 | - | | 0.3353 | 4200 | 0.0007 | - | | 0.3393 | 4250 | 0.0238 | - | | 0.3433 | 4300 | 0.0002 | - | | 0.3473 | 4350 | 0.0238 | - | | 0.3513 | 4400 | 0.0003 | - | | 0.3553 | 4450 | 0.0224 | - | | 0.3593 | 4500 | 0.0006 | - | | 0.3633 | 4550 | 0.0005 | - | | 0.3673 | 4600 | 0.0004 | - | | 0.3713 | 4650 | 0.0025 | - | | 0.3752 | 4700 | 0.0003 | - | | 0.3792 | 4750 | 0.0218 | - | | 0.3832 | 4800 | 0.001 | - | | 0.3872 | 4850 | 0.0004 | - | | 0.3912 | 4900 | 0.0004 | - | | 0.3952 | 4950 | 0.0161 | - | | 0.3992 | 5000 | 0.0008 | - | | 0.4032 | 5050 | 0.0024 | - | | 0.4072 | 5100 | 0.0003 | - | | 0.4112 | 5150 | 0.0002 | - | | 0.4152 | 5200 | 0.0005 | - | | 0.4192 | 5250 | 0.0021 | - | | 0.4232 | 5300 | 0.0235 | - | | 0.4271 | 5350 | 0.0035 | - | | 0.4311 | 5400 | 0.0007 | - | | 0.4351 | 5450 | 0.0007 | - | | 0.4391 | 5500 | 0.0217 | - | | 0.4431 | 5550 | 0.0006 | - | | 0.4471 | 5600 | 0.0054 | - | | 0.4511 | 5650 | 0.002 | - | | 0.4551 | 5700 | 0.0013 | - | | 0.4591 | 5750 | 0.0026 | - | | 0.4631 | 5800 | 0.0051 | - | | 0.4671 | 5850 | 0.0003 | - | | 0.4711 | 5900 | 0.0003 | - | | 0.4750 | 5950 | 0.0119 | - | | 0.4790 | 6000 | 0.0011 | - | | 0.4830 | 6050 | 0.0253 | - | | 0.4870 | 6100 | 0.0244 | - | | 0.4910 | 6150 | 0.0002 | - | | 0.4950 | 6200 | 0.0002 | - | | 0.4990 | 6250 | 0.0002 | - | | 0.5030 | 6300 | 0.0167 | - | | 0.5070 | 6350 | 0.0002 | - | | 0.5110 | 6400 | 0.0003 | - | | 0.5150 | 6450 | 0.0012 | - | | 0.5190 | 6500 | 0.003 | - | | 0.5230 | 6550 | 0.0003 | - | | 0.5269 | 6600 | 0.0003 | - | | 0.5309 | 6650 | 0.0006 | - | | 0.5349 | 6700 | 0.0026 | - | | 0.5389 | 6750 | 0.0004 | - | | 0.5429 | 6800 | 0.0001 | - | | 0.5469 | 6850 | 0.0002 | - | | 0.5509 | 6900 | 0.0003 | - | | 0.5549 | 6950 | 0.0028 | - | | 0.5589 | 7000 | 0.0022 | - | | 0.5629 | 7050 | 0.0007 | - | | 0.5669 | 7100 | 0.0004 | - | | 0.5709 | 7150 | 0.0002 | - | | 0.5749 | 7200 | 0.0001 | - | | 0.5788 | 7250 | 0.0122 | - | | 0.5828 | 7300 | 0.0017 | - | | 0.5868 | 7350 | 0.0001 | - | | 0.5908 | 7400 | 0.0002 | - | | 0.5948 | 7450 | 0.0001 | - | | 0.5988 | 7500 | 0.0003 | - | | 0.6028 | 7550 | 0.0011 | - | | 0.6068 | 7600 | 0.0002 | - | | 0.6108 | 7650 | 0.0003 | - | | 0.6148 | 7700 | 0.0001 | - | | 0.6188 | 7750 | 0.0001 | - | | 0.6228 | 7800 | 0.0001 | - | | 0.6267 | 7850 | 0.0002 | - | | 0.6307 | 7900 | 0.0149 | - | | 0.6347 | 7950 | 0.0106 | - | | 0.6387 | 8000 | 0.0015 | - | | 0.6427 | 8050 | 0.0001 | - | | 0.6467 | 8100 | 0.0009 | - | | 0.6507 | 8150 | 0.0015 | - | | 0.6547 | 8200 | 0.0306 | - | | 0.6587 | 8250 | 0.0054 | - | | 0.6627 | 8300 | 0.0011 | - | | 0.6667 | 8350 | 0.0003 | - | | 0.6707 | 8400 | 0.0001 | - | | 0.6747 | 8450 | 0.0024 | - | | 0.6786 | 8500 | 0.0001 | - | | 0.6826 | 8550 | 0.0001 | - | | 0.6866 | 8600 | 0.0001 | - | | 0.6906 | 8650 | 0.0072 | - | | 0.6946 | 8700 | 0.0001 | - | | 0.6986 | 8750 | 0.0002 | - | | 0.7026 | 8800 | 0.0001 | - | | 0.7066 | 8850 | 0.0243 | - | | 0.7106 | 8900 | 0.0001 | - | | 0.7146 | 8950 | 0.0001 | - | | 0.7186 | 9000 | 0.0001 | - | | 0.7226 | 9050 | 0.0001 | - | | 0.7265 | 9100 | 0.0001 | - | | 0.7305 | 9150 | 0.0001 | - | | 0.7345 | 9200 | 0.0008 | - | | 0.7385 | 9250 | 0.021 | - | | 0.7425 | 9300 | 0.0229 | - | | 0.7465 | 9350 | 0.0001 | - | | 0.7505 | 9400 | 0.002 | - | | 0.7545 | 9450 | 0.0008 | - | | 0.7585 | 9500 | 0.0225 | - | | 0.7625 | 9550 | 0.0001 | - | | 0.7665 | 9600 | 0.0041 | - | | 0.7705 | 9650 | 0.0012 | - | | 0.7745 | 9700 | 0.0034 | - | | 0.7784 | 9750 | 0.0011 | - | | 0.7824 | 9800 | 0.0008 | - | | 0.7864 | 9850 | 0.0101 | - | | 0.7904 | 9900 | 0.0039 | - | | 0.7944 | 9950 | 0.0001 | - | | 0.7984 | 10000 | 0.0005 | - | | 0.8024 | 10050 | 0.0011 | - | | 0.8064 | 10100 | 0.0025 | - | | 0.8104 | 10150 | 0.0001 | - | | 0.8144 | 10200 | 0.0003 | - | | 0.8184 | 10250 | 0.0002 | - | | 0.8224 | 10300 | 0.0002 | - | | 0.8263 | 10350 | 0.0001 | - | | 0.8303 | 10400 | 0.0007 | - | | 0.8343 | 10450 | 0.0005 | - | | 0.8383 | 10500 | 0.0005 | - | | 0.8423 | 10550 | 0.0001 | - | | 0.8463 | 10600 | 0.0206 | - | | 0.8503 | 10650 | 0.0023 | - | | 0.8543 | 10700 | 0.0001 | - | | 0.8583 | 10750 | 0.0001 | - | | 0.8623 | 10800 | 0.0001 | - | | 0.8663 | 10850 | 0.0001 | - | | 0.8703 | 10900 | 0.0001 | - | | 0.8743 | 10950 | 0.0002 | - | | 0.8782 | 11000 | 0.0007 | - | | 0.8822 | 11050 | 0.0025 | - | | 0.8862 | 11100 | 0.0001 | - | | 0.8902 | 11150 | 0.0001 | - | | 0.8942 | 11200 | 0.0001 | - | | 0.8982 | 11250 | 0.0001 | - | | 0.9022 | 11300 | 0.0047 | - | | 0.9062 | 11350 | 0.0001 | - | | 0.9102 | 11400 | 0.0002 | - | | 0.9142 | 11450 | 0.0001 | - | | 0.9182 | 11500 | 0.0013 | - | | 0.9222 | 11550 | 0.0011 | - | | 0.9261 | 11600 | 0.0001 | - | | 0.9301 | 11650 | 0.001 | - | | 0.9341 | 11700 | 0.0145 | - | | 0.9381 | 11750 | 0.0001 | - | | 0.9421 | 11800 | 0.0156 | - | | 0.9461 | 11850 | 0.0001 | - | | 0.9501 | 11900 | 0.0016 | - | | 0.9541 | 11950 | 0.0001 | - | | 0.9581 | 12000 | 0.0011 | - | | 0.9621 | 12050 | 0.002 | - | | 0.9661 | 12100 | 0.0001 | - | | 0.9701 | 12150 | 0.0004 | - | | 0.9741 | 12200 | 0.0007 | - | | 0.9780 | 12250 | 0.0014 | - | | 0.9820 | 12300 | 0.0255 | - | | 0.9860 | 12350 | 0.004 | - | | 0.9900 | 12400 | 0.002 | - | | 0.9940 | 12450 | 0.0146 | - | | 0.9980 | 12500 | 0.0008 | - | | 1.0 | 12525 | - | 0.0588 | | 1.0020 | 12550 | 0.0175 | - | | 1.0060 | 12600 | 0.0001 | - | | 1.0100 | 12650 | 0.0006 | - | | 1.0140 | 12700 | 0.0002 | - | | 1.0180 | 12750 | 0.0149 | - | | 1.0220 | 12800 | 0.0001 | - | | 1.0259 | 12850 | 0.0001 | - | | 1.0299 | 12900 | 0.0001 | - | | 1.0339 | 12950 | 0.003 | - | | 1.0379 | 13000 | 0.0003 | - | | 1.0419 | 13050 | 0.0254 | - | | 1.0459 | 13100 | 0.0001 | - | | 1.0499 | 13150 | 0.0001 | - | | 1.0539 | 13200 | 0.0001 | - | | 1.0579 | 13250 | 0.0001 | - | | 1.0619 | 13300 | 0.0003 | - | | 1.0659 | 13350 | 0.0244 | - | | 1.0699 | 13400 | 0.0001 | - | | 1.0739 | 13450 | 0.0001 | - | | 1.0778 | 13500 | 0.0175 | - | | 1.0818 | 13550 | 0.0002 | - | | 1.0858 | 13600 | 0.0002 | - | | 1.0898 | 13650 | 0.0001 | - | | 1.0938 | 13700 | 0.0001 | - | | 1.0978 | 13750 | 0.0002 | - | | 1.1018 | 13800 | 0.0001 | - | | 1.1058 | 13850 | 0.0001 | - | | 1.1098 | 13900 | 0.0001 | - | | 1.1138 | 13950 | 0.0005 | - | | 1.1178 | 14000 | 0.0001 | - | | 1.1218 | 14050 | 0.0001 | - | | 1.1257 | 14100 | 0.0002 | - | | 1.1297 | 14150 | 0.0001 | - | | 1.1337 | 14200 | 0.0002 | - | | 1.1377 | 14250 | 0.0008 | - | | 1.1417 | 14300 | 0.0001 | - | | 1.1457 | 14350 | 0.0001 | - | | 1.1497 | 14400 | 0.0013 | - | | 1.1537 | 14450 | 0.0001 | - | | 1.1577 | 14500 | 0.0001 | - | | 1.1617 | 14550 | 0.0004 | - | | 1.1657 | 14600 | 0.0001 | - | | 1.1697 | 14650 | 0.0001 | - | | 1.1737 | 14700 | 0.001 | - | | 1.1776 | 14750 | 0.0156 | - | | 1.1816 | 14800 | 0.0001 | - | | 1.1856 | 14850 | 0.0003 | - | | 1.1896 | 14900 | 0.0045 | - | | 1.1936 | 14950 | 0.0011 | - | | 1.1976 | 15000 | 0.0015 | - | | 1.2016 | 15050 | 0.0017 | - | | 1.2056 | 15100 | 0.017 | - | | 1.2096 | 15150 | 0.0001 | - | | 1.2136 | 15200 | 0.0011 | - | | 1.2176 | 15250 | 0.0003 | - | | 1.2216 | 15300 | 0.0001 | - | | 1.2255 | 15350 | 0.0001 | - | | 1.2295 | 15400 | 0.0005 | - | | 1.2335 | 15450 | 0.0144 | - | | 1.2375 | 15500 | 0.0001 | - | | 1.2415 | 15550 | 0.0001 | - | | 1.2455 | 15600 | 0.0191 | - | | 1.2495 | 15650 | 0.0001 | - | | 1.2535 | 15700 | 0.0001 | - | | 1.2575 | 15750 | 0.0001 | - | | 1.2615 | 15800 | 0.0001 | - | | 1.2655 | 15850 | 0.0008 | - | | 1.2695 | 15900 | 0.0005 | - | | 1.2735 | 15950 | 0.013 | - | | 1.2774 | 16000 | 0.0001 | - | | 1.2814 | 16050 | 0.0201 | - | | 1.2854 | 16100 | 0.0008 | - | | 1.2894 | 16150 | 0.0001 | - | | 1.2934 | 16200 | 0.0001 | - | | 1.2974 | 16250 | 0.0001 | - | | 1.3014 | 16300 | 0.0001 | - | | 1.3054 | 16350 | 0.0 | - | | 1.3094 | 16400 | 0.0118 | - | | 1.3134 | 16450 | 0.0253 | - | | 1.3174 | 16500 | 0.0001 | - | | 1.3214 | 16550 | 0.0012 | - | | 1.3253 | 16600 | 0.0017 | - | | 1.3293 | 16650 | 0.0001 | - | | 1.3333 | 16700 | 0.0094 | - | | 1.3373 | 16750 | 0.0001 | - | | 1.3413 | 16800 | 0.0243 | - | | 1.3453 | 16850 | 0.049 | - | | 1.3493 | 16900 | 0.0001 | - | | 1.3533 | 16950 | 0.0247 | - | | 1.3573 | 17000 | 0.0001 | - | | 1.3613 | 17050 | 0.0001 | - | | 1.3653 | 17100 | 0.0001 | - | | 1.3693 | 17150 | 0.0246 | - | | 1.3733 | 17200 | 0.0001 | - | | 1.3772 | 17250 | 0.0245 | - | | 1.3812 | 17300 | 0.0012 | - | | 1.3852 | 17350 | 0.0001 | - | | 1.3892 | 17400 | 0.0001 | - | | 1.3932 | 17450 | 0.0001 | - | | 1.3972 | 17500 | 0.0001 | - | | 1.4012 | 17550 | 0.0001 | - | | 1.4052 | 17600 | 0.0002 | - | | 1.4092 | 17650 | 0.0001 | - | | 1.4132 | 17700 | 0.0001 | - | | 1.4172 | 17750 | 0.0039 | - | | 1.4212 | 17800 | 0.0012 | - | | 1.4251 | 17850 | 0.0043 | - | | 1.4291 | 17900 | 0.0001 | - | | 1.4331 | 17950 | 0.0001 | - | | 1.4371 | 18000 | 0.0001 | - | | 1.4411 | 18050 | 0.0001 | - | | 1.4451 | 18100 | 0.0043 | - | | 1.4491 | 18150 | 0.0023 | - | | 1.4531 | 18200 | 0.0001 | - | | 1.4571 | 18250 | 0.0002 | - | | 1.4611 | 18300 | 0.0001 | - | | 1.4651 | 18350 | 0.0001 | - | | 1.4691 | 18400 | 0.0001 | - | | 1.4731 | 18450 | 0.0099 | - | | 1.4770 | 18500 | 0.0001 | - | | 1.4810 | 18550 | 0.0001 | - | | 1.4850 | 18600 | 0.03 | - | | 1.4890 | 18650 | 0.0001 | - | | 1.4930 | 18700 | 0.0014 | - | | 1.4970 | 18750 | 0.0 | - | | 1.5010 | 18800 | 0.0246 | - | | 1.5050 | 18850 | 0.0001 | - | | 1.5090 | 18900 | 0.0001 | - | | 1.5130 | 18950 | 0.0001 | - | | 1.5170 | 19000 | 0.0001 | - | | 1.5210 | 19050 | 0.0001 | - | | 1.5250 | 19100 | 0.0001 | - | | 1.5289 | 19150 | 0.0001 | - | | 1.5329 | 19200 | 0.0032 | - | | 1.5369 | 19250 | 0.0001 | - | | 1.5409 | 19300 | 0.0001 | - | | 1.5449 | 19350 | 0.0001 | - | | 1.5489 | 19400 | 0.025 | - | | 1.5529 | 19450 | 0.0028 | - | | 1.5569 | 19500 | 0.0028 | - | | 1.5609 | 19550 | 0.0001 | - | | 1.5649 | 19600 | 0.005 | - | | 1.5689 | 19650 | 0.0024 | - | | 1.5729 | 19700 | 0.0001 | - | | 1.5768 | 19750 | 0.0 | - | | 1.5808 | 19800 | 0.0001 | - | | 1.5848 | 19850 | 0.0001 | - | | 1.5888 | 19900 | 0.0001 | - | | 1.5928 | 19950 | 0.0001 | - | | 1.5968 | 20000 | 0.0001 | - | | 1.6008 | 20050 | 0.0001 | - | | 1.6048 | 20100 | 0.0001 | - | | 1.6088 | 20150 | 0.0001 | - | | 1.6128 | 20200 | 0.0 | - | | 1.6168 | 20250 | 0.0001 | - | | 1.6208 | 20300 | 0.0001 | - | | 1.6248 | 20350 | 0.0028 | - | | 1.6287 | 20400 | 0.0019 | - | | 1.6327 | 20450 | 0.0115 | - | | 1.6367 | 20500 | 0.0029 | - | | 1.6407 | 20550 | 0.0035 | - | | 1.6447 | 20600 | 0.0005 | - | | 1.6487 | 20650 | 0.0007 | - | | 1.6527 | 20700 | 0.0374 | - | | 1.6567 | 20750 | 0.0064 | - | | 1.6607 | 20800 | 0.004 | - | | 1.6647 | 20850 | 0.0009 | - | | 1.6687 | 20900 | 0.0 | - | | 1.6727 | 20950 | 0.0017 | - | | 1.6766 | 21000 | 0.0001 | - | | 1.6806 | 21050 | 0.0001 | - | | 1.6846 | 21100 | 0.0001 | - | | 1.6886 | 21150 | 0.0083 | - | | 1.6926 | 21200 | 0.0001 | - | | 1.6966 | 21250 | 0.0001 | - | | 1.7006 | 21300 | 0.0001 | - | | 1.7046 | 21350 | 0.0009 | - | | 1.7086 | 21400 | 0.0001 | - | | 1.7126 | 21450 | 0.0001 | - | | 1.7166 | 21500 | 0.0015 | - | | 1.7206 | 21550 | 0.0001 | - | | 1.7246 | 21600 | 0.0001 | - | | 1.7285 | 21650 | 0.0001 | - | | 1.7325 | 21700 | 0.0007 | - | | 1.7365 | 21750 | 0.0001 | - | | 1.7405 | 21800 | 0.0213 | - | | 1.7445 | 21850 | 0.0007 | - | | 1.7485 | 21900 | 0.0029 | - | | 1.7525 | 21950 | 0.0007 | - | | 1.7565 | 22000 | 0.0025 | - | | 1.7605 | 22050 | 0.0002 | - | | 1.7645 | 22100 | 0.0173 | - | | 1.7685 | 22150 | 0.0012 | - | | 1.7725 | 22200 | 0.0027 | - | | 1.7764 | 22250 | 0.0009 | - | | 1.7804 | 22300 | 0.0013 | - | | 1.7844 | 22350 | 0.024 | - | | 1.7884 | 22400 | 0.0043 | - | | 1.7924 | 22450 | 0.0001 | - | | 1.7964 | 22500 | 0.0001 | - | | 1.8004 | 22550 | 0.0109 | - | | 1.8044 | 22600 | 0.0001 | - | | 1.8084 | 22650 | 0.0002 | - | | 1.8124 | 22700 | 0.0246 | - | | 1.8164 | 22750 | 0.0001 | - | | 1.8204 | 22800 | 0.0002 | - | | 1.8244 | 22850 | 0.0248 | - | | 1.8283 | 22900 | 0.0001 | - | | 1.8323 | 22950 | 0.0119 | - | | 1.8363 | 23000 | 0.0254 | - | | 1.8403 | 23050 | 0.1233 | - | | 1.8443 | 23100 | 0.0003 | - | | 1.8483 | 23150 | 0.0001 | - | | 1.8523 | 23200 | 0.0 | - | | 1.8563 | 23250 | 0.0 | - | | 1.8603 | 23300 | 0.0001 | - | | 1.8643 | 23350 | 0.0001 | - | | 1.8683 | 23400 | 0.0001 | - | | 1.8723 | 23450 | 0.0001 | - | | 1.8762 | 23500 | 0.0001 | - | | 1.8802 | 23550 | 0.0017 | - | | 1.8842 | 23600 | 0.0 | - | | 1.8882 | 23650 | 0.0001 | - | | 1.8922 | 23700 | 0.0 | - | | 1.8962 | 23750 | 0.0001 | - | | 1.9002 | 23800 | 0.0055 | - | | 1.9042 | 23850 | 0.0001 | - | | 1.9082 | 23900 | 0.0003 | - | | 1.9122 | 23950 | 0.0002 | - | | 1.9162 | 24000 | 0.0007 | - | | 1.9202 | 24050 | 0.0011 | - | | 1.9242 | 24100 | 0.0001 | - | | 1.9281 | 24150 | 0.0001 | - | | 1.9321 | 24200 | 0.0001 | - | | 1.9361 | 24250 | 0.0001 | - | | 1.9401 | 24300 | 0.0192 | - | | 1.9441 | 24350 | 0.0001 | - | | 1.9481 | 24400 | 0.0008 | - | | 1.9521 | 24450 | 0.0 | - | | 1.9561 | 24500 | 0.0004 | - | | 1.9601 | 24550 | 0.0001 | - | | 1.9641 | 24600 | 0.0001 | - | | 1.9681 | 24650 | 0.0001 | - | | 1.9721 | 24700 | 0.0001 | - | | 1.9760 | 24750 | 0.0013 | - | | 1.9800 | 24800 | 0.0129 | - | | 1.9840 | 24850 | 0.0024 | - | | 1.9880 | 24900 | 0.0015 | - | | 1.9920 | 24950 | 0.0196 | - | | 1.9960 | 25000 | 0.0164 | - | | 2.0 | 25050 | 0.002 | 0.0769 | | 2.0040 | 25100 | 0.0001 | - | | 2.0080 | 25150 | 0.0156 | - | | 2.0120 | 25200 | 0.0 | - | | 2.0160 | 25250 | 0.0169 | - | | 2.0200 | 25300 | 0.0023 | - | | 2.0240 | 25350 | 0.0001 | - | | 2.0279 | 25400 | 0.0 | - | | 2.0319 | 25450 | 0.001 | - | | 2.0359 | 25500 | 0.0152 | - | | 2.0399 | 25550 | 0.0001 | - | | 2.0439 | 25600 | 0.001 | - | | 2.0479 | 25650 | 0.0001 | - | | 2.0519 | 25700 | 0.002 | - | | 2.0559 | 25750 | 0.0006 | - | | 2.0599 | 25800 | 0.0031 | - | | 2.0639 | 25850 | 0.0016 | - | | 2.0679 | 25900 | 0.0 | - | | 2.0719 | 25950 | 0.0001 | - | | 2.0758 | 26000 | 0.0001 | - | | 2.0798 | 26050 | 0.0159 | - | | 2.0838 | 26100 | 0.0005 | - | | 2.0878 | 26150 | 0.0001 | - | | 2.0918 | 26200 | 0.0001 | - | | 2.0958 | 26250 | 0.0006 | - | | 2.0998 | 26300 | 0.0008 | - | | 2.1038 | 26350 | 0.0 | - | | 2.1078 | 26400 | 0.0 | - | | 2.1118 | 26450 | 0.0183 | - | | 2.1158 | 26500 | 0.0001 | - | | 2.1198 | 26550 | 0.0167 | - | | 2.1238 | 26600 | 0.0001 | - | | 2.1277 | 26650 | 0.0014 | - | | 2.1317 | 26700 | 0.0003 | - | | 2.1357 | 26750 | 0.0014 | - | | 2.1397 | 26800 | 0.0001 | - | | 2.1437 | 26850 | 0.0001 | - | | 2.1477 | 26900 | 0.0011 | - | | 2.1517 | 26950 | 0.0 | - | | 2.1557 | 27000 | 0.0001 | - | | 2.1597 | 27050 | 0.0001 | - | | 2.1637 | 27100 | 0.0007 | - | | 2.1677 | 27150 | 0.0001 | - | | 2.1717 | 27200 | 0.0 | - | | 2.1756 | 27250 | 0.0001 | - | | 2.1796 | 27300 | 0.0005 | - | | 2.1836 | 27350 | 0.0 | - | | 2.1876 | 27400 | 0.0002 | - | | 2.1916 | 27450 | 0.0001 | - | | 2.1956 | 27500 | 0.0001 | - | | 2.1996 | 27550 | 0.025 | - | | 2.2036 | 27600 | 0.0001 | - | | 2.2076 | 27650 | 0.0001 | - | | 2.2116 | 27700 | 0.0001 | - | | 2.2156 | 27750 | 0.0001 | - | | 2.2196 | 27800 | 0.0001 | - | | 2.2236 | 27850 | 0.0001 | - | | 2.2275 | 27900 | 0.0001 | - | | 2.2315 | 27950 | 0.0 | - | | 2.2355 | 28000 | 0.0001 | - | | 2.2395 | 28050 | 0.0001 | - | | 2.2435 | 28100 | 0.0245 | - | | 2.2475 | 28150 | 0.0001 | - | | 2.2515 | 28200 | 0.0251 | - | | 2.2555 | 28250 | 0.0001 | - | | 2.2595 | 28300 | 0.0497 | - | | 2.2635 | 28350 | 0.0002 | - | | 2.2675 | 28400 | 0.0487 | - | | 2.2715 | 28450 | 0.0703 | - | | 2.2754 | 28500 | 0.0248 | - | | 2.2794 | 28550 | 0.0001 | - | | 2.2834 | 28600 | 0.0245 | - | | 2.2874 | 28650 | 0.0004 | - | | 2.2914 | 28700 | 0.0001 | - | | 2.2954 | 28750 | 0.0001 | - | | 2.2994 | 28800 | 0.0 | - | | 2.3034 | 28850 | 0.0002 | - | | 2.3074 | 28900 | 0.0489 | - | | 2.3114 | 28950 | 0.0245 | - | | 2.3154 | 29000 | 0.0002 | - | | 2.3194 | 29050 | 0.0001 | - | | 2.3234 | 29100 | 0.0001 | - | | 2.3273 | 29150 | 0.0247 | - | | 2.3313 | 29200 | 0.0003 | - | | 2.3353 | 29250 | 0.0001 | - | | 2.3393 | 29300 | 0.0001 | - | | 2.3433 | 29350 | 0.0246 | - | | 2.3473 | 29400 | 0.0246 | - | | 2.3513 | 29450 | 0.0246 | - | | 2.3553 | 29500 | 0.0001 | - | | 2.3593 | 29550 | 0.0001 | - | | 2.3633 | 29600 | 0.0001 | - | | 2.3673 | 29650 | 0.0246 | - | | 2.3713 | 29700 | 0.0 | - | | 2.3752 | 29750 | 0.0246 | - | | 2.3792 | 29800 | 0.0001 | - | | 2.3832 | 29850 | 0.0001 | - | | 2.3872 | 29900 | 0.0001 | - | | 2.3912 | 29950 | 0.0002 | - | | 2.3952 | 30000 | 0.0248 | - | | 2.3992 | 30050 | 0.0002 | - | | 2.4032 | 30100 | 0.0001 | - | | 2.4072 | 30150 | 0.0001 | - | | 2.4112 | 30200 | 0.0001 | - | | 2.4152 | 30250 | 0.0001 | - | | 2.4192 | 30300 | 0.0001 | - | | 2.4232 | 30350 | 0.0245 | - | | 2.4271 | 30400 | 0.0001 | - | | 2.4311 | 30450 | 0.0001 | - | | 2.4351 | 30500 | 0.0001 | - | | 2.4391 | 30550 | 0.0 | - | | 2.4431 | 30600 | 0.0001 | - | | 2.4471 | 30650 | 0.0001 | - | | 2.4511 | 30700 | 0.0001 | - | | 2.4551 | 30750 | 0.0001 | - | | 2.4591 | 30800 | 0.0001 | - | | 2.4631 | 30850 | 0.0002 | - | | 2.4671 | 30900 | 0.0001 | - | | 2.4711 | 30950 | 0.0245 | - | | 2.4750 | 31000 | 0.0001 | - | | 2.4790 | 31050 | 0.0249 | - | | 2.4830 | 31100 | 0.0246 | - | | 2.4870 | 31150 | 0.0001 | - | | 2.4910 | 31200 | 0.0246 | - | | 2.4950 | 31250 | 0.0001 | - | | 2.4990 | 31300 | 0.0247 | - | | 2.5030 | 31350 | 0.0001 | - | | 2.5070 | 31400 | 0.0001 | - | | 2.5110 | 31450 | 0.0003 | - | | 2.5150 | 31500 | 0.0002 | - | | 2.5190 | 31550 | 0.0002 | - | | 2.5230 | 31600 | 0.0001 | - | | 2.5269 | 31650 | 0.0001 | - | | 2.5309 | 31700 | 0.0003 | - | | 2.5349 | 31750 | 0.0001 | - | | 2.5389 | 31800 | 0.0001 | - | | 2.5429 | 31850 | 0.0001 | - | | 2.5469 | 31900 | 0.0001 | - | | 2.5509 | 31950 | 0.0493 | - | | 2.5549 | 32000 | 0.0001 | - | | 2.5589 | 32050 | 0.0001 | - | | 2.5629 | 32100 | 0.0003 | - | | 2.5669 | 32150 | 0.0001 | - | | 2.5709 | 32200 | 0.025 | - | | 2.5749 | 32250 | 0.0001 | - | | 2.5788 | 32300 | 0.0249 | - | | 2.5828 | 32350 | 0.0001 | - | | 2.5868 | 32400 | 0.0001 | - | | 2.5908 | 32450 | 0.0001 | - | | 2.5948 | 32500 | 0.0001 | - | | 2.5988 | 32550 | 0.0004 | - | | 2.6028 | 32600 | 0.0001 | - | | 2.6068 | 32650 | 0.0001 | - | | 2.6108 | 32700 | 0.0001 | - | | 2.6148 | 32750 | 0.0001 | - | | 2.6188 | 32800 | 0.0001 | - | | 2.6228 | 32850 | 0.0001 | - | | 2.6267 | 32900 | 0.0001 | - | | 2.6307 | 32950 | 0.0492 | - | | 2.6347 | 33000 | 0.0001 | - | | 2.6387 | 33050 | 0.0001 | - | | 2.6427 | 33100 | 0.0 | - | | 2.6467 | 33150 | 0.0001 | - | | 2.6507 | 33200 | 0.0247 | - | | 2.6547 | 33250 | 0.0001 | - | | 2.6587 | 33300 | 0.0001 | - | | 2.6627 | 33350 | 0.0001 | - | | 2.6667 | 33400 | 0.0001 | - | | 2.6707 | 33450 | 0.0001 | - | | 2.6747 | 33500 | 0.0001 | - | | 2.6786 | 33550 | 0.0001 | - | | 2.6826 | 33600 | 0.0001 | - | | 2.6866 | 33650 | 0.0002 | - | | 2.6906 | 33700 | 0.0001 | - | | 2.6946 | 33750 | 0.0001 | - | | 2.6986 | 33800 | 0.0001 | - | | 2.7026 | 33850 | 0.0001 | - | | 2.7066 | 33900 | 0.0254 | - | | 2.7106 | 33950 | 0.0001 | - | | 2.7146 | 34000 | 0.0001 | - | | 2.7186 | 34050 | 0.0001 | - | | 2.7226 | 34100 | 0.0001 | - | | 2.7265 | 34150 | 0.0001 | - | | 2.7305 | 34200 | 0.0001 | - | | 2.7345 | 34250 | 0.0002 | - | | 2.7385 | 34300 | 0.0498 | - | | 2.7425 | 34350 | 0.0001 | - | | 2.7465 | 34400 | 0.0001 | - | | 2.7505 | 34450 | 0.0001 | - | | 2.7545 | 34500 | 0.0001 | - | | 2.7585 | 34550 | 0.0248 | - | | 2.7625 | 34600 | 0.0 | - | | 2.7665 | 34650 | 0.0001 | - | | 2.7705 | 34700 | 0.0001 | - | | 2.7745 | 34750 | 0.0001 | - | | 2.7784 | 34800 | 0.0001 | - | | 2.7824 | 34850 | 0.0247 | - | | 2.7864 | 34900 | 0.0001 | - | | 2.7904 | 34950 | 0.0001 | - | | 2.7944 | 35000 | 0.0001 | - | | 2.7984 | 35050 | 0.0001 | - | | 2.8024 | 35100 | 0.0001 | - | | 2.8064 | 35150 | 0.0001 | - | | 2.8104 | 35200 | 0.0001 | - | | 2.8144 | 35250 | 0.0001 | - | | 2.8184 | 35300 | 0.0001 | - | | 2.8224 | 35350 | 0.0001 | - | | 2.8263 | 35400 | 0.0 | - | | 2.8303 | 35450 | 0.0001 | - | | 2.8343 | 35500 | 0.0 | - | | 2.8383 | 35550 | 0.0 | - | | 2.8423 | 35600 | 0.0001 | - | | 2.8463 | 35650 | 0.0254 | - | | 2.8503 | 35700 | 0.0001 | - | | 2.8543 | 35750 | 0.0001 | - | | 2.8583 | 35800 | 0.0001 | - | | 2.8623 | 35850 | 0.0 | - | | 2.8663 | 35900 | 0.0001 | - | | 2.8703 | 35950 | 0.0001 | - | | 2.8743 | 36000 | 0.0002 | - | | 2.8782 | 36050 | 0.0001 | - | | 2.8822 | 36100 | 0.0001 | - | | 2.8862 | 36150 | 0.0 | - | | 2.8902 | 36200 | 0.0001 | - | | 2.8942 | 36250 | 0.0001 | - | | 2.8982 | 36300 | 0.0001 | - | | 2.9022 | 36350 | 0.0001 | - | | 2.9062 | 36400 | 0.0001 | - | | 2.9102 | 36450 | 0.0001 | - | | 2.9142 | 36500 | 0.0001 | - | | 2.9182 | 36550 | 0.0001 | - | | 2.9222 | 36600 | 0.0001 | - | | 2.9261 | 36650 | 0.0002 | - | | 2.9301 | 36700 | 0.0001 | - | | 2.9341 | 36750 | 0.0248 | - | | 2.9381 | 36800 | 0.0245 | - | | 2.9421 | 36850 | 0.0001 | - | | 2.9461 | 36900 | 0.0 | - | | 2.9501 | 36950 | 0.0001 | - | | 2.9541 | 37000 | 0.0001 | - | | 2.9581 | 37050 | 0.0001 | - | | 2.9621 | 37100 | 0.0001 | - | | 2.9661 | 37150 | 0.0001 | - | | 2.9701 | 37200 | 0.0001 | - | | 2.9741 | 37250 | 0.0001 | - | | 2.9780 | 37300 | 0.0 | - | | 2.9820 | 37350 | 0.0503 | - | | 2.9860 | 37400 | 0.0001 | - | | 2.9900 | 37450 | 0.0246 | - | | 2.9940 | 37500 | 0.0001 | - | | 2.9980 | 37550 | 0.0001 | - | | 3.0 | 37575 | - | 0.0396 | | 3.0020 | 37600 | 0.0248 | - | | 3.0060 | 37650 | 0.0001 | - | | 3.0100 | 37700 | 0.0001 | - | | 3.0140 | 37750 | 0.0245 | - | | 3.0180 | 37800 | 0.0002 | - | | 3.0220 | 37850 | 0.0 | - | | 3.0259 | 37900 | 0.0001 | - | | 3.0299 | 37950 | 0.0001 | - | | 3.0339 | 38000 | 0.0003 | - | | 3.0379 | 38050 | 0.0001 | - | | 3.0419 | 38100 | 0.0001 | - | | 3.0459 | 38150 | 0.0001 | - | | 3.0499 | 38200 | 0.0001 | - | | 3.0539 | 38250 | 0.0001 | - | | 3.0579 | 38300 | 0.0001 | - | | 3.0619 | 38350 | 0.0002 | - | | 3.0659 | 38400 | 0.0251 | - | | 3.0699 | 38450 | 0.0001 | - | | 3.0739 | 38500 | 0.0001 | - | | 3.0778 | 38550 | 0.0001 | - | | 3.0818 | 38600 | 0.0001 | - | | 3.0858 | 38650 | 0.0001 | - | | 3.0898 | 38700 | 0.0001 | - | | 3.0938 | 38750 | 0.0001 | - | | 3.0978 | 38800 | 0.0001 | - | | 3.1018 | 38850 | 0.0001 | - | | 3.1058 | 38900 | 0.0001 | - | | 3.1098 | 38950 | 0.0 | - | | 3.1138 | 39000 | 0.0001 | - | | 3.1178 | 39050 | 0.0001 | - | | 3.1218 | 39100 | 0.0001 | - | | 3.1257 | 39150 | 0.0001 | - | | 3.1297 | 39200 | 0.0001 | - | | 3.1337 | 39250 | 0.0001 | - | | 3.1377 | 39300 | 0.0 | - | | 3.1417 | 39350 | 0.0001 | - | | 3.1457 | 39400 | 0.0002 | - | | 3.1497 | 39450 | 0.0001 | - | | 3.1537 | 39500 | 0.0002 | - | | 3.1577 | 39550 | 0.0001 | - | | 3.1617 | 39600 | 0.0717 | - | | 3.1657 | 39650 | 0.0001 | - | | 3.1697 | 39700 | 0.0001 | - | | 3.1737 | 39750 | 0.0004 | - | | 3.1776 | 39800 | 0.0244 | - | | 3.1816 | 39850 | 0.0001 | - | | 3.1856 | 39900 | 0.0239 | - | | 3.1896 | 39950 | 0.0245 | - | | 3.1936 | 40000 | 0.0245 | - | | 3.1976 | 40050 | 0.0001 | - | | 3.2016 | 40100 | 0.0184 | - | | 3.2056 | 40150 | 0.0246 | - | | 3.2096 | 40200 | 0.0001 | - | | 3.2136 | 40250 | 0.0001 | - | | 3.2176 | 40300 | 0.0001 | - | | 3.2216 | 40350 | 0.0001 | - | | 3.2255 | 40400 | 0.0001 | - | | 3.2295 | 40450 | 0.0002 | - | | 3.2335 | 40500 | 0.0248 | - | | 3.2375 | 40550 | 0.0001 | - | | 3.2415 | 40600 | 0.0244 | - | | 3.2455 | 40650 | 0.0002 | - | | 3.2495 | 40700 | 0.0001 | - | | 3.2535 | 40750 | 0.0001 | - | | 3.2575 | 40800 | 0.0 | - | | 3.2615 | 40850 | 0.0 | - | | 3.2655 | 40900 | 0.0001 | - | | 3.2695 | 40950 | 0.0247 | - | | 3.2735 | 41000 | 0.0001 | - | | 3.2774 | 41050 | 0.0001 | - | | 3.2814 | 41100 | 0.0246 | - | | 3.2854 | 41150 | 0.0001 | - | | 3.2894 | 41200 | 0.0001 | - | | 3.2934 | 41250 | 0.0001 | - | | 3.2974 | 41300 | 0.0001 | - | | 3.3014 | 41350 | 0.0001 | - | | 3.3054 | 41400 | 0.0246 | - | | 3.3094 | 41450 | 0.0246 | - | | 3.3134 | 41500 | 0.0246 | - | | 3.3174 | 41550 | 0.0001 | - | | 3.3214 | 41600 | 0.0003 | - | | 3.3253 | 41650 | 0.0001 | - | | 3.3293 | 41700 | 0.0001 | - | | 3.3333 | 41750 | 0.025 | - | | 3.3373 | 41800 | 0.0 | - | | 3.3413 | 41850 | 0.0245 | - | | 3.3453 | 41900 | 0.0001 | - | | 3.3493 | 41950 | 0.0246 | - | | 3.3533 | 42000 | 0.0001 | - | | 3.3573 | 42050 | 0.0001 | - | | 3.3613 | 42100 | 0.0001 | - | | 3.3653 | 42150 | 0.0001 | - | | 3.3693 | 42200 | 0.0248 | - | | 3.3733 | 42250 | 0.0245 | - | | 3.3772 | 42300 | 0.0001 | - | | 3.3812 | 42350 | 0.0 | - | | 3.3852 | 42400 | 0.0001 | - | | 3.3892 | 42450 | 0.0001 | - | | 3.3932 | 42500 | 0.0001 | - | | 3.3972 | 42550 | 0.0001 | - | | 3.4012 | 42600 | 0.0001 | - | | 3.4052 | 42650 | 0.0001 | - | | 3.4092 | 42700 | 0.0001 | - | | 3.4132 | 42750 | 0.0001 | - | | 3.4172 | 42800 | 0.0001 | - | | 3.4212 | 42850 | 0.0 | - | | 3.4251 | 42900 | 0.0 | - | | 3.4291 | 42950 | 0.0001 | - | | 3.4331 | 43000 | 0.0001 | - | | 3.4371 | 43050 | 0.0001 | - | | 3.4411 | 43100 | 0.0001 | - | | 3.4451 | 43150 | 0.0002 | - | | 3.4491 | 43200 | 0.0001 | - | | 3.4531 | 43250 | 0.0002 | - | | 3.4571 | 43300 | 0.0001 | - | | 3.4611 | 43350 | 0.0 | - | | 3.4651 | 43400 | 0.0001 | - | | 3.4691 | 43450 | 0.0246 | - | | 3.4731 | 43500 | 0.0001 | - | | 3.4770 | 43550 | 0.0001 | - | | 3.4810 | 43600 | 0.0246 | - | | 3.4850 | 43650 | 0.0001 | - | | 3.4890 | 43700 | 0.0001 | - | | 3.4930 | 43750 | 0.0001 | - | | 3.4970 | 43800 | 0.0245 | - | | 3.5010 | 43850 | 0.0001 | - | | 3.5050 | 43900 | 0.0001 | - | | 3.5090 | 43950 | 0.0001 | - | | 3.5130 | 44000 | 0.0001 | - | | 3.5170 | 44050 | 0.0001 | - | | 3.5210 | 44100 | 0.0001 | - | | 3.5250 | 44150 | 0.0001 | - | | 3.5289 | 44200 | 0.0001 | - | | 3.5329 | 44250 | 0.0001 | - | | 3.5369 | 44300 | 0.0 | - | | 3.5409 | 44350 | 0.0001 | - | | 3.5449 | 44400 | 0.0 | - | | 3.5489 | 44450 | 0.0249 | - | | 3.5529 | 44500 | 0.0 | - | | 3.5569 | 44550 | 0.0002 | - | | 3.5609 | 44600 | 0.0001 | - | | 3.5649 | 44650 | 0.0002 | - | | 3.5689 | 44700 | 0.0001 | - | | 3.5729 | 44750 | 0.0 | - | | 3.5768 | 44800 | 0.0 | - | | 3.5808 | 44850 | 0.0 | - | | 3.5848 | 44900 | 0.0 | - | | 3.5888 | 44950 | 0.0 | - | | 3.5928 | 45000 | 0.0001 | - | | 3.5968 | 45050 | 0.0001 | - | | 3.6008 | 45100 | 0.0001 | - | | 3.6048 | 45150 | 0.0001 | - | | 3.6088 | 45200 | 0.0002 | - | | 3.6128 | 45250 | 0.0 | - | | 3.6168 | 45300 | 0.0001 | - | | 3.6208 | 45350 | 0.0001 | - | | 3.6248 | 45400 | 0.0001 | - | | 3.6287 | 45450 | 0.0244 | - | | 3.6327 | 45500 | 0.0 | - | | 3.6367 | 45550 | 0.0001 | - | | 3.6407 | 45600 | 0.0 | - | | 3.6447 | 45650 | 0.0001 | - | | 3.6487 | 45700 | 0.0243 | - | | 3.6527 | 45750 | 0.0252 | - | | 3.6567 | 45800 | 0.0001 | - | | 3.6607 | 45850 | 0.0001 | - | | 3.6647 | 45900 | 0.0001 | - | | 3.6687 | 45950 | 0.0 | - | | 3.6727 | 46000 | 0.0001 | - | | 3.6766 | 46050 | 0.0001 | - | | 3.6806 | 46100 | 0.0002 | - | | 3.6846 | 46150 | 0.0 | - | | 3.6886 | 46200 | 0.0247 | - | | 3.6926 | 46250 | 0.0 | - | | 3.6966 | 46300 | 0.0001 | - | | 3.7006 | 46350 | 0.0 | - | | 3.7046 | 46400 | 0.0001 | - | | 3.7086 | 46450 | 0.0001 | - | | 3.7126 | 46500 | 0.0001 | - | | 3.7166 | 46550 | 0.0 | - | | 3.7206 | 46600 | 0.0001 | - | | 3.7246 | 46650 | 0.0 | - | | 3.7285 | 46700 | 0.0001 | - | | 3.7325 | 46750 | 0.0001 | - | | 3.7365 | 46800 | 0.0246 | - | | 3.7405 | 46850 | 0.0 | - | | 3.7445 | 46900 | 0.0001 | - | | 3.7485 | 46950 | 0.0001 | - | | 3.7525 | 47000 | 0.0001 | - | | 3.7565 | 47050 | 0.0001 | - | | 3.7605 | 47100 | 0.0001 | - | | 3.7645 | 47150 | 0.025 | - | | 3.7685 | 47200 | 0.0001 | - | | 3.7725 | 47250 | 0.0002 | - | | 3.7764 | 47300 | 0.0001 | - | | 3.7804 | 47350 | 0.0247 | - | | 3.7844 | 47400 | 0.0248 | - | | 3.7884 | 47450 | 0.0001 | - | | 3.7924 | 47500 | 0.0 | - | | 3.7964 | 47550 | 0.0001 | - | | 3.8004 | 47600 | 0.025 | - | | 3.8044 | 47650 | 0.0001 | - | | 3.8084 | 47700 | 0.0001 | - | | 3.8124 | 47750 | 0.0002 | - | | 3.8164 | 47800 | 0.0001 | - | | 3.8204 | 47850 | 0.0001 | - | | 3.8244 | 47900 | 0.0252 | - | | 3.8283 | 47950 | 0.0001 | - | | 3.8323 | 48000 | 0.0254 | - | | 3.8363 | 48050 | 0.0249 | - | | 3.8403 | 48100 | 0.0001 | - | | 3.8443 | 48150 | 0.0001 | - | | 3.8483 | 48200 | 0.0001 | - | | 3.8523 | 48250 | 0.0 | - | | 3.8563 | 48300 | 0.0001 | - | | 3.8603 | 48350 | 0.0001 | - | | 3.8643 | 48400 | 0.0001 | - | | 3.8683 | 48450 | 0.0001 | - | | 3.8723 | 48500 | 0.0001 | - | | 3.8762 | 48550 | 0.0006 | - | | 3.8802 | 48600 | 0.0003 | - | | 3.8842 | 48650 | 0.0 | - | | 3.8882 | 48700 | 0.0003 | - | | 3.8922 | 48750 | 0.0001 | - | | 3.8962 | 48800 | 0.0001 | - | | 3.9002 | 48850 | 0.0001 | - | | 3.9042 | 48900 | 0.0001 | - | | 3.9082 | 48950 | 0.0001 | - | | 3.9122 | 49000 | 0.0001 | - | | 3.9162 | 49050 | 0.0246 | - | | 3.9202 | 49100 | 0.0 | - | | 3.9242 | 49150 | 0.0001 | - | | 3.9281 | 49200 | 0.0001 | - | | 3.9321 | 49250 | 0.0001 | - | | 3.9361 | 49300 | 0.0246 | - | | 3.9401 | 49350 | 0.0 | - | | 3.9441 | 49400 | 0.0001 | - | | 3.9481 | 49450 | 0.0002 | - | | 3.9521 | 49500 | 0.0 | - | | 3.9561 | 49550 | 0.0 | - | | 3.9601 | 49600 | 0.0002 | - | | 3.9641 | 49650 | 0.0248 | - | | 3.9681 | 49700 | 0.0001 | - | | 3.9721 | 49750 | 0.0001 | - | | 3.9760 | 49800 | 0.0001 | - | | 3.9800 | 49850 | 0.0248 | - | | 3.9840 | 49900 | 0.0001 | - | | 3.9880 | 49950 | 0.0245 | - | | 3.9920 | 50000 | 0.0001 | - | | 3.9960 | 50050 | 0.0487 | - | | 4.0 | 50100 | 0.0002 | 0.0927 | | 4.0040 | 50150 | 0.0001 | - | | 4.0080 | 50200 | 0.0251 | - | | 4.0120 | 50250 | 0.0245 | - | | 4.0160 | 50300 | 0.0001 | - | | 4.0200 | 50350 | 0.0001 | - | | 4.0240 | 50400 | 0.0001 | - | | 4.0279 | 50450 | 0.0 | - | | 4.0319 | 50500 | 0.0001 | - | | 4.0359 | 50550 | 0.0255 | - | | 4.0399 | 50600 | 0.0001 | - | | 4.0439 | 50650 | 0.0 | - | | 4.0479 | 50700 | 0.0001 | - | | 4.0519 | 50750 | 0.0001 | - | | 4.0559 | 50800 | 0.0 | - | | 4.0599 | 50850 | 0.0 | - | | 4.0639 | 50900 | 0.0001 | - | | 4.0679 | 50950 | 0.0001 | - | | 4.0719 | 51000 | 0.0001 | - | | 4.0758 | 51050 | 0.0001 | - | | 4.0798 | 51100 | 0.0242 | - | | 4.0838 | 51150 | 0.0001 | - | | 4.0878 | 51200 | 0.0001 | - | | 4.0918 | 51250 | 0.0001 | - | | 4.0958 | 51300 | 0.0 | - | | 4.0998 | 51350 | 0.0001 | - | | 4.1038 | 51400 | 0.0 | - | | 4.1078 | 51450 | 0.0 | - | | 4.1118 | 51500 | 0.0246 | - | | 4.1158 | 51550 | 0.0 | - | | 4.1198 | 51600 | 0.0249 | - | | 4.1238 | 51650 | 0.0001 | - | | 4.1277 | 51700 | 0.0001 | - | | 4.1317 | 51750 | 0.0001 | - | | 4.1357 | 51800 | 0.0001 | - | | 4.1397 | 51850 | 0.0 | - | | 4.1437 | 51900 | 0.0001 | - | | 4.1477 | 51950 | 0.0 | - | | 4.1517 | 52000 | 0.0001 | - | | 4.1557 | 52050 | 0.0001 | - | | 4.1597 | 52100 | 0.0001 | - | | 4.1637 | 52150 | 0.0001 | - | | 4.1677 | 52200 | 0.0001 | - | | 4.1717 | 52250 | 0.0001 | - | | 4.1756 | 52300 | 0.0 | - | | 4.1796 | 52350 | 0.0001 | - | | 4.1836 | 52400 | 0.0001 | - | | 4.1876 | 52450 | 0.0 | - | | 4.1916 | 52500 | 0.0001 | - | | 4.1956 | 52550 | 0.0001 | - | | 4.1996 | 52600 | 0.0252 | - | | 4.2036 | 52650 | 0.0001 | - | | 4.2076 | 52700 | 0.0001 | - | | 4.2116 | 52750 | 0.0001 | - | | 4.2156 | 52800 | 0.0001 | - | | 4.2196 | 52850 | 0.0001 | - | | 4.2236 | 52900 | 0.0001 | - | | 4.2275 | 52950 | 0.0 | - | | 4.2315 | 53000 | 0.0 | - | | 4.2355 | 53050 | 0.0001 | - | | 4.2395 | 53100 | 0.0244 | - | | 4.2435 | 53150 | 0.0001 | - | | 4.2475 | 53200 | 0.0001 | - | | 4.2515 | 53250 | 0.0248 | - | | 4.2555 | 53300 | 0.0001 | - | | 4.2595 | 53350 | 0.0 | - | | 4.2635 | 53400 | 0.0 | - | | 4.2675 | 53450 | 0.0245 | - | | 4.2715 | 53500 | 0.0 | - | | 4.2754 | 53550 | 0.0251 | - | | 4.2794 | 53600 | 0.0 | - | | 4.2834 | 53650 | 0.0001 | - | | 4.2874 | 53700 | 0.0001 | - | | 4.2914 | 53750 | 0.0001 | - | | 4.2954 | 53800 | 0.0 | - | | 4.2994 | 53850 | 0.0 | - | | 4.3034 | 53900 | 0.0247 | - | | 4.3074 | 53950 | 0.049 | - | | 4.3114 | 54000 | 0.0 | - | | 4.3154 | 54050 | 0.0001 | - | | 4.3194 | 54100 | 0.0 | - | | 4.3234 | 54150 | 0.0001 | - | | 4.3273 | 54200 | 0.0001 | - | | 4.3313 | 54250 | 0.0001 | - | | 4.3353 | 54300 | 0.0001 | - | | 4.3393 | 54350 | 0.0243 | - | | 4.3433 | 54400 | 0.0001 | - | | 4.3473 | 54450 | 0.0246 | - | | 4.3513 | 54500 | 0.0 | - | | 4.3553 | 54550 | 0.0001 | - | | 4.3593 | 54600 | 0.0001 | - | | 4.3633 | 54650 | 0.0001 | - | | 4.3673 | 54700 | 0.0 | - | | 4.3713 | 54750 | 0.0246 | - | | 4.3752 | 54800 | 0.0 | - | | 4.3792 | 54850 | 0.0 | - | | 4.3832 | 54900 | 0.0001 | - | | 4.3872 | 54950 | 0.0001 | - | | 4.3912 | 55000 | 0.0001 | - | | 4.3952 | 55050 | 0.0001 | - | | 4.3992 | 55100 | 0.0001 | - | | 4.4032 | 55150 | 0.0 | - | | 4.4072 | 55200 | 0.0001 | - | | 4.4112 | 55250 | 0.0 | - | | 4.4152 | 55300 | 0.0 | - | | 4.4192 | 55350 | 0.0001 | - | | 4.4232 | 55400 | 0.0244 | - | | 4.4271 | 55450 | 0.0 | - | | 4.4311 | 55500 | 0.0 | - | | 4.4351 | 55550 | 0.0001 | - | | 4.4391 | 55600 | 0.0 | - | | 4.4431 | 55650 | 0.0001 | - | | 4.4471 | 55700 | 0.0001 | - | | 4.4511 | 55750 | 0.0959 | - | | 4.4551 | 55800 | 0.0002 | - | | 4.4591 | 55850 | 0.0001 | - | | 4.4631 | 55900 | 0.0001 | - | | 4.4671 | 55950 | 0.0246 | - | | 4.4711 | 56000 | 0.0001 | - | | 4.4750 | 56050 | 0.0001 | - | | 4.4790 | 56100 | 0.0246 | - | | 4.4830 | 56150 | 0.024 | - | | 4.4870 | 56200 | 0.0001 | - | | 4.4910 | 56250 | 0.0001 | - | | 4.4950 | 56300 | 0.0245 | - | | 4.4990 | 56350 | 0.0001 | - | | 4.5030 | 56400 | 0.0001 | - | | 4.5070 | 56450 | 0.0001 | - | | 4.5110 | 56500 | 0.0001 | - | | 4.5150 | 56550 | 0.0001 | - | | 4.5190 | 56600 | 0.0001 | - | | 4.5230 | 56650 | 0.0001 | - | | 4.5269 | 56700 | 0.0 | - | | 4.5309 | 56750 | 0.0002 | - | | 4.5349 | 56800 | 0.0001 | - | | 4.5389 | 56850 | 0.0001 | - | | 4.5429 | 56900 | 0.0001 | - | | 4.5469 | 56950 | 0.0001 | - | | 4.5509 | 57000 | 0.0 | - | | 4.5549 | 57050 | 0.0001 | - | | 4.5589 | 57100 | 0.0001 | - | | 4.5629 | 57150 | 0.0001 | - | | 4.5669 | 57200 | 0.0 | - | | 4.5709 | 57250 | 0.0001 | - | | 4.5749 | 57300 | 0.0001 | - | | 4.5788 | 57350 | 0.0252 | - | | 4.5828 | 57400 | 0.0 | - | | 4.5868 | 57450 | 0.0 | - | | 4.5908 | 57500 | 0.0001 | - | | 4.5948 | 57550 | 0.0001 | - | | 4.5988 | 57600 | 0.0001 | - | | 4.6028 | 57650 | 0.0001 | - | | 4.6068 | 57700 | 0.0 | - | | 4.6108 | 57750 | 0.0 | - | | 4.6148 | 57800 | 0.0001 | - | | 4.6188 | 57850 | 0.0001 | - | | 4.6228 | 57900 | 0.0 | - | | 4.6267 | 57950 | 0.0244 | - | | 4.6307 | 58000 | 0.0416 | - | | 4.6347 | 58050 | 0.0001 | - | | 4.6387 | 58100 | 0.0 | - | | 4.6427 | 58150 | 0.0 | - | | 4.6467 | 58200 | 0.0245 | - | | 4.6507 | 58250 | 0.0001 | - | | 4.6547 | 58300 | 0.0001 | - | | 4.6587 | 58350 | 0.0 | - | | 4.6627 | 58400 | 0.0001 | - | | 4.6667 | 58450 | 0.0001 | - | | 4.6707 | 58500 | 0.0001 | - | | 4.6747 | 58550 | 0.0001 | - | | 4.6786 | 58600 | 0.0001 | - | | 4.6826 | 58650 | 0.0001 | - | | 4.6866 | 58700 | 0.0002 | - | | 4.6906 | 58750 | 0.0 | - | | 4.6946 | 58800 | 0.0001 | - | | 4.6986 | 58850 | 0.0001 | - | | 4.7026 | 58900 | 0.0001 | - | | 4.7066 | 58950 | 0.0253 | - | | 4.7106 | 59000 | 0.0001 | - | | 4.7146 | 59050 | 0.0 | - | | 4.7186 | 59100 | 0.0001 | - | | 4.7226 | 59150 | 0.0 | - | | 4.7265 | 59200 | 0.0001 | - | | 4.7305 | 59250 | 0.0001 | - | | 4.7345 | 59300 | 0.0246 | - | | 4.7385 | 59350 | 0.0252 | - | | 4.7425 | 59400 | 0.0001 | - | | 4.7465 | 59450 | 0.1531 | - | | 4.7505 | 59500 | 0.0001 | - | | 4.7545 | 59550 | 0.0001 | - | | 4.7585 | 59600 | 0.025 | - | | 4.7625 | 59650 | 0.0 | - | | 4.7665 | 59700 | 0.0001 | - | | 4.7705 | 59750 | 0.0001 | - | | 4.7745 | 59800 | 0.0001 | - | | 4.7784 | 59850 | 0.0244 | - | | 4.7824 | 59900 | 0.0009 | - | | 4.7864 | 59950 | 0.0001 | - | | 4.7904 | 60000 | 0.0009 | - | | 4.7944 | 60050 | 0.0015 | - | | 4.7984 | 60100 | 0.0252 | - | | 4.8024 | 60150 | 0.0001 | - | | 4.8064 | 60200 | 0.0245 | - | | 4.8104 | 60250 | 0.0003 | - | | 4.8144 | 60300 | 0.0002 | - | | 4.8184 | 60350 | 0.0001 | - | | 4.8224 | 60400 | 0.0001 | - | | 4.8263 | 60450 | 0.0249 | - | | 4.8303 | 60500 | 0.0002 | - | | 4.8343 | 60550 | 0.0001 | - | | 4.8383 | 60600 | 0.0001 | - | | 4.8423 | 60650 | 0.0001 | - | | 4.8463 | 60700 | 0.0242 | - | | 4.8503 | 60750 | 0.0001 | - | | 4.8543 | 60800 | 0.0002 | - | | 4.8583 | 60850 | 0.0001 | - | | 4.8623 | 60900 | 0.0001 | - | | 4.8663 | 60950 | 0.0005 | - | | 4.8703 | 61000 | 0.0001 | - | | 4.8743 | 61050 | 0.0247 | - | | 4.8782 | 61100 | 0.0001 | - | | 4.8822 | 61150 | 0.0001 | - | | 4.8862 | 61200 | 0.0001 | - | | 4.8902 | 61250 | 0.0001 | - | | 4.8942 | 61300 | 0.0247 | - | | 4.8982 | 61350 | 0.0245 | - | | 4.9022 | 61400 | 0.0001 | - | | 4.9062 | 61450 | 0.0001 | - | | 4.9102 | 61500 | 0.0249 | - | | 4.9142 | 61550 | 0.2375 | - | | 4.9182 | 61600 | 0.0001 | - | | 4.9222 | 61650 | 0.0001 | - | | 4.9261 | 61700 | 0.0002 | - | | 4.9301 | 61750 | 0.0001 | - | | 4.9341 | 61800 | 0.0494 | - | | 4.9381 | 61850 | 0.0001 | - | | 4.9421 | 61900 | 0.0001 | - | | 4.9461 | 61950 | 0.0001 | - | | 4.9501 | 62000 | 0.0001 | - | | 4.9541 | 62050 | 0.0001 | - | | 4.9581 | 62100 | 0.024 | - | | 4.9621 | 62150 | 0.0001 | - | | 4.9661 | 62200 | 0.0001 | - | | 4.9701 | 62250 | 0.0001 | - | | 4.9741 | 62300 | 0.0002 | - | | 4.9780 | 62350 | 0.0001 | - | | 4.9820 | 62400 | 0.0502 | - | | 4.9860 | 62450 | 0.0244 | - | | 4.9900 | 62500 | 0.0001 | - | | 4.9940 | 62550 | 0.0001 | - | | 4.9980 | 62600 | 0.0001 | - | | 5.0 | 62625 | - | 0.1298 | | 5.0020 | 62650 | 0.025 | - | | 5.0060 | 62700 | 0.0001 | - | | 5.0100 | 62750 | 0.0244 | - | | 5.0140 | 62800 | 0.0001 | - | | 5.0180 | 62850 | 0.0001 | - | | 5.0220 | 62900 | 0.0001 | - | | 5.0259 | 62950 | 0.0001 | - | | 5.0299 | 63000 | 0.0001 | - | | 5.0339 | 63050 | 0.0002 | - | | 5.0379 | 63100 | 0.0001 | - | | 5.0419 | 63150 | 0.0001 | - | | 5.0459 | 63200 | 0.0001 | - | | 5.0499 | 63250 | 0.0001 | - | | 5.0539 | 63300 | 0.0001 | - | | 5.0579 | 63350 | 0.0001 | - | | 5.0619 | 63400 | 0.0001 | - | | 5.0659 | 63450 | 0.0249 | - | | 5.0699 | 63500 | 0.0001 | - | | 5.0739 | 63550 | 0.0001 | - | | 5.0778 | 63600 | 0.0002 | - | | 5.0818 | 63650 | 0.0001 | - | | 5.0858 | 63700 | 0.0001 | - | | 5.0898 | 63750 | 0.0001 | - | | 5.0938 | 63800 | 0.0001 | - | | 5.0978 | 63850 | 0.0001 | - | | 5.1018 | 63900 | 0.0001 | - | | 5.1058 | 63950 | 0.0001 | - | | 5.1098 | 64000 | 0.0001 | - | | 5.1138 | 64050 | 0.0001 | - | | 5.1178 | 64100 | 0.0001 | - | | 5.1218 | 64150 | 0.0001 | - | | 5.1257 | 64200 | 0.0001 | - | | 5.1297 | 64250 | 0.0001 | - | | 5.1337 | 64300 | 0.0002 | - | | 5.1377 | 64350 | 0.0001 | - | | 5.1417 | 64400 | 0.0001 | - | | 5.1457 | 64450 | 0.0002 | - | | 5.1497 | 64500 | 0.0001 | - | | 5.1537 | 64550 | 0.0001 | - | | 5.1577 | 64600 | 0.0001 | - | | 5.1617 | 64650 | 0.0003 | - | | 5.1657 | 64700 | 0.0001 | - | | 5.1697 | 64750 | 0.0001 | - | | 5.1737 | 64800 | 0.0001 | - | | 5.1776 | 64850 | 0.0243 | - | | 5.1816 | 64900 | 0.0001 | - | | 5.1856 | 64950 | 0.0003 | - | | 5.1896 | 65000 | 0.0001 | - | | 5.1936 | 65050 | 0.0001 | - | | 5.1976 | 65100 | 0.0001 | - | | 5.2016 | 65150 | 0.0001 | - | | 5.2056 | 65200 | 0.0245 | - | | 5.2096 | 65250 | 0.0001 | - | | 5.2136 | 65300 | 0.0001 | - | | 5.2176 | 65350 | 0.0001 | - | | 5.2216 | 65400 | 0.0001 | - | | 5.2255 | 65450 | 0.0001 | - | | 5.2295 | 65500 | 0.0002 | - | | 5.2335 | 65550 | 0.0248 | - | | 5.2375 | 65600 | 0.0243 | - | | 5.2415 | 65650 | 0.0001 | - | | 5.2455 | 65700 | 0.0002 | - | | 5.2495 | 65750 | 0.0001 | - | | 5.2535 | 65800 | 0.0001 | - | | 5.2575 | 65850 | 0.0 | - | | 5.2615 | 65900 | 0.0001 | - | | 5.2655 | 65950 | 0.0245 | - | | 5.2695 | 66000 | 0.0001 | - | | 5.2735 | 66050 | 0.0001 | - | | 5.2774 | 66100 | 0.0001 | - | | 5.2814 | 66150 | 0.0244 | - | | 5.2854 | 66200 | 0.0001 | - | | 5.2894 | 66250 | 0.0001 | - | | 5.2934 | 66300 | 0.0001 | - | | 5.2974 | 66350 | 0.0001 | - | | 5.3014 | 66400 | 0.0247 | - | | 5.3054 | 66450 | 0.0244 | - | | 5.3094 | 66500 | 0.0001 | - | | 5.3134 | 66550 | 0.0248 | - | | 5.3174 | 66600 | 0.0001 | - | | 5.3214 | 66650 | 0.0003 | - | | 5.3253 | 66700 | 0.0001 | - | | 5.3293 | 66750 | 0.0001 | - | | 5.3333 | 66800 | 0.0249 | - | | 5.3373 | 66850 | 0.0244 | - | | 5.3413 | 66900 | 0.0001 | - | | 5.3453 | 66950 | 0.0246 | - | | 5.3493 | 67000 | 0.0 | - | | 5.3533 | 67050 | 0.0001 | - | | 5.3573 | 67100 | 0.0001 | - | | 5.3613 | 67150 | 0.0001 | - | | 5.3653 | 67200 | 0.0001 | - | | 5.3693 | 67250 | 0.0494 | - | | 5.3733 | 67300 | 0.0001 | - | | 5.3772 | 67350 | 0.0001 | - | | 5.3812 | 67400 | 0.0001 | - | | 5.3852 | 67450 | 0.0001 | - | | 5.3892 | 67500 | 0.0001 | - | | 5.3932 | 67550 | 0.0001 | - | | 5.3972 | 67600 | 0.0001 | - | | 5.4012 | 67650 | 0.0001 | - | | 5.4052 | 67700 | 0.0001 | - | | 5.4092 | 67750 | 0.0001 | - | | 5.4132 | 67800 | 0.0001 | - | | 5.4172 | 67850 | 0.0001 | - | | 5.4212 | 67900 | 0.0 | - | | 5.4251 | 67950 | 0.0 | - | | 5.4291 | 68000 | 0.0001 | - | | 5.4331 | 68050 | 0.0001 | - | | 5.4371 | 68100 | 0.0001 | - | | 5.4411 | 68150 | 0.0001 | - | | 5.4451 | 68200 | 0.0002 | - | | 5.4491 | 68250 | 0.0001 | - | | 5.4531 | 68300 | 0.0001 | - | | 5.4571 | 68350 | 0.0001 | - | | 5.4611 | 68400 | 0.0001 | - | | 5.4651 | 68450 | 0.0245 | - | | 5.4691 | 68500 | 0.0001 | - | | 5.4731 | 68550 | 0.0001 | - | | 5.4770 | 68600 | 0.0245 | - | | 5.4810 | 68650 | 0.0001 | - | | 5.4850 | 68700 | 0.0001 | - | | 5.4890 | 68750 | 0.0001 | - | | 5.4930 | 68800 | 0.0246 | - | | 5.4970 | 68850 | 0.0 | - | | 5.5010 | 68900 | 0.0001 | - | | 5.5050 | 68950 | 0.0001 | - | | 5.5090 | 69000 | 0.0001 | - | | 5.5130 | 69050 | 0.0001 | - | | 5.5170 | 69100 | 0.0001 | - | | 5.5210 | 69150 | 0.0001 | - | | 5.5250 | 69200 | 0.0001 | - | | 5.5289 | 69250 | 0.0001 | - | | 5.5329 | 69300 | 0.0001 | - | | 5.5369 | 69350 | 0.0 | - | | 5.5409 | 69400 | 0.0001 | - | | 5.5449 | 69450 | 0.0001 | - | | 5.5489 | 69500 | 0.0249 | - | | 5.5529 | 69550 | 0.0 | - | | 5.5569 | 69600 | 0.0001 | - | | 5.5609 | 69650 | 0.0001 | - | | 5.5649 | 69700 | 0.0016 | - | | 5.5689 | 69750 | 0.0001 | - | | 5.5729 | 69800 | 0.0 | - | | 5.5768 | 69850 | 0.0 | - | | 5.5808 | 69900 | 0.0 | - | | 5.5848 | 69950 | 0.0 | - | | 5.5888 | 70000 | 0.0 | - | | 5.5928 | 70050 | 0.0001 | - | | 5.5968 | 70100 | 0.0001 | - | | 5.6008 | 70150 | 0.0001 | - | | 5.6048 | 70200 | 0.0001 | - | | 5.6088 | 70250 | 0.0001 | - | | 5.6128 | 70300 | 0.0 | - | | 5.6168 | 70350 | 0.0 | - | | 5.6208 | 70400 | 0.0043 | - | | 5.6248 | 70450 | 0.0151 | - | | 5.6287 | 70500 | 0.0 | - | | 5.6327 | 70550 | 0.0 | - | | 5.6367 | 70600 | 0.0 | - | | 5.6407 | 70650 | 0.0033 | - | | 5.6447 | 70700 | 0.0274 | - | | 5.6487 | 70750 | 0.0031 | - | | 5.6527 | 70800 | 0.0248 | - | | 5.6567 | 70850 | 0.0 | - | | 5.6607 | 70900 | 0.0245 | - | | 5.6647 | 70950 | 0.0248 | - | | 5.6687 | 71000 | 0.0 | - | | 5.6727 | 71050 | 0.0001 | - | | 5.6766 | 71100 | 0.0001 | - | | 5.6806 | 71150 | 0.0001 | - | | 5.6846 | 71200 | 0.0001 | - | | 5.6886 | 71250 | 0.0002 | - | | 5.6926 | 71300 | 0.0 | - | | 5.6966 | 71350 | 0.0001 | - | | 5.7006 | 71400 | 0.0001 | - | | 5.7046 | 71450 | 0.0001 | - | | 5.7086 | 71500 | 0.0001 | - | | 5.7126 | 71550 | 0.0001 | - | | 5.7166 | 71600 | 0.0247 | - | | 5.7206 | 71650 | 0.0001 | - | | 5.7246 | 71700 | 0.0001 | - | | 5.7285 | 71750 | 0.0001 | - | | 5.7325 | 71800 | 0.0001 | - | | 5.7365 | 71850 | 0.0001 | - | | 5.7405 | 71900 | 0.0245 | - | | 5.7445 | 71950 | 0.0001 | - | | 5.7485 | 72000 | 0.0247 | - | | 5.7525 | 72050 | 0.0001 | - | | 5.7565 | 72100 | 0.0 | - | | 5.7605 | 72150 | 0.0001 | - | | 5.7645 | 72200 | 0.05 | - | | 5.7685 | 72250 | 0.0001 | - | | 5.7725 | 72300 | 0.0247 | - | | 5.7764 | 72350 | 0.0002 | - | | 5.7804 | 72400 | 0.0244 | - | | 5.7844 | 72450 | 0.0 | - | | 5.7884 | 72500 | 0.0001 | - | | 5.7924 | 72550 | 0.0001 | - | | 5.7964 | 72600 | 0.0 | - | | 5.8004 | 72650 | 0.0001 | - | | 5.8044 | 72700 | 0.0001 | - | | 5.8084 | 72750 | 0.0001 | - | | 5.8124 | 72800 | 0.0 | - | | 5.8164 | 72850 | 0.0001 | - | | 5.8204 | 72900 | 0.0001 | - | | 5.8244 | 72950 | 0.025 | - | | 5.8283 | 73000 | 0.0001 | - | | 5.8323 | 73050 | 0.0 | - | | 5.8363 | 73100 | 0.0247 | - | | 5.8403 | 73150 | 0.0 | - | | 5.8443 | 73200 | 0.0 | - | | 5.8483 | 73250 | 0.0001 | - | | 5.8523 | 73300 | 0.0 | - | | 5.8563 | 73350 | 0.0 | - | | 5.8603 | 73400 | 0.0001 | - | | 5.8643 | 73450 | 0.0001 | - | | 5.8683 | 73500 | 0.0 | - | | 5.8723 | 73550 | 0.0248 | - | | 5.8762 | 73600 | 0.0001 | - | | 5.8802 | 73650 | 0.0001 | - | | 5.8842 | 73700 | 0.0001 | - | | 5.8882 | 73750 | 0.0001 | - | | 5.8922 | 73800 | 0.0248 | - | | 5.8962 | 73850 | 0.0001 | - | | 5.9002 | 73900 | 0.0001 | - | | 5.9042 | 73950 | 0.0001 | - | | 5.9082 | 74000 | 0.0247 | - | | 5.9122 | 74050 | 0.0246 | - | | 5.9162 | 74100 | 0.0001 | - | | 5.9202 | 74150 | 0.0001 | - | | 5.9242 | 74200 | 0.0 | - | | 5.9281 | 74250 | 0.0001 | - | | 5.9321 | 74300 | 0.0 | - | | 5.9361 | 74350 | 0.0 | - | | 5.9401 | 74400 | 0.0 | - | | 5.9441 | 74450 | 0.0001 | - | | 5.9481 | 74500 | 0.0249 | - | | 5.9521 | 74550 | 0.0 | - | | 5.9561 | 74600 | 0.0002 | - | | 5.9601 | 74650 | 0.0001 | - | | 5.9641 | 74700 | 0.0001 | - | | 5.9681 | 74750 | 0.0 | - | | 5.9721 | 74800 | 0.0 | - | | 5.9760 | 74850 | 0.0246 | - | | 5.9800 | 74900 | 0.0001 | - | | 5.9840 | 74950 | 0.0 | - | | 5.9880 | 75000 | 0.0001 | - | | 5.9920 | 75050 | 0.0 | - | | 5.9960 | 75100 | 0.0001 | - | | 6.0 | 75150 | 0.0001 | 0.0594 | | 6.0040 | 75200 | 0.0001 | - | | 6.0080 | 75250 | 0.0497 | - | | 6.0120 | 75300 | 0.0248 | - | | 6.0160 | 75350 | 0.0495 | - | | 6.0200 | 75400 | 0.0 | - | | 6.0240 | 75450 | 0.0001 | - | | 6.0279 | 75500 | 0.0001 | - | | 6.0319 | 75550 | 0.0001 | - | | 6.0359 | 75600 | 0.0252 | - | | 6.0399 | 75650 | 0.0001 | - | | 6.0439 | 75700 | 0.0001 | - | | 6.0479 | 75750 | 0.0001 | - | | 6.0519 | 75800 | 0.0741 | - | | 6.0559 | 75850 | 0.0 | - | | 6.0599 | 75900 | 0.0 | - | | 6.0639 | 75950 | 0.0001 | - | | 6.0679 | 76000 | 0.0001 | - | | 6.0719 | 76050 | 0.0 | - | | 6.0758 | 76100 | 0.0001 | - | | 6.0798 | 76150 | 0.0001 | - | | 6.0838 | 76200 | 0.0247 | - | | 6.0878 | 76250 | 0.0001 | - | | 6.0918 | 76300 | 0.0001 | - | | 6.0958 | 76350 | 0.0244 | - | | 6.0998 | 76400 | 0.0 | - | | 6.1038 | 76450 | 0.0001 | - | | 6.1078 | 76500 | 0.0001 | - | | 6.1118 | 76550 | 0.0001 | - | | 6.1158 | 76600 | 0.0 | - | | 6.1198 | 76650 | 0.0001 | - | | 6.1238 | 76700 | 0.0 | - | | 6.1277 | 76750 | 0.0245 | - | | 6.1317 | 76800 | 0.0001 | - | | 6.1357 | 76850 | 0.0001 | - | | 6.1397 | 76900 | 0.0001 | - | | 6.1437 | 76950 | 0.0001 | - | | 6.1477 | 77000 | 0.0 | - | | 6.1517 | 77050 | 0.0001 | - | | 6.1557 | 77100 | 0.0001 | - | | 6.1597 | 77150 | 0.0001 | - | | 6.1637 | 77200 | 0.0001 | - | | 6.1677 | 77250 | 0.0 | - | | 6.1717 | 77300 | 0.0001 | - | | 6.1756 | 77350 | 0.0001 | - | | 6.1796 | 77400 | 0.0245 | - | | 6.1836 | 77450 | 0.0 | - | | 6.1876 | 77500 | 0.0496 | - | | 6.1916 | 77550 | 0.0246 | - | | 6.1956 | 77600 | 0.0001 | - | | 6.1996 | 77650 | 0.025 | - | | 6.2036 | 77700 | 0.0001 | - | | 6.2076 | 77750 | 0.0 | - | | 6.2116 | 77800 | 0.0001 | - | | 6.2156 | 77850 | 0.0001 | - | | 6.2196 | 77900 | 0.0248 | - | | 6.2236 | 77950 | 0.0247 | - | | 6.2275 | 78000 | 0.0002 | - | | 6.2315 | 78050 | 0.0001 | - | | 6.2355 | 78100 | 0.0246 | - | | 6.2395 | 78150 | 0.0001 | - | | 6.2435 | 78200 | 0.0001 | - | | 6.2475 | 78250 | 0.0001 | - | | 6.2515 | 78300 | 0.0249 | - | | 6.2555 | 78350 | 0.0001 | - | | 6.2595 | 78400 | 0.0251 | - | | 6.2635 | 78450 | 0.0 | - | | 6.2675 | 78500 | 0.0 | - | | 6.2715 | 78550 | 0.0001 | - | | 6.2754 | 78600 | 0.05 | - | | 6.2794 | 78650 | 0.0001 | - | | 6.2834 | 78700 | 0.0001 | - | | 6.2874 | 78750 | 0.0 | - | | 6.2914 | 78800 | 0.0001 | - | | 6.2954 | 78850 | 0.0001 | - | | 6.2994 | 78900 | 0.0 | - | | 6.3034 | 78950 | 0.0246 | - | | 6.3074 | 79000 | 0.0245 | - | | 6.3114 | 79050 | 0.0001 | - | | 6.3154 | 79100 | 0.0 | - | | 6.3194 | 79150 | 0.0 | - | | 6.3234 | 79200 | 0.0002 | - | | 6.3273 | 79250 | 0.0001 | - | | 6.3313 | 79300 | 0.0001 | - | | 6.3353 | 79350 | 0.0 | - | | 6.3393 | 79400 | 0.0001 | - | | 6.3433 | 79450 | 0.0249 | - | | 6.3473 | 79500 | 0.0001 | - | | 6.3513 | 79550 | 0.0001 | - | | 6.3553 | 79600 | 0.0001 | - | | 6.3593 | 79650 | 0.0001 | - | | 6.3633 | 79700 | 0.0 | - | | 6.3673 | 79750 | 0.0247 | - | | 6.3713 | 79800 | 0.0243 | - | | 6.3752 | 79850 | 0.0 | - | | 6.3792 | 79900 | 0.0001 | - | | 6.3832 | 79950 | 0.0001 | - | | 6.3872 | 80000 | 0.0001 | - | | 6.3912 | 80050 | 0.0 | - | | 6.3952 | 80100 | 0.0001 | - | | 6.3992 | 80150 | 0.0 | - | | 6.4032 | 80200 | 0.0249 | - | | 6.4072 | 80250 | 0.0247 | - | | 6.4112 | 80300 | 0.0 | - | | 6.4152 | 80350 | 0.0248 | - | | 6.4192 | 80400 | 0.0001 | - | | 6.4232 | 80450 | 0.0247 | - | | 6.4271 | 80500 | 0.0 | - | | 6.4311 | 80550 | 0.0 | - | | 6.4351 | 80600 | 0.0247 | - | | 6.4391 | 80650 | 0.0246 | - | | 6.4431 | 80700 | 0.0001 | - | | 6.4471 | 80750 | 0.0242 | - | | 6.4511 | 80800 | 0.0 | - | | 6.4551 | 80850 | 0.0001 | - | | 6.4591 | 80900 | 0.0001 | - | | 6.4631 | 80950 | 0.0243 | - | | 6.4671 | 81000 | 0.0001 | - | | 6.4711 | 81050 | 0.0001 | - | | 6.4750 | 81100 | 0.0491 | - | | 6.4790 | 81150 | 0.0 | - | | 6.4830 | 81200 | 0.0001 | - | | 6.4870 | 81250 | 0.0001 | - | | 6.4910 | 81300 | 0.0247 | - | | 6.4950 | 81350 | 0.0 | - | | 6.4990 | 81400 | 0.0001 | - | | 6.5030 | 81450 | 0.0001 | - | | 6.5070 | 81500 | 0.0001 | - | | 6.5110 | 81550 | 0.0001 | - | | 6.5150 | 81600 | 0.0001 | - | | 6.5190 | 81650 | 0.0246 | - | | 6.5230 | 81700 | 0.0246 | - | | 6.5269 | 81750 | 0.0001 | - | | 6.5309 | 81800 | 0.0001 | - | | 6.5349 | 81850 | 0.0 | - | | 6.5389 | 81900 | 0.0 | - | | 6.5429 | 81950 | 0.0247 | - | | 6.5469 | 82000 | 0.0248 | - | | 6.5509 | 82050 | 0.0001 | - | | 6.5549 | 82100 | 0.0 | - | | 6.5589 | 82150 | 0.0001 | - | | 6.5629 | 82200 | 0.0001 | - | | 6.5669 | 82250 | 0.0001 | - | | 6.5709 | 82300 | 0.0 | - | | 6.5749 | 82350 | 0.0001 | - | | 6.5788 | 82400 | 0.0001 | - | | 6.5828 | 82450 | 0.0 | - | | 6.5868 | 82500 | 0.0001 | - | | 6.5908 | 82550 | 0.0001 | - | | 6.5948 | 82600 | 0.0001 | - | | 6.5988 | 82650 | 0.0001 | - | | 6.6028 | 82700 | 0.0001 | - | | 6.6068 | 82750 | 0.0001 | - | | 6.6108 | 82800 | 0.0 | - | | 6.6148 | 82850 | 0.0 | - | | 6.6188 | 82900 | 0.0001 | - | | 6.6228 | 82950 | 0.0244 | - | | 6.6267 | 83000 | 0.0001 | - | | 6.6307 | 83050 | 0.0247 | - | | 6.6347 | 83100 | 0.0 | - | | 6.6387 | 83150 | 0.0001 | - | | 6.6427 | 83200 | 0.0246 | - | | 6.6467 | 83250 | 0.0001 | - | | 6.6507 | 83300 | 0.0001 | - | | 6.6547 | 83350 | 0.0 | - | | 6.6587 | 83400 | 0.0 | - | | 6.6627 | 83450 | 0.0 | - | | 6.6667 | 83500 | 0.0 | - | | 6.6707 | 83550 | 0.0 | - | | 6.6747 | 83600 | 0.0001 | - | | 6.6786 | 83650 | 0.0001 | - | | 6.6826 | 83700 | 0.0 | - | | 6.6866 | 83750 | 0.0001 | - | | 6.6906 | 83800 | 0.0 | - | | 6.6946 | 83850 | 0.0001 | - | | 6.6986 | 83900 | 0.0001 | - | | 6.7026 | 83950 | 0.0001 | - | | 6.7066 | 84000 | 0.025 | - | | 6.7106 | 84050 | 0.0001 | - | | 6.7146 | 84100 | 0.0 | - | | 6.7186 | 84150 | 0.0 | - | | 6.7226 | 84200 | 0.0 | - | | 6.7265 | 84250 | 0.0001 | - | | 6.7305 | 84300 | 0.0246 | - | | 6.7345 | 84350 | 0.0001 | - | | 6.7385 | 84400 | 0.0251 | - | | 6.7425 | 84450 | 0.0 | - | | 6.7465 | 84500 | 0.0 | - | | 6.7505 | 84550 | 0.0001 | - | | 6.7545 | 84600 | 0.0 | - | | 6.7585 | 84650 | 0.0248 | - | | 6.7625 | 84700 | 0.0 | - | | 6.7665 | 84750 | 0.0001 | - | | 6.7705 | 84800 | 0.0 | - | | 6.7745 | 84850 | 0.0247 | - | | 6.7784 | 84900 | 0.0 | - | | 6.7824 | 84950 | 0.0001 | - | | 6.7864 | 85000 | 0.0 | - | | 6.7904 | 85050 | 0.0 | - | | 6.7944 | 85100 | 0.0 | - | | 6.7984 | 85150 | 0.0002 | - | | 6.8024 | 85200 | 0.0 | - | | 6.8064 | 85250 | 0.0001 | - | | 6.8104 | 85300 | 0.0001 | - | | 6.8144 | 85350 | 0.0 | - | | 6.8184 | 85400 | 0.0001 | - | | 6.8224 | 85450 | 0.0001 | - | | 6.8263 | 85500 | 0.0 | - | | 6.8303 | 85550 | 0.0001 | - | | 6.8343 | 85600 | 0.0 | - | | 6.8383 | 85650 | 0.0 | - | | 6.8423 | 85700 | 0.0 | - | | 6.8463 | 85750 | 0.0248 | - | | 6.8503 | 85800 | 0.0 | - | | 6.8543 | 85850 | 0.0001 | - | | 6.8583 | 85900 | 0.0 | - | | 6.8623 | 85950 | 0.0 | - | | 6.8663 | 86000 | 0.0001 | - | | 6.8703 | 86050 | 0.0 | - | | 6.8743 | 86100 | 0.0001 | - | | 6.8782 | 86150 | 0.0001 | - | | 6.8822 | 86200 | 0.0 | - | | 6.8862 | 86250 | 0.0 | - | | 6.8902 | 86300 | 0.0 | - | | 6.8942 | 86350 | 0.0 | - | | 6.8982 | 86400 | 0.0 | - | | 6.9022 | 86450 | 0.0001 | - | | 6.9062 | 86500 | 0.0 | - | | 6.9102 | 86550 | 0.0 | - | | 6.9142 | 86600 | 0.0 | - | | 6.9182 | 86650 | 0.0 | - | | 6.9222 | 86700 | 0.0 | - | | 6.9261 | 86750 | 0.0001 | - | | 6.9301 | 86800 | 0.0246 | - | | 6.9341 | 86850 | 0.025 | - | | 6.9381 | 86900 | 0.0001 | - | | 6.9421 | 86950 | 0.0 | - | | 6.9461 | 87000 | 0.0 | - | | 6.9501 | 87050 | 0.0 | - | | 6.9541 | 87100 | 0.0001 | - | | 6.9581 | 87150 | 0.0001 | - | | 6.9621 | 87200 | 0.0 | - | | 6.9661 | 87250 | 0.0 | - | | 6.9701 | 87300 | 0.0001 | - | | 6.9741 | 87350 | 0.0081 | - | | 6.9780 | 87400 | 0.0 | - | | 6.9820 | 87450 | 0.0469 | - | | 6.9860 | 87500 | 0.0 | - | | 6.9900 | 87550 | 0.0 | - | | 6.9940 | 87600 | 0.0 | - | | 6.9980 | 87650 | 0.0 | - | | **7.0** | **87675** | **-** | **0.039** | | 7.0020 | 87700 | 0.0248 | - | | 7.0060 | 87750 | 0.0246 | - | | 7.0100 | 87800 | 0.0 | - | | 7.0140 | 87850 | 0.0001 | - | | 7.0180 | 87900 | 0.0001 | - | | 7.0220 | 87950 | 0.0 | - | | 7.0259 | 88000 | 0.0 | - | | 7.0299 | 88050 | 0.0001 | - | | 7.0339 | 88100 | 0.0001 | - | | 7.0379 | 88150 | 0.0 | - | | 7.0419 | 88200 | 0.0034 | - | | 7.0459 | 88250 | 0.0001 | - | | 7.0499 | 88300 | 0.004 | - | | 7.0539 | 88350 | 0.0 | - | | 7.0579 | 88400 | 0.0 | - | | 7.0619 | 88450 | 0.0001 | - | | 7.0659 | 88500 | 0.0249 | - | | 7.0699 | 88550 | 0.0 | - | | 7.0739 | 88600 | 0.0 | - | | 7.0778 | 88650 | 0.0001 | - | | 7.0818 | 88700 | 0.0 | - | | 7.0858 | 88750 | 0.0 | - | | 7.0898 | 88800 | 0.0 | - | | 7.0938 | 88850 | 0.0 | - | | 7.0978 | 88900 | 0.0 | - | | 7.1018 | 88950 | 0.0 | - | | 7.1058 | 89000 | 0.0 | - | | 7.1098 | 89050 | 0.0 | - | | 7.1138 | 89100 | 0.0 | - | | 7.1178 | 89150 | 0.0 | - | | 7.1218 | 89200 | 0.0001 | - | | 7.1257 | 89250 | 0.0024 | - | | 7.1297 | 89300 | 0.0 | - | | 7.1337 | 89350 | 0.0001 | - | | 7.1377 | 89400 | 0.0 | - | | 7.1417 | 89450 | 0.0 | - | | 7.1457 | 89500 | 0.0001 | - | | 7.1497 | 89550 | 0.0 | - | | 7.1537 | 89600 | 0.0 | - | | 7.1577 | 89650 | 0.0 | - | | 7.1617 | 89700 | 0.0001 | - | | 7.1657 | 89750 | 0.0001 | - | | 7.1697 | 89800 | 0.0 | - | | 7.1737 | 89850 | 0.0 | - | | 7.1776 | 89900 | 0.0135 | - | | 7.1816 | 89950 | 0.0001 | - | | 7.1856 | 90000 | 0.0043 | - | | 7.1896 | 90050 | 0.0072 | - | | 7.1936 | 90100 | 0.0 | - | | 7.1976 | 90150 | 0.0 | - | | 7.2016 | 90200 | 0.0 | - | | 7.2056 | 90250 | 0.0247 | - | | 7.2096 | 90300 | 0.0 | - | | 7.2136 | 90350 | 0.0 | - | | 7.2176 | 90400 | 0.0 | - | | 7.2216 | 90450 | 0.0 | - | | 7.2255 | 90500 | 0.0001 | - | | 7.2295 | 90550 | 0.0 | - | | 7.2335 | 90600 | 0.0363 | - | | 7.2375 | 90650 | 0.0 | - | | 7.2415 | 90700 | 0.0 | - | | 7.2455 | 90750 | 0.0 | - | | 7.2495 | 90800 | 0.0 | - | | 7.2535 | 90850 | 0.0 | - | | 7.2575 | 90900 | 0.0 | - | | 7.2615 | 90950 | 0.0138 | - | | 7.2655 | 91000 | 0.0 | - | | 7.2695 | 91050 | 0.0 | - | | 7.2735 | 91100 | 0.0 | - | | 7.2774 | 91150 | 0.0 | - | | 7.2814 | 91200 | 0.0252 | - | | 7.2854 | 91250 | 0.0 | - | | 7.2894 | 91300 | 0.0 | - | | 7.2934 | 91350 | 0.0 | - | | 7.2974 | 91400 | 0.0105 | - | | 7.3014 | 91450 | 0.0244 | - | | 7.3054 | 91500 | 0.0 | - | | 7.3094 | 91550 | 0.0 | - | | 7.3134 | 91600 | 0.0247 | - | | 7.3174 | 91650 | 0.0 | - | | 7.3214 | 91700 | 0.0001 | - | | 7.3253 | 91750 | 0.0 | - | | 7.3293 | 91800 | 0.0 | - | | 7.3333 | 91850 | 0.0189 | - | | 7.3373 | 91900 | 0.0 | - | | 7.3413 | 91950 | 0.0246 | - | | 7.3453 | 92000 | 0.0 | - | | 7.3493 | 92050 | 0.0 | - | | 7.3533 | 92100 | 0.0 | - | | 7.3573 | 92150 | 0.0 | - | | 7.3613 | 92200 | 0.0 | - | | 7.3653 | 92250 | 0.0247 | - | | 7.3693 | 92300 | 0.0122 | - | | 7.3733 | 92350 | 0.0 | - | | 7.3772 | 92400 | 0.0 | - | | 7.3812 | 92450 | 0.0022 | - | | 7.3852 | 92500 | 0.0 | - | | 7.3892 | 92550 | 0.0001 | - | | 7.3932 | 92600 | 0.0 | - | | 7.3972 | 92650 | 0.0 | - | | 7.4012 | 92700 | 0.0 | - | | 7.4052 | 92750 | 0.0032 | - | | 7.4092 | 92800 | 0.0001 | - | | 7.4132 | 92850 | 0.0037 | - | | 7.4172 | 92900 | 0.0001 | - | | 7.4212 | 92950 | 0.0028 | - | | 7.4251 | 93000 | 0.0001 | - | | 7.4291 | 93050 | 0.0 | - | | 7.4331 | 93100 | 0.0039 | - | | 7.4371 | 93150 | 0.0036 | - | | 7.4411 | 93200 | 0.0 | - | | 7.4451 | 93250 | 0.0 | - | | 7.4491 | 93300 | 0.0 | - | | 7.4531 | 93350 | 0.0 | - | | 7.4571 | 93400 | 0.0001 | - | | 7.4611 | 93450 | 0.0091 | - | | 7.4651 | 93500 | 0.0 | - | | 7.4691 | 93550 | 0.0 | - | | 7.4731 | 93600 | 0.0275 | - | | 7.4770 | 93650 | 0.0 | - | | 7.4810 | 93700 | 0.0 | - | | 7.4850 | 93750 | 0.0035 | - | | 7.4890 | 93800 | 0.0246 | - | | 7.4930 | 93850 | 0.0025 | - | | 7.4970 | 93900 | 0.0 | - | | 7.5010 | 93950 | 0.0 | - | | 7.5050 | 94000 | 0.0 | - | | 7.5090 | 94050 | 0.0 | - | | 7.5130 | 94100 | 0.0 | - | | 7.5170 | 94150 | 0.0 | - | | 7.5210 | 94200 | 0.0023 | - | | 7.5250 | 94250 | 0.0 | - | | 7.5289 | 94300 | 0.0 | - | | 7.5329 | 94350 | 0.0 | - | | 7.5369 | 94400 | 0.0 | - | | 7.5409 | 94450 | 0.0027 | - | | 7.5449 | 94500 | 0.0028 | - | | 7.5489 | 94550 | 0.0247 | - | | 7.5529 | 94600 | 0.0 | - | | 7.5569 | 94650 | 0.0 | - | | 7.5609 | 94700 | 0.0 | - | | 7.5649 | 94750 | 0.0036 | - | | 7.5689 | 94800 | 0.0 | - | | 7.5729 | 94850 | 0.0 | - | | 7.5768 | 94900 | 0.0 | - | | 7.5808 | 94950 | 0.0 | - | | 7.5848 | 95000 | 0.0 | - | | 7.5888 | 95050 | 0.0 | - | | 7.5928 | 95100 | 0.0001 | - | | 7.5968 | 95150 | 0.0 | - | | 7.6008 | 95200 | 0.0 | - | | 7.6048 | 95250 | 0.0001 | - | | 7.6088 | 95300 | 0.0 | - | | 7.6128 | 95350 | 0.0 | - | | 7.6168 | 95400 | 0.0028 | - | | 7.6208 | 95450 | 0.0119 | - | | 7.6248 | 95500 | 0.0028 | - | | 7.6287 | 95550 | 0.0 | - | | 7.6327 | 95600 | 0.0001 | - | | 7.6367 | 95650 | 0.0 | - | | 7.6407 | 95700 | 0.0318 | - | | 7.6447 | 95750 | 0.0037 | - | | 7.6487 | 95800 | 0.0035 | - | | 7.6527 | 95850 | 0.0089 | - | | 7.6567 | 95900 | 0.0 | - | | 7.6607 | 95950 | 0.006 | - | | 7.6647 | 96000 | 0.0 | - | | 7.6687 | 96050 | 0.0 | - | | 7.6727 | 96100 | 0.0 | - | | 7.6766 | 96150 | 0.0 | - | | 7.6806 | 96200 | 0.0 | - | | 7.6846 | 96250 | 0.0 | - | | 7.6886 | 96300 | 0.0105 | - | | 7.6926 | 96350 | 0.0 | - | | 7.6966 | 96400 | 0.0 | - | | 7.7006 | 96450 | 0.0 | - | | 7.7046 | 96500 | 0.0 | - | | 7.7086 | 96550 | 0.0 | - | | 7.7126 | 96600 | 0.0 | - | | 7.7166 | 96650 | 0.0024 | - | | 7.7206 | 96700 | 0.0001 | - | | 7.7246 | 96750 | 0.0 | - | | 7.7285 | 96800 | 0.0123 | - | | 7.7325 | 96850 | 0.0 | - | | 7.7365 | 96900 | 0.0031 | - | | 7.7405 | 96950 | 0.0 | - | | 7.7445 | 97000 | 0.0025 | - | | 7.7485 | 97050 | 0.0 | - | | 7.7525 | 97100 | 0.0 | - | | 7.7565 | 97150 | 0.0 | - | | 7.7605 | 97200 | 0.0022 | - | | 7.7645 | 97250 | 0.0251 | - | | 7.7685 | 97300 | 0.002 | - | | 7.7725 | 97350 | 0.0118 | - | | 7.7764 | 97400 | 0.0019 | - | | 7.7804 | 97450 | 0.0001 | - | | 7.7844 | 97500 | 0.0123 | - | | 7.7884 | 97550 | 0.0 | - | | 7.7924 | 97600 | 0.0 | - | | 7.7964 | 97650 | 0.0 | - | | 7.8004 | 97700 | 0.0097 | - | | 7.8044 | 97750 | 0.0 | - | | 7.8084 | 97800 | 0.0 | - | | 7.8124 | 97850 | 0.0 | - | | 7.8164 | 97900 | 0.0001 | - | | 7.8204 | 97950 | 0.0001 | - | | 7.8244 | 98000 | 0.0251 | - | | 7.8283 | 98050 | 0.0 | - | | 7.8323 | 98100 | 0.009 | - | | 7.8363 | 98150 | 0.0246 | - | | 7.8403 | 98200 | 0.0 | - | | 7.8443 | 98250 | 0.0 | - | | 7.8483 | 98300 | 0.0 | - | | 7.8523 | 98350 | 0.0 | - | | 7.8563 | 98400 | 0.0 | - | | 7.8603 | 98450 | 0.0001 | - | | 7.8643 | 98500 | 0.0 | - | | 7.8683 | 98550 | 0.0018 | - | | 7.8723 | 98600 | 0.0 | - | | 7.8762 | 98650 | 0.0001 | - | | 7.8802 | 98700 | 0.0 | - | | 7.8842 | 98750 | 0.0 | - | | 7.8882 | 98800 | 0.0024 | - | | 7.8922 | 98850 | 0.0 | - | | 7.8962 | 98900 | 0.0 | - | | 7.9002 | 98950 | 0.0 | - | | 7.9042 | 99000 | 0.0027 | - | | 7.9082 | 99050 | 0.0027 | - | | 7.9122 | 99100 | 0.0 | - | | 7.9162 | 99150 | 0.0 | - | | 7.9202 | 99200 | 0.0 | - | | 7.9242 | 99250 | 0.0 | - | | 7.9281 | 99300 | 0.0138 | - | | 7.9321 | 99350 | 0.0 | - | | 7.9361 | 99400 | 0.0 | - | | 7.9401 | 99450 | 0.0 | - | | 7.9441 | 99500 | 0.0001 | - | | 7.9481 | 99550 | 0.0019 | - | | 7.9521 | 99600 | 0.0 | - | | 7.9561 | 99650 | 0.0 | - | | 7.9601 | 99700 | 0.0001 | - | | 7.9641 | 99750 | 0.0 | - | | 7.9681 | 99800 | 0.0 | - | | 7.9721 | 99850 | 0.0016 | - | | 7.9760 | 99900 | 0.0001 | - | | 7.9800 | 99950 | 0.0265 | - | | 7.9840 | 100000 | 0.0 | - | | 7.9880 | 100050 | 0.0 | - | | 7.9920 | 100100 | 0.0 | - | | 7.9960 | 100150 | 0.0127 | - | | 8.0 | 100200 | 0.0001 | 0.0405 | | 8.0040 | 100250 | 0.0247 | - | | 8.0080 | 100300 | 0.025 | - | | 8.0120 | 100350 | 0.0 | - | | 8.0160 | 100400 | 0.0072 | - | | 8.0200 | 100450 | 0.0 | - | | 8.0240 | 100500 | 0.0 | - | | 8.0279 | 100550 | 0.0 | - | | 8.0319 | 100600 | 0.0 | - | | 8.0359 | 100650 | 0.0251 | - | | 8.0399 | 100700 | 0.0 | - | | 8.0439 | 100750 | 0.0 | - | | 8.0479 | 100800 | 0.0042 | - | | 8.0519 | 100850 | 0.0036 | - | | 8.0559 | 100900 | 0.0 | - | | 8.0599 | 100950 | 0.0 | - | | 8.0639 | 101000 | 0.0 | - | | 8.0679 | 101050 | 0.0001 | - | | 8.0719 | 101100 | 0.0 | - | | 8.0758 | 101150 | 0.0 | - | | 8.0798 | 101200 | 0.0116 | - | | 8.0838 | 101250 | 0.0027 | - | | 8.0878 | 101300 | 0.0 | - | | 8.0918 | 101350 | 0.0 | - | | 8.0958 | 101400 | 0.0032 | - | | 8.0998 | 101450 | 0.0 | - | | 8.1038 | 101500 | 0.0 | - | | 8.1078 | 101550 | 0.0 | - | | 8.1118 | 101600 | 0.0097 | - | | 8.1158 | 101650 | 0.0 | - | | 8.1198 | 101700 | 0.0105 | - | | 8.1238 | 101750 | 0.0 | - | | 8.1277 | 101800 | 0.0026 | - | | 8.1317 | 101850 | 0.0 | - | | 8.1357 | 101900 | 0.0 | - | | 8.1397 | 101950 | 0.0 | - | | 8.1437 | 102000 | 0.0 | - | | 8.1477 | 102050 | 0.0 | - | | 8.1517 | 102100 | 0.0 | - | | 8.1557 | 102150 | 0.0 | - | | 8.1597 | 102200 | 0.0 | - | | 8.1637 | 102250 | 0.0 | - | | 8.1677 | 102300 | 0.0 | - | | 8.1717 | 102350 | 0.0 | - | | 8.1756 | 102400 | 0.0028 | - | | 8.1796 | 102450 | 0.0 | - | | 8.1836 | 102500 | 0.0037 | - | | 8.1876 | 102550 | 0.0065 | - | | 8.1916 | 102600 | 0.0 | - | | 8.1956 | 102650 | 0.0001 | - | | 8.1996 | 102700 | 0.0251 | - | | 8.2036 | 102750 | 0.0 | - | | 8.2076 | 102800 | 0.0 | - | | 8.2116 | 102850 | 0.0 | - | | 8.2156 | 102900 | 0.0 | - | | 8.2196 | 102950 | 0.0023 | - | | 8.2236 | 103000 | 0.0023 | - | | 8.2275 | 103050 | 0.0 | - | | 8.2315 | 103100 | 0.0246 | - | | 8.2355 | 103150 | 0.0 | - | | 8.2395 | 103200 | 0.0 | - | | 8.2435 | 103250 | 0.0 | - | | 8.2475 | 103300 | 0.0 | - | | 8.2515 | 103350 | 0.0253 | - | | 8.2555 | 103400 | 0.0 | - | | 8.2595 | 103450 | 0.0148 | - | | 8.2635 | 103500 | 0.0 | - | | 8.2675 | 103550 | 0.0 | - | | 8.2715 | 103600 | 0.0001 | - | | 8.2754 | 103650 | 0.0271 | - | | 8.2794 | 103700 | 0.0 | - | | 8.2834 | 103750 | 0.0 | - | | 8.2874 | 103800 | 0.0 | - | | 8.2914 | 103850 | 0.0 | - | | 8.2954 | 103900 | 0.0108 | - | | 8.2994 | 103950 | 0.0245 | - | | 8.3034 | 104000 | 0.0 | - | | 8.3074 | 104050 | 0.0248 | - | | 8.3114 | 104100 | 0.0 | - | | 8.3154 | 104150 | 0.0 | - | | 8.3194 | 104200 | 0.0 | - | | 8.3234 | 104250 | 0.0001 | - | | 8.3273 | 104300 | 0.0 | - | | 8.3313 | 104350 | 0.0098 | - | | 8.3353 | 104400 | 0.0 | - | | 8.3393 | 104450 | 0.0247 | - | | 8.3433 | 104500 | 0.0001 | - | | 8.3473 | 104550 | 0.0 | - | | 8.3513 | 104600 | 0.0 | - | | 8.3553 | 104650 | 0.0001 | - | | 8.3593 | 104700 | 0.0 | - | | 8.3633 | 104750 | 0.0247 | - | | 8.3673 | 104800 | 0.0 | - | | 8.3713 | 104850 | 0.0001 | - | | 8.3752 | 104900 | 0.0001 | - | | 8.3792 | 104950 | 0.0 | - | | 8.3832 | 105000 | 0.0 | - | | 8.3872 | 105050 | 0.0 | - | | 8.3912 | 105100 | 0.0 | - | | 8.3952 | 105150 | 0.0 | - | | 8.3992 | 105200 | 0.0001 | - | | 8.4032 | 105250 | 0.0 | - | | 8.4072 | 105300 | 0.0001 | - | | 8.4112 | 105350 | 0.0001 | - | | 8.4152 | 105400 | 0.0 | - | | 8.4192 | 105450 | 0.0 | - | | 8.4232 | 105500 | 0.025 | - | | 8.4271 | 105550 | 0.0 | - | | 8.4311 | 105600 | 0.0 | - | | 8.4351 | 105650 | 0.0 | - | | 8.4391 | 105700 | 0.0 | - | | 8.4431 | 105750 | 0.0001 | - | | 8.4471 | 105800 | 0.0 | - | | 8.4511 | 105850 | 0.0 | - | | 8.4551 | 105900 | 0.0001 | - | | 8.4591 | 105950 | 0.0246 | - | | 8.4631 | 106000 | 0.0 | - | | 8.4671 | 106050 | 0.0 | - | | 8.4711 | 106100 | 0.0246 | - | | 8.4750 | 106150 | 0.0001 | - | | 8.4790 | 106200 | 0.0 | - | | 8.4830 | 106250 | 0.0 | - | | 8.4870 | 106300 | 0.0246 | - | | 8.4910 | 106350 | 0.0 | - | | 8.4950 | 106400 | 0.0 | - | | 8.4990 | 106450 | 0.0001 | - | | 8.5030 | 106500 | 0.0001 | - | | 8.5070 | 106550 | 0.0 | - | | 8.5110 | 106600 | 0.0 | - | | 8.5150 | 106650 | 0.0001 | - | | 8.5190 | 106700 | 0.0 | - | | 8.5230 | 106750 | 0.0 | - | | 8.5269 | 106800 | 0.0 | - | | 8.5309 | 106850 | 0.0001 | - | | 8.5349 | 106900 | 0.0 | - | | 8.5389 | 106950 | 0.0 | - | | 8.5429 | 107000 | 0.0001 | - | | 8.5469 | 107050 | 0.0 | - | | 8.5509 | 107100 | 0.0 | - | | 8.5549 | 107150 | 0.0 | - | | 8.5589 | 107200 | 0.0 | - | | 8.5629 | 107250 | 0.0001 | - | | 8.5669 | 107300 | 0.0 | - | | 8.5709 | 107350 | 0.0 | - | | 8.5749 | 107400 | 0.0001 | - | | 8.5788 | 107450 | 0.0251 | - | | 8.5828 | 107500 | 0.0 | - | | 8.5868 | 107550 | 0.0 | - | | 8.5908 | 107600 | 0.0001 | - | | 8.5948 | 107650 | 0.0 | - | | 8.5988 | 107700 | 0.0 | - | | 8.6028 | 107750 | 0.0 | - | | 8.6068 | 107800 | 0.0001 | - | | 8.6108 | 107850 | 0.0 | - | | 8.6148 | 107900 | 0.0 | - | | 8.6188 | 107950 | 0.0245 | - | | 8.6228 | 108000 | 0.0 | - | | 8.6267 | 108050 | 0.0 | - | | 8.6307 | 108100 | 0.0249 | - | | 8.6347 | 108150 | 0.0 | - | | 8.6387 | 108200 | 0.0246 | - | | 8.6427 | 108250 | 0.0 | - | | 8.6467 | 108300 | 0.0001 | - | | 8.6507 | 108350 | 0.0001 | - | | 8.6547 | 108400 | 0.0001 | - | | 8.6587 | 108450 | 0.0 | - | | 8.6627 | 108500 | 0.0 | - | | 8.6667 | 108550 | 0.0 | - | | 8.6707 | 108600 | 0.0 | - | | 8.6747 | 108650 | 0.0 | - | | 8.6786 | 108700 | 0.0 | - | | 8.6826 | 108750 | 0.0 | - | | 8.6866 | 108800 | 0.0001 | - | | 8.6906 | 108850 | 0.0 | - | | 8.6946 | 108900 | 0.0 | - | | 8.6986 | 108950 | 0.0 | - | | 8.7026 | 109000 | 0.0 | - | | 8.7066 | 109050 | 0.0248 | - | | 8.7106 | 109100 | 0.0001 | - | | 8.7146 | 109150 | 0.0 | - | | 8.7186 | 109200 | 0.0 | - | | 8.7226 | 109250 | 0.0 | - | | 8.7265 | 109300 | 0.0246 | - | | 8.7305 | 109350 | 0.0001 | - | | 8.7345 | 109400 | 0.0 | - | | 8.7385 | 109450 | 0.025 | - | | 8.7425 | 109500 | 0.0 | - | | 8.7465 | 109550 | 0.0 | - | | 8.7505 | 109600 | 0.0 | - | | 8.7545 | 109650 | 0.0 | - | | 8.7585 | 109700 | 0.025 | - | | 8.7625 | 109750 | 0.0001 | - | | 8.7665 | 109800 | 0.0001 | - | | 8.7705 | 109850 | 0.0248 | - | | 8.7745 | 109900 | 0.0001 | - | | 8.7784 | 109950 | 0.0 | - | | 8.7824 | 110000 | 0.0 | - | | 8.7864 | 110050 | 0.0 | - | | 8.7904 | 110100 | 0.0 | - | | 8.7944 | 110150 | 0.0 | - | | 8.7984 | 110200 | 0.0001 | - | | 8.8024 | 110250 | 0.0 | - | | 8.8064 | 110300 | 0.0 | - | | 8.8104 | 110350 | 0.0 | - | | 8.8144 | 110400 | 0.0 | - | | 8.8184 | 110450 | 0.0001 | - | | 8.8224 | 110500 | 0.0001 | - | | 8.8263 | 110550 | 0.0 | - | | 8.8303 | 110600 | 0.0001 | - | | 8.8343 | 110650 | 0.0 | - | | 8.8383 | 110700 | 0.0 | - | | 8.8423 | 110750 | 0.0 | - | | 8.8463 | 110800 | 0.0247 | - | | 8.8503 | 110850 | 0.0 | - | | 8.8543 | 110900 | 0.0 | - | | 8.8583 | 110950 | 0.0 | - | | 8.8623 | 111000 | 0.0 | - | | 8.8663 | 111050 | 0.0001 | - | | 8.8703 | 111100 | 0.0 | - | | 8.8743 | 111150 | 0.0001 | - | | 8.8782 | 111200 | 0.0001 | - | | 8.8822 | 111250 | 0.0 | - | | 8.8862 | 111300 | 0.0 | - | | 8.8902 | 111350 | 0.0001 | - | | 8.8942 | 111400 | 0.0 | - | | 8.8982 | 111450 | 0.0 | - | | 8.9022 | 111500 | 0.0 | - | | 8.9062 | 111550 | 0.0 | - | | 8.9102 | 111600 | 0.0 | - | | 8.9142 | 111650 | 0.0 | - | | 8.9182 | 111700 | 0.0 | - | | 8.9222 | 111750 | 0.0 | - | | 8.9261 | 111800 | 0.0247 | - | | 8.9301 | 111850 | 0.0 | - | | 8.9341 | 111900 | 0.0248 | - | | 8.9381 | 111950 | 0.0 | - | | 8.9421 | 112000 | 0.0 | - | | 8.9461 | 112050 | 0.0 | - | | 8.9501 | 112100 | 0.0 | - | | 8.9541 | 112150 | 0.0 | - | | 8.9581 | 112200 | 0.0 | - | | 8.9621 | 112250 | 0.0001 | - | | 8.9661 | 112300 | 0.0 | - | | 8.9701 | 112350 | 0.0001 | - | | 8.9741 | 112400 | 0.0001 | - | | 8.9780 | 112450 | 0.0247 | - | | 8.9820 | 112500 | 0.0496 | - | | 8.9860 | 112550 | 0.0 | - | | 8.9900 | 112600 | 0.0001 | - | | 8.9940 | 112650 | 0.0 | - | | 8.9980 | 112700 | 0.0 | - | | 9.0 | 112725 | - | 0.0579 | | 9.0020 | 112750 | 0.0493 | - | | 9.0060 | 112800 | 0.0 | - | | 9.0100 | 112850 | 0.0001 | - | | 9.0140 | 112900 | 0.0001 | - | | 9.0180 | 112950 | 0.0 | - | | 9.0220 | 113000 | 0.0 | - | | 9.0259 | 113050 | 0.0 | - | | 9.0299 | 113100 | 0.0 | - | | 9.0339 | 113150 | 0.0001 | - | | 9.0379 | 113200 | 0.0 | - | | 9.0419 | 113250 | 0.0 | - | | 9.0459 | 113300 | 0.0 | - | | 9.0499 | 113350 | 0.0 | - | | 9.0539 | 113400 | 0.0 | - | | 9.0579 | 113450 | 0.0 | - | | 9.0619 | 113500 | 0.0 | - | | 9.0659 | 113550 | 0.0246 | - | | 9.0699 | 113600 | 0.0 | - | | 9.0739 | 113650 | 0.0 | - | | 9.0778 | 113700 | 0.0001 | - | | 9.0818 | 113750 | 0.0001 | - | | 9.0858 | 113800 | 0.0 | - | | 9.0898 | 113850 | 0.0001 | - | | 9.0938 | 113900 | 0.0 | - | | 9.0978 | 113950 | 0.0 | - | | 9.1018 | 114000 | 0.0 | - | | 9.1058 | 114050 | 0.0 | - | | 9.1098 | 114100 | 0.0 | - | | 9.1138 | 114150 | 0.0 | - | | 9.1178 | 114200 | 0.0 | - | | 9.1218 | 114250 | 0.0 | - | | 9.1257 | 114300 | 0.0001 | - | | 9.1297 | 114350 | 0.0 | - | | 9.1337 | 114400 | 0.0001 | - | | 9.1377 | 114450 | 0.0 | - | | 9.1417 | 114500 | 0.0 | - | | 9.1457 | 114550 | 0.0001 | - | | 9.1497 | 114600 | 0.0 | - | | 9.1537 | 114650 | 0.0 | - | | 9.1577 | 114700 | 0.0 | - | | 9.1617 | 114750 | 0.0001 | - | | 9.1657 | 114800 | 0.0 | - | | 9.1697 | 114850 | 0.0 | - | | 9.1737 | 114900 | 0.0 | - | | 9.1776 | 114950 | 0.0247 | - | | 9.1816 | 115000 | 0.0001 | - | | 9.1856 | 115050 | 0.0001 | - | | 9.1896 | 115100 | 0.0001 | - | | 9.1936 | 115150 | 0.0 | - | | 9.1976 | 115200 | 0.0 | - | | 9.2016 | 115250 | 0.0 | - | | 9.2056 | 115300 | 0.0247 | - | | 9.2096 | 115350 | 0.0 | - | | 9.2136 | 115400 | 0.0 | - | | 9.2176 | 115450 | 0.0 | - | | 9.2216 | 115500 | 0.0 | - | | 9.2255 | 115550 | 0.0 | - | | 9.2295 | 115600 | 0.0245 | - | | 9.2335 | 115650 | 0.0248 | - | | 9.2375 | 115700 | 0.0 | - | | 9.2415 | 115750 | 0.0001 | - | | 9.2455 | 115800 | 0.0 | - | | 9.2495 | 115850 | 0.0 | - | | 9.2535 | 115900 | 0.0 | - | | 9.2575 | 115950 | 0.0246 | - | | 9.2615 | 116000 | 0.0 | - | | 9.2655 | 116050 | 0.0 | - | | 9.2695 | 116100 | 0.0 | - | | 9.2735 | 116150 | 0.0 | - | | 9.2774 | 116200 | 0.0 | - | | 9.2814 | 116250 | 0.0246 | - | | 9.2854 | 116300 | 0.0 | - | | 9.2894 | 116350 | 0.0 | - | | 9.2934 | 116400 | 0.0247 | - | | 9.2974 | 116450 | 0.0245 | - | | 9.3014 | 116500 | 0.0 | - | | 9.3054 | 116550 | 0.0 | - | | 9.3094 | 116600 | 0.0 | - | | 9.3134 | 116650 | 0.0244 | - | | 9.3174 | 116700 | 0.0001 | - | | 9.3214 | 116750 | 0.0 | - | | 9.3253 | 116800 | 0.0001 | - | | 9.3293 | 116850 | 0.0232 | - | | 9.3333 | 116900 | 0.0192 | - | | 9.3373 | 116950 | 0.0246 | - | | 9.3413 | 117000 | 0.0 | - | | 9.3453 | 117050 | 0.0005 | - | | 9.3493 | 117100 | 0.0007 | - | | 9.3533 | 117150 | 0.0002 | - | | 9.3573 | 117200 | 0.0001 | - | | 9.3613 | 117250 | 0.0244 | - | | 9.3653 | 117300 | 0.0002 | - | | 9.3693 | 117350 | 0.0188 | - | | 9.3733 | 117400 | 0.0001 | - | | 9.3772 | 117450 | 0.0003 | - | | 9.3812 | 117500 | 0.001 | - | | 9.3852 | 117550 | 0.0 | - | | 9.3892 | 117600 | 0.0001 | - | | 9.3932 | 117650 | 0.0001 | - | | 9.3972 | 117700 | 0.0003 | - | | 9.4012 | 117750 | 0.0029 | - | | 9.4052 | 117800 | 0.0003 | - | | 9.4092 | 117850 | 0.0026 | - | | 9.4132 | 117900 | 0.0019 | - | | 9.4172 | 117950 | 0.0002 | - | | 9.4212 | 118000 | 0.0007 | - | | 9.4251 | 118050 | 0.0 | - | | 9.4291 | 118100 | 0.0019 | - | | 9.4331 | 118150 | 0.004 | - | | 9.4371 | 118200 | 0.001 | - | | 9.4411 | 118250 | 0.0016 | - | | 9.4451 | 118300 | 0.0028 | - | | 9.4491 | 118350 | 0.0001 | - | | 9.4531 | 118400 | 0.0 | - | | 9.4571 | 118450 | 0.0105 | - | | 9.4611 | 118500 | 0.0013 | - | | 9.4651 | 118550 | 0.0 | - | | 9.4691 | 118600 | 0.0221 | - | | 9.4731 | 118650 | 0.0001 | - | | 9.4770 | 118700 | 0.0008 | - | | 9.4810 | 118750 | 0.0001 | - | | 9.4850 | 118800 | 0.0214 | - | | 9.4890 | 118850 | 0.0001 | - | | 9.4930 | 118900 | 0.0018 | - | | 9.4970 | 118950 | 0.0011 | - | | 9.5010 | 119000 | 0.0001 | - | | 9.5050 | 119050 | 0.0009 | - | | 9.5090 | 119100 | 0.0004 | - | | 9.5130 | 119150 | 0.0004 | - | | 9.5170 | 119200 | 0.0034 | - | | 9.5210 | 119250 | 0.0016 | - | | 9.5250 | 119300 | 0.0006 | - | | 9.5289 | 119350 | 0.0 | - | | 9.5329 | 119400 | 0.0001 | - | | 9.5369 | 119450 | 0.0041 | - | | 9.5409 | 119500 | 0.0029 | - | | 9.5449 | 119550 | 0.0001 | - | | 9.5489 | 119600 | 0.0189 | - | | 9.5529 | 119650 | 0.0001 | - | | 9.5569 | 119700 | 0.0 | - | | 9.5609 | 119750 | 0.0 | - | | 9.5649 | 119800 | 0.0042 | - | | 9.5689 | 119850 | 0.0009 | - | | 9.5729 | 119900 | 0.0 | - | | 9.5768 | 119950 | 0.0 | - | | 9.5808 | 120000 | 0.0 | - | | 9.5848 | 120050 | 0.0007 | - | | 9.5888 | 120100 | 0.0009 | - | | 9.5928 | 120150 | 0.0006 | - | | 9.5968 | 120200 | 0.0001 | - | | 9.6008 | 120250 | 0.0001 | - | | 9.6048 | 120300 | 0.0007 | - | | 9.6088 | 120350 | 0.0001 | - | | 9.6128 | 120400 | 0.0025 | - | | 9.6168 | 120450 | 0.0136 | - | | 9.6208 | 120500 | 0.0011 | - | | 9.6248 | 120550 | 0.002 | - | | 9.6287 | 120600 | 0.001 | - | | 9.6327 | 120650 | 0.0008 | - | | 9.6367 | 120700 | 0.0298 | - | | 9.6407 | 120750 | 0.009 | - | | 9.6447 | 120800 | 0.0042 | - | | 9.6487 | 120850 | 0.0011 | - | | 9.6527 | 120900 | 0.0089 | - | | 9.6567 | 120950 | 0.0054 | - | | 9.6607 | 121000 | 0.0019 | - | | 9.6647 | 121050 | 0.0006 | - | | 9.6687 | 121100 | 0.0 | - | | 9.6727 | 121150 | 0.0 | - | | 9.6766 | 121200 | 0.0001 | - | | 9.6806 | 121250 | 0.0001 | - | | 9.6846 | 121300 | 0.0 | - | | 9.6886 | 121350 | 0.0128 | - | | 9.6926 | 121400 | 0.0 | - | | 9.6966 | 121450 | 0.0001 | - | | 9.7006 | 121500 | 0.0 | - | | 9.7046 | 121550 | 0.0007 | - | | 9.7086 | 121600 | 0.0001 | - | | 9.7126 | 121650 | 0.0001 | - | | 9.7166 | 121700 | 0.0021 | - | | 9.7206 | 121750 | 0.0001 | - | | 9.7246 | 121800 | 0.0207 | - | | 9.7285 | 121850 | 0.0001 | - | | 9.7325 | 121900 | 0.0032 | - | | 9.7365 | 121950 | 0.0008 | - | | 9.7405 | 122000 | 0.0038 | - | | 9.7445 | 122050 | 0.0005 | - | | 9.7485 | 122100 | 0.0002 | - | | 9.7525 | 122150 | 0.0005 | - | | 9.7565 | 122200 | 0.0043 | - | | 9.7605 | 122250 | 0.0003 | - | | 9.7645 | 122300 | 0.021 | - | | 9.7685 | 122350 | 0.0128 | - | | 9.7725 | 122400 | 0.0032 | - | | 9.7764 | 122450 | 0.0001 | - | | 9.7804 | 122500 | 0.0 | - | | 9.7844 | 122550 | 0.0119 | - | | 9.7884 | 122600 | 0.0 | - | | 9.7924 | 122650 | 0.0 | - | | 9.7964 | 122700 | 0.0 | - | | 9.8004 | 122750 | 0.0092 | - | | 9.8044 | 122800 | 0.0001 | - | | 9.8084 | 122850 | 0.0008 | - | | 9.8124 | 122900 | 0.0009 | - | | 9.8164 | 122950 | 0.0021 | - | | 9.8204 | 123000 | 0.0 | - | | 9.8244 | 123050 | 0.0174 | - | | 9.8283 | 123100 | 0.0001 | - | | 9.8323 | 123150 | 0.0095 | - | | 9.8363 | 123200 | 0.0183 | - | | 9.8403 | 123250 | 0.0001 | - | | 9.8443 | 123300 | 0.0002 | - | | 9.8483 | 123350 | 0.0 | - | | 9.8523 | 123400 | 0.0004 | - | | 9.8563 | 123450 | 0.0 | - | | 9.8603 | 123500 | 0.0001 | - | | 9.8643 | 123550 | 0.0028 | - | | 9.8683 | 123600 | 0.0 | - | | 9.8723 | 123650 | 0.0001 | - | | 9.8762 | 123700 | 0.0 | - | | 9.8802 | 123750 | 0.0004 | - | | 9.8842 | 123800 | 0.0035 | - | | 9.8882 | 123850 | 0.0001 | - | | 9.8922 | 123900 | 0.0 | - | | 9.8962 | 123950 | 0.0001 | - | | 9.9002 | 124000 | 0.0038 | - | | 9.9042 | 124050 | 0.0028 | - | | 9.9082 | 124100 | 0.0002 | - | | 9.9122 | 124150 | 0.0001 | - | | 9.9162 | 124200 | 0.0 | - | | 9.9202 | 124250 | 0.0005 | - | | 9.9242 | 124300 | 0.016 | - | | 9.9281 | 124350 | 0.0001 | - | | 9.9321 | 124400 | 0.0001 | - | | 9.9361 | 124450 | 0.0 | - | | 9.9401 | 124500 | 0.0009 | - | | 9.9441 | 124550 | 0.0 | - | | 9.9481 | 124600 | 0.0015 | - | | 9.9521 | 124650 | 0.0 | - | | 9.9561 | 124700 | 0.0 | - | | 9.9601 | 124750 | 0.0002 | - | | 9.9641 | 124800 | 0.0 | - | | 9.9681 | 124850 | 0.0028 | - | | 9.9721 | 124900 | 0.0004 | - | | 9.9760 | 124950 | 0.014 | - | | 9.9800 | 125000 | 0.0138 | - | | 9.9840 | 125050 | 0.0008 | - | | 9.9880 | 125100 | 0.0001 | - | | 9.9920 | 125150 | 0.0 | - | | 9.9960 | 125200 | 0.0136 | - | | 10.0 | 125250 | 0.0182 | 0.0777 | * The bold row denotes the saved checkpoint. ### Framework Versions - Python: 3.10.14 - SetFit: 1.0.3 - Sentence Transformers: 3.0.1 - Transformers: 4.39.0 - PyTorch: 2.4.0 - Datasets: 2.21.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "sentence-transformers/paraphrase-mpnet-base-v2", "library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "ballades (formes fixes)"}, {"text": "prison fiction"}, {"text": "gregorian chants"}, {"text": "argentina--buenos aires, port of"}, {"text": "passepieds (music)"}], "inference": true, "model-index": [{"name": "SetFit with sentence-transformers/paraphrase-mpnet-base-v2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "Unknown", "type": "unknown", "split": "test"}, "metrics": [{"type": "accuracy", "value": 0.9555555555555556, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,470
blasisd/marian-finetuned-kde4-en-to-fr
blasisd
translation
[ "transformers", "safetensors", "marian", "text2text-generation", "translation", "generated_from_trainer", "dataset:kde4", "base_model:Helsinki-NLP/opus-mt-en-fr", "base_model:finetune:Helsinki-NLP/opus-mt-en-fr", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2025-02-14T11:25:27Z
2025-02-14T13:26:51+00:00
5
0
--- base_model: Helsinki-NLP/opus-mt-en-fr datasets: - kde4 library_name: transformers license: apache-2.0 metrics: - bleu tags: - translation - generated_from_trainer model-index: - name: marian-finetuned-kde4-en-to-fr results: - task: type: text2text-generation name: Sequence-to-sequence Language Modeling dataset: name: kde4 type: kde4 config: en-fr split: train args: en-fr metrics: - type: bleu value: 52.90204973205105 name: Bleu --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # marian-finetuned-kde4-en-to-fr This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset. It achieves the following results on the evaluation set: - Loss: 0.8554 - Model Preparation Time: 0.009 - Bleu: 52.9020 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.48.2 - Pytorch 2.5.1+cu124 - Datasets 3.3.0 - Tokenizers 0.21.0
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # marian-finetuned-kde4-en-to-fr This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset. It achieves the following results on the evaluation set: - Loss: 0.8554 - Model Preparation Time: 0.009 - Bleu: 52.9020 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.48.2 - Pytorch 2.5.1+cu124 - Datasets 3.3.0 - Tokenizers 0.21.0
{"base_model": "Helsinki-NLP/opus-mt-en-fr", "datasets": ["kde4"], "library_name": "transformers", "license": "apache-2.0", "metrics": ["bleu"], "tags": ["translation", "generated_from_trainer"], "model-index": [{"name": "marian-finetuned-kde4-en-to-fr", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "kde4", "type": "kde4", "config": "en-fr", "split": "train", "args": "en-fr"}, "metrics": [{"type": "bleu", "value": 52.90204973205105, "name": "Bleu"}]}]}]}
task
[ "TRANSLATION" ]
41,471
RichardErkhov/riotu-lab_-_ArabianGPT-08B-V2-4bits
RichardErkhov
null
[ "safetensors", "gpt2", "4-bit", "bitsandbytes", "region:us" ]
2025-01-27T21:32:17Z
2025-01-27T21:32:37+00:00
5
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) ArabianGPT-08B-V2 - bnb 4bits - Model creator: https://huggingface.co/riotu-lab/ - Original model: https://huggingface.co/riotu-lab/ArabianGPT-08B-V2/ Original model description: --- license: apache-2.0 language: - ar tags: - ArabianGPT widget: - text: "أعلنت وزارة الحج في المملكة العربية السعودية" example_title: "مثال ١" - text: "يبدو اليوم جميلا، سأقوم بتحضير" example_title: "مثال ٢" - text: "إن التقنيات الحديثة" example_title: "مثال ٣" --- # ArabianGPT Model Overview ## Disclaimer for the Use of Large Language Models (LLMs) for Text Generation <p style="color: red;">We disclaim all responsibility for any harm, inaccuracies, or inappropriate content generated by ArabianGPT-0.8B, and users engage with and apply the model's outputs at their own risk.</p> > **Important Note:** Currently, we offer a raw pre-trained model. Our team is actively working on releasing instruction-based LLMs that are fine-tuned and augmented with LRHF. The first set of pre-trained models has been made available for community exploration. While we do have models fine-tuned for specific tasks such as summarization and sentiment analysis, they are still in the development phase. ## How you can use this Pre-Trained? You are invited to utilize this pre-trained, native Arabic language model as an experimental tool to assess its capabilities, aid in its fine-tuning, and evaluate its performance across a variety of downstream tasks. We encourage you to review our technical report for a comprehensive understanding of the model's performance metrics and the specific downstream tasks it has been tested on. This will provide valuable insights into its applicability and effectiveness in diverse applications. ## Introduction ArabianGPT-0.8B, part of the ArabianLLM initiatives, is a specialized GPT model optimized for the Arabic language. Developed at Prince Sultan University's Robotics and Internet of Things Lab, this model is a leap forward in natural language modeling and generation for Arabic, tackling the language's unique challenges. ## Key Features - **Architecture**: GPT-2 - **Model Size**: 0.8 billion parameters - **Layers**: 36 - **Model Attention Layers (MAL)**: 20 - **Context Window Size**: 1024 tokens ## Training - **Dataset**: Scraped texts contains scientific articles, and general texts - **Data Size**: 117 GB - **Tokenizer**: Aranizer 64K - **Tokens**: Over 14 billion - **Hardware**: 5 NDIVIA A100 GPUs - **Performance**: loss of 3.6 ## Role in ArabianLLM Initiatives ArabianGPT-0.8B is crucial for advancing Arabic language processing, addressing challenges unique to Arabic morphology and dialects. ## Usage Suitable for Arabic text generation tasks. Example usage with Transformers Pipeline: ```python from transformers import pipeline pipe = pipeline("text-generation", model="riotu-lab/ArabianGPT-08B", max_new_tokens=1024) text = '' pipe(text) ``` ## Limitations and Ethical Considerations - The model may have context understanding or text generation limitations in certain scenarios. - Emphasis on ethical use to prevent misinformation or harmful content propagation. ## Acknowledgments Special thanks to Prince Sultan University, particularly the Robotics and Internet of Things Lab. ## Contact Information For inquiries: [[email protected]](mailto:[email protected]). ## Disclaimer for the Use of Large Language Models (LLMs) for Text Generation <p style="color: red;">We disclaim all responsibility for any harm, inaccuracies, or inappropriate content generated by ArabianGPT-0.3B, and users engage with and apply the model's outputs at their own risk.</p>
null
Non_BioNLP
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) ArabianGPT-08B-V2 - bnb 4bits - Model creator: https://huggingface.co/riotu-lab/ - Original model: https://huggingface.co/riotu-lab/ArabianGPT-08B-V2/ Original model description: --- license: apache-2.0 language: - ar tags: - ArabianGPT widget: - text: "أعلنت وزارة الحج في المملكة العربية السعودية" example_title: "مثال ١" - text: "يبدو اليوم جميلا، سأقوم بتحضير" example_title: "مثال ٢" - text: "إن التقنيات الحديثة" example_title: "مثال ٣" --- # ArabianGPT Model Overview ## Disclaimer for the Use of Large Language Models (LLMs) for Text Generation <p style="color: red;">We disclaim all responsibility for any harm, inaccuracies, or inappropriate content generated by ArabianGPT-0.8B, and users engage with and apply the model's outputs at their own risk.</p> > **Important Note:** Currently, we offer a raw pre-trained model. Our team is actively working on releasing instruction-based LLMs that are fine-tuned and augmented with LRHF. The first set of pre-trained models has been made available for community exploration. While we do have models fine-tuned for specific tasks such as summarization and sentiment analysis, they are still in the development phase. ## How you can use this Pre-Trained? You are invited to utilize this pre-trained, native Arabic language model as an experimental tool to assess its capabilities, aid in its fine-tuning, and evaluate its performance across a variety of downstream tasks. We encourage you to review our technical report for a comprehensive understanding of the model's performance metrics and the specific downstream tasks it has been tested on. This will provide valuable insights into its applicability and effectiveness in diverse applications. ## Introduction ArabianGPT-0.8B, part of the ArabianLLM initiatives, is a specialized GPT model optimized for the Arabic language. Developed at Prince Sultan University's Robotics and Internet of Things Lab, this model is a leap forward in natural language modeling and generation for Arabic, tackling the language's unique challenges. ## Key Features - **Architecture**: GPT-2 - **Model Size**: 0.8 billion parameters - **Layers**: 36 - **Model Attention Layers (MAL)**: 20 - **Context Window Size**: 1024 tokens ## Training - **Dataset**: Scraped texts contains scientific articles, and general texts - **Data Size**: 117 GB - **Tokenizer**: Aranizer 64K - **Tokens**: Over 14 billion - **Hardware**: 5 NDIVIA A100 GPUs - **Performance**: loss of 3.6 ## Role in ArabianLLM Initiatives ArabianGPT-0.8B is crucial for advancing Arabic language processing, addressing challenges unique to Arabic morphology and dialects. ## Usage Suitable for Arabic text generation tasks. Example usage with Transformers Pipeline: ```python from transformers import pipeline pipe = pipeline("text-generation", model="riotu-lab/ArabianGPT-08B", max_new_tokens=1024) text = '' pipe(text) ``` ## Limitations and Ethical Considerations - The model may have context understanding or text generation limitations in certain scenarios. - Emphasis on ethical use to prevent misinformation or harmful content propagation. ## Acknowledgments Special thanks to Prince Sultan University, particularly the Robotics and Internet of Things Lab. ## Contact Information For inquiries: [[email protected]](mailto:[email protected]). ## Disclaimer for the Use of Large Language Models (LLMs) for Text Generation <p style="color: red;">We disclaim all responsibility for any harm, inaccuracies, or inappropriate content generated by ArabianGPT-0.3B, and users engage with and apply the model's outputs at their own risk.</p>
{}
task
[ "SUMMARIZATION" ]
41,472
facebook/mms-1b
facebook
null
[ "transformers", "pytorch", "wav2vec2", "pretraining", "mms", "ab", "af", "ak", "am", "ar", "as", "av", "ay", "az", "ba", "bm", "be", "bn", "bi", "bo", "sh", "br", "bg", "ca", "cs", "ce", "cv", "ku", "cy", "da", "de", "dv", "dz", "el", "en", "eo", "et", "eu", "ee", "fo", "fa", "fj", "fi", "fr", "fy", "ff", "ga", "gl", "gn", "gu", "zh", "ht", "ha", "he", "hi", "hu", "hy", "ig", "ia", "ms", "is", "it", "jv", "ja", "kn", "ka", "kk", "kr", "km", "ki", "rw", "ky", "ko", "kv", "lo", "la", "lv", "ln", "lt", "lb", "lg", "mh", "ml", "mr", "mk", "mg", "mt", "mn", "mi", "my", "nl", "no", "ne", "ny", "oc", "om", "or", "os", "pa", "pl", "pt", "ps", "qu", "ro", "rn", "ru", "sg", "sk", "sl", "sm", "sn", "sd", "so", "es", "sq", "su", "sv", "sw", "ta", "tt", "te", "tg", "tl", "th", "ti", "ts", "tr", "uk", "vi", "wo", "xh", "yo", "zu", "za", "dataset:google/fleurs", "arxiv:2305.13516", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
2023-05-22T19:39:11Z
2023-06-05T10:23:40+00:00
7,123
45
--- datasets: - google/fleurs language: - ab - af - ak - am - ar - as - av - ay - az - ba - bm - be - bn - bi - bo - sh - br - bg - ca - cs - ce - cv - ku - cy - da - de - dv - dz - el - en - eo - et - eu - ee - fo - fa - fj - fi - fr - fy - ff - ga - gl - gn - gu - zh - ht - ha - he - hi - sh - hu - hy - ig - ia - ms - is - it - jv - ja - kn - ka - kk - kr - km - ki - rw - ky - ko - kv - lo - la - lv - ln - lt - lb - lg - mh - ml - mr - ms - mk - mg - mt - mn - mi - my - zh - nl - 'no' - 'no' - ne - ny - oc - om - or - os - pa - pl - pt - ms - ps - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - ro - rn - ru - sg - sk - sl - sm - sn - sd - so - es - sq - su - sv - sw - ta - tt - te - tg - tl - th - ti - ts - tr - uk - ms - vi - wo - xh - ms - yo - ms - zu - za license: cc-by-nc-4.0 metrics: - wer tags: - mms --- # Massively Multilingual Speech (MMS) - 1B Facebook's MMS counting *1 billion* parameters. MMS is Facebook AI's massive multilingual pretrained model for speech ("MMS"). It is pretrained in with [Wav2Vec2's self-supervised training objective](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) on about 500,000 hours of speech data in over 1,400 languages. When using the model make sure that your speech input is sampled at 16kHz. **Note**: This model should be fine-tuned on a downstream task, like Automatic Speech Recognition, Translation, or Classification. Check out the [**How-to-fine section](#how-to-finetune) or [**this blog**](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) for more information about ASR. ## Table Of Content - [How to Finetune](#how-to-finetune) - [Model details](#model-details) - [Additional links](#additional-links) ## How to finetune Coming soon... ## Model details - **Developed by:** Vineel Pratap et al. - **Model type:** Multi-Lingual Automatic Speech Recognition model - **Language(s):** 1000+ languages - **License:** CC-BY-NC 4.0 license - **Num parameters**: 1 billion - **Cite as:** @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ## Additional Links - [Blog post]( ) - [Transformers documentation](https://huggingface.co/docs/transformers/main/en/model_doc/mms). - [Paper](https://arxiv.org/abs/2305.13516) - [GitHub Repository](https://github.com/facebookresearch/fairseq/tree/main/examples/mms#asr) - [Other **MMS** checkpoints](https://huggingface.co/models?other=mms) - MMS ASR fine-tuned checkpoints: - [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) - [facebook/mms-1b-l1107](https://huggingface.co/facebook/mms-1b-l1107) - [facebook/mms-1b-fl102](https://huggingface.co/facebook/mms-1b-fl102) - [Official Space](https://huggingface.co/spaces/facebook/MMS)
null
Non_BioNLP
# Massively Multilingual Speech (MMS) - 1B Facebook's MMS counting *1 billion* parameters. MMS is Facebook AI's massive multilingual pretrained model for speech ("MMS"). It is pretrained in with [Wav2Vec2's self-supervised training objective](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) on about 500,000 hours of speech data in over 1,400 languages. When using the model make sure that your speech input is sampled at 16kHz. **Note**: This model should be fine-tuned on a downstream task, like Automatic Speech Recognition, Translation, or Classification. Check out the [**How-to-fine section](#how-to-finetune) or [**this blog**](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) for more information about ASR. ## Table Of Content - [How to Finetune](#how-to-finetune) - [Model details](#model-details) - [Additional links](#additional-links) ## How to finetune Coming soon... ## Model details - **Developed by:** Vineel Pratap et al. - **Model type:** Multi-Lingual Automatic Speech Recognition model - **Language(s):** 1000+ languages - **License:** CC-BY-NC 4.0 license - **Num parameters**: 1 billion - **Cite as:** @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ## Additional Links - [Blog post]( ) - [Transformers documentation](https://huggingface.co/docs/transformers/main/en/model_doc/mms). - [Paper](https://arxiv.org/abs/2305.13516) - [GitHub Repository](https://github.com/facebookresearch/fairseq/tree/main/examples/mms#asr) - [Other **MMS** checkpoints](https://huggingface.co/models?other=mms) - MMS ASR fine-tuned checkpoints: - [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) - [facebook/mms-1b-l1107](https://huggingface.co/facebook/mms-1b-l1107) - [facebook/mms-1b-fl102](https://huggingface.co/facebook/mms-1b-fl102) - [Official Space](https://huggingface.co/spaces/facebook/MMS)
{"datasets": ["google/fleurs"], "language": ["ab", "af", "ak", "am", "ar", "as", "av", "ay", "az", "ba", "bm", "be", "bn", "bi", "bo", "sh", "br", "bg", "ca", "cs", "ce", "cv", "ku", "cy", "da", "de", "dv", "dz", "el", "en", "eo", "et", "eu", "ee", "fo", "fa", "fj", "fi", "fr", "fy", "ff", "ga", "gl", "gn", "gu", "zh", "ht", "ha", "he", "hi", "sh", "hu", "hy", "ig", "ia", "ms", "is", "it", "jv", "ja", "kn", "ka", "kk", "kr", "km", "ki", "rw", "ky", "ko", "kv", "lo", "la", "lv", "ln", "lt", "lb", "lg", "mh", "ml", "mr", "ms", "mk", "mg", "mt", "mn", "mi", "my", "zh", "nl", "no", "no", "ne", "ny", "oc", "om", "or", "os", "pa", "pl", "pt", "ms", "ps", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "qu", "ro", "rn", "ru", "sg", "sk", "sl", "sm", "sn", "sd", "so", "es", "sq", "su", "sv", "sw", "ta", "tt", "te", "tg", "tl", "th", "ti", "ts", "tr", "uk", "ms", "vi", "wo", "xh", "ms", "yo", "ms", "zu", "za"], "license": "cc-by-nc-4.0", "metrics": ["wer"], "tags": ["mms"]}
task
[ "TRANSLATION" ]
41,473
CamiloVega/Tweet-Classifier
CamiloVega
text-classification
[ "transformers", "safetensors", "bert", "text-classification", "autotrain", "dataset:autotrain-Tweet-Classifier/autotrain-data", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-02-23T15:27:53Z
2024-02-23T15:28:13+00:00
13
0
--- datasets: - autotrain-Tweet-Classifier/autotrain-data tags: - autotrain - text-classification widget: - text: I love AutoTrain --- # Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 1.1234140396118164 f1_macro: 0.17777777777777778 f1_micro: 0.36363636363636365 f1_weighted: 0.19393939393939394 precision_macro: 0.12121212121212122 precision_micro: 0.36363636363636365 precision_weighted: 0.1322314049586777 recall_macro: 0.3333333333333333 recall_micro: 0.36363636363636365 recall_weighted: 0.36363636363636365 accuracy: 0.36363636363636365
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 1.1234140396118164 f1_macro: 0.17777777777777778 f1_micro: 0.36363636363636365 f1_weighted: 0.19393939393939394 precision_macro: 0.12121212121212122 precision_micro: 0.36363636363636365 precision_weighted: 0.1322314049586777 recall_macro: 0.3333333333333333 recall_micro: 0.36363636363636365 recall_weighted: 0.36363636363636365 accuracy: 0.36363636363636365
{"datasets": ["autotrain-Tweet-Classifier/autotrain-data"], "tags": ["autotrain", "text-classification"], "widget": [{"text": "I love AutoTrain"}]}
task
[ "TEXT_CLASSIFICATION" ]
41,474
PedroRuizCode/ModernBERT-base-sst2-ft
PedroRuizCode
null
[ "tensorboard", "safetensors", "modernbert", "dataset:stanfordnlp/sst2", "base_model:answerdotai/ModernBERT-base", "base_model:finetune:answerdotai/ModernBERT-base", "license:apache-2.0", "region:us" ]
2024-12-26T00:57:09Z
2024-12-26T16:38:01+00:00
11
0
--- base_model: - answerdotai/ModernBERT-base datasets: - stanfordnlp/sst2 license: apache-2.0 metrics: - accuracy --- ModernBERT model trained with the sst2 dataset for text classification.
null
Non_BioNLP
ModernBERT model trained with the sst2 dataset for text classification.
{"base_model": ["answerdotai/ModernBERT-base"], "datasets": ["stanfordnlp/sst2"], "license": "apache-2.0", "metrics": ["accuracy"]}
task
[ "TEXT_CLASSIFICATION" ]
41,475
svenstahlmann/finetuned-distilbert-needmining
svenstahlmann
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "needmining", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-07-18T12:50:37Z
2022-07-18T13:15:23+00:00
116
0
--- language: en license: apache-2.0 tags: - distilbert - needmining metric: - f1 --- # Finetuned-Distilbert-needmining (uncased) This model is a finetuned version of the [Distilbert base model](https://huggingface.co/distilbert-base-uncased). It was trained to predict need-containing sentences from amazon product reviews. ## Model description This mode is part of ongoing research, after the publication of the research more information will be added. ## Intended uses & limitations You can use this model to identify sentences that contain customer needs in user-generated content. This can act as a filtering process to remove uninformative content for market research. ### How to use You can use this model directly with a pipeline for text classification: ```python >>> from transformers import pipeline >>> classifier = pipeline("text-classification", model="svenstahlmann/finetuned-distilbert-needmining") >>> classifier("the plasic feels super cheap.") [{'label': 'contains need', 'score': 0.9397542476654053}] ``` ### Limitations and bias We are not aware of any bias in the training data. ## Training data The training was done on a dataset of 6400 sentences. The sentences were taken from product reviews off amazon and coded if they express customer needs. ## Training procedure For the training, we used [Population Based Training (PBT)](https://www.deepmind.com/blog/population-based-training-of-neural-networks) and optimized for f1 score on a validation set of 1600 sentences. ### Preprocessing The preprocessing follows the [Distilbert base model](https://huggingface.co/distilbert-base-uncased). ### Pretraining The model was trained on a titan RTX for 1 hour. ## Evaluation results Results on the validation set: | F1 | |:----:| | 76.0 | ### BibTeX entry and citation info coming soon
null
Non_BioNLP
# Finetuned-Distilbert-needmining (uncased) This model is a finetuned version of the [Distilbert base model](https://huggingface.co/distilbert-base-uncased). It was trained to predict need-containing sentences from amazon product reviews. ## Model description This mode is part of ongoing research, after the publication of the research more information will be added. ## Intended uses & limitations You can use this model to identify sentences that contain customer needs in user-generated content. This can act as a filtering process to remove uninformative content for market research. ### How to use You can use this model directly with a pipeline for text classification: ```python >>> from transformers import pipeline >>> classifier = pipeline("text-classification", model="svenstahlmann/finetuned-distilbert-needmining") >>> classifier("the plasic feels super cheap.") [{'label': 'contains need', 'score': 0.9397542476654053}] ``` ### Limitations and bias We are not aware of any bias in the training data. ## Training data The training was done on a dataset of 6400 sentences. The sentences were taken from product reviews off amazon and coded if they express customer needs. ## Training procedure For the training, we used [Population Based Training (PBT)](https://www.deepmind.com/blog/population-based-training-of-neural-networks) and optimized for f1 score on a validation set of 1600 sentences. ### Preprocessing The preprocessing follows the [Distilbert base model](https://huggingface.co/distilbert-base-uncased). ### Pretraining The model was trained on a titan RTX for 1 hour. ## Evaluation results Results on the validation set: | F1 | |:----:| | 76.0 | ### BibTeX entry and citation info coming soon
{"language": "en", "license": "apache-2.0", "tags": ["distilbert", "needmining"], "metric": ["f1"]}
task
[ "TEXT_CLASSIFICATION" ]
41,476
csocsci/robeczech-base-binary-cs-iib
csocsci
feature-extraction
[ "transformers", "pytorch", "roberta", "feature-extraction", "cs", "license:mit", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2023-09-21T12:45:41Z
2024-10-15T08:20:24+00:00
9
0
--- language: - cs license: mit --- # Model Card for robeczech-base-binary-cs-iib <!-- Provide a quick summary of what the model is/does. --> This model is fine-tuned for binary text classification of Supportive Interactions in Instant Messenger dialogs of Adolescents in Czech. ### Model Description The model was fine-tuned on a Czech dataset of Instant Messenger dialogs of Adolescents. The classification is binary and the model outputs probablities for labels {0,1}: Supportive Interactions present or not. - **Developed by:** Anonymous - **Language(s):** cs - **Finetuned from:** ufal/robeczech-base ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** https://github.com/csocsci/supportive-interactions - **Paper:** Stay tuned! ## Usage Here is how to use this model to classify a context-window of a dialogue: ```python import numpy as np from transformers import AutoTokenizer, AutoModelForSequenceClassification # Prepare input texts. This model is pretrained and fine-tuned for Czech test_texts = ['Utterance1;Utterance2;Utterance3'] # Load the model and tokenizer model = AutoModelForSequenceClassification.from_pretrained( 'csocsci/robeczech-base-binary-cs-iib', num_labels=2).to("cuda") tokenizer = AutoTokenizer.from_pretrained( 'csocsci/robeczech-base-binary-cs-iib', use_fast=False, truncation_side='left') assert tokenizer.truncation_side == 'left' # Define helper functions def get_probs(text, tokenizer, model): inputs = tokenizer(text, padding=True, truncation=True, max_length=256, return_tensors="pt").to("cuda") outputs = model(**inputs) return outputs[0].softmax(1) def preds2class(probs, threshold=0.5): pclasses = np.zeros(probs.shape) pclasses[np.where(probs >= threshold)] = 1 return pclasses.argmax(-1) def print_predictions(texts): probabilities = [get_probs( texts[i], tokenizer, model).cpu().detach().numpy()[0] for i in range(len(texts))] predicted_classes = preds2class(np.array(probabilities)) for c, p in zip(predicted_classes, probabilities): print(f'{c}: {p}') # Run the prediction print_predictions(test_texts) ```
null
Non_BioNLP
# Model Card for robeczech-base-binary-cs-iib <!-- Provide a quick summary of what the model is/does. --> This model is fine-tuned for binary text classification of Supportive Interactions in Instant Messenger dialogs of Adolescents in Czech. ### Model Description The model was fine-tuned on a Czech dataset of Instant Messenger dialogs of Adolescents. The classification is binary and the model outputs probablities for labels {0,1}: Supportive Interactions present or not. - **Developed by:** Anonymous - **Language(s):** cs - **Finetuned from:** ufal/robeczech-base ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** https://github.com/csocsci/supportive-interactions - **Paper:** Stay tuned! ## Usage Here is how to use this model to classify a context-window of a dialogue: ```python import numpy as np from transformers import AutoTokenizer, AutoModelForSequenceClassification # Prepare input texts. This model is pretrained and fine-tuned for Czech test_texts = ['Utterance1;Utterance2;Utterance3'] # Load the model and tokenizer model = AutoModelForSequenceClassification.from_pretrained( 'csocsci/robeczech-base-binary-cs-iib', num_labels=2).to("cuda") tokenizer = AutoTokenizer.from_pretrained( 'csocsci/robeczech-base-binary-cs-iib', use_fast=False, truncation_side='left') assert tokenizer.truncation_side == 'left' # Define helper functions def get_probs(text, tokenizer, model): inputs = tokenizer(text, padding=True, truncation=True, max_length=256, return_tensors="pt").to("cuda") outputs = model(**inputs) return outputs[0].softmax(1) def preds2class(probs, threshold=0.5): pclasses = np.zeros(probs.shape) pclasses[np.where(probs >= threshold)] = 1 return pclasses.argmax(-1) def print_predictions(texts): probabilities = [get_probs( texts[i], tokenizer, model).cpu().detach().numpy()[0] for i in range(len(texts))] predicted_classes = preds2class(np.array(probabilities)) for c, p in zip(predicted_classes, probabilities): print(f'{c}: {p}') # Run the prediction print_predictions(test_texts) ```
{"language": ["cs"], "license": "mit"}
task
[ "TEXT_CLASSIFICATION" ]
41,477
Finnish-NLP/t5-small-nl16-finnish
Finnish-NLP
text2text-generation
[ "transformers", "pytorch", "jax", "tensorboard", "t5", "text2text-generation", "finnish", "t5x", "seq2seq", "fi", "dataset:Finnish-NLP/mc4_fi_cleaned", "dataset:wikipedia", "arxiv:1910.10683", "arxiv:2002.05202", "arxiv:2109.10686", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
2022-08-18T10:51:43Z
2022-10-02T15:55:05+00:00
12
0
--- datasets: - Finnish-NLP/mc4_fi_cleaned - wikipedia language: - fi license: apache-2.0 tags: - finnish - t5 - t5x - seq2seq inference: false --- # T5-small-nl16 for Finnish Pretrained T5 model on Finnish language using a span-based masked language modeling (MLM) objective. T5 was introduced in [this paper](https://arxiv.org/abs/1910.10683) and first released at [this page](https://github.com/google-research/text-to-text-transfer-transformer). **Note:** The Hugging Face inference widget is deactivated because this model needs a text-to-text fine-tuning on a specific downstream task to be useful in practice. As an example of a fine-tuned Finnish T5 model, you can check [Finnish-NLP/t5-small-nl24-casing-punctuation-correction](https://huggingface.co/Finnish-NLP/t5-small-nl24-casing-punctuation-correction) which has been fine-tuned to correct missing casing and punctuation for Finnish text. ## Model description T5 is an encoder-decoder model and treats all NLP problems in a text-to-text format. Finnish T5 is a transformers model pretrained on a very large corpus of Finnish data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and outputs from those texts. More precisely, it was pretrained with the span-based masked language modeling (MLM) objective. Spans of the input sequence are masked by so-called sentinel tokens (a.k.a unique mask tokens) and the output sequence is formed as a concatenation of the same sentinel tokens and the real masked tokens. This way, the model learns an inner representation of the Finnish language. This model used the [T5 v1.1](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) improvements compared to the original T5 model during the pretraining: - GEGLU activation in feed-forward hidden layer, rather than ReLU - see [here](https://arxiv.org/abs/2002.05202) - Dropout was turned off in pretraining (quality win). Dropout should be re-enabled during fine-tuning - Pretrained on span-based masked language modeling (MLM) objective only without mixing in the downstream tasks - No parameter sharing between embedding and classifier layer This model also used the "efficient" T5 architecture findings presented in [this paper](https://arxiv.org/abs/2109.10686). In a nutshell, the paper indicates that a Deep-Narrow model architecture is favorable for downstream performance compared to other model architectures of similar parameter count. To be more precise, model depth is defined as the number of transformer blocks that are stacked sequentially. This model uses the [t5-efficient-small-nl16](https://huggingface.co/google/t5-efficient-small-nl16) architecture's layer depth which means both the encoder and the decoder have 16 transformer layers compared to the original T5 "small" model's architecture of 6 transformer layers. In total, this model has 184 million parameters. ## Intended uses & limitations This model was only pretrained in a self-supervised way excluding any supervised training. Therefore, this model has to be fine-tuned before it is usable on a downstream task, like text classification, unlike the Google's original T5 model. **Note:** You most likely need to fine-tune these T5 models without mixed precision so fine-tune them with full fp32 precision. You can also find more fine-tuning tips from [here](https://discuss.huggingface.co/t/t5-finetuning-tips), for example. ### How to use Here is how to use this model in PyTorch: ```python from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("Finnish-NLP/t5-small-nl16-finnish") model = T5ForConditionalGeneration.from_pretrained("Finnish-NLP/t5-small-nl16-finnish") ``` and in TensorFlow: ```python from transformers import T5Tokenizer, TFT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("Finnish-NLP/t5-small-nl16-finnish") model = T5ForConditionalGeneration.from_pretrained("Finnish-NLP/t5-small-nl16-finnish", from_pt=True) ``` ### Limitations and bias The training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral. Therefore, the model can have biased predictions. This bias will also affect all fine-tuned versions of this model. ## Training data This Finnish T5 model was pretrained on the combination of six datasets: - [mc4_fi_cleaned](https://huggingface.co/datasets/Finnish-NLP/mc4_fi_cleaned), the dataset mC4 is a multilingual colossal, cleaned version of Common Crawl's web crawl corpus. We used the Finnish subset of the mC4 dataset and further cleaned it with our own text data cleaning codes (check the dataset repo). - [wikipedia](https://huggingface.co/datasets/wikipedia) We used the Finnish subset of the wikipedia (August 2021) dataset - [Yle Finnish News Archive 2011-2018](http://urn.fi/urn:nbn:fi:lb-2017070501) - [Yle Finnish News Archive 2019-2020](http://urn.fi/urn:nbn:fi:lb-2021050401) - [Finnish News Agency Archive (STT)](http://urn.fi/urn:nbn:fi:lb-2018121001) - [The Suomi24 Sentences Corpus](http://urn.fi/urn:nbn:fi:lb-2020021803) Raw datasets were automatically cleaned to filter out bad quality and non-Finnish examples. Also, a [perplexity](https://huggingface.co/course/chapter7/3#perplexity-for-language-models) score was calculated for all texts with a KenLM model which was trained with very clean Finnish texts only. This perplexity score can then be used to determine how "clean" Finnish language the text contains. Lastly, all datasets were concatenated and the top 90% perplexity score was used as a filtering threshold to filter out the worst quality 10% of texts. Together these cleaned datasets were around 76GB of text. ## Training procedure ### Preprocessing The texts are tokenized using WordPiece and a vocabulary size of 32000. The inputs and the outputs are sequences of 512 consecutive tokens. Texts are not lower cased so this model is case-sensitive: it makes a difference between finnish and Finnish. ### Pretraining The model was trained on TPUv3-8 VM, sponsored by the [Google TPU Research Cloud](https://sites.research.google/trc/about/), for 500K steps with a batch size of 256 (in total 66B tokens). The optimizer used was a AdaFactor with learning rate warmup for 10K steps with a constant learning rate of 1e-2, and then an inverse square root decay (exponential decay) of the learning rate after. Training code was from the Google's Jax/Flax based [t5x framework](https://github.com/google-research/t5x) and also some t5x task definitions were adapted from [Per's t5x work](https://huggingface.co/pere). ## Evaluation results Evaluation was done by fine-tuning the model on a downstream text classification task with two different labeled Finnish datasets: [Yle News](https://github.com/spyysalo/yle-corpus) and [Eduskunta](https://github.com/aajanki/eduskunta-vkk). Classification fine-tuning was done with a sequence length of 128 tokens. When fine-tuned on those datasets, this model (the third row of the table) achieves the following accuracy results compared to our other T5 models and their parameter counts: | | Model parameters | Yle News accuracy | Eduskunta accuracy | |-------------------------------------------------------|------------------|---------------------|----------------------| |Finnish-NLP/t5-tiny-nl6-finnish | 31 million |92.80 |69.07 | |Finnish-NLP/t5-mini-nl8-finnish | 72 million |93.89 |71.43 | |Finnish-NLP/t5-small-nl16-finnish | 184 million |94.46 |74.00 | |Finnish-NLP/t5-small-nl24-finnish | 260 million |**94.68** |74.90 | |Finnish-NLP/byt5-base-finnish | 582 million |92.33 |73.13 | |Finnish-NLP/t5-base-nl36-finnish | 814 million |94.40 |**75.97** | |Finnish-NLP/t5-large-nl36-finnish | 1425 million |94.17 |73.50 | Fine-tuning Google's multilingual mT5 models on the same datasets we can clearly see that our monolingual Finnish T5 models achieve much better results on Finnish text classification: | | Model parameters | Yle News accuracy | Eduskunta accuracy | |-------------------------------------------------------|------------------|---------------------|----------------------| |google/mt5-small | 301 million |91.51 |64.10 | |google/mt5-base | 583 million |92.71 |68.40 | ## Acknowledgements This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/). ## Team Members - Aapo Tanskanen, [Hugging Face profile](https://huggingface.co/aapot), [LinkedIn profile](https://www.linkedin.com/in/aapotanskanen/) - Rasmus Toivanen, [Hugging Face profile](https://huggingface.co/RASMUS), [LinkedIn profile](https://www.linkedin.com/in/rasmustoivanen/) Feel free to contact us for more details 🤗
null
Non_BioNLP
# T5-small-nl16 for Finnish Pretrained T5 model on Finnish language using a span-based masked language modeling (MLM) objective. T5 was introduced in [this paper](https://arxiv.org/abs/1910.10683) and first released at [this page](https://github.com/google-research/text-to-text-transfer-transformer). **Note:** The Hugging Face inference widget is deactivated because this model needs a text-to-text fine-tuning on a specific downstream task to be useful in practice. As an example of a fine-tuned Finnish T5 model, you can check [Finnish-NLP/t5-small-nl24-casing-punctuation-correction](https://huggingface.co/Finnish-NLP/t5-small-nl24-casing-punctuation-correction) which has been fine-tuned to correct missing casing and punctuation for Finnish text. ## Model description T5 is an encoder-decoder model and treats all NLP problems in a text-to-text format. Finnish T5 is a transformers model pretrained on a very large corpus of Finnish data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and outputs from those texts. More precisely, it was pretrained with the span-based masked language modeling (MLM) objective. Spans of the input sequence are masked by so-called sentinel tokens (a.k.a unique mask tokens) and the output sequence is formed as a concatenation of the same sentinel tokens and the real masked tokens. This way, the model learns an inner representation of the Finnish language. This model used the [T5 v1.1](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) improvements compared to the original T5 model during the pretraining: - GEGLU activation in feed-forward hidden layer, rather than ReLU - see [here](https://arxiv.org/abs/2002.05202) - Dropout was turned off in pretraining (quality win). Dropout should be re-enabled during fine-tuning - Pretrained on span-based masked language modeling (MLM) objective only without mixing in the downstream tasks - No parameter sharing between embedding and classifier layer This model also used the "efficient" T5 architecture findings presented in [this paper](https://arxiv.org/abs/2109.10686). In a nutshell, the paper indicates that a Deep-Narrow model architecture is favorable for downstream performance compared to other model architectures of similar parameter count. To be more precise, model depth is defined as the number of transformer blocks that are stacked sequentially. This model uses the [t5-efficient-small-nl16](https://huggingface.co/google/t5-efficient-small-nl16) architecture's layer depth which means both the encoder and the decoder have 16 transformer layers compared to the original T5 "small" model's architecture of 6 transformer layers. In total, this model has 184 million parameters. ## Intended uses & limitations This model was only pretrained in a self-supervised way excluding any supervised training. Therefore, this model has to be fine-tuned before it is usable on a downstream task, like text classification, unlike the Google's original T5 model. **Note:** You most likely need to fine-tune these T5 models without mixed precision so fine-tune them with full fp32 precision. You can also find more fine-tuning tips from [here](https://discuss.huggingface.co/t/t5-finetuning-tips), for example. ### How to use Here is how to use this model in PyTorch: ```python from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("Finnish-NLP/t5-small-nl16-finnish") model = T5ForConditionalGeneration.from_pretrained("Finnish-NLP/t5-small-nl16-finnish") ``` and in TensorFlow: ```python from transformers import T5Tokenizer, TFT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("Finnish-NLP/t5-small-nl16-finnish") model = T5ForConditionalGeneration.from_pretrained("Finnish-NLP/t5-small-nl16-finnish", from_pt=True) ``` ### Limitations and bias The training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral. Therefore, the model can have biased predictions. This bias will also affect all fine-tuned versions of this model. ## Training data This Finnish T5 model was pretrained on the combination of six datasets: - [mc4_fi_cleaned](https://huggingface.co/datasets/Finnish-NLP/mc4_fi_cleaned), the dataset mC4 is a multilingual colossal, cleaned version of Common Crawl's web crawl corpus. We used the Finnish subset of the mC4 dataset and further cleaned it with our own text data cleaning codes (check the dataset repo). - [wikipedia](https://huggingface.co/datasets/wikipedia) We used the Finnish subset of the wikipedia (August 2021) dataset - [Yle Finnish News Archive 2011-2018](http://urn.fi/urn:nbn:fi:lb-2017070501) - [Yle Finnish News Archive 2019-2020](http://urn.fi/urn:nbn:fi:lb-2021050401) - [Finnish News Agency Archive (STT)](http://urn.fi/urn:nbn:fi:lb-2018121001) - [The Suomi24 Sentences Corpus](http://urn.fi/urn:nbn:fi:lb-2020021803) Raw datasets were automatically cleaned to filter out bad quality and non-Finnish examples. Also, a [perplexity](https://huggingface.co/course/chapter7/3#perplexity-for-language-models) score was calculated for all texts with a KenLM model which was trained with very clean Finnish texts only. This perplexity score can then be used to determine how "clean" Finnish language the text contains. Lastly, all datasets were concatenated and the top 90% perplexity score was used as a filtering threshold to filter out the worst quality 10% of texts. Together these cleaned datasets were around 76GB of text. ## Training procedure ### Preprocessing The texts are tokenized using WordPiece and a vocabulary size of 32000. The inputs and the outputs are sequences of 512 consecutive tokens. Texts are not lower cased so this model is case-sensitive: it makes a difference between finnish and Finnish. ### Pretraining The model was trained on TPUv3-8 VM, sponsored by the [Google TPU Research Cloud](https://sites.research.google/trc/about/), for 500K steps with a batch size of 256 (in total 66B tokens). The optimizer used was a AdaFactor with learning rate warmup for 10K steps with a constant learning rate of 1e-2, and then an inverse square root decay (exponential decay) of the learning rate after. Training code was from the Google's Jax/Flax based [t5x framework](https://github.com/google-research/t5x) and also some t5x task definitions were adapted from [Per's t5x work](https://huggingface.co/pere). ## Evaluation results Evaluation was done by fine-tuning the model on a downstream text classification task with two different labeled Finnish datasets: [Yle News](https://github.com/spyysalo/yle-corpus) and [Eduskunta](https://github.com/aajanki/eduskunta-vkk). Classification fine-tuning was done with a sequence length of 128 tokens. When fine-tuned on those datasets, this model (the third row of the table) achieves the following accuracy results compared to our other T5 models and their parameter counts: | | Model parameters | Yle News accuracy | Eduskunta accuracy | |-------------------------------------------------------|------------------|---------------------|----------------------| |Finnish-NLP/t5-tiny-nl6-finnish | 31 million |92.80 |69.07 | |Finnish-NLP/t5-mini-nl8-finnish | 72 million |93.89 |71.43 | |Finnish-NLP/t5-small-nl16-finnish | 184 million |94.46 |74.00 | |Finnish-NLP/t5-small-nl24-finnish | 260 million |**94.68** |74.90 | |Finnish-NLP/byt5-base-finnish | 582 million |92.33 |73.13 | |Finnish-NLP/t5-base-nl36-finnish | 814 million |94.40 |**75.97** | |Finnish-NLP/t5-large-nl36-finnish | 1425 million |94.17 |73.50 | Fine-tuning Google's multilingual mT5 models on the same datasets we can clearly see that our monolingual Finnish T5 models achieve much better results on Finnish text classification: | | Model parameters | Yle News accuracy | Eduskunta accuracy | |-------------------------------------------------------|------------------|---------------------|----------------------| |google/mt5-small | 301 million |91.51 |64.10 | |google/mt5-base | 583 million |92.71 |68.40 | ## Acknowledgements This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/). ## Team Members - Aapo Tanskanen, [Hugging Face profile](https://huggingface.co/aapot), [LinkedIn profile](https://www.linkedin.com/in/aapotanskanen/) - Rasmus Toivanen, [Hugging Face profile](https://huggingface.co/RASMUS), [LinkedIn profile](https://www.linkedin.com/in/rasmustoivanen/) Feel free to contact us for more details 🤗
{"datasets": ["Finnish-NLP/mc4_fi_cleaned", "wikipedia"], "language": ["fi"], "license": "apache-2.0", "tags": ["finnish", "t5", "t5x", "seq2seq"], "inference": false}
task
[ "TEXT_CLASSIFICATION" ]
41,478
laxsvips/minilm-finetuned-emotion
laxsvips
text-classification
[ "transformers", "tf", "bert", "text-classification", "generated_from_keras_callback", "en", "dataset:emotion", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-02-14T23:37:32Z
2023-02-15T19:08:29+00:00
27
0
--- datasets: - emotion language: - en license: mit metrics: - f1 tags: - generated_from_keras_callback model-index: - name: laxsvips/minilm-finetuned-emotion results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # laxsvips/minilm-finetuned-emotion This model is a fine-tuned version of [microsoft/MiniLM-L12-H384-uncased](https://huggingface.co/microsoft/MiniLM-L12-H384-uncased) on the Hugging Face emotion (https://huggingface.co/datasets/emotion) dataset. It achieves the following results on the evaluation set: - Train Loss: 0.1168 - Train Accuracy: 0.9446 - Validation Loss: 0.1709 - Validation Accuracy: 0.9350 - Epoch: 4 ## Model description # MiniLM: Small and Fast Pre-trained Models for Language Understanding and Generation MiniLM is a distilled model from the paper "MiniLM: Deep Self-Attention Distillation for Task-Agnostic Compression of Pre-Trained Transformers". ## Intended uses & limitations This model has been created as a learning guide on: - How to implement a text classification model using Hugging Face Transformers in TensorFlow - How to handle imbalanced class distribution # How to use the model ``` from transformers import pipeline model_cpt = "laxsvips/minilm-finetuned-emotion" pipe = pipeline("text-classification", model=model_cpt) predicted_scores = pipe("I am so glad you could help me") print(predicted_scores) ```` The results: ``` [[{'label': 'sadness', 'score': 0.003758953418582678}, {'label': 'joy', 'score': 0.9874302744865417}, {'label': 'love', 'score': 0.00610917154699564}, {'label': 'anger', 'score': 9.696640336187556e-05}, {'label': 'fear', 'score': 0.0006420552381314337}, {'label': 'surprise', 'score': 0.00196251692250371}]] ``` ## Training and evaluation data [Emotion](https://huggingface.co/datasets/emotion) Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. ## Training procedure Refer to the [Colab](https://colab.research.google.com/github/laxmiharikumar/transformers/blob/main/TextClassification_Emotions_TF.ipynb) notebook ### Training hyperparameters The following hyperparameters were used during training: - optimizer: 'Adam', - learning_rate': 5e-05, - batch_size : 64 - num_epochs - 5 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.9485 | 0.5543 | 0.8404 | 0.6870 | 0 | | 0.4192 | 0.8347 | 0.3450 | 0.9040 | 1 | | 0.2132 | 0.9178 | 0.2288 | 0.9240 | 2 | | 0.1465 | 0.9364 | 0.1838 | 0.9295 | 3 | | 0.1168 | 0.9446 | 0.1709 | 0.9350 | 4 | ### Evaluation Metrics ``` {'accuracy': 0.935, 'precision': 0.937365614416424, 'recall': 0.935, 'f1_score': 0.9355424419858925} ``` ### Framework versions - Transformers 4.26.1 - TensorFlow 2.11.0 - Datasets 2.9.0 - Tokenizers 0.13.2 ### References 1. https://www.youtube.com/watch?v=u--UVvH-LIQ 2. https://huggingface.co/docs/transformers 3. https://www.tensorflow.org/api_docs/python/tf
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # laxsvips/minilm-finetuned-emotion This model is a fine-tuned version of [microsoft/MiniLM-L12-H384-uncased](https://huggingface.co/microsoft/MiniLM-L12-H384-uncased) on the Hugging Face emotion (https://huggingface.co/datasets/emotion) dataset. It achieves the following results on the evaluation set: - Train Loss: 0.1168 - Train Accuracy: 0.9446 - Validation Loss: 0.1709 - Validation Accuracy: 0.9350 - Epoch: 4 ## Model description # MiniLM: Small and Fast Pre-trained Models for Language Understanding and Generation MiniLM is a distilled model from the paper "MiniLM: Deep Self-Attention Distillation for Task-Agnostic Compression of Pre-Trained Transformers". ## Intended uses & limitations This model has been created as a learning guide on: - How to implement a text classification model using Hugging Face Transformers in TensorFlow - How to handle imbalanced class distribution # How to use the model ``` from transformers import pipeline model_cpt = "laxsvips/minilm-finetuned-emotion" pipe = pipeline("text-classification", model=model_cpt) predicted_scores = pipe("I am so glad you could help me") print(predicted_scores) ```` The results: ``` [[{'label': 'sadness', 'score': 0.003758953418582678}, {'label': 'joy', 'score': 0.9874302744865417}, {'label': 'love', 'score': 0.00610917154699564}, {'label': 'anger', 'score': 9.696640336187556e-05}, {'label': 'fear', 'score': 0.0006420552381314337}, {'label': 'surprise', 'score': 0.00196251692250371}]] ``` ## Training and evaluation data [Emotion](https://huggingface.co/datasets/emotion) Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. ## Training procedure Refer to the [Colab](https://colab.research.google.com/github/laxmiharikumar/transformers/blob/main/TextClassification_Emotions_TF.ipynb) notebook ### Training hyperparameters The following hyperparameters were used during training: - optimizer: 'Adam', - learning_rate': 5e-05, - batch_size : 64 - num_epochs - 5 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.9485 | 0.5543 | 0.8404 | 0.6870 | 0 | | 0.4192 | 0.8347 | 0.3450 | 0.9040 | 1 | | 0.2132 | 0.9178 | 0.2288 | 0.9240 | 2 | | 0.1465 | 0.9364 | 0.1838 | 0.9295 | 3 | | 0.1168 | 0.9446 | 0.1709 | 0.9350 | 4 | ### Evaluation Metrics ``` {'accuracy': 0.935, 'precision': 0.937365614416424, 'recall': 0.935, 'f1_score': 0.9355424419858925} ``` ### Framework versions - Transformers 4.26.1 - TensorFlow 2.11.0 - Datasets 2.9.0 - Tokenizers 0.13.2 ### References 1. https://www.youtube.com/watch?v=u--UVvH-LIQ 2. https://huggingface.co/docs/transformers 3. https://www.tensorflow.org/api_docs/python/tf
{"datasets": ["emotion"], "language": ["en"], "license": "mit", "metrics": ["f1"], "tags": ["generated_from_keras_callback"], "model-index": [{"name": "laxsvips/minilm-finetuned-emotion", "results": []}]}
task
[ "TEXT_CLASSIFICATION" ]
41,479
mini1013/master_cate_fi3
mini1013
text-classification
[ "setfit", "safetensors", "roberta", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "base_model:mini1013/master_domain", "base_model:finetune:mini1013/master_domain", "model-index", "region:us" ]
2025-01-21T19:15:20Z
2025-01-21T19:15:42+00:00
63
0
--- base_model: mini1013/master_domain library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: 45T PVC 원톤파티션 사무실파티션 책상 칸막이 패브릭 천파티션 가림막 W600 H1000 가구/인테리어>서재/사무용가구>사무/교구용가구>파티션 - text: GOYA 고야 크맘 곰 자작나무 책상 파티션 600 학교 칸막이 가구/인테리어>서재/사무용가구>사무/교구용가구>파티션 - text: 와이디 로아 모던 책상 미드센츄리 테이블 800 가구/인테리어>서재/사무용가구>책상>일자형 책상 - text: 컴퓨터 의자 가정용 앉은 기숙사 대학생 소파 사무실 거짓말 가구/인테리어>서재/사무용가구>의자>하이팩의자 - text: 한샘 레그핏 쿠션형 책상 발받침대 의자발받침 다리받침대 가구/인테리어>서재/사무용가구>의자>의자발받침대 inference: true model-index: - name: SetFit with mini1013/master_domain results: - task: type: text-classification name: Text Classification dataset: name: Unknown type: unknown split: test metrics: - type: accuracy value: 1.0 name: Accuracy --- # SetFit with mini1013/master_domain This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [mini1013/master_domain](https://huggingface.co/mini1013/master_domain) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [mini1013/master_domain](https://huggingface.co/mini1013/master_domain) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 5 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | 4.0 | <ul><li>'스코나 밀러튼 LPM 1400 멀티 교구장 책장 가구/인테리어>서재/사무용가구>책장'</li><li>'이케아 BILLY 빌리 3단 책장 40cm 가구/인테리어>서재/사무용가구>책장'</li><li>'에보니아 로엠 600 3단 하부 도어 책장 가구/인테리어>서재/사무용가구>책장'</li></ul> | | 2.0 | <ul><li>'선반 철제 책꽂이 수납 타공판 책상위정리 책장 세트-후크 3 흰색 단층 홀 보드 가구/인테리어>서재/사무용가구>책꽂이'</li><li>'델리 2단 서랍 겸 책꽂이 데스크 손잡이 오거나이저 가구/인테리어>서재/사무용가구>책꽂이'</li><li>'북케이스 책장 수납 선반 북 보관 책꽂이 가구/인테리어>서재/사무용가구>책꽂이'</li></ul> | | 3.0 | <ul><li>'209애비뉴 제로데스크 에보 멀티 컴퓨터책상 1600x800 가구/인테리어>서재/사무용가구>책상>컴퓨터책상'</li><li>'한샘 티오 일자책상세트 5단 120x60cm 콘센트형 조명 가구/인테리어>서재/사무용가구>책상>일자형 책상'</li><li>'아씨방 마일드 모션데스크 120cm 가구/인테리어>서재/사무용가구>책상>스탠딩책상'</li></ul> | | 0.0 | <ul><li>'하이솔로몬 강의대 LS13 가구/인테리어>서재/사무용가구>사무/교구용가구>사무용책상'</li><li>'사무실쇼파 제논 2인용 소파 가구/인테리어>서재/사무용가구>사무/교구용가구>사무용소파'</li><li>'스테인리스 서랍장 캐비닛 미용실 매장용 사물함 스텐 가구/인테리어>서재/사무용가구>사무/교구용가구>캐비닛'</li></ul> | | 1.0 | <ul><li>'접이식 썬베드 간이 낮잠 의자 휴대용 리클라이너 경량 가구/인테리어>서재/사무용가구>의자>안락의자'</li><li>'체스좌식의자 엠보싱 가구/인테리어>서재/사무용가구>의자>좌식의자'</li><li>'나른인 쇼파 손잡이가 달린 침대 위 나부끼창 커밋의자 껴안다 건산수유 의자와 다다미 좌석 가구/인테리어>서재/사무용가구>의자>하이팩의자'</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 1.0 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("mini1013/master_cate_fi3") # Run inference preds = model("와이디 로아 모던 책상 미드센츄리 테이블 800 가구/인테리어>서재/사무용가구>책상>일자형 책상") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:-------|:----| | Word count | 2 | 8.5543 | 22 | | Label | Training Sample Count | |:------|:----------------------| | 0.0 | 70 | | 1.0 | 70 | | 2.0 | 70 | | 3.0 | 70 | | 4.0 | 70 | ### Training Hyperparameters - batch_size: (256, 256) - num_epochs: (30, 30) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 50 - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:-------:|:----:|:-------------:|:---------------:| | 0.0145 | 1 | 0.4825 | - | | 0.7246 | 50 | 0.4985 | - | | 1.4493 | 100 | 0.4783 | - | | 2.1739 | 150 | 0.1925 | - | | 2.8986 | 200 | 0.0024 | - | | 3.6232 | 250 | 0.0001 | - | | 4.3478 | 300 | 0.0001 | - | | 5.0725 | 350 | 0.0001 | - | | 5.7971 | 400 | 0.0 | - | | 6.5217 | 450 | 0.0 | - | | 7.2464 | 500 | 0.0 | - | | 7.9710 | 550 | 0.0 | - | | 8.6957 | 600 | 0.0 | - | | 9.4203 | 650 | 0.0 | - | | 10.1449 | 700 | 0.0 | - | | 10.8696 | 750 | 0.0 | - | | 11.5942 | 800 | 0.0 | - | | 12.3188 | 850 | 0.0 | - | | 13.0435 | 900 | 0.0 | - | | 13.7681 | 950 | 0.0 | - | | 14.4928 | 1000 | 0.0 | - | | 15.2174 | 1050 | 0.0 | - | | 15.9420 | 1100 | 0.0 | - | | 16.6667 | 1150 | 0.0 | - | | 17.3913 | 1200 | 0.0 | - | | 18.1159 | 1250 | 0.0 | - | | 18.8406 | 1300 | 0.0 | - | | 19.5652 | 1350 | 0.0 | - | | 20.2899 | 1400 | 0.0 | - | | 21.0145 | 1450 | 0.0 | - | | 21.7391 | 1500 | 0.0 | - | | 22.4638 | 1550 | 0.0 | - | | 23.1884 | 1600 | 0.0 | - | | 23.9130 | 1650 | 0.0 | - | | 24.6377 | 1700 | 0.0 | - | | 25.3623 | 1750 | 0.0 | - | | 26.0870 | 1800 | 0.0 | - | | 26.8116 | 1850 | 0.0 | - | | 27.5362 | 1900 | 0.0 | - | | 28.2609 | 1950 | 0.0 | - | | 28.9855 | 2000 | 0.0 | - | | 29.7101 | 2050 | 0.0 | - | ### Framework Versions - Python: 3.10.12 - SetFit: 1.1.0 - Sentence Transformers: 3.3.1 - Transformers: 4.44.2 - PyTorch: 2.2.0a0+81ea7a4 - Datasets: 3.2.0 - Tokenizers: 0.19.1 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit with mini1013/master_domain This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [mini1013/master_domain](https://huggingface.co/mini1013/master_domain) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [mini1013/master_domain](https://huggingface.co/mini1013/master_domain) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 5 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | 4.0 | <ul><li>'스코나 밀러튼 LPM 1400 멀티 교구장 책장 가구/인테리어>서재/사무용가구>책장'</li><li>'이케아 BILLY 빌리 3단 책장 40cm 가구/인테리어>서재/사무용가구>책장'</li><li>'에보니아 로엠 600 3단 하부 도어 책장 가구/인테리어>서재/사무용가구>책장'</li></ul> | | 2.0 | <ul><li>'선반 철제 책꽂이 수납 타공판 책상위정리 책장 세트-후크 3 흰색 단층 홀 보드 가구/인테리어>서재/사무용가구>책꽂이'</li><li>'델리 2단 서랍 겸 책꽂이 데스크 손잡이 오거나이저 가구/인테리어>서재/사무용가구>책꽂이'</li><li>'북케이스 책장 수납 선반 북 보관 책꽂이 가구/인테리어>서재/사무용가구>책꽂이'</li></ul> | | 3.0 | <ul><li>'209애비뉴 제로데스크 에보 멀티 컴퓨터책상 1600x800 가구/인테리어>서재/사무용가구>책상>컴퓨터책상'</li><li>'한샘 티오 일자책상세트 5단 120x60cm 콘센트형 조명 가구/인테리어>서재/사무용가구>책상>일자형 책상'</li><li>'아씨방 마일드 모션데스크 120cm 가구/인테리어>서재/사무용가구>책상>스탠딩책상'</li></ul> | | 0.0 | <ul><li>'하이솔로몬 강의대 LS13 가구/인테리어>서재/사무용가구>사무/교구용가구>사무용책상'</li><li>'사무실쇼파 제논 2인용 소파 가구/인테리어>서재/사무용가구>사무/교구용가구>사무용소파'</li><li>'스테인리스 서랍장 캐비닛 미용실 매장용 사물함 스텐 가구/인테리어>서재/사무용가구>사무/교구용가구>캐비닛'</li></ul> | | 1.0 | <ul><li>'접이식 썬베드 간이 낮잠 의자 휴대용 리클라이너 경량 가구/인테리어>서재/사무용가구>의자>안락의자'</li><li>'체스좌식의자 엠보싱 가구/인테리어>서재/사무용가구>의자>좌식의자'</li><li>'나른인 쇼파 손잡이가 달린 침대 위 나부끼창 커밋의자 껴안다 건산수유 의자와 다다미 좌석 가구/인테리어>서재/사무용가구>의자>하이팩의자'</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 1.0 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("mini1013/master_cate_fi3") # Run inference preds = model("와이디 로아 모던 책상 미드센츄리 테이블 800 가구/인테리어>서재/사무용가구>책상>일자형 책상") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:-------|:----| | Word count | 2 | 8.5543 | 22 | | Label | Training Sample Count | |:------|:----------------------| | 0.0 | 70 | | 1.0 | 70 | | 2.0 | 70 | | 3.0 | 70 | | 4.0 | 70 | ### Training Hyperparameters - batch_size: (256, 256) - num_epochs: (30, 30) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 50 - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:-------:|:----:|:-------------:|:---------------:| | 0.0145 | 1 | 0.4825 | - | | 0.7246 | 50 | 0.4985 | - | | 1.4493 | 100 | 0.4783 | - | | 2.1739 | 150 | 0.1925 | - | | 2.8986 | 200 | 0.0024 | - | | 3.6232 | 250 | 0.0001 | - | | 4.3478 | 300 | 0.0001 | - | | 5.0725 | 350 | 0.0001 | - | | 5.7971 | 400 | 0.0 | - | | 6.5217 | 450 | 0.0 | - | | 7.2464 | 500 | 0.0 | - | | 7.9710 | 550 | 0.0 | - | | 8.6957 | 600 | 0.0 | - | | 9.4203 | 650 | 0.0 | - | | 10.1449 | 700 | 0.0 | - | | 10.8696 | 750 | 0.0 | - | | 11.5942 | 800 | 0.0 | - | | 12.3188 | 850 | 0.0 | - | | 13.0435 | 900 | 0.0 | - | | 13.7681 | 950 | 0.0 | - | | 14.4928 | 1000 | 0.0 | - | | 15.2174 | 1050 | 0.0 | - | | 15.9420 | 1100 | 0.0 | - | | 16.6667 | 1150 | 0.0 | - | | 17.3913 | 1200 | 0.0 | - | | 18.1159 | 1250 | 0.0 | - | | 18.8406 | 1300 | 0.0 | - | | 19.5652 | 1350 | 0.0 | - | | 20.2899 | 1400 | 0.0 | - | | 21.0145 | 1450 | 0.0 | - | | 21.7391 | 1500 | 0.0 | - | | 22.4638 | 1550 | 0.0 | - | | 23.1884 | 1600 | 0.0 | - | | 23.9130 | 1650 | 0.0 | - | | 24.6377 | 1700 | 0.0 | - | | 25.3623 | 1750 | 0.0 | - | | 26.0870 | 1800 | 0.0 | - | | 26.8116 | 1850 | 0.0 | - | | 27.5362 | 1900 | 0.0 | - | | 28.2609 | 1950 | 0.0 | - | | 28.9855 | 2000 | 0.0 | - | | 29.7101 | 2050 | 0.0 | - | ### Framework Versions - Python: 3.10.12 - SetFit: 1.1.0 - Sentence Transformers: 3.3.1 - Transformers: 4.44.2 - PyTorch: 2.2.0a0+81ea7a4 - Datasets: 3.2.0 - Tokenizers: 0.19.1 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "mini1013/master_domain", "library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "45T PVC 원톤파티션 사무실파티션 책상 칸막이 패브릭 천파티션 가림막 W600 H1000 가구/인테리어>서재/사무용가구>사무/교구용가구>파티션"}, {"text": "GOYA 고야 크맘 곰 자작나무 책상 파티션 600 학교 칸막이 가구/인테리어>서재/사무용가구>사무/교구용가구>파티션"}, {"text": "와이디 로아 모던 책상 미드센츄리 테이블 800 가구/인테리어>서재/사무용가구>책상>일자형 책상"}, {"text": "컴퓨터 의자 가정용 앉은 기숙사 대학생 소파 사무실 거짓말 가구/인테리어>서재/사무용가구>의자>하이팩의자"}, {"text": "한샘 레그핏 쿠션형 책상 발받침대 의자발받침 다리받침대 가구/인테리어>서재/사무용가구>의자>의자발받침대"}], "inference": true, "model-index": [{"name": "SetFit with mini1013/master_domain", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "Unknown", "type": "unknown", "split": "test"}, "metrics": [{"type": "accuracy", "value": 1.0, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,480
jkminder/SwissDomainBERT2
jkminder
sentence-similarity
[ "sentence-transformers", "safetensors", "xmod", "sentence-similarity", "feature-extraction", "democrasci", "swiss part", "base_model:ZurichNLP/swissbert", "base_model:finetune:ZurichNLP/swissbert", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-12-29T10:48:13Z
2024-12-29T11:05:04+00:00
10
0
--- base_model: - ZurichNLP/swissbert library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - democrasci - swiss part --- This is a SwissBERT model, finetuned on Topic Domain prediction of texts (speeches & billtexts) of the swiss parliament. It is recommended to use it with EmbeddingModel from democrasci_embedding_hub to properly deal with the language setting of the XMOD models. # SentenceTransformer This is a [sentence-transformers](https://www.SBERT.net) model trained. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer <!-- - **Base model:** [Unknown](https://huggingface.co/unknown) --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: XmodModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("jkminder/SwissDomainBERT2") # Run inference sentences = [ 'The weather is lovely today.', "It's so sunny outside!", 'He drove to the stadium.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Framework Versions - Python: 3.11.8 - Sentence Transformers: 3.3.1 - Transformers: 4.47.1 - PyTorch: 2.2.1 - Accelerate: 1.2.1 - Datasets: 2.18.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
This is a SwissBERT model, finetuned on Topic Domain prediction of texts (speeches & billtexts) of the swiss parliament. It is recommended to use it with EmbeddingModel from democrasci_embedding_hub to properly deal with the language setting of the XMOD models. # SentenceTransformer This is a [sentence-transformers](https://www.SBERT.net) model trained. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer <!-- - **Base model:** [Unknown](https://huggingface.co/unknown) --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: XmodModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("jkminder/SwissDomainBERT2") # Run inference sentences = [ 'The weather is lovely today.', "It's so sunny outside!", 'He drove to the stadium.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Framework Versions - Python: 3.11.8 - Sentence Transformers: 3.3.1 - Transformers: 4.47.1 - PyTorch: 2.2.1 - Accelerate: 1.2.1 - Datasets: 2.18.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": ["ZurichNLP/swissbert"], "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "democrasci", "swiss part"]}
task
[ "TEXT_CLASSIFICATION" ]
41,481
saifamdouni/TunCHAT-V0.2
saifamdouni
text-generation
[ "transformers", "safetensors", "gemma2", "text-generation", "conversational", "ar", "base_model:unsloth/gemma-2-2b-it", "base_model:finetune:unsloth/gemma-2-2b-it", "license:gemma", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2025-01-03T00:10:38Z
2025-01-07T09:53:02+00:00
502
0
--- base_model: - unsloth/gemma-2-2b-it language: - ar library_name: transformers license: gemma --- **Model Card: TunChat-V0.2** **Model Overview:** - **Model Name:** TunChat-V0.1 - **Model Size:** 2B parameters - **Instruction-Tuned:** Yes - **Language:** Tunisian Dialect - **Use Case Focus:** Conversational exchanges, translation, summarization, content generation, and cultural research. **Model Description:** TunChat-V0.1 is a 2-billion parameter language model specifically instruction-tuned for the Tunisian dialect. It is designed to handle tasks such as conversational exchanges, informal text summarization, and culturally-aware content generation. The model is optimized to understand and generate text in Tunisian Dialect, enabling enhanced performance for applications targeting Tunisian users. **Intended Use:** - Conversational agents and chatbots operating in Tunisian Dialect. - Translation, summarization, and content generation in informal Tunisian dialect. - Supporting cultural research related to Tunisian language and heritage. **How to Use:** ```python import torch from transformers import pipeline pipe = pipeline( "text-generation", model="saifamdouni/TunChat-V0.2", model_kwargs={"torch_dtype": torch.bfloat16}, device="cuda" # replace with "mps" to run on a Mac device ) messages = [ {"role": "user", "content": 'شكون صنعك'}, ] outputs = pipe(messages, max_new_tokens=2048, do_sample=True, top_p=0.95, temperature=0.7, top_k=50) assistant_response = outputs[0]["generated_text"][-1]["content"].strip() print(assistant_response) ``` >صنعوني جماعة من المهندسين والمطورين التوانسة. يحبوا يطوّروا الذكاء الاصطناعي في تونس و يسهلوا استخدامه باللهجة متاعنا. **Quantized Versions:** - GGUF quantized versions will be released later. **Training Dataset:** - Tun-SFT dataset (to be released later): - A mix between organically collected and synthetically generated data **Limitations and Ethical Considerations:** - The model may occasionally produce incorrect or biased responses. - The model may occasionally produce culturally inappropriate responses. - It may not perform optimally on formal Tunisian Arabic texts. **Future Plans:** - Release of GGUF quantized versions. - Open-source availability of the Tun-SFT dataset. **Author:** Saif Eddine Amdouni
null
Non_BioNLP
**Model Card: TunChat-V0.2** **Model Overview:** - **Model Name:** TunChat-V0.1 - **Model Size:** 2B parameters - **Instruction-Tuned:** Yes - **Language:** Tunisian Dialect - **Use Case Focus:** Conversational exchanges, translation, summarization, content generation, and cultural research. **Model Description:** TunChat-V0.1 is a 2-billion parameter language model specifically instruction-tuned for the Tunisian dialect. It is designed to handle tasks such as conversational exchanges, informal text summarization, and culturally-aware content generation. The model is optimized to understand and generate text in Tunisian Dialect, enabling enhanced performance for applications targeting Tunisian users. **Intended Use:** - Conversational agents and chatbots operating in Tunisian Dialect. - Translation, summarization, and content generation in informal Tunisian dialect. - Supporting cultural research related to Tunisian language and heritage. **How to Use:** ```python import torch from transformers import pipeline pipe = pipeline( "text-generation", model="saifamdouni/TunChat-V0.2", model_kwargs={"torch_dtype": torch.bfloat16}, device="cuda" # replace with "mps" to run on a Mac device ) messages = [ {"role": "user", "content": 'شكون صنعك'}, ] outputs = pipe(messages, max_new_tokens=2048, do_sample=True, top_p=0.95, temperature=0.7, top_k=50) assistant_response = outputs[0]["generated_text"][-1]["content"].strip() print(assistant_response) ``` >صنعوني جماعة من المهندسين والمطورين التوانسة. يحبوا يطوّروا الذكاء الاصطناعي في تونس و يسهلوا استخدامه باللهجة متاعنا. **Quantized Versions:** - GGUF quantized versions will be released later. **Training Dataset:** - Tun-SFT dataset (to be released later): - A mix between organically collected and synthetically generated data **Limitations and Ethical Considerations:** - The model may occasionally produce incorrect or biased responses. - The model may occasionally produce culturally inappropriate responses. - It may not perform optimally on formal Tunisian Arabic texts. **Future Plans:** - Release of GGUF quantized versions. - Open-source availability of the Tun-SFT dataset. **Author:** Saif Eddine Amdouni
{"base_model": ["unsloth/gemma-2-2b-it"], "language": ["ar"], "library_name": "transformers", "license": "gemma"}
task
[ "TRANSLATION", "SUMMARIZATION" ]
41,482
THU-KEG/Mistral-Crab-DPO
THU-KEG
text-generation
[ "safetensors", "mistral", "alignment-handbook", "generated_from_trainer", "text-generation", "conversational", "en", "arxiv:2410.24175", "license:apache-2.0", "region:us" ]
2024-11-01T07:03:34Z
2024-11-01T11:48:52+00:00
8
4
--- language: - en license: apache-2.0 metrics: - accuracy pipeline_tag: text-generation tags: - alignment-handbook - generated_from_trainer --- # Model Card for Mistral-Crab-DPO <!-- Provide a quick summary of what the model is/does. --> <p align="justify"> Large language models (LLMs) struggle to follow instructions with complex constraints in format, length, etc. Following the conventional instruction-tuning practice, previous works conduct post-training on complex instruction-response pairs generated by feeding complex instructions to advanced LLMs. However, even advanced LLMs cannot follow complex instructions well, thus limiting the quality of generated data. In this work, we find that <b><i>existing datasets inherently contain implicit complex constraints</i></b> and propose a novel data generation technique, <b><i>constraint back-translation</i></b>. Specifically, we take the high-quality instruction-response pairs in existing datasets and only adopt advanced LLMs to add complex constraints already met by the responses to the instructions, which naturally reduces costs and data noise. In the experiments, we adopt Llama3-70B-Instruct to back-translate constraints and create a high-quality complex instruction-response dataset, named <b>CRAB</b>. We present that post-training on <font face="Verdana">CRAB</font> improves multiple backbone LLMs' complex instruction-following ability, evaluated on extensive instruction-following benchmarks. We further find that constraint back-translation also serves as a useful auxiliary training objective in post-training. - 📖 Paper: [Constraint Back-translation Improves Complex Instruction Following of Large Language Models](https://arxiv.org/abs/2410.24175) </p> - 🦀 Github: [THU/Crab](https://github.com/THU-KEG/Crab) ### Model Performance | Models | BaseModel | IFEval | FollowBench(HSR) | | | AVG | |--------------------|-----------|--------|------------------|-------|------|------| | | | AVG | L1-L2 | L3-L5 | AVG | | | GPT-3.5-turbo | GPT | 66.3 | 74.2 | 61 | 66.2 | 66.3 | | GPT-4 | GPT | 81.3 | 80.4 | 69.4 | 73.8 | 77.6 | | Vicuna-13b-V1.5 | Llama2 | 50.3 | 66.3 | 39.8 | 50.4 | 50.4 | | WizardLM-13B-V1.2 | Llama2 | 51.4 | 56.5 | 36.9 | 44.7 | 48 | | Conifer-13B | Llama2 | 50.2 | 57.1 | 40.3 | 47 | 48.6 | | Zephyr-7B-beta | Mistral | 45.4 | 54.8 | 38.2 | 44.8 | 45.1 | | Conifer-7B | Mistral | 53.9 | 51.9 | 40.2 | 44.9 | 49.4 | | Conifer-7B-DPO | Mistral | 55.7 | 57 | 45.4 | 50 | 52.9 | | Llama3 8B | Llama3 | 31.4 | 6.8 | 8.2 | 7.6 | 19.5 | | Llama3-crab | Llama3 | 46.9 | 51.2 | 26.7 | 36.5 | 41.7 | | Llama3-crab + DPO | Llama3 | 49.7 | 56.8 | 38.1 | 45.5 | 47.6 | | Mistral 7B | Mistral | 25.2 | 15.5 | 6.5 | 10.1 | 17.7 | | Mistral-crab | Mistral | 54.5 | 59.2 | 32.8 | 43.3 | 48.9 | | Mistral-crab + DPO | Mistral | 59.4 | 59.9 | 42.5 | 49.4 | 54.4 | ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** Yunjia Qi, Hao Peng, Xiaozhi Wang, Bin Xu, Lei Hou, Juanzi Li - **Model type:** Text Generation - **Language(s) (NLP):** English - **Finetuned from model [optional]:** Mistral-7B-v0.3
null
Non_BioNLP
# Model Card for Mistral-Crab-DPO <!-- Provide a quick summary of what the model is/does. --> <p align="justify"> Large language models (LLMs) struggle to follow instructions with complex constraints in format, length, etc. Following the conventional instruction-tuning practice, previous works conduct post-training on complex instruction-response pairs generated by feeding complex instructions to advanced LLMs. However, even advanced LLMs cannot follow complex instructions well, thus limiting the quality of generated data. In this work, we find that <b><i>existing datasets inherently contain implicit complex constraints</i></b> and propose a novel data generation technique, <b><i>constraint back-translation</i></b>. Specifically, we take the high-quality instruction-response pairs in existing datasets and only adopt advanced LLMs to add complex constraints already met by the responses to the instructions, which naturally reduces costs and data noise. In the experiments, we adopt Llama3-70B-Instruct to back-translate constraints and create a high-quality complex instruction-response dataset, named <b>CRAB</b>. We present that post-training on <font face="Verdana">CRAB</font> improves multiple backbone LLMs' complex instruction-following ability, evaluated on extensive instruction-following benchmarks. We further find that constraint back-translation also serves as a useful auxiliary training objective in post-training. - 📖 Paper: [Constraint Back-translation Improves Complex Instruction Following of Large Language Models](https://arxiv.org/abs/2410.24175) </p> - 🦀 Github: [THU/Crab](https://github.com/THU-KEG/Crab) ### Model Performance | Models | BaseModel | IFEval | FollowBench(HSR) | | | AVG | |--------------------|-----------|--------|------------------|-------|------|------| | | | AVG | L1-L2 | L3-L5 | AVG | | | GPT-3.5-turbo | GPT | 66.3 | 74.2 | 61 | 66.2 | 66.3 | | GPT-4 | GPT | 81.3 | 80.4 | 69.4 | 73.8 | 77.6 | | Vicuna-13b-V1.5 | Llama2 | 50.3 | 66.3 | 39.8 | 50.4 | 50.4 | | WizardLM-13B-V1.2 | Llama2 | 51.4 | 56.5 | 36.9 | 44.7 | 48 | | Conifer-13B | Llama2 | 50.2 | 57.1 | 40.3 | 47 | 48.6 | | Zephyr-7B-beta | Mistral | 45.4 | 54.8 | 38.2 | 44.8 | 45.1 | | Conifer-7B | Mistral | 53.9 | 51.9 | 40.2 | 44.9 | 49.4 | | Conifer-7B-DPO | Mistral | 55.7 | 57 | 45.4 | 50 | 52.9 | | Llama3 8B | Llama3 | 31.4 | 6.8 | 8.2 | 7.6 | 19.5 | | Llama3-crab | Llama3 | 46.9 | 51.2 | 26.7 | 36.5 | 41.7 | | Llama3-crab + DPO | Llama3 | 49.7 | 56.8 | 38.1 | 45.5 | 47.6 | | Mistral 7B | Mistral | 25.2 | 15.5 | 6.5 | 10.1 | 17.7 | | Mistral-crab | Mistral | 54.5 | 59.2 | 32.8 | 43.3 | 48.9 | | Mistral-crab + DPO | Mistral | 59.4 | 59.9 | 42.5 | 49.4 | 54.4 | ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** Yunjia Qi, Hao Peng, Xiaozhi Wang, Bin Xu, Lei Hou, Juanzi Li - **Model type:** Text Generation - **Language(s) (NLP):** English - **Finetuned from model [optional]:** Mistral-7B-v0.3
{"language": ["en"], "license": "apache-2.0", "metrics": ["accuracy"], "pipeline_tag": "text-generation", "tags": ["alignment-handbook", "generated_from_trainer"]}
task
[ "TRANSLATION" ]
41,483
pinzhenchen/sft-lora-bg-bloom-560m
pinzhenchen
null
[ "generation", "question answering", "instruction tuning", "bg", "arxiv:2309.08958", "license:cc-by-nc-4.0", "region:us" ]
2024-03-05T23:45:23Z
2024-03-05T23:45:26+00:00
0
0
--- language: - bg license: cc-by-nc-4.0 tags: - generation - question answering - instruction tuning --- ### Model Description This HF repository contains base LLMs instruction tuned (SFT) with LoRA and then used to study whether monolingual or multilingual instruction tuning is more favourable. * [GitHub](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main) * [Paper](https://arxiv.org/abs/2309.08958) #### Instruction tuning details * Base model: [bigscience/bloom-560m](https://huggingface.co/bigscience/bloom-560m) * Instruction tuning language: Bulgarian * Training method: LoRA. * LoRA details: rank=8, alpha=16, target modules={key, query, value}. * Best checkpoint: best cross-entropy on a validation set, trained for 5 epochs. * Dataset: machine-translated from [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). You can download our data [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/training-data). #### Usage The model checkpoint should be loaded with the base model together using `transformers` and `peft` libraries. Please refer to our Github repository [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/loraft) for inference and training instructions. #### Citation ``` @inproceedings{chen-etal-2024-monolingual, title="Monolingual or multilingual instruction tuning: Which makes a better {Alpaca}", author="Pinzhen Chen and Shaoxiong Ji and Nikolay Bogoychev and Andrey Kutuzov and Barry Haddow and Kenneth Heafield", year="2024", booktitle = "Findings of the Association for Computational Linguistics: EACL 2024", } ```
null
Non_BioNLP
### Model Description This HF repository contains base LLMs instruction tuned (SFT) with LoRA and then used to study whether monolingual or multilingual instruction tuning is more favourable. * [GitHub](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main) * [Paper](https://arxiv.org/abs/2309.08958) #### Instruction tuning details * Base model: [bigscience/bloom-560m](https://huggingface.co/bigscience/bloom-560m) * Instruction tuning language: Bulgarian * Training method: LoRA. * LoRA details: rank=8, alpha=16, target modules={key, query, value}. * Best checkpoint: best cross-entropy on a validation set, trained for 5 epochs. * Dataset: machine-translated from [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). You can download our data [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/training-data). #### Usage The model checkpoint should be loaded with the base model together using `transformers` and `peft` libraries. Please refer to our Github repository [HERE](https://github.com/hplt-project/monolingual-multilingual-instruction-tuning/tree/main/loraft) for inference and training instructions. #### Citation ``` @inproceedings{chen-etal-2024-monolingual, title="Monolingual or multilingual instruction tuning: Which makes a better {Alpaca}", author="Pinzhen Chen and Shaoxiong Ji and Nikolay Bogoychev and Andrey Kutuzov and Barry Haddow and Kenneth Heafield", year="2024", booktitle = "Findings of the Association for Computational Linguistics: EACL 2024", } ```
{"language": ["bg"], "license": "cc-by-nc-4.0", "tags": ["generation", "question answering", "instruction tuning"]}
task
[ "QUESTION_ANSWERING" ]
41,485
Adwait1199/finetuned-hi-to-en
Adwait1199
text2text-generation
[ "transformers", "pytorch", "tensorboard", "marian", "text2text-generation", "translation-hi-en", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-03-24T19:57:15Z
2023-03-24T21:32:05+00:00
10
0
--- license: apache-2.0 metrics: - bleu tags: - translation-hi-en - generated_from_trainer model-index: - name: finetuned-hi-to-en results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-hi-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-hi-en](https://huggingface.co/Helsinki-NLP/opus-mt-hi-en) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4444 - Bleu: 29.4655 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-hi-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-hi-en](https://huggingface.co/Helsinki-NLP/opus-mt-hi-en) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4444 - Bleu: 29.4655 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
{"license": "apache-2.0", "metrics": ["bleu"], "tags": ["translation-hi-en", "generated_from_trainer"], "model-index": [{"name": "finetuned-hi-to-en", "results": []}]}
task
[ "TRANSLATION" ]
41,486
Triangle104/Unsloth_Llama-3.2-1B-Instruct-Q5_K_S-GGUF
Triangle104
null
[ "transformers", "gguf", "llama-3", "llama", "meta", "facebook", "unsloth", "llama-cpp", "gguf-my-repo", "en", "base_model:unsloth/Llama-3.2-1B-Instruct", "base_model:quantized:unsloth/Llama-3.2-1B-Instruct", "license:llama3.2", "endpoints_compatible", "region:us", "conversational" ]
2024-11-11T01:48:40Z
2025-02-17T12:11:10+00:00
9
0
--- base_model: unsloth/Llama-3.2-1B-Instruct language: - en library_name: transformers license: llama3.2 tags: - llama-3 - llama - meta - facebook - unsloth - transformers - llama-cpp - gguf-my-repo --- # Triangle104/Unsloth_Llama-3.2-1B-Instruct-Q5_K_S-GGUF This model was converted to GGUF format from [`unsloth/Llama-3.2-1B-Instruct`](https://huggingface.co/unsloth/Llama-3.2-1B-Instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/unsloth/Llama-3.2-1B-Instruct) for more details on the model. --- Model details: - Finetune Llama 3.2, Gemma 2, Mistral 2-5x faster with 70% less memory via Unsloth! Special Thanks - A huge thank you to the Meta and Llama team for creating and releasing these models. Model Information - The Meta Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks. Model developer: Meta Model Architecture: Llama 3.2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. Supported languages: English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai are officially supported. Llama 3.2 has been trained on a broader collection of languages than these 8 supported languages. Developers may fine-tune Llama 3.2 models for languages beyond these supported languages, provided they comply with the Llama 3.2 Community License and the Acceptable Use Policy. Developers are always expected to ensure that their deployments, including those that involve additional languages, are completed safely and responsibly. Llama 3.2 family of models Token counts refer to pretraining data only. All model versions use Grouped-Query Attention (GQA) for improved inference scalability. Model Release Date: Sept 25, 2024 Status: This is a static model trained on an offline dataset. Future versions may be released that improve model capabilities and safety. License: Use of Llama 3.2 is governed by the Llama 3.2 Community License (a custom, commercial license agreement). Where to send questions or comments about the model Instructions on how to provide feedback or comments on the model can be found in the model README. For more technical information about generation parameters and recipes for how to use Llama 3.1 in applications, please go here. --- ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo Triangle104/Llama-3.2-1B-Instruct-Q5_K_S-GGUF --hf-file llama-3.2-1b-instruct-q5_k_s.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Triangle104/Llama-3.2-1B-Instruct-Q5_K_S-GGUF --hf-file llama-3.2-1b-instruct-q5_k_s.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo Triangle104/Llama-3.2-1B-Instruct-Q5_K_S-GGUF --hf-file llama-3.2-1b-instruct-q5_k_s.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo Triangle104/Llama-3.2-1B-Instruct-Q5_K_S-GGUF --hf-file llama-3.2-1b-instruct-q5_k_s.gguf -c 2048 ```
null
Non_BioNLP
# Triangle104/Unsloth_Llama-3.2-1B-Instruct-Q5_K_S-GGUF This model was converted to GGUF format from [`unsloth/Llama-3.2-1B-Instruct`](https://huggingface.co/unsloth/Llama-3.2-1B-Instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/unsloth/Llama-3.2-1B-Instruct) for more details on the model. --- Model details: - Finetune Llama 3.2, Gemma 2, Mistral 2-5x faster with 70% less memory via Unsloth! Special Thanks - A huge thank you to the Meta and Llama team for creating and releasing these models. Model Information - The Meta Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks. Model developer: Meta Model Architecture: Llama 3.2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. Supported languages: English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai are officially supported. Llama 3.2 has been trained on a broader collection of languages than these 8 supported languages. Developers may fine-tune Llama 3.2 models for languages beyond these supported languages, provided they comply with the Llama 3.2 Community License and the Acceptable Use Policy. Developers are always expected to ensure that their deployments, including those that involve additional languages, are completed safely and responsibly. Llama 3.2 family of models Token counts refer to pretraining data only. All model versions use Grouped-Query Attention (GQA) for improved inference scalability. Model Release Date: Sept 25, 2024 Status: This is a static model trained on an offline dataset. Future versions may be released that improve model capabilities and safety. License: Use of Llama 3.2 is governed by the Llama 3.2 Community License (a custom, commercial license agreement). Where to send questions or comments about the model Instructions on how to provide feedback or comments on the model can be found in the model README. For more technical information about generation parameters and recipes for how to use Llama 3.1 in applications, please go here. --- ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo Triangle104/Llama-3.2-1B-Instruct-Q5_K_S-GGUF --hf-file llama-3.2-1b-instruct-q5_k_s.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Triangle104/Llama-3.2-1B-Instruct-Q5_K_S-GGUF --hf-file llama-3.2-1b-instruct-q5_k_s.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo Triangle104/Llama-3.2-1B-Instruct-Q5_K_S-GGUF --hf-file llama-3.2-1b-instruct-q5_k_s.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo Triangle104/Llama-3.2-1B-Instruct-Q5_K_S-GGUF --hf-file llama-3.2-1b-instruct-q5_k_s.gguf -c 2048 ```
{"base_model": "unsloth/Llama-3.2-1B-Instruct", "language": ["en"], "library_name": "transformers", "license": "llama3.2", "tags": ["llama-3", "llama", "meta", "facebook", "unsloth", "transformers", "llama-cpp", "gguf-my-repo"]}
task
[ "SUMMARIZATION" ]
41,487
four-two-labs/lynx-micro
four-two-labs
text-generation
[ "transformers", "safetensors", "gemma", "text-generation", "conversational", "sv", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-06-01T21:12:06Z
2024-06-07T14:31:23+00:00
60
4
--- language: - sv - en library_name: transformers --- # Lynx 2B (micro) ![Mascot](assets/lynx_mascot.png) ## Model Details ### Model Description This is the first release of a series of Swedish large language models we call "Lynx". Micro is a small model (2 billion params), but punches way above its weight! Lynx micro is a fine-tune of Google DeepMind Gemma 2B, scores just below GPT-3.5 Turbo on [Scandeval](https://scandeval.com/swedish-nlg/). In fact, the only non OpenAI model (currently) topping the Swedish NLG board on scandeval is a fine-tune of Llama-3 by AI Sweden based on our data recipe. We believe that this is a really capable model (for its size), but keep in mind that it is still a small model and hasn't memorized as much as larger models tend to do. - **Funded, Developed and shared by:** [42 Labs](https://www.42labs.ai) - **Model type:** Auto-regressive transformer - **Language(s) (NLP):** Swedish and English - **License:** Gemma terms of use - **Finetuned from model:** [Gemma 2B, 1.1 instruct](https://huggingface.co/google/gemma-1.1-2b-it) ## How to Get Started with the Model ```python import torch from transformers import pipeline from transformers import TextStreamer from transformers import AutoTokenizer from transformers import AutoModelForCausalLM model_name = 'four-two-labs/lynx-micro' tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained( model_name, device_map='cuda', torch_dtype=torch.bfloat16, use_flash_attention_2=True, # Remove if flash attention isn't available ) pipe = pipeline( 'text-generation', model=model, tokenizer=tokenizer, streamer=TextStreamer(tokenizer=tokenizer) ) messages = [ #{'role': 'user', 'content': 'Lös ekvationen 2x^2-5 = 9'}, #{'role': 'user', 'content': 'Vad är fel med denna mening: "Hej! Jag idag bra mår."'}, #{'role': 'user', 'content': """Översätt till svenska: Hayashi, the Japanese government spokesperson, said Monday that Tokyo is answering the Chinese presence around the islands with vessels of its own.\n\n“We ensure a comprehensive security system for territorial waters by deploying Coast Guard patrol vessels that are consistently superior to other party’s capacity,” Hayashi said.\n\nAny Japanese-Chinese incident in the Senkakus raises the risk of a wider conflict, analysts note, due to Japan’s mutual defense treaty with the United States.\n\nWashington has made clear on numerous occasions that it considers the Senkakus to be covered by the mutual defense pact."""}, #{'role': 'user', 'content': """Vad handlar texten om?\n\nHayashi, the Japanese government spokesperson, said Monday that Tokyo is answering the Chinese presence around the islands with vessels of its own.\n\n“We ensure a comprehensive security system for territorial waters by deploying Coast Guard patrol vessels that are consistently superior to other party’s capacity,” Hayashi said.\n\nAny Japanese-Chinese incident in the Senkakus raises the risk of a wider conflict, analysts note, due to Japan’s mutual defense treaty with the United States.\n\nWashington has made clear on numerous occasions that it considers the Senkakus to be covered by the mutual defense pact."""}, #{'role': 'user', 'content': """Skriv en sci-fi novell som utspelar sig över millenium på en planet runt ett binärt stjärnsystem."""}, {'role': 'user', 'content': 'Hur många helikoptrar kan en människa äta på en gång?'}, ] r = pipe( messages, max_length=4096, do_sample=False, eos_token_id=[tokenizer.vocab['<end_of_turn>'], tokenizer.eos_token_id], ) ``` ## Training Details ### Training Data The model has been trained on a proprietary dataset of ~1.35M examples consisting of * High quality swedish instruct data * Single turn * Multi-turn * High quality swe <-> eng translations ### Training Procedure For training we used hugginface Accelerate and TRL. #### Preprocessing For efficiency, we packed all the examples into 8K context windows, reducing the number examples to ~12% of their original count. #### Training Hyperparameters - **Training regime:** [More Information Needed] ## Evaluation The model has been evaluated on [Scandeval](https://scandeval.com/swedish-nlg/) swedish subset. The result of the individual metrics compared to other top scoring models ![metrics](assets/metrics.png) The mean score of all metrics compared to other models in the Swedish NLG category. ![mean-score](assets/mean_score.png) ## Environmental Impact - **Hardware Type:** 8xH100 - **Hours used:** ~96 GPU hours - **Cloud Provider:** runpod.io - **Compute Region:** Canada - **Carbon Emitted:** Minimal
null
Non_BioNLP
# Lynx 2B (micro) ![Mascot](assets/lynx_mascot.png) ## Model Details ### Model Description This is the first release of a series of Swedish large language models we call "Lynx". Micro is a small model (2 billion params), but punches way above its weight! Lynx micro is a fine-tune of Google DeepMind Gemma 2B, scores just below GPT-3.5 Turbo on [Scandeval](https://scandeval.com/swedish-nlg/). In fact, the only non OpenAI model (currently) topping the Swedish NLG board on scandeval is a fine-tune of Llama-3 by AI Sweden based on our data recipe. We believe that this is a really capable model (for its size), but keep in mind that it is still a small model and hasn't memorized as much as larger models tend to do. - **Funded, Developed and shared by:** [42 Labs](https://www.42labs.ai) - **Model type:** Auto-regressive transformer - **Language(s) (NLP):** Swedish and English - **License:** Gemma terms of use - **Finetuned from model:** [Gemma 2B, 1.1 instruct](https://huggingface.co/google/gemma-1.1-2b-it) ## How to Get Started with the Model ```python import torch from transformers import pipeline from transformers import TextStreamer from transformers import AutoTokenizer from transformers import AutoModelForCausalLM model_name = 'four-two-labs/lynx-micro' tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained( model_name, device_map='cuda', torch_dtype=torch.bfloat16, use_flash_attention_2=True, # Remove if flash attention isn't available ) pipe = pipeline( 'text-generation', model=model, tokenizer=tokenizer, streamer=TextStreamer(tokenizer=tokenizer) ) messages = [ #{'role': 'user', 'content': 'Lös ekvationen 2x^2-5 = 9'}, #{'role': 'user', 'content': 'Vad är fel med denna mening: "Hej! Jag idag bra mår."'}, #{'role': 'user', 'content': """Översätt till svenska: Hayashi, the Japanese government spokesperson, said Monday that Tokyo is answering the Chinese presence around the islands with vessels of its own.\n\n“We ensure a comprehensive security system for territorial waters by deploying Coast Guard patrol vessels that are consistently superior to other party’s capacity,” Hayashi said.\n\nAny Japanese-Chinese incident in the Senkakus raises the risk of a wider conflict, analysts note, due to Japan’s mutual defense treaty with the United States.\n\nWashington has made clear on numerous occasions that it considers the Senkakus to be covered by the mutual defense pact."""}, #{'role': 'user', 'content': """Vad handlar texten om?\n\nHayashi, the Japanese government spokesperson, said Monday that Tokyo is answering the Chinese presence around the islands with vessels of its own.\n\n“We ensure a comprehensive security system for territorial waters by deploying Coast Guard patrol vessels that are consistently superior to other party’s capacity,” Hayashi said.\n\nAny Japanese-Chinese incident in the Senkakus raises the risk of a wider conflict, analysts note, due to Japan’s mutual defense treaty with the United States.\n\nWashington has made clear on numerous occasions that it considers the Senkakus to be covered by the mutual defense pact."""}, #{'role': 'user', 'content': """Skriv en sci-fi novell som utspelar sig över millenium på en planet runt ett binärt stjärnsystem."""}, {'role': 'user', 'content': 'Hur många helikoptrar kan en människa äta på en gång?'}, ] r = pipe( messages, max_length=4096, do_sample=False, eos_token_id=[tokenizer.vocab['<end_of_turn>'], tokenizer.eos_token_id], ) ``` ## Training Details ### Training Data The model has been trained on a proprietary dataset of ~1.35M examples consisting of * High quality swedish instruct data * Single turn * Multi-turn * High quality swe <-> eng translations ### Training Procedure For training we used hugginface Accelerate and TRL. #### Preprocessing For efficiency, we packed all the examples into 8K context windows, reducing the number examples to ~12% of their original count. #### Training Hyperparameters - **Training regime:** [More Information Needed] ## Evaluation The model has been evaluated on [Scandeval](https://scandeval.com/swedish-nlg/) swedish subset. The result of the individual metrics compared to other top scoring models ![metrics](assets/metrics.png) The mean score of all metrics compared to other models in the Swedish NLG category. ![mean-score](assets/mean_score.png) ## Environmental Impact - **Hardware Type:** 8xH100 - **Hours used:** ~96 GPU hours - **Cloud Provider:** runpod.io - **Compute Region:** Canada - **Carbon Emitted:** Minimal
{"language": ["sv", "en"], "library_name": "transformers"}
task
[ "TRANSLATION" ]
41,488
fkr1306/distilbert-base-uncased-finetuned-emotion
fkr1306
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-14T14:39:29Z
2023-10-29T02:58:18+00:00
110
0
--- base_model: distilbert-base-uncased datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.927 name: Accuracy - type: f1 value: 0.9269870944171579 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2192 - Accuracy: 0.927 - F1: 0.9270 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8221 | 1.0 | 250 | 0.3228 | 0.9055 | 0.9042 | | 0.2483 | 2.0 | 500 | 0.2192 | 0.927 | 0.9270 | ### Framework versions - Transformers 4.34.1 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2192 - Accuracy: 0.927 - F1: 0.9270 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8221 | 1.0 | 250 | 0.3228 | 0.9055 | 0.9042 | | 0.2483 | 2.0 | 500 | 0.2192 | 0.927 | 0.9270 | ### Framework versions - Transformers 4.34.1 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
{"base_model": "distilbert-base-uncased", "datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.927, "name": "Accuracy"}, {"type": "f1", "value": 0.9269870944171579, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
41,489